# --- krpc_client.py (janismac/ksp_rtls_launch_to_rendezvous, Apache-2.0) ---
import sys
import subprocess
import time
import json
import krpc
import math
import scipy.integrate
import numpy as np
from PrePlanningChecklist import PrePlanningChecklist
from PlannerUiPanel import PlannerUiPanel
from MainUiPanel import MainUiPanel
from ConfigUiPanel import ConfigUiPanel
from AutopilotUiPanel import AutopilotUiPanel
from predict_orbit_BCBF import predict_orbit_BCBF
def main():
conn = krpc.connect()
ui_config = ConfigUiPanel(conn)
get_config_callback = lambda: ui_config.config
pre_planning_checklist = PrePlanningChecklist(conn, get_config_callback)
get_checklist_callback = lambda: pre_planning_checklist.get_checklist()
ui_planner = PlannerUiPanel(conn, get_checklist_callback, ui_config)
ui_autopilot = AutopilotUiPanel(conn)
ui_main = MainUiPanel(conn, ui_config, ui_planner, ui_autopilot)
while True:
ui_main.update()
if ui_main.do_restart:
conn.close()
time.sleep(1.0)
return
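
# Supervisor loop (comment added): restart main() whenever the kRPC
# connection drops with an RPCError, waiting 4 s between attempts.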
while True:
try:
main()
#time.sleep(2.0)
except krpc.error.RPCError:
time.sleep(4.0)
#except ValueError:
# time.sleep(4.0)

# --- src/alerter.py (Jawgo/DiscordBot, MIT) ---
import os
from discord import Webhook, RequestsWebhookAdapter, Colour, Embed
def send_alert(item):
hook = os.environ.get("WEB_HOOK")
webhook = Webhook.from_url(hook, adapter=RequestsWebhookAdapter())
embedVar = Embed(title="Stock Hunter")
if item.in_stock:
embedVar.description = "{} **IN STOCK** at [{}]({})".format(item.item_name, item.domain, item.url)
embedVar.colour = Colour.green()
else:
embedVar.description = "{} **out of stock** at [{}]({})".format(item.item_name, item.domain, item.url)
embedVar.colour = Colour.red()
webhook.send(embed=embedVar)
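
# Usage sketch (added, not part of the original module): send_alert() only
# needs an object exposing item_name, domain, url and in_stock; the
# namedtuple below is a hypothetical stand-in, and WEB_HOOK must point to a
# real Discord webhook in the environment.
if __name__ == "__main__":
    from collections import namedtuple
    Item = namedtuple("Item", "item_name domain url in_stock")
    send_alert(Item("RTX 3080", "example.com", "https://example.com/rtx-3080", True))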

# --- dataset_preproc/preproc_video/face_extract.py (RicardoP0/multimodal-matchmap, MIT) ---
#%%
#https://github.com/timesler/facenet-pytorch
from facenet_pytorch import MTCNN, extract_face
import torch
import numpy as np
import mmcv, cv2
import os
import matplotlib.pyplot as plt
from PIL import Image
# %%
#%%
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
print(os.getcwd())
mtcnn = MTCNN(keep_all=True, device=device,image_size=100)
video_dir = "VIDEO_FILES/"
dest_path = 'VIDEO_PROCESSED/'
dir_list = os.listdir(video_dir)
dir_list.sort()
if not os.path.exists(dest_path):
os.makedirs(dest_path)
#%%
# %%
#iemocap
k = 1 #session to process
video_dir = "IEMOCAP_full_release.tar/IEMOCAP_full_release/Session{}/dialog/avi/DivX".format(k)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] =='S']
i=0
#%%
dir_list
path = 'datasets/IEMOCAP/CLIPPED_VIDEOS/' + 'Session{}/'.format(k)
if not os.path.exists(path):
os.makedirs(path)
dir_list
#%%
#divide each video and manually crop around face
video_dir = "IEMOCAP_full_release.tar/IEMOCAP_full_release/Session{}/dialog/avi/DivX".format(k)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] =='S']
path = 'IEMOCAP/CLIPPED_VIDEOS/' + 'Session{}/'.format(k)
if not os.path.exists(path):
os.makedirs(path)
for file_name in dir_list:
print(file_name)
video = mmcv.VideoReader(video_dir + '/'+file_name)
if 'F_' in file_name:
new_file_left = path + file_name[:-4] + '_F.avi'
new_file_right = path +file_name[:-4] + '_M.avi'
else:
new_file_left = path +file_name[:-4] + '_M.avi'
new_file_right = path + file_name[:-4] + '_F.avi'
h,w,c = video[0].shape
dim = (300,280)
fourcc = cv2.VideoWriter_fourcc(*'FMP4')
#left
video_tracked = cv2.VideoWriter(new_file_left, fourcc, 25.0, dim)
i=0
for frame in video:
h,w,c = frame.shape
#left
#different boxes for each session
#box (left, upper, right, lower)-tuple
#ses1 [120:int(h- 690),120:int(w/2.4)]
#ses2 [150:int(h - 660),120:int(w/2.4)]
#ses5 [120:int(h - 690),120:int(w/2.4)]
#[130:int(h/2.18),120:int(w/2.4)]
video_tracked.write(frame[100:h-100,:300])
video_tracked.release()
del video_tracked
print(h,w,c)
dim = (370,280)
# #right
video_tracked = cv2.VideoWriter(new_file_right, fourcc, 25.0, dim)
for frame in video:
h,w,c = frame.shape
#right
#ses1 [150:int(h - 660),int(w/1.5):int(w-60)]
#ses2 [150:int(h - 660),int(w/1.5):int(w-60)]
#ses5 [150:int(h - 660),int(w/1.5):int(w-60)]
video_tracked.write(frame[100:h-100,350:])
video_tracked.release()
del video, video_tracked
#%%
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
print(os.getcwd())
mtcnn = MTCNN(keep_all=True, device=device,image_size=2000,margin=5)
i = 1
video_dir = "../../../../datasets/IEMOCAP/CLIPPED_VIDEOS/Session{}/".format(i)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] =='S']
dir_list
#%%
file_list = dir_list
path = '../datasets/IEMOCAP/FACE_VIDEOS/Session{}/'.format(i)
if not os.path.exists(path):
os.makedirs(path)
#%%
#%%
#track using mtcnn
for file_name in file_list:
video = mmcv.VideoReader(video_dir + file_name)
frames = [Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in video]
frames_tracked = []
for x, frame in enumerate(frames):
#print('\rTracking frame: {}'.format(i + 1), end='')
# Detect faces
boxes, _ = mtcnn.detect(frame)
        if boxes is not None:
# print(boxes[0])
im_array = extract_face(frame, boxes[0],image_size=112,margin=50)
#im_array = im_array.permute(1,2,0)
            img = im_array  # Image.fromarray(np.uint8(im_array.numpy()))
# Add to frame list
frames_tracked.append(img)
        elif frames_tracked:
            # no face in this frame: repeat the previous cropped face
            # (the original appended an unbound `img` here, which crashed
            # when the very first frame had no detection)
            frames_tracked.append(frames_tracked[-1])
dim = frames_tracked[0].size
print(len(frames),len(frames_tracked))
new_file = path + '/' + file_name
print(new_file)
fourcc = cv2.VideoWriter_fourcc(*'FMP4')
video_tracked = cv2.VideoWriter(new_file, fourcc, 25.0, dim)
for frame in frames_tracked:
video_tracked.write(cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))
video_tracked.release()
del video, video_tracked, frames_tracked, frames

# --- launch/gazebo.launch.py (fly4future/fog_gazebo_resources, BSD-3-Clause) ---
"""Launch Gazebo server and client with command line arguments."""
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.actions import ExecuteProcess
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
return LaunchDescription([
DeclareLaunchArgument('gui', default_value='true',
description='Set to "false" to run gazebo headless.'),
DeclareLaunchArgument('world', default_value='',
description='Specify gazebo world file name in gazebo_package'),
IncludeLaunchDescription(
PythonLaunchDescriptionSource([get_package_share_directory("gazebo_ros"), '/launch/gzserver.launch.py']),
launch_arguments = {
'server_required': 'false',
'verbose': 'true',
'world': LaunchConfiguration('world'),
}.items()
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource([get_package_share_directory("gazebo_ros"), '/launch/gzclient.launch.py']),
condition=IfCondition(LaunchConfiguration('gui')),
),
])
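
# Usage sketch (added): this launch file is invoked through the ros2 CLI,
# e.g. (the package name is a placeholder for wherever it is installed):
#   ros2 launch <package> gazebo.launch.py gui:=false world:=my_world.world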

# --- hlrl/torch/utils/contexts/training.py (Chainso/HLRL, MIT) ---
from contextlib import contextmanager
import torch.nn as nn
@contextmanager
def evaluate(module: nn.Module):
"""
A context manager for evaluating the module.
Args:
module: The module to switch to evaluating in the context.
Returns:
A generator for the context of the module.
"""
training = module.training
try:
module.eval()
yield module
finally:
# Switch batch to training if needed
if training:
module.train()
@contextmanager
def training(module: nn.Module):
"""
A context manager for training the module.
Args:
module: The module to switch to training in the context.
Returns:
A generator for the context of the module.
"""
training = module.training
try:
module.train()
yield module
finally:
# Switch batch to training if needed
if not training:
module.eval()
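
# Usage sketch (added, not part of the original module): evaluate() flips a
# module to eval mode and restores its previous mode on exit; runs only when
# this file is executed directly.
if __name__ == "__main__":
    net = nn.Linear(4, 2)          # any nn.Module works here
    net.train()
    with evaluate(net):
        assert not net.training    # eval mode inside the context
    assert net.training            # original training mode restored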

# --- test/test_postfix.py (JoseTomasTocino/toptal-calculator, Unlicense) ---
import unittest
from calculator import tokens, evaluator
from calculator.parser import tokenize, infix_to_postfix
class MyTestPostfixCase(unittest.TestCase):
def test_simple_operator(self):
expression = "2 + 1"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.PlusOperatorToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_multiple_operators(self):
expression = "2 + 1 * 5"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.OperandToken(5),
tokens.ProductOperatorToken(),
tokens.PlusOperatorToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_multiple_operators_reversed(self):
expression = "2 * 1 + 5"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.ProductOperatorToken(),
tokens.OperandToken(5),
tokens.PlusOperatorToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_parenthesis(self):
expression = "2 * (1 + 5)"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(2),
tokens.OperandToken(1),
tokens.OperandToken(5),
tokens.PlusOperatorToken(),
tokens.ProductOperatorToken()
]
self.assertListEqual(postfix_token_list, token_list)
def test_missing_left_parenthesis(self):
expression = "2 * 2) + 1 + 5"
computed_token_list = tokenize(expression)
with self.assertRaises(RuntimeError):
postfix_token_list = infix_to_postfix(computed_token_list)
def test_missing_right_parenthesis(self):
expression = "2 * (1 + 5"
computed_token_list = tokenize(expression)
with self.assertRaises(RuntimeError):
postfix_token_list = infix_to_postfix(computed_token_list)
def test_simple_function(self):
expression = "sin 5"
computed_token_list = tokenize(expression)
postfix_token_list = infix_to_postfix(computed_token_list)
token_list = [
tokens.OperandToken(5),
tokens.SinFunctionToken(),
]
self.assertListEqual(postfix_token_list, token_list)
def test_equation_in_postfix_not_allowed(self):
with self.assertRaises(RuntimeError):
evaluator.evaluate('(5 + 2)', True)
with self.assertRaises(RuntimeError):
evaluator.evaluate('x + 1', True)
with self.assertRaises(RuntimeError):
evaluator.evaluate('x = 5', True)
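
# Added for convenience: lets this test module be run directly.
if __name__ == "__main__":
    unittest.main()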

# --- Weather API/app.py (TanushreeShaw/Weather, MIT) ---
from flask import Flask, render_template, request
import requests
import json
import os
app = Flask(__name__)
picfolder = os.path.join('static','pics')
app.config['UPLOAD_FOLDER'] = picfolder
@app.route('/temperature', methods=['POST'])
def temperature():
pic1 = os.path.join(app.config['UPLOAD_FOLDER'], "weather1.jpg")
zipcode = request.form['zip']
r = requests.get('http://api.openweathermap.org/data/2.5/weather?zip='+zipcode+',us&appid=a802a031af600ff7a4811e9cd20fee1d')
json_object = r.json()
temp_k = float(json_object['main']['temp'])
temp_f = (temp_k - 273.15) * 1.8 + 32
pressure = float(json_object['main']['pressure'])
humidity = float(json_object['main']['humidity'])
latitude = json_object['coord']['lat']
longitude = json_object['coord']['lon']
return render_template('temperature.html', image = pic1, latitude=latitude, longitude=longitude, temp=temp_f, pressure=pressure, humidity=humidity)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
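
# Usage sketch (added): with the server running, the index page posts a ZIP
# code to /temperature; the equivalent command-line request would be
#   curl -X POST -d "zip=10001" http://localhost:5000/temperature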

# --- core/urls.py (cybernetisk/internsystem, MIT) ---
from django.conf.urls import url
from core.views import me
from core.rest import CardViewSet, UserViewSet, NfcCardViewSet, GroupViewSet
from core.utils import SharedAPIRootRouter
# SharedAPIRootRouter is automatically imported in global urls config
router = SharedAPIRootRouter()
router.register(r"core/users", UserViewSet, basename="users")
router.register(r"core/cards", CardViewSet, basename="voucher_cards")
router.register(r"core/nfc", NfcCardViewSet)
router.register(r"core/groups", GroupViewSet)
urlpatterns = [
url(r"^api/me$", me, name="me"),
]

# --- flora_tools/experiments/measure_time_irq_process.py (Atokulus/flora-tools, MIT) ---
from flora_tools.experiment import *
class MeasureTimeIRQProcess(Experiment):
def __init__(self):
description = "Measures the time needed for an IRQ to be processed."
Experiment.__init__(self, description)
def run(self, bench, iterations=10000):
self.iterations = iterations
Experiment.run(self, bench)
columns = ['time', 'window', 'precision', 'modulation', 'band', 'react', 'finish']
df = pd.DataFrame(columns=columns)
df.index.name = 'sample'
for i in range(0, self.iterations):
configuration = RadioConfiguration.get_random_configuration(tx=False, irq_direct=True)
self.bench.devkit_a.cmd(configuration.cmd)
math = RadioMath(configuration)
min_window = 0.0001
min_precision = 5E-6
window, points, precision = self.bench.scope.get_next_valid_window(min_window, min_precision)
time.sleep(0.01)
self.bench.scope.init_measurement(window, trigger_rise=True, trigger_channel="DIO1", points=points)
self.bench.scope.delay_acquisition_setup_time(window=window)
self.bench.devkit_a.cmd("radio send")
wave = self.bench.scope.finish_measurement(channels=[1, 2])
if wave is not None:
nss_indices = utilities.get_edges(wave[0])
dio1_indices = utilities.get_edges(wave[1])
if 3 < len(nss_indices) < 100:
nss_react = nss_indices[0][0]
nss_finish = nss_indices[3][0]
else:
nss_react = np.nan
nss_finish = np.nan
if 1 < len(dio1_indices) < 100:
dio1_rise = dio1_indices[0][0]
delay_react = (nss_react - dio1_rise) * self.bench.scope.sample_period
delay_finish = (nss_finish - dio1_rise) * self.bench.scope.sample_period
else:
delay_react = np.nan
delay_finish = np.nan
item = [dt.datetime.now(), window, self.bench.scope.sample_period, configuration.modulation,
configuration.band, delay_react, delay_finish]
else:
item = [dt.datetime.now(), window, self.bench.scope.sample_period, configuration.modulation,
configuration.band, np.nan, np.nan]
df.loc[i] = item
print(item)
df.to_csv("{}.csv".format(self.name))
def analyze(self, df: pd.DataFrame):
df.dropna()
delay_react = df.react
delay_finish = df.finish
columns = ['delay_react', 'delay_react_err', 'delay_finish', 'delay_finish_err']
timings = pd.DataFrame(columns=columns)
timings.loc[0] = [delay_react.mean(), delay_react.std(), delay_finish.mean(), delay_finish.std()]
return timings
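
# Usage sketch (added): assuming a configured flora_tools bench object, a
# run would look roughly like
#   e = MeasureTimeIRQProcess()
#   e.run(bench, iterations=100)
#   timings = e.analyze(pd.read_csv("{}.csv".format(e.name)))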

# --- 6P/REDES/restAPI/main/serializers.py (rwnicholas/fluffy-potato, MIT) ---
from rest_framework import serializers
from main.models import Suco
class SucoSerializer(serializers.ModelSerializer):
class Meta:
model = Suco
fields = ('nome', 'litros', 'link', 'qtd_disp')
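
# Usage sketch (added): serializing an existing Suco instance, e.g.
#   suco = Suco.objects.first()
#   SucoSerializer(suco).data
#   # -> {'nome': ..., 'litros': ..., 'link': ..., 'qtd_disp': ...}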

# --- gpytorch/kernels/keops/__init__.py (wjmaddox/gpytorch, MIT) ---
from .matern_kernel import MaternKernel
from .rbf_kernel import RBFKernel
from .spectralgp_kernel import SpectralGPKernel
__all__ = ["MaternKernel", "RBFKernel", "SpectralGPKernel"]

# --- bc/recruitment/migrations/0018_merge_20200324_1630.py (Buckinghamshire-Digital-Service/buckinghamshire-council, BSD-3-Clause) ---
# Generated by Django 2.2.10 on 2020-03-24 16:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("recruitment", "0017_merge_20200318_1104"),
("recruitment", "0013_image_block"),
]
operations = []

# --- main.py (SHGoldfarb/fantastic-barnacle, MIT) ---
import requests
import os
from datetime import datetime
import pandas as pd
def ensure_folder_exists(foldername):
try:
# Create tmp folder
os.mkdir(foldername)
print("Directory created: " + foldername)
except FileExistsError:
pass
def download_and_save(url, filename):
print("Downloading " + url)
response = requests.get(url)
with open(filename, 'wb') as file:
for chunk in response.iter_content(chunk_size=128):
file.write(chunk)
def file_exists(filename):
return os.path.isfile(filename)
def get_data():
tmp_folder_name = "tmp"
ensure_folder_exists(tmp_folder_name)
active_cases_url = "https://raw.githubusercontent.com/MinCiencia/\
Datos-COVID19/master/output/producto19/CasosActivosPorComuna.csv"
phases_url = "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/\
master/output/producto74/paso_a_paso.csv"
todays_date_string = str(datetime.date(datetime.now()))
active_cases_file_name = "active_cases_{}.csv".format(todays_date_string)
phases_file_name = "phases_{}.csv".format(todays_date_string)
active_cases_file_path = os.path.join(
tmp_folder_name, active_cases_file_name)
phases_file_path = os.path.join(tmp_folder_name, phases_file_name)
if not (file_exists(active_cases_file_path)):
download_and_save(active_cases_url, active_cases_file_path)
if not (file_exists(phases_file_path)):
download_and_save(phases_url, phases_file_path)
# Load data
cases = pd.read_csv(active_cases_file_path)
phases = pd.read_csv(phases_file_path)
return (cases, phases)
def process_and_merge(cases, phases):
# counties = {}
pass
def main():
# Fetch
cases, phases = get_data()
# Process
data = process_and_merge(cases, phases)
# Plot
print(data)
if __name__ == "__main__":
main()

# --- dump/yoloCarAccident/generate.py (lovishchopra/ITRI-Car-Accident, MIT) ---
import os
import yoloCarAccident as yc
# yc.find('test.txt')
f1 = open('result2.txt','r')
i = 0
s = ""
for lines in f1:
if(i<80000):
s += lines
i+=1
else:
f2 = open('test.txt','w')
f2.write(s)
f2.close()
try:
yc.find('test.txt')
except ValueError:
pass
s = ""
i = 0
# break
# Flush the final partial chunk (previously commented out above)
if s:
	f2 = open('test.txt', 'w')
	f2.write(s)
	f2.close()
	yc.find('test.txt')

# --- socketserver_extra.py (sim642/pyqwebirc, MIT) ---
import socketserver
import socket
class TextStreamRequestHandler(socketserver.BaseRequestHandler):
"""Define textual self.rfile and self.wfile for stream sockets."""
# Default buffer sizes for rfile, wfile.
# We default rfile to buffered because otherwise it could be
# really slow for large data (a getc() call per byte); we make
# wfile unbuffered because (a) often after a write() we want to
# read and we need to flush the line; (b) big writes to unbuffered
# files are typically optimized by stdio even when big reads
# aren't.
rbufsize = -1
wbufsize = -1
# A timeout to apply to the request socket, if not None.
timeout = None
# Disable nagle algorithm for this socket, if True.
# Use only when wbufsize != 0, to avoid small packets.
disable_nagle_algorithm = False
rnewline = None
wnewline = None
def setup(self):
self.connection = self.request
if self.timeout is not None:
self.connection.settimeout(self.timeout)
if self.disable_nagle_algorithm:
self.connection.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, True)
self.rfile = self.connection.makefile('r', self.rbufsize, newline=self.rnewline)
self.wfile = self.connection.makefile('w', self.wbufsize, newline=self.wnewline)
def finish(self):
if not self.wfile.closed:
try:
self.wfile.flush()
except socket.error:
# An final socket error may have occurred here, such as
# the local error ECONNABORTED.
pass
self.wfile.close()
self.rfile.close()
class ReusingMixIn:
allow_reuse_address = True
class ReusingThreadingTCPServer(ReusingMixIn, socketserver.ThreadingTCPServer):
pass
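
# Usage sketch (added, not part of the original module): a minimal line-echo
# server built from the classes above; the address and port are arbitrary.
if __name__ == "__main__":
    class EchoHandler(TextStreamRequestHandler):
        def handle(self):
            for line in self.rfile:
                self.wfile.write(line)
                self.wfile.flush()  # push each echoed line immediately

    with ReusingThreadingTCPServer(("127.0.0.1", 9999), EchoHandler) as server:
        server.serve_forever()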

# --- train_n_test/train_decoder.py (kamieen03/style-transfer-net, BSD-2-Clause) ---
#!/usr/bin/env python3
import os, sys
sys.path.append(os.path.abspath(__file__ + "/../../")) # just so we can use 'libs'
import torch.utils.data
import torch.optim as optim
from torch import nn
import numpy as np
import torch
from libs.Loader import Dataset
from libs.shufflenetv2 import ShuffleNetV2AutoEncoder
BATCH_SIZE = 32
CROP_SIZE = 400
ENCODER_SAVE_PATH = f'models/regular/shufflenetv2_x1_encoder.pth'
DECODER_SAVE_PATH = f'models/regular/shufflenetv2_x1_decoder.pth'
EPOCHS = 20
class Trainer(object):
def __init__(self):
datapath = '../data/'
# set up datasets
self.train_set = self.load_dataset(datapath+'mscoco/train/')
self.valid_set = self.load_dataset(datapath+'mscoco/validate/')
# set up model
self.model = ShuffleNetV2AutoEncoder().cuda()
# load encoder
#self.model.encoder.eval()
#for param in self.model.encoder.parameters():
# param.requires_grad = False
# load decoder
try:
self.model.decoder.load_state_dict(torch.load(DECODER_SAVE_PATH))
self.model.encoder.load_state_dict(torch.load(ENCODER_SAVE_PATH))
except:
print("Decoder weights not found. Proceeding with new ones...")
self.model.train()
self.criterion = nn.MSELoss()
self.optimizer = optim.Adam(self.model.parameters(), lr=1e-4)
def load_dataset(self, path):
"""Load the datasets"""
dataset = Dataset(path, CROP_SIZE)
loader = torch.utils.data.DataLoader(dataset = dataset,
batch_size = BATCH_SIZE,
shuffle = True,
num_workers = 8,
drop_last = True)
return loader
def train(self):
best_val = 1e9
flag = False
with open('shufflenetv2_log.txt', 'w+') as f:
for epoch in range(1, EPOCHS+1): # count from one
#if epoch == 2:
# for g in self.optimizer.param_groups:
# g['lr'] = 1e-3
#if epoch == 4:
# for g in self.optimizer.param_groups:
# g['lr'] = 1e-4
self.train_single_epoch(epoch, f)
val = self.validate_single_epoch(epoch, f)
if val < best_val:
best_val = val
torch.save(self.model.decoder.state_dict(), DECODER_SAVE_PATH)
torch.save(self.model.encoder.state_dict(), ENCODER_SAVE_PATH)
#if val < 0.01 and not flag:
# flag = True
# for g in self.optimizer.param_groups:
# g['lr'] = 1e-5
def train_single_epoch(self, epoch, f):
batch_num = len(self.train_set) # number of batches in training epoch
self.model.train()
for batch_i, content in enumerate(self.train_set):
content = content[0].cuda()
self.optimizer.zero_grad()
out = self.model(content)
loss = self.criterion(out, content)
loss.backward()
self.optimizer.step()
print(f'Train Epoch: [{epoch}/{EPOCHS}] ' +
f'Batch: [{batch_i+1}/{batch_num}] ' +
f'Loss: {loss:.6f}')
f.write(f'Train Epoch: [{epoch}/{EPOCHS}] ' +
f'Batch: [{batch_i+1}/{batch_num}] ' +
f'Loss: {loss:.6f}\n')
def validate_single_epoch(self, epoch, f):
batch_num = len(self.valid_set) # number of batches in training epoch
self.model.eval()
losses = []
with torch.no_grad():
for batch_i, content in enumerate(self.valid_set):
content = content[0].cuda()
out = self.model(content)
loss = self.criterion(content, out)
losses.append(loss.item())
print(f'Validate Epoch: [{epoch}/{EPOCHS}] ' +
f'Batch: [{batch_i+1}/{batch_num}] ' +
f'Loss: {loss:.6f}')
f.write(f'Validate Epoch: [{epoch}/{EPOCHS}] ' +
f'Batch: [{batch_i+1}/{batch_num}] ' +
f'Loss: {loss:.6f}\n')
f.write(f'Mean: {np.mean(np.array(losses))}\n')
return np.mean(np.array(losses))
def main():
c = Trainer()
c.train()
if __name__ == '__main__':
main()

# --- utils/__init__.py (bitst0rm-st3/AutomaticPackageReloader, MIT) ---
from .progress_bar import ProgressBar
from .read_config import read_config

# --- src/main/create/c_chains_user_json.py (WikiCommunityHealth/wikimedia-revert, MIT) ---
#%%
# PAGE EXAMPLE
# {'title': 'Zuppa_di_pesce_(film)',
# 'chains': [{'revisions': ['95861493', '95861612', '95973728'],
# 'users': {'93.44.99.33': '', 'Kirk39': '63558', 'AttoBot': '482488'},
# 'len': 3,
# 'start': '2018-04-01 04:54:40.0',
# 'end': '2018-04-05 07:36:26.0'}],
# 'n_chains': 1,
# 'n_reverts': 3,
# 'mean': 3.0,
# 'longest': 3,
# 'M': 0,
# 'lunghezze': {'3': 1}}
import json
from datetime import datetime
import numpy as np
import pandas as pd
import os
import shutil
from utils import utils
import sys
language = sys.argv[1]
dataset_folder = f'/home/gandelli/dev/data/{language}/chains/page/'
output = f'/home/gandelli/dev/data/{language}/chains/user/'
#%% get users from the json page
def get_users():
users = {}
    n_files = 10  # number of files in the wars folder
    for i in range(n_files):
dump_in = open(f"{dataset_folder}wars_{i}.json")
line = dump_in.readline()
while(line != ''):
line = dump_in.readline()
            if line == '{}]' or line == '' or line == '{}]{}]':
continue
try:
page = json.loads(line[:-2])
except:
print(line[:-2])
for chain in page['chains']:
for user in chain['users']:
users.setdefault(user, []).append(chain)
return users
# input a dict of users with the chains joined
def compute_users(users):
i = 0
for user,chains in users.items():
name = user
total_reverts = 0
longest = 0
lunghezze = np.zeros(200)
g , involved = utils.getG(chains)
for chain in chains:
total_reverts += chain['len']
longest = max(longest, chain['len'])
lunghezze[chain['len']] +=1
save_user(name, chains, total_reverts, longest, g, lunghezze, i)
i+=1
finish_files()
def save_user(name, chains, total_reverts, longest, g, lunghezze, n):
mean = round(total_reverts/len(chains), 1)
lun = {}
n_files = 10
path = f"{output}wars_{ n % n_files}.json"
dump_out = open(path, 'a')
filesize = os.path.getsize(path)
for i in range(1,len(lunghezze)):
if(lunghezze[i] > 0):
lun[i] = int(lunghezze[i])
if filesize == 0:
dump_out.write('[')
dump_out.write(json.dumps({'user': name, 'chains': chains,'n_chains' : len(chains),'n_reverts': total_reverts,'mean': mean, 'longest': longest, 'G' : g , 'lunghezze': lun})+',\n')
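# Shape of one record written above (comment added), as given by the
# json.dumps call: {"user": ..., "chains": [...], "n_chains": int,
# "n_reverts": int, "mean": float, "longest": int, "G": ..., "lunghezze": {len: count}}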
def finish_files():
for filename in os.listdir(output):
dump_out = open(output+filename, 'a')
# andrebbe cancellata la virgola, uso questo trick per farlo sintatticamente corretto
dump_out.write('{}]')
#%%
shutil.rmtree(output, ignore_errors=True)  # tolerate a missing folder on the first run
os.mkdir(output)
users = get_users()
compute_users(users)
# %%

# --- src/carts/views.py (dhaval6552/ecommerce-2, MIT) ---
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin,DetailView
from django.shortcuts import render,get_object_or_404,redirect
from django.http import HttpResponseRedirect,Http404,JsonResponse
from django.views.generic.edit import FormMixin
from orders.forms import GuestCheckoutForm
from products.models import Variation
from carts.models import Cart,CartItem
# Create your views here.
class ItemCountView(View):
def get(self,request,*args,**kwargs):
        if request.is_ajax():
cart_id = self.request.session.get("cart_id")
if cart_id==None:
count=0
else:
cart=Cart.objects.get(id=cart_id)
count=cart.items.count()
request.session["cart_item_count"]=count
return JsonResponse({"count":count})
else:
raise Http404
class CartView(SingleObjectMixin,View):
model = Cart
template_name="carts/view.html"
def get_object(self, *args,**kwargs):
self.request.session.set_expiry(0)
cart_id = self.request.session.get("cart_id")
if cart_id == None:
cart = Cart()
cart.tax_percentage=0.075
cart.save()
cart_id = cart.id
self.request.session["cart_id"] = cart.id
cart = Cart.objects.get(id=cart_id)
if self.request.user.is_authenticated():
cart.user = self.request.user
cart.save()
return cart
def get(self,request, *args,**kwargs):
cart=self.get_object()
item_id= request.GET.get("item")
delete_item=request.GET.get("delete",False)
flash_message=""
item_added = False
if item_id:
item_instance=get_object_or_404(Variation,id=item_id)
qty= request.GET.get("qty",1)
try:
if int(qty)<1:
delete_item=True
except:
raise Http404
cart_item , created=CartItem.objects.get_or_create(cart=cart,item=item_instance)
if created:
flash_message="Successfully added to the cart"
item_added=True
if delete_item:
flash_message="Item removed Successfully"
cart_item.delete()
else:
if not created:
flash_message="Quantity updated successfully"
cart_item.quantity=qty
cart_item.save()
if not request.is_ajax():
return HttpResponseRedirect(reverse('cart'))
if request.is_ajax():
try:
total= cart_item.line_item_total
except:
total=None
try:
subtotal= cart_item.cart.subtotal
except:
subtotal=None
try:
cart_total= cart_item.cart.total
except:
cart_total=None
try:
tax_total= cart_item.cart.tax_total
except:
tax_total=None
try:
total_item= cart_item.cart.items.count()
except:
total_item=0
data={
"deleted":delete_item ,
"item-added": item_added,
"line_total":total,
"subtotal":subtotal,
"tax_total":tax_total,
"cart_total":cart_total,
"flash_message":flash_message,
"total_item":total_item,
}
return JsonResponse(data)
context={
"object":self.get_object()
}
template=self.template_name
return render(request,template,context)
class CheckoutView(FormMixin,DetailView):
model = Cart
template_name = "carts/checkout_view.html"
form_class = GuestCheckoutForm
def get_object(self, *args,**kwargs):
cart_id = self.request.session.get("cart_id")
if cart_id == None:
return redirect("cart")
cart = Cart.objects.get(id=cart_id)
return cart
def get_context_data(self, *args, **kwargs):
context=super(CheckoutView,self).get_context_data(*args,**kwargs)
user_can_continue=False
if not self.request.user.is_authenticated():
context["login_form"]=AuthenticationForm()
context["next_url"] = self.request.build_absolute_uri()
if self.request.user.is_authenticated():
user_can_continue=True
context["user_can_continue"]=user_can_continue
context["form"]=self.get_form()
return context
def post(self,request,*args,**kwargs):
form=self.get_form()
if form.is_valid():
            print(form.cleaned_data.get("email"))
return self.form_valid(form)
else:
return self.form_invalid(form)
def get_success_url(self):
return reverse("checkout") | 33.807947 | 92 | 0.582174 | 4,563 | 0.89383 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.081489 |
de71e1c800cd0628725b2dd49b907881044e1b6d | 721 | py | Python | Python/PythonCgiMock03/src/maincgi/test/TestCgiMainXml.py | tduoth/JsObjects | eb3e2a8b1f47d0da53c8b1a85a7949269711932f | [
"MIT"
]
| 22 | 2015-02-26T09:07:18.000Z | 2020-05-10T16:22:05.000Z | Python/PythonCgiMock03/src/maincgi/test/TestCgiMainXml.py | tduoth/JsObjects | eb3e2a8b1f47d0da53c8b1a85a7949269711932f | [
"MIT"
]
| 123 | 2016-04-05T18:32:41.000Z | 2022-03-13T21:09:21.000Z | Python/PythonCgiMock03/src/maincgi/test/TestCgiMainXml.py | tduoth/JsObjects | eb3e2a8b1f47d0da53c8b1a85a7949269711932f | [
"MIT"
]
| 56 | 2015-03-19T22:26:37.000Z | 2021-12-06T02:52:02.000Z | #!/usr/bin/python
'''
Created on May 23, 2012
@author: Charlie
'''
import unittest
from mock import patch
import xml.etree.ElementTree as ET
from TestCgiMainBase import TestCgiMainBase
@patch('cgi.FieldStorage')
class TestCgiMainXml(TestCgiMainBase):
def tearDown(self):
pass
def testName(self, MockClass):
self.setUpMock(MockClass)
xml = self.xmlExercise.valueOfRainCore()
tree = ET.XML(xml)
key = tree.find('has_rain1')
test = key.text
self.assertEqual(test, 'True')
self.assertNotEqual(test, 'False')
#self.assertEqual(len(items), 4, 'expected four items in list')
if __name__ == "__main__":
    unittest.main()

# --- pysim/epcstd.py (larioandr/thesis-rfidsim, MIT) ---
from enum import Enum
import random
import collections.abc
import numpy as np
#
#######################################################################
# Data Types
#######################################################################
#
class DivideRatio(Enum):
DR_8 = ('0', 8.0, '8')
DR_643 = ('1', 64.0/3, '64/3')
# noinspection PyInitNewSignature
def __init__(self, code, value, string):
self._code = code
self._value = value
self._string = string
@property
def code(self):
return self._code
def eval(self):
return self._value
def __str__(self):
return self._string
class _Session(object):
def __init__(self, code, index, string):
self._code = code
self._index = index
self._string = string
@property
def code(self): return self._code
@property
def index(self): return self._index
@property
def string(self): return self._string
def power_on_value(self, interval, persistence, stored_value):
stored_value = stored_value if stored_value is not None \
else InventoryFlag.A
return InventoryFlag.A if interval > persistence else stored_value
def __str__(self):
return self._string
class _Session0(_Session):
def __init__(self):
super().__init__('00', 0, 'S0')
def power_on_value(self, interval, persistence, stored_value):
return InventoryFlag.A
class Session(Enum):
S0 = _Session0()
S1 = _Session('01', 1, 'S1')
S2 = _Session('10', 2, 'S2')
S3 = _Session('11', 3, 'S3')
# noinspection PyInitNewSignature
def __init__(self, session_obj):
assert isinstance(session_obj, _Session)
self.__session__ = session_obj
@property
def code(self):
return self.__session__.code
@property
def index(self):
return self.__session__.index
@property
def string(self):
return self.__session__.string
def power_on_value(self, interval, persistence, stored_value):
return self.__session__.power_on_value(
interval, persistence, stored_value)
def __str__(self):
return self.__session__.__str__()
class TagEncoding(Enum):
FM0 = ('00', 1, "FM0")
M2 = ('01', 2, "M2")
M4 = ('10', 4, "M4")
M8 = ('11', 8, "M8")
# noinspection PyInitNewSignature
def __init__(self, code, symbols_per_bit, string):
self._code = code
self._symbols_per_bit = symbols_per_bit
self._string = string
@property
def code(self):
return self._code
@property
def symbols_per_bit(self):
return self._symbols_per_bit
def __str__(self):
return self._string
@staticmethod
def get(m):
if m == 1:
return TagEncoding.FM0
elif m == 2:
return TagEncoding.M2
elif m == 4:
return TagEncoding.M4
elif m == 8:
return TagEncoding.M8
else:
raise ValueError("m must be 1,2,4 or 8, but {} found".format(m))
class _InvFlag(object):
def __init__(self, value, name, code):
self._value, self._name, self._code = value, name, code
@property
def value(self): return self._value
@property
def code(self): return self._code
@property
def name(self): return self._name
def invert(self): raise NotImplementedError
def __str__(self): return self._name
class _InvFlagA(_InvFlag):
def __init__(self): super().__init__(0, 'A', '0')
def invert(self): return InventoryFlag.B
class _InvFlagB(_InvFlag):
def __init__(self): super().__init__(1, 'B', '1')
def invert(self): return InventoryFlag.A
class InventoryFlag(Enum):
A = _InvFlagA()
B = _InvFlagB()
# noinspection PyInitNewSignature
def __init__(self, obj):
self._obj = obj
def __getattr__(self, item):
if item in {'value', 'name', 'invert', 'code'}:
return getattr(self._obj, item)
else:
raise AttributeError
def __str__(self):
return self._obj.__str__()
class _SelFlag(object):
def __init__(self, code, string):
self._code = code
self._string = string
@property
def code(self): return self._code
def __str__(self): return self._string
def match(self, flag):
raise NotImplementedError
class _SelAll(_SelFlag):
def __init__(self): super().__init__('00', 'ALL')
def match(self, flag): return True
class _SelTrue(_SelFlag):
def __init__(self): super().__init__('11', 'SL')
def match(self, flag): return flag
class _SelFalse(_SelFlag):
def __init__(self): super().__init__('10', '~SL')
def match(self, flag): return not flag
class SelFlag(Enum):
ALL = _SelAll()
NOT_SEL = _SelFalse()
SEL = _SelTrue()
# noinspection PyInitNewSignature
def __init__(self, sel_obj):
assert isinstance(sel_obj, _SelFlag)
self.__sel__ = sel_obj
@property
def code(self): return self.__sel__.code
def __str__(self): return self.__sel__.__str__()
def match(self, flag): return self.__sel__.match(flag)
class MemoryBank(Enum):
RESERVED = ('00', 'Reserved')
EPC = ('01', 'EPC')
TID = ('10', 'TID')
USER = ('11', 'User')
# noinspection PyInitNewSignature
def __init__(self, code, string):
self._code = code
self._string = string
@property
def code(self):
return self._code
def __str__(self):
return self._string
class CommandCode(Enum):
QUERY = ('1000', 'Query')
QUERY_REP = ('00', 'QueryRep')
ACK = ('01', 'ACK')
REQ_RN = ('11000001', 'Req_RN')
READ = ('11000010', 'Read')
# noinspection PyInitNewSignature
def __init__(self, code, string):
self._code = code
self._string = string
@property
def code(self):
return self._code
def __str__(self):
return self._string
class TempRange(Enum):
NOMINAL = (False, "nominal")
EXTENDED = (True, "extended")
# noinspection PyInitNewSignature
def __init__(self, extended, string):
self._extended = extended
self._string = string
@property
def extended(self):
return self._extended
def __str__(self):
return self._string
#
#######################################################################
# Default system-wide Reader Parameters
#######################################################################
#
class StdParams:
tari = 6.25e-6
rtcal = 1.5625e-05
trcal = 3.125e-05
delim = 12.5e-6
Q = 4
divide_ratio = DivideRatio.DR_8
tag_encoding = TagEncoding.FM0
sel = SelFlag.ALL
session = Session.S0
target = InventoryFlag.A
trext = False
read_default_bank = MemoryBank.TID
read_default_word_ptr = 0
read_default_word_count = 4 # FIXME: check this!
temp_range = TempRange.NOMINAL
access_ops = [] # this list contains reader commands for tag access
default_epc = "FF" * 12
default_read_data = "FF" * 8
default_rn = 0x0000
default_crc5 = 0x00
default_crc16 = 0x0000
stdParams = StdParams()
#
#######################################################################
# Tag Operations
#######################################################################
#
class TagOp:
pass
class TagReadOp(TagOp):
bank = MemoryBank.TID
word_ptr = 0
word_count = 0
def __init__(self):
super().__init__()
#
#######################################################################
# API for encoding basic types
#######################################################################
#
def encode_bool(value):
return '1' if value else '0'
def encode_int(value, n_bits):
value %= 2 ** n_bits
return "{:0{width}b}".format(value, width=n_bits)
def encode_word(value):
return encode_int(value, 16)
def encode_byte(value):
return encode_int(value, 8)
def encode_ebv(value, first_block=True):
prefix = '0' if first_block else '1'
if value < 128:
return prefix + format(value, '07b')
else:
return encode_ebv(value >> 7, first_block=False) + \
encode_ebv(value % 128, first_block=first_block)
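
# Example (comment added): EBV splits the value into 7-bit blocks; every
# block except the last carries a leading '1' continuation bit, so
#   encode_ebv(16)  -> '00010000'
#   encode_ebv(128) -> '1000000100000000'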
#
#######################################################################
# Commands
#######################################################################
#
class Command:
def __init__(self, code):
super().__init__()
self._code = code
@property
def code(self):
return self._code
def encode(self):
raise NotImplementedError
@property
def bitlen(self):
s = self.encode()
return len(s)
class Query(Command):
def __init__(self, dr=None, m=None, trext=None, sel=None, session=None,
target=None, q=None, crc=None):
super().__init__(CommandCode.QUERY)
self.dr = dr if dr is not None else stdParams.divide_ratio
self.m = m if m is not None else stdParams.tag_encoding
self.trext = trext if trext is not None else stdParams.trext
self.sel = sel if sel is not None else stdParams.sel
self.session = session if session is not None else stdParams.session
self.target = target if target is not None else stdParams.target
self.q = q if q is not None else stdParams.Q
self.crc = crc if crc is not None else stdParams.default_crc5
def encode(self):
return (self.code.code + self.dr.code + self.m.code +
encode_bool(self.trext) + self.sel.code + self.session.code +
self.target.code + encode_int(self.q, 4) +
encode_int(self.crc, 5))
def __str__(self):
return "{o.code}{{DR({o.dr}),{o.m},TRext({trext}),{o.sel}," \
"{o.session},{o.target},Q({o.q}),CRC(0x{o.crc:02X})}}" \
"".format(o=self, trext=(1 if self.trext else 0))
class QueryRep(Command):
def __init__(self, session=None):
super().__init__(CommandCode.QUERY_REP)
self.session = session if session is not None else stdParams.session
def encode(self):
return self.code.code + self.session.code
def __str__(self):
return "{o.code}{{{o.session}}}".format(o=self)
class Ack(Command):
def __init__(self, rn):
super().__init__(CommandCode.ACK)
self.rn = rn if rn is not None else stdParams.default_rn
def encode(self):
return self.code.code + encode_int(self.rn, 16)
def __str__(self):
return "{o.code}{{0x{o.rn:04X}}}".format(o=self)
class ReqRN(Command):
def __init__(self, rn=None, crc=None):
super().__init__(CommandCode.REQ_RN)
self.rn = rn if rn is not None else stdParams.default_rn
self.crc = crc if crc is not None else stdParams.default_crc16
def encode(self):
return self.code.code + encode_word(self.rn) + encode_word(self.crc)
def __str__(self):
return "{o.code}{{RN(0x{o.rn:04X}),CRC(0x{o.crc:04X})}}".format(o=self)
class Read(Command):
def __init__(self, bank=None, word_ptr=None, word_count=None,
rn=None, crc=None):
super().__init__(CommandCode.READ)
self.bank = (bank if bank is not None
else stdParams.read_default_bank)
self.word_ptr = (word_ptr if word_ptr is not None
else stdParams.read_default_word_ptr)
self.word_count = (word_count if word_count is not None
else stdParams.read_default_word_count)
self.rn = rn if rn is not None else stdParams.default_rn
self.crc = crc if crc is not None else stdParams.default_crc16
def encode(self):
return (self.code.code + self.bank.code + encode_ebv(self.word_ptr) +
encode_byte(self.word_count) + encode_word(self.rn) +
encode_word(self.crc))
def __str__(self):
return "{o.code}{{{o.bank},WordPtr(0x{o.word_ptr:02X})," \
"WordCount({o.word_count}),RN(0x{o.rn:04X})," \
"CRC(0x{o.crc:04X})}}".format(o=self)
#
#######################################################################
# Tag replies
#######################################################################
#
class ReplyType(Enum):
QUERY_REPLY = 0
ACK_REPLY = 1
REQRN_REPLY = 2
READ_REPLY = 3
class Reply:
def __init__(self, reply_type):
super().__init__()
self.__type = reply_type
@property
def bitlen(self):
raise NotImplementedError()
@property
def reply_type(self):
return self.__type
class QueryReply(Reply):
def __init__(self, rn=0x0000):
super().__init__(ReplyType.QUERY_REPLY)
self.rn = rn
@property
def bitlen(self):
return 16
def __str__(self):
return "Reply(0x{o.rn:04X})".format(o=self)
def to_bytes(value):
if isinstance(value, str):
return list(bytearray.fromhex(value))
    elif isinstance(value, collections.abc.Iterable):
value = list(value)
for b in value:
if not isinstance(b, int) or not (0 <= b < 256):
raise ValueError("each array element must represent a byte")
return value
else:
raise ValueError("value must be a hex string or bytes collections")
class AckReply(Reply):
def __init__(self, epc="", pc=0x0000, crc=0x0000):
super().__init__(ReplyType.ACK_REPLY)
self._data = to_bytes(epc)
self.pc = pc
self.crc = crc
@property
def bitlen(self):
return 32 + len(self._data) * 8
@property
def epc(self):
return self._data
def get_epc_string(self, byte_separator=""):
return byte_separator.join("{:02X}".format(b) for b in self._data)
def __str__(self):
return "Reply{{PC(0x{o.pc:04X}),EPC({epc})," \
"CRC(0x{o.crc:04X})}}".format(
o=self, epc=self.get_epc_string())
class ReqRnReply(Reply):
def __init__(self, rn=0x0000, crc=0x0000):
super().__init__(ReplyType.REQRN_REPLY)
self.rn = rn
self.crc = crc
@property
def bitlen(self):
return 32
def __str__(self):
return "Reply{{RN(0x{o.rn:04X}),CRC(0x{o.crc:04X})}}".format(o=self)
class ReadReply(Reply):
def __init__(self, data="", rn=0x0000, crc=0x0000, header=False):
super().__init__(ReplyType.READ_REPLY)
self.rn = rn
self.crc = crc
self.header = header
self._data = to_bytes(data)
@property
def memory(self):
return self._data
def get_memory_string(self, byte_separator=""):
return byte_separator.join("{:02X}".format(b) for b in self._data)
@property
def bitlen(self):
return 33 + len(self.memory) * 8
def __str__(self):
return "Reply{{Header({header}),Memory({data}),RN(0x{o.rn:04X})," \
"CRC(0x{o.crc:04X})}}".format(
header=int(self.header), data=self.get_memory_string(), o=self)
#
#######################################################################
# Preambles and frames
#######################################################################
#
class ReaderSync:
DELIM = 12.5e-6
def __init__(self, tari, rtcal, delim=DELIM):
super().__init__()
self.tari = tari
self.rtcal = rtcal
self.delim = delim
@property
def data0(self): return self.tari
@property
def data1(self): return self.rtcal - self.tari
@property
def duration(self): return self.delim + self.tari + self.rtcal
def __str__(self):
return "{{(Delim({}us),Tari({}us),RTcal({}us)}}".format(
self.delim * 1e6, self.tari * 1e6, self.rtcal * 1e6)
class ReaderPreamble(ReaderSync):
def __init__(self, tari, rtcal, trcal, delim=ReaderSync.DELIM):
super().__init__(tari=tari, rtcal=rtcal, delim=delim)
self.trcal = trcal
@property
def duration(self): return super().duration + self.trcal
def __str__(self):
return "{{Delim({}us),Tari({}us),RTcal({}us)," \
"TRcal({}us)}}".format(self.delim * 1e6, self.tari * 1e6,
self.rtcal * 1e6, self.trcal * 1e6)
class TagPreamble:
def __init__(self, extended=False):
super().__init__()
self.extended = extended
@property
def bitlen(self):
raise NotImplementedError
@property
def encoding(self):
raise NotImplementedError
def get_duration(self, blf):
return (self.bitlen * self.encoding.symbols_per_bit) / blf
class FM0Preamble(TagPreamble):
def __init__(self, extended=False):
super().__init__(extended)
@property
def bitlen(self):
return 18 if self.extended else 6
@property
def encoding(self):
return TagEncoding.FM0
def __str__(self):
return "{{({}){},{},trext({})}}".format(
self.bitlen, "0..01010v1" if self.extended else "1010v1",
self.encoding, 1 if self.extended else 0)
class MillerPreamble(TagPreamble):
def __init__(self, m, extended=False):
super().__init__(extended)
self._encoding = MillerPreamble._get_and_validate_encoding(m)
@property
def m(self):
return self._encoding.symbols_per_bit
@m.setter
def m(self, value):
self._encoding = MillerPreamble._get_and_validate_encoding(value)
@property
def bitlen(self):
return 22 if self.extended else 10
@property
def encoding(self): return self._encoding
@staticmethod
def _get_and_validate_encoding(m):
enc = TagEncoding.get(m)
if enc not in [TagEncoding.M2, TagEncoding.M4, TagEncoding.M8]:
raise ValueError("Miller encodings supported are M2, M4, M8")
return enc
def __str__(self):
return "{{({}){},{},trext({})}}".format(
self.bitlen, "DD..DD010111" if self.extended else "DDDD010111",
self.encoding, 1 if self.extended else 0)
def create_tag_preamble(encoding, extended=False):
if encoding == TagEncoding.FM0:
return FM0Preamble(extended)
else:
return MillerPreamble(m=encoding.symbols_per_bit, extended=extended)
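# Illustrative check (assuming FM0 maps one symbol per bit): an FM0 preamble
# with extended=False spans 6 bits, so at BLF = 320 kHz its duration is
# 6 / 320e3 = 18.75 us.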
class ReaderFrame:
def __init__(self, preamble, command):
super().__init__()
self.preamble = preamble
self.command = command
@property
def body_duration(self):
encoded_string = (self.command if isinstance(self.command, str)
else self.command.encode())
n_bits = {'0': 0, '1': 0}
for b in encoded_string:
n_bits[b] += 1
d0 = self.preamble.data0
d1 = self.preamble.data1
return n_bits['0'] * d0 + n_bits['1'] * d1
@property
def preamble_duration(self):
return self.preamble.duration
@property
def duration(self):
return self.body_duration + self.preamble.duration
def __str__(self):
return "Frame{{{o.preamble}{o.command}}}".format(o=self)
class TagFrame:
def __init__(self, preamble, reply):
super().__init__()
self.preamble = preamble
self.reply = reply
def get_body_duration(self, blf):
m = self.preamble.encoding.symbols_per_bit
return (self.reply.bitlen * m) / blf
def get_duration(self, blf):
m = self.preamble.encoding.symbols_per_bit
t_preamble = self.preamble.get_duration(blf)
t_body = self.get_body_duration(blf)
t_suffix = m / blf
return t_preamble + t_body + t_suffix
def __str__(self):
return "Frame{{{o.preamble}{o.reply}}}".format(o=self)
# FIXME: not vectorized
def tag_preamble_bitlen(encoding=None, trext=None):
encoding = encoding if encoding is not None else stdParams.tag_encoding
trext = trext if trext is not None else stdParams.trext
if encoding is TagEncoding.FM0:
return 18 if trext else 6
else:
return 22 if trext else 10
def tag_preamble_duration(blf=None, encoding=None, trext=None):
    blf = blf if blf is not None else get_blf()
    encoding = encoding if encoding is not None else stdParams.tag_encoding
    bitlen = tag_preamble_bitlen(encoding, trext)
    return (bitlen * encoding.symbols_per_bit) / blf
#
#######################################################################
# Reader and Tag frames helpers and accessors
#######################################################################
#
def reader_frame_duration(command, tari=None, rtcal=None, trcal=None,
delim=None):
tari = tari if tari is not None else stdParams.tari
rtcal = rtcal if rtcal is not None else stdParams.rtcal
trcal = trcal if trcal is not None else stdParams.trcal
delim = delim if delim is not None else stdParams.delim
if isinstance(command, Query) or (
isinstance(command, str) and
command.startswith(CommandCode.QUERY.code)):
preamble = ReaderPreamble(tari, rtcal, trcal, delim)
else:
preamble = ReaderSync(tari=tari, rtcal=rtcal, delim=delim)
frame = ReaderFrame(preamble, command)
return frame.duration
def tag_frame_duration(reply, blf=None, encoding=None, trext=None):
blf = blf if blf is not None else get_blf()
encoding = encoding if encoding is not None else stdParams.tag_encoding
trext = trext if trext is not None else stdParams.trext
preamble = create_tag_preamble(encoding, trext)
frame = TagFrame(preamble, reply)
return frame.get_duration(blf)
def command_duration(command_code,
tari=None, rtcal=None, trcal=None, delim=None, dr=None,
m=None, trext=None, sel=None, session=None, target=None,
q=None, rn=None, bank=None, word_ptr=None,
word_count=None, crc5=None, crc16=None):
if command_code is CommandCode.QUERY:
return query_duration(tari, rtcal, trcal, delim, dr, m, trext, sel,
session, target, q, crc5)
elif command_code is CommandCode.QUERY_REP:
return query_rep_duration(tari, rtcal, trcal, delim, session)
elif command_code is CommandCode.ACK:
return ack_duration(tari, rtcal, trcal, delim, rn)
elif command_code is CommandCode.REQ_RN:
return reqrn_duration(tari, rtcal, trcal, delim, rn, crc16)
elif command_code is CommandCode.READ:
return read_duration(tari, rtcal, trcal, delim, bank, word_ptr,
word_count, rn, crc16)
else:
raise ValueError("unrecognized command code = {}".format(command_code))
# noinspection PyTypeChecker
def query_duration(tari=None, rtcal=None, trcal=None, delim=None, dr=None,
m=None, trext=None, sel=None, session=None, target=None,
q=None, crc=None):
return reader_frame_duration(Query(dr, m, trext, sel, session, target,
q, crc), tari, rtcal, trcal, delim)
# noinspection PyTypeChecker
def query_rep_duration(tari=None, rtcal=None, trcal=None, delim=None,
session=None):
return reader_frame_duration(QueryRep(session), tari, rtcal, trcal,
delim)
# noinspection PyTypeChecker
def ack_duration(tari=None, rtcal=None, trcal=None, delim=None, rn=None):
return reader_frame_duration(Ack(rn), tari, rtcal, trcal, delim)
# noinspection PyTypeChecker
def reqrn_duration(tari=None, rtcal=None, trcal=None, delim=None, rn=None,
crc=None):
return reader_frame_duration(ReqRN(rn, crc), tari, rtcal, trcal, delim)
# noinspection PyTypeChecker
def read_duration(tari=None, rtcal=None, trcal=None, delim=None, bank=None,
word_ptr=None, word_count=None, rn=None, crc=None):
return reader_frame_duration(Read(bank, word_ptr, word_count, rn, crc),
tari, rtcal, trcal, delim)
def reply_duration(reply_type, dr=None, trcal=None, encoding=None, trext=None,
epc_bytelen=None, words_count=None):
if reply_type is ReplyType.QUERY_REPLY:
return query_reply_duration(dr, trcal, encoding, trext)
elif reply_type is ReplyType.ACK_REPLY:
return ack_reply_duration(dr, trcal, encoding, trext, epc_bytelen)
elif reply_type is ReplyType.REQRN_REPLY:
return reqrn_reply_duration(dr, trcal, encoding, trext)
elif reply_type is ReplyType.READ_REPLY:
return read_reply_duration(dr, trcal, encoding, trext, words_count)
else:
raise ValueError("unrecognized reply type = {}".format(reply_type))
def __reply_duration(bs=0, dr=None, trcal=None, encoding=None, trext=None):
bitrate = tag_bitrate(dr, trcal, encoding)
preamble_bs = tag_preamble_bitlen(encoding, trext)
suffix_bs = 1
return (preamble_bs + bs + suffix_bs) / bitrate
def query_reply_duration(dr=None, trcal=None, encoding=None, trext=None):
return __reply_duration(16, dr, trcal, encoding, trext)
def ack_reply_duration(dr=None, trcal=None, encoding=None, trext=None,
epc_bytelen=None):
epc_bytelen = epc_bytelen if epc_bytelen is not None else \
len(to_bytes(stdParams.default_epc))
return __reply_duration(32 + epc_bytelen * 8, dr, trcal, encoding, trext)
def reqrn_reply_duration(dr=None, trcal=None, encoding=None, trext=None):
return __reply_duration(32, dr, trcal, encoding, trext)
def read_reply_duration(dr=None, trcal=None, encoding=None, trext=None,
words_count=None):
words_count = words_count if words_count is not None else \
stdParams.read_default_word_count
return __reply_duration(words_count * 16 + 33, dr, trcal, encoding, trext)
#
#######################################################################
# Link timings estimation
#######################################################################
#
def get_blf(dr=None, trcal=None):
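    # Backscatter-link frequency: BLF = DR / TRcal; e.g. DR = 64/3 with
    # TRcal = 66.7 us gives roughly 320 kHz.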
dr = dr if dr is not None else stdParams.divide_ratio
trcal = trcal if trcal is not None else stdParams.trcal
return dr.eval() / trcal
def tag_bitrate(dr=None, trcal=None, encoding=None):
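    # The tag bit rate is BLF divided by the encoding's symbols per bit,
    # e.g. Miller M4 at BLF = 320 kHz yields 80 kbps.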
encoding = encoding if encoding is not None else stdParams.tag_encoding
blf = get_blf(dr, trcal)
return blf / encoding.symbols_per_bit
def get_frt(trcal=None, dr=None, temp_range=None):
trcal = trcal if trcal is not None else stdParams.trcal
dr = dr if dr is not None else stdParams.divide_ratio
temp_range = (temp_range if temp_range is not None
else stdParams.temp_range)
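    # Each table below lists (TRcal upper bound in us, frequency tolerance FrT)
    # pairs; the first row whose bound exceeds TRcal selects the FrT value.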
if dr is DivideRatio.DR_643:
if temp_range is TempRange.EXTENDED:
f = [(33.633, 0.15), (66.033, 0.22), (82.467, 0.15),
(84.133, 0.10), (131.967, 0.12), (198.00, 0.07),
(227.25, 0.05)]
else:
f = [(33.633, 0.15), (66.033, 0.22), (67.367, 0.10),
(82.467, 0.12), (131.967, 0.10), (198.00, 0.07),
(227.25, 0.05)]
else:
if temp_range is TempRange.EXTENDED:
f = [(24.7500, 0.19), (30.9375, 0.15), (49.50, 0.10),
(75.0000, 0.07), (202.0, 0.04)]
else:
f = [(24.75, 0.19), (25.25, 0.10), (30.9375, 0.12),
(49.50, 0.10), (75.00, 0.07), (202.000, 0.04)]
for highest_trcal, frt in f:
if trcal < highest_trcal * 1e-6:
return frt
return f[-1][1]
def get_pri(trcal=None, dr=None):
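    # Backscatter-link pulse-repetition interval: PRI = TRcal / DR = 1 / BLF.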
trcal = trcal if trcal is not None else stdParams.trcal
dr = dr if dr is not None else stdParams.divide_ratio
return trcal / dr.eval()
def min_link_t(param_index, rtcal=None, trcal=None, dr=None, temp=None):
rtcal = rtcal if rtcal is not None else stdParams.rtcal
trcal = trcal if trcal is not None else stdParams.trcal
dr = dr if dr is not None else stdParams.divide_ratio
temp = temp if temp is not None else stdParams.temp_range
if param_index is not None:
if param_index in [1, 5, 6]:
pri = get_pri(trcal, dr)
frt = get_frt(trcal, dr, temp)
return max(rtcal, pri * 10.0) * (1.0 - frt) - 2e-6
elif param_index == 2:
return 3.0 * get_pri(trcal, dr)
elif param_index == 3:
return 0.0
elif param_index == 4:
return 2.0 * rtcal
elif param_index == 7:
return max(link_t2_max(trcal, dr), 250e-6)
else:
raise ValueError("1 <= n <= 7, but n={} found".format(param_index))
else:
return [min_link_t(n, rtcal, trcal, dr, temp) for n in range(1, 8)]
def max_link_t(param_index, rtcal=None, trcal=None, dr=None, temp=None):
rtcal = rtcal if rtcal is not None else stdParams.rtcal
trcal = trcal if trcal is not None else stdParams.trcal
dr = dr if dr is not None else stdParams.divide_ratio
temp = temp if temp is not None else stdParams.temp_range
if param_index is not None:
if param_index == 1:
pri = get_pri(trcal, dr)
frt = get_frt(trcal, dr, temp)
return max(rtcal, pri * 10.0) * (1.0 + frt) + 2e-6
elif param_index == 2:
return 20.0 * get_pri(trcal, dr)
elif 5 <= param_index <= 7:
return 0.02
elif param_index == 3 or param_index == 4:
return float('inf')
else:
raise ValueError("1 <= param_index <= 7, but param_index={} found"
"".format(param_index))
else:
return [max_link_t(n, rtcal, trcal, dr, temp) for n in range(1, 8)]
def link_t(param_index=None, rtcal=None, trcal=None, dr=None, temp=None):
if param_index is None:
return [link_t(n, rtcal, trcal, dr, temp) for n in range(1, 8)]
else:
return (min_link_t(param_index, rtcal, trcal, dr, temp),
max_link_t(param_index, rtcal, trcal, dr, temp))
def link_t1_min(rtcal=None, trcal=None, dr=None, temp=None):
return min_link_t(1, rtcal, trcal, dr, temp)
def link_t1_max(rtcal=None, trcal=None, dr=None, temp=None):
return max_link_t(1, rtcal, trcal, dr, temp)
def link_t2_min(trcal=None, dr=None):
return min_link_t(2, trcal=trcal, dr=dr)
def link_t2_max(trcal=None, dr=None):
return max_link_t(2, trcal=trcal, dr=dr)
def link_t3():
return min_link_t(3)
def link_t4(rtcal=None):
return min_link_t(4, rtcal=rtcal)
def link_t5_min(rtcal=None, trcal=None, dr=None, temp=None):
    return min_link_t(5, rtcal, trcal, dr, temp)
def link_t5_max():
return max_link_t(5)
def link_t6_min(rtcal=None, trcal=None, dr=None, temp=None):
    return min_link_t(6, rtcal, trcal, dr, temp)
def link_t6_max():
return max_link_t(6)
def link_t7_min(trcal=None, dr=None):
return min_link_t(7, trcal=trcal, dr=dr)
def link_t7_max():
return max_link_t(7)
#
#######################################################################
# Slot duration estimation
#######################################################################
#
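# A slot opens with Query (first slot) or QueryRep. An EMPTY slot then waits
# max(T1max + T3, T4); COLLISION adds the RN16 exchange; INVENTORY further adds
# ACK and the EPC reply; ACCESS appends the requested access commands.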
class SlotType(Enum):
EMPTY = 0
COLLISION = 1
INVENTORY = 2
ACCESS = 3
def slot_duration(slot_type, access_ops=None, tari=None, rtcal=None,
trcal=None, delim=None, dr=None, temp=None, m=None,
trext=None, sel=None, session=None, target=None, q=None,
rn=None, epc=None, crc5=None, crc16=None, is_first=False):
rn = rn if rn is not None else random.randint(0x0000, 0xFFFF)
t4 = link_t4(rtcal)
if is_first:
t_query = query_duration(tari, rtcal, trcal, delim, dr, m, trext, sel,
session, target, q, crc5)
else:
t_query = query_rep_duration(tari, rtcal, trcal, delim, session)
if slot_type is SlotType.EMPTY:
t1 = link_t1_max(rtcal, trcal, dr, temp)
t3 = link_t3()
return t_query + np.maximum(t1 + t3, t4)
t1_min = link_t1_min(rtcal, trcal, dr, temp)
t2 = link_t2_min(trcal, dr)
t_rn16 = query_reply_duration(dr, trcal, m, trext)
t_inventory_rn16 = t_query + np.maximum(t1_min + t_rn16 + t2, t4)
if slot_type is SlotType.COLLISION:
return t_inventory_rn16
t_ack = ack_duration(tari, rtcal, trcal, delim, rn)
t_ack_reply = ack_reply_duration(dr, trcal, m, trext, epc)
t_inventory = (t_inventory_rn16 + t_ack +
np.maximum(t1_min + t_ack_reply + t2, t4))
if slot_type is SlotType.INVENTORY or (
slot_type is SlotType.ACCESS and access_ops is None):
return t_inventory
assert slot_type is SlotType.ACCESS
# From here on, assume that slot_type is ACCESS
t_access = 0
for op in access_ops:
if isinstance(op, TagReadOp):
t_read_cmd = read_duration(tari, rtcal, trcal, delim, op.bank,
op.word_ptr, op.word_count, rn, crc16)
t_read_reply = read_reply_duration(dr, trcal, m, trext,
op.word_count)
t_access += np.maximum(t_read_cmd + t1_min + t_read_reply + t2, t4)
else:
raise ValueError("unrecognized tag operation = {}".format(op))
return t_inventory + t_access
def slot_duration_min(slot_type, access_ops=None, tari=None, rtcal=None,
trcal=None, delim=None, dr=None, temp=None, epc=None,
is_first=False):
return slot_duration(
slot_type, access_ops, tari, rtcal, trcal, delim, dr, temp,
m=TagEncoding.FM0, trext=False, sel=SelFlag.ALL, session=Session.S0,
target=InventoryFlag.A, q=0, rn=0, epc=epc, crc5=0, crc16=0,
is_first=is_first)
def slot_duration_max(slot_type, access_ops=None, tari=None, rtcal=None,
trcal=None, delim=None, dr=None, temp=None, epc=None,
is_first=False):
return slot_duration(
slot_type, access_ops, tari, rtcal, trcal, delim, dr, temp,
m=TagEncoding.M8, trext=True, sel=SelFlag.SEL, session=Session.S3,
target=InventoryFlag.B, q=15, rn=0xFFFF, epc=epc, crc5=0x1F,
crc16=0xFFFF, is_first=is_first)
#
#######################################################################
# Round duration estimation
#######################################################################
#
def estimate_inventory_round():
pass # TODO
def estimate_inventory_round_min():
pass # TODO
def estimate_inventory_round_max():
pass # TODO
def estimate_inventory_round_pmf():
pass # TODO
#
#######################################################################
# Various helpers
#######################################################################
#
# noinspection PyTypeChecker
def get_elementary_timings(tari=None, rtcal=None, trcal=None, delim=None,
temp=None, dr=None, m=None, trext=None, sel=None,
session=None, target=None, q=None, bank=None,
word_ptr=None, word_count=None, rn=0, crc=0,
epc="00112233445566778899AABB",
mem="FFEEDDCCBBAA", pc=0):
tari = tari if tari is not None else stdParams.tari
rtcal = rtcal if rtcal is not None else stdParams.rtcal
trcal = trcal if trcal is not None else stdParams.trcal
delim = delim if delim is not None else stdParams.delim
temp = temp if temp is not None else stdParams.temp_range
dr = dr if dr is not None else stdParams.divide_ratio
m = m if m is not None else stdParams.tag_encoding
trext = trext if trext is not None else stdParams.trext
sel = sel if sel is not None else stdParams.sel
session = session if session is not None else stdParams.session
target = target if target is not None else stdParams.target
q = q if q is not None else stdParams.Q
bank = bank if bank is not None else stdParams.read_default_bank
word_ptr = word_ptr if word_ptr is not None else \
stdParams.read_default_word_ptr
word_count = word_count if word_count is not None else \
stdParams.read_default_word_count
query = Query(dr, m, trext, sel, session, target, q, crc)
query_rep = QueryRep(session)
ack = Ack(rn)
req_rn = ReqRN(rn, crc)
read = Read(bank, word_ptr, word_count, rn, crc)
query_reply = QueryReply(rn)
ack_reply = AckReply(epc, pc, crc)
req_rn_reply = ReqRnReply(rn, crc)
read_reply = ReadReply(mem, rn, crc)
blf = get_blf(dr, trcal)
ret = {
'Tari': tari,
'TRcal': trcal,
'RTcal': rtcal,
'Delim': delim,
'TempRange': temp,
'TRext': trext,
'Q': q,
'DR': dr,
'M': m,
'Target': target,
'Sel': sel,
'Session': session,
'Bank': bank,
'WordPtr': word_ptr,
'WordCount': word_count,
'Query': reader_frame_duration(query, tari, rtcal, trcal, delim),
'QueryRep': reader_frame_duration(query_rep, tari, rtcal, trcal,
delim),
'ACK': reader_frame_duration(ack, tari, rtcal, trcal, delim),
'ReqRN': reader_frame_duration(req_rn, tari, rtcal, trcal, delim),
'Read': reader_frame_duration(read, tari, rtcal, trcal, delim),
'RN16': tag_frame_duration(query_reply, blf, m, trext),
'Response': tag_frame_duration(ack_reply, blf, m, trext),
'Handle': tag_frame_duration(req_rn_reply, blf, m, trext),
'Data': tag_frame_duration(read_reply, blf, m, trext)
}
for timer_index in range(1, 8):
t = link_t(timer_index, rtcal=rtcal, trcal=trcal, dr=dr, temp=temp)
ret["T{}(min)".format(timer_index)] = t[0]
ret["T{}(max)".format(timer_index)] = t[1]
return ret
def prettify_elementary_timings(timings):
timings_fields = tuple(elem for tupl in
(("T{}(min)".format(n), "T{}(max)".format(n))
for n in range(1, 8))
for elem in tupl)
us_fields = ('Tari', 'TRcal', 'RTcal', 'Delim', 'Query', 'QueryRep', 'ACK',
'ReqRN', 'Read', 'RN16', 'Response', 'Handle', 'Data'
) + timings_fields
ordered_fields = (
'Tari', 'RTcal', 'TRcal', 'Delim', 'TempRange', 'TRext',
'Q', 'DR', 'M', 'Target', 'Sel', 'Session', 'Bank',
'WordPtr', 'WordCount') + timings_fields + (
'Query', 'QueryRep', 'ACK',
'ReqRN', 'Read', 'RN16', 'Response', 'Handle', 'Data')
ret = []
for k in ordered_fields:
s = "{:12s}: ".format(k)
if k in us_fields:
s += "{:>14.8f} us".format(timings[k] / 1e-6)
else:
s += str(timings[k])
ret.append(s)
return "\n".join(ret)
| 30.610103 | 79 | 0.592682 | 17,220 | 0.444009 | 0 | 0 | 3,536 | 0.091174 | 0 | 0 | 4,631 | 0.119408 |
de72e8f348089a00d8a491df1f651cf4a945ca9c | 1,500 | py | Python | Heap/378-Kth_Smalles_Element_in_a_Sorted_Matrix.py | dingwenzheng730/Leet | c08bd48e8dcc6bca41134d218d39f66bfc112eaf | [
"MIT"
]
| 1 | 2021-06-15T21:01:53.000Z | 2021-06-15T21:01:53.000Z | Heap/378-Kth_Smalles_Element_in_a_Sorted_Matrix.py | dingwenzheng730/Leet | c08bd48e8dcc6bca41134d218d39f66bfc112eaf | [
"MIT"
]
| null | null | null | Heap/378-Kth_Smalles_Element_in_a_Sorted_Matrix.py | dingwenzheng730/Leet | c08bd48e8dcc6bca41134d218d39f66bfc112eaf | [
"MIT"
]
| null | null | null | '''
Given an n x n matrix where each of the rows and columns are sorted in ascending order, return the kth smallest element in the matrix.
Note that it is the kth smallest element in the sorted order, not the kth distinct element.
Input: matrix = [[1,5,9],[10,11,13],[12,13,15]], k = 8
Output: 13
Explanation: The elements in the matrix are [1,5,9,10,11,12,13,13,15], and the 8th smallest number is 13
Input: matrix = [[1,5,9],[10,11,13],[12,13,15]], k = 2
Output: 10
Input: [[1,5,9],[10,11,13],[12,13,15]], k= 9
Output: 15
Input: [[2]], k= 1
Output: 2
Precondition:
n >= 1
k <= n*n
No int overflow
C1: Single element
C2: k = n^2
C3: k <= n
C4: k > n
Algo:
Brute force: collect all n^2 elements and sort them, O(n^2 log n^2) = O(n^2 log n)
Heap:
x = min(k, n)
Runtime: O(k log x)
Space: O(x)
if n >= k:
    seeding the heap with the first column (each row's head) is enough
if n < k:
    keep one pointer per row and use a min-heap of the pointed-to values;
    k times, pop the smallest pointer and advance it to the next value in its row
Init a heap whose size is min(k, n)
'''
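# Worked example (from the docstring): for [[1,5,9],[10,11,13],[12,13,15]] and
# k = 8 the pop order is 1,5,9,10,11,12,13,13, so the 8th pop returns 13.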
import heapq
from typing import List
class Solution:
    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
        n = len(matrix)
        x = min(n, k)
        min_heap = []
        # Seed the heap with the head of each of the first min(n, k) rows.
        for r in range(x):
            heapq.heappush(min_heap, (matrix[r][0], r, 0))
        # Pop the smallest element k times, advancing that row's pointer.
        while k:
            element, r, c = heapq.heappop(min_heap)
            if c < n - 1:
                heapq.heappush(min_heap, (matrix[r][c + 1], r, c + 1))
            k -= 1
| 24.193548 | 134 | 0.611333 | 439 | 0.292667 | 0 | 0 | 0 | 0 | 0 | 0 | 1,050 | 0.7 |
de73b0477272b09621a0a7e87406fe9c6c2a1f06 | 5,088 | py | Python | baseStation/test/vision/service/test_visionService.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
]
| null | null | null | baseStation/test/vision/service/test_visionService.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
]
| null | null | null | baseStation/test/vision/service/test_visionService.py | olgam4/design3 | 6e05d123a24deae7dda646df535844a158ef5cc0 | [
"WTFPL"
]
| null | null | null | from unittest import TestCase
from unittest.mock import Mock
import numpy as np
from pathfinding.domain.angle import Angle
from pathfinding.domain.coord import Coord
from vision.domain.image import Image
from vision.domain.rectangle import Rectangle
from vision.infrastructure.cvVisionException import CameraDoesNotExistError
from vision.service.visionService import VisionService
class TestVisionService(TestCase):
valid_camera_ids_int = [0, 2]
valid_camera_ids_str = ['0', '2']
invalid_camera_id_int = 1
invalid_camera_id_str = '1'
calibration_file_path = 'path'
image = Image(np.zeros((50, 50, 3)))
def setUp(self) -> None:
self.camera_factory = Mock()
self.play_area_finder = Mock()
self.goal_finder = Mock()
self.source_finder = Mock()
self.obstacle_finder = Mock()
self.robot_finder = Mock()
self.camera_calibration_factory = Mock()
self.camera_calibration = Mock()
self.camera_drawer = Mock()
self.vision_service = VisionService(self.camera_factory, self.camera_calibration_factory, self.camera_drawer,
self.play_area_finder, self.goal_finder, self.source_finder,
self.obstacle_finder, self.robot_finder)
def initialiseService(self) -> None:
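        """Stub the camera factory and calibration chain so set_camera() succeeds without hardware."""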
self.camera = Mock()
self.camera_factory.create_camera = Mock(return_value=self.camera)
self.camera.take_picture = Mock(return_value=self.image)
self.camera_calibration_factory.load_calibration_from_file = Mock(return_value=self.camera_calibration)
self.camera_calibration.rectify_image = Mock(return_value=self.image)
self.vision_service.set_camera(self.valid_camera_ids_str[0], self.calibration_file_path)
def test_when_service_first_created_then_it_is_not_initialized(self) -> None:
self.assertFalse(self.vision_service._initialized.is_set())
def test_when_camera_ids_requested_then_ids_from_camera_factory_returned_as_string(self) -> None:
self.camera_factory.get_cameras = Mock(return_value=self.valid_camera_ids_int)
expected_values = self.valid_camera_ids_str
actual_values = self.vision_service.get_camera_ids()
self.assertListEqual(expected_values, actual_values)
def test_when_camera_set_with_valid_id_then_service_is_initialized(self) -> None:
self.initialiseService()
self.camera_factory.create_camera.assert_called_with(self.valid_camera_ids_int[0])
self.camera.take_picture.assert_called_once()
self.camera_calibration_factory.load_calibration_from_file.assert_called_with(self.calibration_file_path,
self.image)
self.camera_calibration.rectify_image.assert_called_once()
self.assertTrue(self.vision_service._initialized.is_set())
def test_when_camera_set_with_invalid_id_then_CameraDoesNotExistError_is_raised(self) -> None:
self.camera_factory.create_camera = Mock(side_effect=CameraDoesNotExistError(self.invalid_camera_id_int))
self.assertRaises(CameraDoesNotExistError,
self.vision_service.set_camera, self.invalid_camera_id_str, self.calibration_file_path)
def test_when_updated_then_attached_observers_are_notified(self) -> None:
self.initialiseService()
observer = Mock()
self.vision_service.attach(observer)
self.vision_service.update()
observer.update.assert_called_once()
def test_when_get_goal_then_center_of_goal_and_orientation_are_returned_as_real_coordinate(self) -> None:
self.initialiseService()
expected_coord = Coord(0, 0)
expected_angle = Angle(0)
self.goal_finder.find = Mock(return_value=(Rectangle(0, 0, 10, 8), expected_angle))
self.camera_calibration.convert_table_pixel_to_real = Mock(return_value=Coord(0, 0))
position = self.vision_service.get_goal()
actual_coord = position.coordinate
actual_angle = position.orientation
self.camera_calibration.convert_table_pixel_to_real.assert_called_with(Coord(5, 4))
self.assertEqual(expected_coord, actual_coord)
self.assertEqual(expected_angle, actual_angle)
def test_when_get_source_then_center_of_source_and_orientation_are_returned_as_real_coordinate(self) -> None:
self.initialiseService()
expected_coord = Coord(0, 0)
expected_angle = Angle(0)
self.source_finder.find = Mock(return_value=(Rectangle(0, 0, 10, 8), expected_angle))
self.camera_calibration.convert_table_pixel_to_real = Mock(return_value=Coord(0, 0))
position = self.vision_service.get_source()
actual_coord = position.coordinate
actual_angle = position.orientation
self.camera_calibration.convert_table_pixel_to_real.assert_called_with(Coord(5, 4))
self.assertEqual(expected_coord, actual_coord)
self.assertEqual(expected_angle, actual_angle)
| 45.428571 | 117 | 0.725825 | 4,702 | 0.924135 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.002948 |
de758aaeb7ae98b14c58fbe707173fad48237087 | 8,753 | py | Python | bmdal/layer_features.py | dholzmueller/bmdal_reg | 1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf | [
"Apache-2.0"
]
| 3 | 2022-03-19T21:30:10.000Z | 2022-03-30T08:20:48.000Z | bmdal/layer_features.py | dholzmueller/bmdal_reg | 1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf | [
"Apache-2.0"
]
| null | null | null | bmdal/layer_features.py | dholzmueller/bmdal_reg | 1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf | [
"Apache-2.0"
]
| null | null | null | from .feature_maps import *
import torch.nn as nn
class LayerGradientComputation:
"""
Abstract base class that can be used as a second base class
for layers that support the computation of gradient features
"""
def __init__(self):
super().__init__() # in case this is used with multiple inheritance
def get_feature_map(self) -> FeatureMap:
"""
:return: Returns a FeatureMap object that can compute feature map / kernel values
on the data provided by pop_feature_data()
"""
raise NotImplementedError()
def before_forward(self) -> None:
"""
Callback that is called before the data is passed through the model in a forward pass
and gradients are computed in a backward pass.
This method can be used to set up hooks that grab input data and gradients in both forward and backward pass.
"""
raise NotImplementedError()
def pop_feature_data(self) -> FeatureData:
"""
:return: This method should return the feature data
corresponding to the inputs that were last passed through the model.
This feature data should be usable by the feature map returned by get_feature_map()
"""
raise NotImplementedError()
class ModelGradTransform(DataTransform):
"""
A DataTransform object that passes data through a NN model
in order to obtain feature data corresponding to gradients
"""
def __init__(self, model: nn.Module, grad_layers: List[LayerGradientComputation]):
"""
:param model: The model to be computed gradients of
:param grad_layers: All layers of the model whose parameters we want to compute gradients of
"""
self.model = model
self.grad_layers = grad_layers
def forward(self, feature_data: FeatureData, idxs: Indexes) -> FeatureData:
"""
:param feature_data: Feature data to be passed through the model
:param idxs: indexes of the feature data that should be passed through the model
:return: feature data provided by the layers
"""
for grad_layer in self.grad_layers:
grad_layer.before_forward()
old_training = self.model.training
self.model.eval()
X = feature_data.get_tensor(idxs)
y = self.model(X) # implicitly calls hooks that were set by l.before_forward()
y.backward(torch.ones_like(y))
with torch.no_grad():
for p in self.model.parameters():
p.grad = None
self.model.train(old_training)
data = ListFeatureData([layer_comp.pop_feature_data() for layer_comp in self.grad_layers])
return data
def create_grad_feature_map(model: nn.Module, grad_layers: List[LayerGradientComputation],
use_float64: bool = False) -> FeatureMap:
"""
Creates a feature map corresponding to phi_{grad} or phi_{ll}, depending on which layers are provided.
:param model: Model to compute gradients of
:param grad_layers: All layers of the model whose parameters we want to compute gradients of
:param use_float64: Set to true if the gradient features should be converted to float64 after computing them
:return: Returns a feature map corresponding to phi_{grad} for the given layers.
"""
tfms = [ModelGradTransform(model, grad_layers)]
if use_float64:
tfms.append(ToDoubleTransform())
return SequentialFeatureMap(SumFeatureMap([l.get_feature_map() for l in grad_layers]),
tfms)
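# Usage sketch (illustrative): wrap each nn.Linear in a LinearGradientComputation
# (defined below) and pass the list here, e.g.
#   grad_layers = [LinearGradientComputation(m) for m in model.modules()
#                  if isinstance(m, nn.Linear)]
#   fm = create_grad_feature_map(model, grad_layers)
# Keeping only the last linear layer in grad_layers yields phi_{ll}.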
# ----- Specific LayerGradientComputation implementation(s) for linear layers
class GeneralLinearGradientComputation(LayerGradientComputation):
"""
Implements LayerGradientFeatures for general linear layers.
It can also be used with the Neural Tangent Parameterization since it includes a weight factor and bias factor.
(These are called sigma_w and sigma_b in the paper.)
"""
def __init__(self, layer: nn.Module, in_features: int, out_features: int,
weight_factor: float = 1.0, bias_factor: float = 1.0):
"""
:param layer: nn.Module object implementing a linear (fully-connected) layer,
whose gradients should be computed.
:param in_features: Input dimension of the layer.
:param out_features: Output dimension of the layer.
:param weight_factor: Factor sigma_w by which the weight matrix is multiplied in the forward pass.
        :param bias_factor: Factor sigma_b by which the bias is multiplied in the forward pass.
"""
super().__init__()
self.layers = [layer] # dirty hack to avoid infinite recursion in PyTorch if layer is self.
self.in_features = in_features
self.out_features = out_features
self.weight_factor = weight_factor
self.bias_factor = bias_factor
self._input_data = None
self._grad_output_data = None
self._input_hook = None
self._grad_output_hook = None
def get_feature_map(self) -> FeatureMap:
# gradients wrt to this layer are an outer product of the input and the output gradient,
# so we can use a ProductFeatureMap
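        # (for y = x @ W + b, the per-sample gradient dL/dW = outer(x, dL/dy))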
# the +1 is for the bias
return ProductFeatureMap([IdentityFeatureMap(n_features=self.in_features+1),
IdentityFeatureMap(n_features=self.out_features)])
def set_input_(self, value: torch.Tensor):
# this is used to have a method to call in the hooks
self._input_data = value
def set_grad_output_(self, value: torch.Tensor):
# this is used to have a method to call in the hooks
self._grad_output_data = value
def before_forward(self):
# sets up hooks that store the input and grad_output
self._input_hook = self.layers[0].register_forward_hook(
lambda layer, inp, output, s=self: s.set_input_(inp[0].detach().clone()))
self._grad_output_hook = self.layers[0].register_full_backward_hook(
lambda layer, grad_input, grad_output, s=self: s.set_grad_output_(grad_output[0].detach().clone()))
def pop_feature_data(self) -> FeatureData:
# remove the hooks
self._input_hook.remove()
self._grad_output_hook.remove()
# compute the adjusted input \tilde{x} from the paper
inp = torch.cat([self.weight_factor * self._input_data,
self.bias_factor * torch.ones(self._input_data.shape[0], 1, device=self._input_data.device)],
dim=1)
# feature data for the two IdentityFeatureMaps in the ProductFeatureMap, given by inputs and grad_outputs
fd = ListFeatureData([TensorFeatureData(inp), TensorFeatureData(self._grad_output_data)])
# allow to release memory earlier
self._input_data = None
self._grad_output_data = None
return fd
class LinearGradientComputation(GeneralLinearGradientComputation):
"""
This class implements a gradient computation for nn.Linear layers.
"""
def __init__(self, layer: nn.Linear):
super().__init__(layer=layer, in_features=layer.in_features, out_features=layer.out_features)
class LinearLayer(GeneralLinearGradientComputation, nn.Module):
"""
    Linear layer that implements LayerGradientComputation, i.e., it can be used for computing gradient-based kernels.
This linear layer does not initialize weight and bias itself,
instead it assumes that they are passed as arguments to the constructor.
It can also be used with the Neural Tangent Parameterization since it includes a weight factor and bias factor.
(These are called sigma_w and sigma_b in the paper.)
"""
def __init__(self, weight: torch.Tensor, bias: torch.Tensor, weight_factor: float, bias_factor: float):
"""
:param weight: Weight matrix parameter of shape [in_features, out_features].
Compared to torch.nn.Linear, this is transposed.
:param bias: Bias parameter of shape [out_features]
:param weight_factor: Factor sigma_w by which the weight matrix is multiplied in the forward pass.
        :param bias_factor: Factor sigma_b by which the bias is multiplied in the forward pass.
"""
super().__init__(self, in_features=weight.shape[0], out_features=weight.shape[1],
weight_factor=weight_factor, bias_factor=bias_factor)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias)
self.weight_factor = weight_factor
self.bias_factor = bias_factor
def forward(self, x: torch.Tensor):
return self.weight_factor * x.matmul(self.weight) + self.bias_factor * self.bias
| 44.207071 | 118 | 0.682052 | 7,733 | 0.883469 | 0 | 0 | 0 | 0 | 0 | 0 | 4,250 | 0.485548 |
de759ba42ef02e88463fee41b02959bd0f0ddd2c | 35,389 | py | Python | pinsey/gui/MainWindow.py | RailKill/Pinsey | 72a283e6c5683b27918b511d80e45c3af4e67539 | [
"MIT"
]
| 3 | 2021-02-01T06:47:06.000Z | 2022-01-09T05:54:35.000Z | pinsey/gui/MainWindow.py | RailKill/Pinsey | 72a283e6c5683b27918b511d80e45c3af4e67539 | [
"MIT"
]
| 4 | 2019-10-23T09:52:36.000Z | 2022-03-11T23:17:23.000Z | pinsey/gui/MainWindow.py | RailKill/Pinsey | 72a283e6c5683b27918b511d80e45c3af4e67539 | [
"MIT"
]
| null | null | null | from configparser import ConfigParser
from configparser import DuplicateSectionError
from PyQt5 import QtCore, QtGui, QtWidgets
from pinsey import Constants
from pinsey.Utils import clickable, center, picture_grid, horizontal_line, resolve_message_sender, name_set, windows
from pinsey.gui.MessageWindow import MessageWindow
from pinsey.gui.component.BrowseListing import BrowseListing
from pinsey.gui.component.DislikesListing import DislikesListing
from pinsey.gui.component.LikesListing import LikesListing
from pinsey.handler.DecisionHandler import DecisionHandler
from pinsey.handler.LikesHandler import LikesHandler
from pinsey.thread.DownloadPhotosThread import DownloadPhotosThread
from pinsey.thread.LikesBotThread import LikesBotThread
from pinsey.thread.SessionThread import SessionThread
from pinsey.thread.MatchesThread import MatchesThread
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, app):
super(MainWindow, self).__init__()
# Initialize Window GUI controls.
self.label_status = QtWidgets.QLabel()
self.txt_location = QtWidgets.QLineEdit()
self.txt_auth = QtWidgets.QLineEdit()
self.txt_id = QtWidgets.QLineEdit()
self.txt_img_threshold = QtWidgets.QLineEdit()
self.txt_face_threshold = QtWidgets.QLineEdit()
self.txt_bio_threshold = QtWidgets.QLineEdit()
self.txt_pickup_threshold = QtWidgets.QLineEdit()
self.chk_decision = QtWidgets.QCheckBox('Decision-Making', self)
self.chk_exclude_friends = QtWidgets.QCheckBox('Exclude Facebook Friends', self)
self.chk_exclude_mutual = QtWidgets.QCheckBox('Exclude Mutual Friends', self)
self.chk_autochat = QtWidgets.QCheckBox('Autonomous Chatting', self)
self.chk_respond_list = QtWidgets.QCheckBox('Respond from List', self)
self.chk_respond_bot = QtWidgets.QCheckBox('Respond using Cleverbot', self)
self.profile_area = QtWidgets.QScrollArea()
self.matches_area = QtWidgets.QScrollArea()
self.chk_refresh = QtWidgets.QCheckBox('Refresh every: ')
self.txt_refresh_interval = QtWidgets.QLineEdit()
# Initialize system tray icon and menu.
tray_menu = QtWidgets.QMenu()
restore_action = tray_menu.addAction('Restore')
restore_action.triggered.connect(self.restore_window)
close_action = tray_menu.addAction('Exit')
close_action.triggered.connect(self.close)
self.tray_icon = QtWidgets.QSystemTrayIcon(QtGui.QIcon(Constants.ICON_FILEPATH))
self.tray_icon.activated.connect(self.tray_event)
self.tray_icon.setContextMenu(tray_menu)
self.tray_icon.show()
# Initialize application variables.
self.app = app
self.session = None
self.friend_list = []
self.download_thread = []
self.matches_thread = None
self.session_thread = None
self.likes_bot = None
self.likes_handler = LikesHandler()
self.filter_list = ['Date Added', 'Name', 'Age', 'Distance KM']
self.likeslisting = LikesListing('Reload', self.likes_handler, self.filter_list)
self.dislikeslisting = DislikesListing('Reload', self.likes_handler, self.filter_list)
self.browselisting = BrowseListing('Refresh', self.likes_handler, self.filter_list[1:])
self.setWindowTitle(Constants.APP_NAME)
self.setWindowIcon(QtGui.QIcon(Constants.ICON_FILEPATH))
self.setMinimumWidth(500)
self.resize(800, 480)
center(self)
# Run startup methods to setup the GUI.
self.read_settings()
self.setup_tabs()
self.connect_tinder() # Start Tinder session.
self.decision_change()
'''
+=======================================+
| GUI METHODS: Resizing, UI setup, etc. |
+=======================================+
'''
def setup_tabs(self):
tabs = QtWidgets.QTabWidget()
# Resize width and height
tabs.resize(250, 150)
# Add tabs
tabs.addTab(self.setup_settings(), 'Settings')
tabs.addTab(self.setup_profile(), 'Profile')
tabs.addTab(self.likeslisting, 'Liked')
tabs.addTab(self.dislikeslisting, 'Disliked')
tabs.addTab(self.browselisting, 'Browse')
tabs.addTab(self.setup_matches(), 'Matches')
# Set main window layout
self.setCentralWidget(tabs)
self.show()
def setup_settings(self):
# Set layout of settings tab
tab_settings = QtWidgets.QWidget()
label_location = QtWidgets.QLabel('Location:')
label_auth = QtWidgets.QLabel('Facebook Auth Token:')
label_id = QtWidgets.QLabel('Facebook Profile ID:')
label_img_threshold = QtWidgets.QLabel('Minimum Number of Good Images:')
label_face_threshold = QtWidgets.QLabel('Faces Found Threshold:')
label_bio_threshold = QtWidgets.QLabel('Biography Minimum Length:')
label_friend_exclusion = QtWidgets.QLabel('Friend Exclusion: ')
label_pickup_threshold = QtWidgets.QLabel('Pick-up after X Messages:')
btn_save = QtWidgets.QPushButton('Save Settings', self)
btn_save.setFixedHeight(50)
btn_save.clicked.connect(self.save_settings)
btn_start = QtWidgets.QPushButton('Start Pinning', self)
btn_start.clicked.connect(lambda: self.start_botting(btn_start))
btn_start.setFixedHeight(50)
exclusion_widget = QtWidgets.QWidget()
exclusion_widget.setLayout(QtWidgets.QHBoxLayout())
exclusion_widget.layout().addWidget(self.chk_exclude_friends)
exclusion_widget.layout().addWidget(self.chk_exclude_mutual)
exclusion_widget.layout().addStretch()
self.label_status.setAlignment(QtCore.Qt.AlignCenter)
self.txt_id.setEchoMode(QtWidgets.QLineEdit.Password)
self.txt_auth.setEchoMode(QtWidgets.QLineEdit.Password)
self.txt_img_threshold.setValidator(QtGui.QIntValidator())
self.txt_face_threshold.setValidator(QtGui.QIntValidator())
self.txt_bio_threshold.setValidator(QtGui.QIntValidator())
self.txt_pickup_threshold.setValidator(QtGui.QIntValidator())
self.chk_decision.setStyleSheet(Constants.CSS_FONT_CATEGORY)
self.chk_decision.stateChanged.connect(self.decision_change)
self.chk_autochat.setStyleSheet(Constants.CSS_FONT_CATEGORY)
grid = QtWidgets.QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.label_status, 1, 0, 1, 2)
grid.addWidget(label_location, 2, 0)
grid.addWidget(self.txt_location, 2, 1)
grid.addWidget(label_auth, 3, 0)
grid.addWidget(self.txt_auth, 3, 1)
grid.addWidget(label_id, 4, 0)
grid.addWidget(self.txt_id, 4, 1)
grid.addWidget(horizontal_line(), 5, 0, 1, 2)
grid.addWidget(self.chk_decision, 6, 0, 1, 2)
grid.addWidget(label_img_threshold, 7, 0)
grid.addWidget(self.txt_img_threshold, 7, 1)
grid.addWidget(label_face_threshold, 8, 0)
grid.addWidget(self.txt_face_threshold, 8, 1)
grid.addWidget(label_bio_threshold, 9, 0)
grid.addWidget(self.txt_bio_threshold, 9, 1)
grid.addWidget(label_friend_exclusion, 10, 0)
grid.addWidget(exclusion_widget, 10, 1)
grid.addWidget(horizontal_line(), 11, 0, 1, 2)
grid.addWidget(self.chk_autochat, 12, 0, 1, 2)
grid.addWidget(self.chk_respond_list, 13, 0, 1, 2)
grid.addWidget(self.chk_respond_bot, 14, 0, 1, 2)
grid.addWidget(label_pickup_threshold, 15, 0)
grid.addWidget(self.txt_pickup_threshold, 15, 1)
grid.addWidget(horizontal_line(), 16, 0, 1, 2)
grid.addWidget(btn_save, 17, 0)
grid.addWidget(btn_start, 17, 1)
tab_settings.setLayout(grid)
return tab_settings
def setup_profile(self):
tab_profile = QtWidgets.QWidget()
tab_profile.setLayout(QtWidgets.QVBoxLayout())
tab_profile.layout().addWidget(self.profile_area)
return tab_profile
def setup_matches(self):
tab_matches = QtWidgets.QWidget()
tab_matches.setLayout(QtWidgets.QVBoxLayout())
match_refresh_widget = QtWidgets.QWidget()
match_refresh_widget.setLayout(QtWidgets.QHBoxLayout())
self.txt_refresh_interval.setValidator(QtGui.QIntValidator(10, 300))
self.txt_refresh_interval.setText("60") # Default 60 second refresh interval
lbl_refresh_unit = QtWidgets.QLabel('seconds')
match_refresh_widget.layout().addWidget(self.chk_refresh)
match_refresh_widget.layout().addWidget(self.txt_refresh_interval)
match_refresh_widget.layout().addWidget(lbl_refresh_unit)
match_refresh_widget.layout().addStretch()
btn_refresh = QtWidgets.QPushButton('Refresh', self)
btn_refresh.clicked.connect(self.load_matches)
match_refresh_widget.layout().addWidget(btn_refresh)
tab_matches.layout().addWidget(match_refresh_widget)
tab_matches.layout().addWidget(self.matches_area)
return tab_matches
def load_profile(self):
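        """Download the user's profile photos in the background, then build the editable profile form."""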
def populate(data, thread):
self.download_thread.remove(thread)
profile_widget = QtWidgets.QWidget()
profil = self.session.profile
# 1. Profile picture grid.
number_of_photos = Constants.NUMBER_OF_PHOTOS
pp_layout = picture_grid(data, Constants.THUMBNAIL_SIZE, number_of_photos)
# 2. Name and gender of user.
label_name = name_set(profil.name, profil.gender, 0, profil.banned)
pp_layout.addWidget(label_name, number_of_photos, 0, 1, number_of_photos)
# 3. Biography.
def bio_truncate():
# Tinder counts emojis as 2 characters. Find and manipulate them so the character count is correct.
emoji_raw = Constants.EMOJI_PATTERN.findall(txt_bio.toPlainText())
number_of_emojis = 0
for emoji in emoji_raw:
number_of_emojis += len(emoji)
                # In UTF-8 an emoji encodes to 4 bytes but Tinder counts it as
                # 2 characters, so subtract 2 bytes per emoji from the encoded length.
                bio_true_length = len(txt_bio.toPlainText().encode()) - (number_of_emojis * 2)
label_chars.setText(str(biography_max_length - len(txt_bio.toPlainText().encode()) +
(number_of_emojis * 2)) + remaining_chars)
if bio_true_length > biography_max_length:
txt_bio.setPlainText(txt_bio.toPlainText()[:biography_max_length - number_of_emojis])
txt_bio.moveCursor(QtGui.QTextCursor.End)
biography_max_length = 500
label_bio = QtWidgets.QLabel('Biography: ')
remaining_chars = ' characters remaining'
label_chars = QtWidgets.QLabel(str(biography_max_length) + remaining_chars)
bio_widget = QtWidgets.QWidget()
bio_widget.setLayout(QtWidgets.QHBoxLayout())
bio_widget.layout().addWidget(label_bio)
bio_widget.layout().addStretch()
bio_widget.layout().addWidget(label_chars)
pp_layout.addWidget(bio_widget, number_of_photos + 1, 0, 1, number_of_photos)
# Profile may have no biography yet.
try:
bio_text = profil.bio
except KeyError:
bio_text = ''
txt_bio = QtWidgets.QPlainTextEdit(bio_text)
txt_bio.setFont(QtGui.QFont('Segoe UI Symbol', 16))
txt_bio.textChanged.connect(bio_truncate)
bio_truncate()
pp_layout.addWidget(txt_bio, number_of_photos + 2, 0, 1, number_of_photos)
# Form layout setup.
form_layout = QtWidgets.QFormLayout()
# form_layout.setLabelAlignment(QtCore.Qt.AlignRight)
form_widget = QtWidgets.QWidget()
form_widget.setLayout(form_layout)
pp_layout.addWidget(form_widget, number_of_photos + 3, 0, 1, number_of_photos)
form_label_style = 'margin-top: 0.3em'
# 4. Gender
radio_gender_male = QtWidgets.QRadioButton('Male')
radio_gender_female = QtWidgets.QRadioButton('Female')
if profil.gender == 'male':
radio_gender_male.setChecked(True)
else:
radio_gender_female.setChecked(True)
gender_widget = QtWidgets.QWidget()
gender_widget.setLayout(QtWidgets.QHBoxLayout())
gender_widget.layout().addWidget(radio_gender_male)
gender_widget.layout().addWidget(radio_gender_female)
label_gender = QtWidgets.QLabel('Gender: ')
label_gender.setStyleSheet(form_label_style)
form_layout.addRow(label_gender, gender_widget)
# 5. Discoverable?
label_discoverable = QtWidgets.QLabel('Discoverable: ')
chk_discoverable = QtWidgets.QCheckBox()
chk_discoverable.setChecked(profil.discoverable)
form_layout.addRow(label_discoverable, chk_discoverable)
# 6. Maximum distance filter.
label_distance = QtWidgets.QLabel('Maximum Distance: ')
label_distance.setStyleSheet(form_label_style)
slider_distance = QtWidgets.QSlider(QtCore.Qt.Horizontal)
slider_distance.setRange(1, 100)
slider_distance.setSingleStep(1)
slider_distance.setValue(profil.distance_filter)
slider_distance.valueChanged.connect(
lambda: (label_distance_value.setText(str(round(slider_distance.value() * 1.6)) + 'km')))
label_distance_value = QtWidgets.QLabel(str(round(slider_distance.value() * 1.6)) + 'km')
distance_widget = QtWidgets.QWidget()
distance_widget.setLayout(QtWidgets.QHBoxLayout())
distance_widget.layout().addWidget(slider_distance)
distance_widget.layout().addWidget(label_distance_value)
form_layout.addRow(label_distance, distance_widget)
# 7. Age filter.
def max_slider_handle():
label_age_max.setText('55+' if slider_age_max.value() > 54 else str(slider_age_max.value()))
slider_age_min.setRange(18, 46 if slider_age_max.value() > 46 else slider_age_max.value())
def min_slider_handle():
label_age_min.setText(str(slider_age_min.value()))
slider_age_max.setRange(slider_age_min.value(), 55)
label_age = QtWidgets.QLabel('Age: ')
label_age.setStyleSheet(form_label_style)
label_to = QtWidgets.QLabel(' to ')
slider_age_max = QtWidgets.QSlider(QtCore.Qt.Horizontal)
slider_age_max.setRange(profil.age_filter_min, 55)
slider_age_max.setSingleStep(1)
slider_age_max.setValue(55 if profil.age_filter_max > 54 else profil.age_filter_max)
slider_age_max.valueChanged.connect(max_slider_handle)
label_age_max = QtWidgets.QLabel('55+' if slider_age_max.value() > 54 else str(slider_age_max.value()))
slider_age_min = QtWidgets.QSlider(QtCore.Qt.Horizontal)
slider_age_min.setRange(18, 46 if profil.age_filter_max > 46 else profil.age_filter_max)
slider_age_min.setSingleStep(1)
slider_age_min.setValue(profil.age_filter_min)
slider_age_min.valueChanged.connect(min_slider_handle)
label_age_min = QtWidgets.QLabel(str(slider_age_min.value()))
age_widget = QtWidgets.QWidget()
age_widget.setLayout(QtWidgets.QHBoxLayout())
age_widget.layout().addWidget(label_age_min)
age_widget.layout().addWidget(slider_age_min)
age_widget.layout().addWidget(label_to)
age_widget.layout().addWidget(slider_age_max)
age_widget.layout().addWidget(label_age_max)
form_layout.addRow(label_age, age_widget)
# 8. Interested in which gender?
label_interested = QtWidgets.QLabel('Interested in: ')
label_interested.setStyleSheet(form_label_style)
chk_interested_male = QtWidgets.QCheckBox('Male')
chk_interested_male.setChecked('male' in list(profil.interested_in))
chk_interested_female = QtWidgets.QCheckBox('Female')
chk_interested_female.setChecked('female' in list(profil.interested_in))
interested_widget = QtWidgets.QWidget()
interested_widget.setLayout(QtWidgets.QHBoxLayout())
interested_widget.layout().addWidget(chk_interested_male)
interested_widget.layout().addWidget(chk_interested_female)
form_layout.addRow(label_interested, interested_widget)
# 9. Save button.
def save_profile():
# Must have an interested gender before proceeding.
if not chk_interested_male.isChecked() and not chk_interested_female.isChecked():
QtWidgets.QMessageBox().critical(self, 'Profile Error',
'You must be interested in at least one gender.')
return
# Set profile values.
try:
profile.bio = txt_bio.toPlainText()
except KeyError:
self.session.update_profile({
"bio": txt_bio.toPlainText()
})
profile.discoverable = chk_discoverable.isChecked()
profile.distance_filter = slider_distance.value()
profile.age_filter_min = slider_age_min.value()
profile.age_filter_max = 1000 if slider_age_max.value() > 54 else slider_age_max.value()
# Workaround due to pynder 0.0.13 not yet supporting "gender" and "interested in" changes.
gender_filter = 2
profil.interested = []
profil.sex = (0, 'male') if radio_gender_male.isChecked() else (1, 'female')
if chk_interested_male.isChecked():
gender_filter -= 2
profil.interested.append(0)
if chk_interested_female.isChecked():
gender_filter -= 1
profil.interested.append(1)
self.session.update_profile({
"interested_in": profil.interested,
"gender_filter": gender_filter,
"gender": profil.sex[0]
# "squads_discoverable": False
})
QtWidgets.QMessageBox.information(self, 'Profile Saved', 'Profile information has been updated.')
reload_profile()
def reload_profile():
# Refresh GUI.
label_name.setText(name_set(profil.name, profil.sex[1], 0, profil.banned).text())
try:
txt_bio.setPlainText(profil.bio)
except KeyError:
txt_bio.setPlainText('')
chk_discoverable.setChecked(profil.discoverable)
slider_distance.setValue(profil.distance_filter)
label_distance_value.setText(str(round(slider_distance.value() * 1.6)) + 'km')
slider_age_max.setRange(profil.age_filter_min, 55)
slider_age_max.setValue(55 if profil.age_filter_max > 54 else profil.age_filter_max)
label_age_max.setText('55+' if slider_age_max.value() > 54 else str(slider_age_max.value()))
slider_age_min.setRange(18, 46 if profil.age_filter_max > 46 else profil.age_filter_max)
slider_age_min.setValue(profil.age_filter_min)
label_age_min.setText(str(slider_age_min.value()))
chk_interested_male.setChecked(0 in list(profil.interested)) # interested_in workaround.
chk_interested_female.setChecked(1 in list(profil.interested)) # interested_in workaround.
btn_save_profile = QtWidgets.QPushButton('Update Profile')
btn_save_profile.setFixedHeight(50)
btn_save_profile.clicked.connect(save_profile)
pp_layout.addWidget(btn_save_profile, number_of_photos + 4, 0, 1, number_of_photos)
profile_widget.setLayout(pp_layout)
self.profile_area.setWidget(profile_widget)
self.profile_area.setAlignment(QtCore.Qt.AlignCenter)
# Download profile images and then populate the profile GUI.
profile = self.session.profile
download_thread = DownloadPhotosThread(profile.photos)
download_thread.data_downloaded.connect(lambda data, thread=download_thread: populate(data, thread))
self.download_thread.append(download_thread)
download_thread.start()
def load_matches(self, interval=0):
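        """Fetch the match list on a background MatchesThread and render each match as a clickable card."""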
def load_thumbnail(photo, label, thread):
self.download_thread.remove(thread)
thumbnail = QtGui.QImage()
thumbnail.loadFromData(photo[0].data)
label.setPixmap(QtGui.QPixmap(thumbnail))
def populate_matches(data):
matches = data
#updates = list(self.session.updates())
#updates_balloon_message = ''
matches_list = QtWidgets.QWidget()
matches_list.setLayout(QtWidgets.QVBoxLayout())
for match in matches:
"""
# Show notification if it is in updates.
for update in updates:
if match.user.id == update.user.id:
updates_balloon_message += update.user.name
if not update.messages:
updates_balloon_message += ' (NEW) '
updates_balloon_message += '\n'
"""
# Load thumbnail of match.
label_thumbnail = QtWidgets.QLabel()
label_thumbnail.setFixedWidth(Constants.THUMBNAIL_SIZE / 2)
label_thumbnail.setFixedHeight(Constants.THUMBNAIL_SIZE / 2)
label_thumbnail.setScaledContents(True)
download_thread = DownloadPhotosThread([next(match.user.photos)])
download_thread.data_downloaded.connect(
lambda data, l=label_thumbnail, t=download_thread: load_thumbnail(data, l, t)
)
self.download_thread.append(download_thread)
download_thread.start()
# Create name set.
label_name = name_set(match.user.name, match.user.gender, match.user.age)
# Create match date label.
label_match_date = QtWidgets.QLabel('<b>Match Date: </b>' +
match.match_date.strftime("%B %d, %Y at %I:%M%p"))
# Create last message text.
if match.messages:
last_message = match.messages[len(match.messages) - 1]
last_poster = resolve_message_sender(last_message, match)
display_message = last_poster + last_message.body
else:
display_message = 'Conversation not started.'
label_last_message = QtWidgets.QLabel(display_message)
# Create notification text.
#label_notification = QtWidgets.QLabel('NEW UPDATE!' if match in updates else '')
#label_notification.setStyleSheet(Constants.CSS_FONT_NOTIFICATION)
# Create a card for each match.
card_widget = QtWidgets.QWidget()
card_layout = QtWidgets.QGridLayout()
card_layout.setSpacing(10)
card_layout.addWidget(label_thumbnail, 1, 0, 5, 1)
card_layout.addWidget(label_name, 1, 1)
card_layout.addWidget(label_match_date, 2, 1)
card_layout.addWidget(label_last_message, 3, 1)
#card_layout.addWidget(label_notification, 4, 1)
card_widget.setLayout(card_layout)
clickable(card_widget).connect(lambda m=match: (
windows.append(MessageWindow(m, self.friend_list))
))
matches_list.layout().addWidget(card_widget)
# Check if any MessageWindow for this match. If there is, update the messages area.
for window in windows:
if isinstance(window, MessageWindow) and match == window.match:
window.load_messages(match.messages)
self.matches_area.setWidget(matches_list)
self.matches_area.setAlignment(QtCore.Qt.AlignCenter)
"""
if updates_balloon_message:
self.tray_icon.showMessage('Pinsey: New Update!', updates_balloon_message)
"""
if self.chk_refresh.isChecked():
self.load_matches(int(self.txt_refresh_interval.text()))
self.matches_thread = MatchesThread(self.session, interval)
self.matches_thread.data_downloaded.connect(populate_matches)
self.matches_thread.start()
'''
+================================================================+
| HANDLING METHODS: Events, background, saving preferences, etc. |
+================================================================+
'''
def closeEvent(self, event):
for window in windows:
window.close() # Close all windows associated with this window.
super(MainWindow, self).closeEvent(event)
self.app.exit()
def changeEvent(self, event):
if event.type() == QtCore.QEvent.WindowStateChange:
# TODO: Check if windowState = 3, happens when minimize on fullscreen window.
if self.windowState() == QtCore.Qt.WindowMinimized:
for window in windows:
window.setWindowFlags(self.windowFlags() | QtCore.Qt.Tool) # Required to properly hide window.
window.hide() # Hides all windows associated with this window.
self.setWindowFlags(self.windowFlags() | QtCore.Qt.Tool) # Required to properly hide window.
self.hide()
def tray_event(self, reason):
if reason == QtWidgets.QSystemTrayIcon.DoubleClick:
self.restore_window()
def restore_window(self):
if self.isHidden():
for window in windows:
window.setWindowFlags(self.windowFlags() & ~QtCore.Qt.Tool) # Required to properly show window.
window.showNormal()
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.Tool) # Required to properly show window.
self.showNormal()
def connect_tinder(self):
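        """Start a SessionThread with the saved credentials and update the Tinder status label accordingly."""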
def session_connected(data):
if data.session:
if data.exception:
QtWidgets.QMessageBox.warning(self, 'Warning', str(data.exception))
self.session = data.session
self.friend_list = list(self.session.get_fb_friends())
self.label_status.setText(status_text + '<span style="color:green;font-weight:bold">Online</span>')
self.load_profile() # Automatically load profile after session is ready.
self.load_matches() # Automatically load matches after session is ready.
# Update user listing.
self.likeslisting.friend_list = self.friend_list
self.likeslisting.refresh()
self.dislikeslisting.friend_list = self.friend_list
self.dislikeslisting.refresh()
self.browselisting.friend_list = self.friend_list
self.browselisting.session = self.session
else:
self.session = None
self.label_status.setText(status_text + '<span style="color:red;font-weight:bold">Offline</span>')
QtWidgets.QMessageBox.critical(self, 'Error', str(data.exception))
status_text = 'Tinder Status: '
if self.txt_location.text() and self.txt_id.text() and self.txt_auth.text():
self.session_thread = SessionThread(self.txt_id.text(), self.txt_auth.text(), self.txt_location.text())
self.session_thread.data_downloaded.connect(session_connected)
self.session_thread.start()
self.label_status.setText(status_text + '<span style="color:orange;font-weight:bold">Connecting...</span>')
else:
self.session = None
self.label_status.setText(status_text + '<span style="color:red;font-weight:bold">Offline</span>')
QtWidgets.QMessageBox.information(self, 'Connect to Tinder', 'In order to start using Pinsey, you will need '
'to key in your rough location (similar to how '
'you would search on Google Maps), Facebook '
'authentication token from Tinder, and Facebook '
'profile ID. Then, click Save Settings and it '
'will start connecting to Tinder.\n\n'
'If you are unsure how to obtain some of the '
'values required, please visit: '
'<a href="http://railkill.com/pinsey">'
'http://railkill.com/pinsey</a>')
def decision_change(self):
"""Handles decision-making checkbox state change."""
if self.chk_decision.isChecked():
self.txt_img_threshold.setDisabled(False)
self.txt_face_threshold.setDisabled(False)
self.txt_bio_threshold.setDisabled(False)
self.chk_exclude_friends.setDisabled(False)
self.chk_exclude_mutual.setDisabled(False)
else:
self.txt_img_threshold.setDisabled(True)
self.txt_face_threshold.setDisabled(True)
self.txt_bio_threshold.setDisabled(True)
self.chk_exclude_friends.setDisabled(True)
self.chk_exclude_mutual.setDisabled(True)
def read_settings(self):
"""Reads saved user preferences and loads it into the application. Otherwise, load defaults."""
config = ConfigParser()
if config.read(Constants.CONFIG_DATA_DIR + 'config.ini'):
self.txt_location.setText(config.get('Authentication', 'location'))
self.txt_auth.setText(config.get('Authentication', 'auth'))
            self.txt_id.setText(config.get('Authentication', 'id'))
self.chk_decision.setChecked(config.getboolean('Decision', 'enabled'))
self.txt_img_threshold.setText(config.get('Decision', 'img_threshold'))
self.txt_face_threshold.setText(config.get('Decision', 'face_threshold'))
self.txt_bio_threshold.setText(config.get('Decision', 'bio_threshold'))
self.chk_exclude_friends.setChecked(config.getboolean('Decision', 'exclude_friends'))
self.chk_exclude_mutual.setChecked(config.getboolean('Decision', 'exclude_mutual'))
self.chk_autochat.setChecked(config.getboolean('Chat', 'enabled'))
self.chk_respond_list.setChecked(config.getboolean('Chat', 'respond_list'))
self.chk_respond_bot.setChecked(config.getboolean('Chat', 'respond_bot'))
self.txt_pickup_threshold.setText(config.get('Chat', 'pickup_threshold'))
def save_settings(self):
config = ConfigParser()
config_path = Constants.CONFIG_DATA_DIR + 'config.ini'
config.read(config_path)
try:
config.add_section('Authentication')
except DuplicateSectionError:
pass
config.set('Authentication', 'location', self.txt_location.text())
config.set('Authentication', 'auth', self.txt_auth.text())
config.set('Authentication', 'id', self.txt_id.text())
try:
config.add_section('Decision')
except DuplicateSectionError:
pass
config.set('Decision', 'enabled', str(self.chk_decision.isChecked()))
config.set('Decision', 'img_threshold', self.txt_img_threshold.text())
config.set('Decision', 'face_threshold', self.txt_face_threshold.text())
# TODO: insert filepath of cascade, for user customizability
config.set('Decision', 'bio_threshold', self.txt_bio_threshold.text())
config.set('Decision', 'exclude_friends', str(self.chk_exclude_friends.isChecked()))
config.set('Decision', 'exclude_mutual', str(self.chk_exclude_mutual.isChecked()))
try:
config.add_section('Chat')
except DuplicateSectionError:
pass
config.set('Chat', 'enabled', str(self.chk_autochat.isChecked()))
config.set('Chat', 'respond_list', str(self.chk_respond_list.isChecked()))
# TODO: insert filepath of response list, for user customizability
config.set('Chat', 'respond_bot', str(self.chk_respond_bot.isChecked()))
config.set('Chat', 'pickup_threshold', self.txt_pickup_threshold.text())
with open(config_path, 'w') as f:
config.write(f)
QtWidgets.QMessageBox.information(self, 'Information', 'Settings saved.')
self.connect_tinder()
def start_botting(self, button):
if self.session:
decision_handler = None
if not self.txt_img_threshold.text():
self.txt_img_threshold.setText(str(Constants.THRESHOLD_IMG_DEFAULT))
if not self.txt_face_threshold.text():
self.txt_face_threshold.setText(str(Constants.THRESHOLD_FACE_DEFAULT))
if not self.txt_bio_threshold.text():
self.txt_bio_threshold.setText(str(Constants.THRESHOLD_BIO_DEFAULT))
if self.chk_decision.isChecked():
decision_handler = DecisionHandler(
int(self.txt_img_threshold.text()),
int(self.txt_face_threshold.text()),
int(self.txt_bio_threshold.text()),
self.chk_exclude_friends.isChecked(),
self.chk_exclude_mutual.isChecked()
)
self.likes_bot = LikesBotThread(self.session, self.likes_handler, decision_handler)
self.likes_bot.start()
if self.chk_autochat.isChecked():
self.matches_thread.start_bot()
button.setText('Stop Pinning')
button.clicked.disconnect()
button.clicked.connect(lambda: self.stop_botting(button))
else:
QtWidgets.QMessageBox.critical(self, 'Unable to Start Pinning', 'You are not connected to Tinder yet.')
def stop_botting(self, button):
self.likes_bot.stop()
self.matches_thread.stop_bot()
button.setText('Start Pinning')
button.clicked.disconnect()
button.clicked.connect(lambda: self.start_botting(button))
| 50.700573 | 121 | 0.628755 | 34,531 | 0.975755 | 0 | 0 | 0 | 0 | 0 | 0 | 5,733 | 0.161999 |
de7659b57f254205c0bc591d8af1e1375127f4d8 | 336 | py | Python | Chat app/Check IP.py | ArturWagnerBusiness/Projects-2018-2020 | 37a217dc325f3ba42d8a7a1a743e5b6f8fab5df4 | [
"MIT"
]
| null | null | null | Chat app/Check IP.py | ArturWagnerBusiness/Projects-2018-2020 | 37a217dc325f3ba42d8a7a1a743e5b6f8fab5df4 | [
"MIT"
]
| null | null | null | Chat app/Check IP.py | ArturWagnerBusiness/Projects-2018-2020 | 37a217dc325f3ba42d8a7a1a743e5b6f8fab5df4 | [
"MIT"
]
| null | null | null | from os import system as c
i = "ipconfig"
input(c(i))
# import win32clipboard
# from time import sleep as wait
# set clipboard data
# while True:
# win32clipboard.OpenClipboard()
# win32clipboard.EmptyClipboard()
# win32clipboard.SetClipboardText('Clipboard Blocked!')
# win32clipboard.CloseClipboard()
# wait(0.1)
| 24 | 59 | 0.720238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.839286 |
de766a3b6f5c4477c098e9f336005c2394afbbc1 | 1,506 | py | Python | app/api/api_v1/tasks/emails.py | cdlaimin/fastapi | 4acf1a1da4a1eedd81a3bdf6256661c2464928b9 | [
"BSD-3-Clause"
]
| null | null | null | app/api/api_v1/tasks/emails.py | cdlaimin/fastapi | 4acf1a1da4a1eedd81a3bdf6256661c2464928b9 | [
"BSD-3-Clause"
]
| null | null | null | app/api/api_v1/tasks/emails.py | cdlaimin/fastapi | 4acf1a1da4a1eedd81a3bdf6256661c2464928b9 | [
"BSD-3-Clause"
]
| null | null | null | # -*- encoding: utf-8 -*-
"""
@File : emails.py
@Contact : [email protected]
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2020/9/27 10:22 PM wuxiaoqiang 1.0 None
"""
import asyncio
from email.mime.text import MIMEText
import aiosmtplib
from app.core.celery_app import celery_app
from app.core.config import settings
async def sendemail(to_addr: str, code: str):
title = '<html><body><h3>亲爱的<a data-auto-link="1" href="mailto:%s" target="_blank">%s</a>,您好:</h3>' % (
to_addr, to_addr)
    body = f'<p>请点击以下链接进行激活登录 <a href="http://127.0.0.1:8000/api/v1/users/activated?code={code}">http://127.0.0.1:8000/api/v1/users/activated?code={code}</a></p>'
tail = '如果您并不是此网站用户,可能是其他用户误输入了您的邮箱地址。</body></html>'
html = title + body + tail
msg = MIMEText(html, 'html', 'utf-8')
msg['From'] = settings.EMAIL_USER
msg['To'] = to_addr
msg['Subject'] = "欢迎注册此网站"
try:
async with aiosmtplib.SMTP(hostname=settings.EMAIL_HOSTNAEM, port=settings.EMAIL_PORT, use_tls=True,
username=settings.EMAIL_USER, password=settings.EMAIL_PASSWORD) as smtp:
await smtp.send_message(msg)
except aiosmtplib.SMTPException as e:
print(e)
raise e
@celery_app.task(acks_late=True, autoretry_for=(Exception,), retry_kwargs={'max_retries': 3})
def decoratorEmail(To: str, code: str = "123456"):
asyncio.run(sendemail(To, code))
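# Dispatch sketch (hypothetical address/code; assumes a running Celery worker):
#     decoratorEmail.delay("user@example.com", code="654321")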
| 34.227273 | 108 | 0.625498 | 0 | 0 | 0 | 0 | 181 | 0.111728 | 966 | 0.596296 | 719 | 0.443827 |
de76f5e1a1407299a65c28e63772cca898458059 | 13,487 | py | Python | lightwood/encoders/text/distilbert.py | ritwik12/lightwood | 7975688355fba8b0f8349dd55a1b6cb625c3efd0 | [
"MIT"
]
| null | null | null | lightwood/encoders/text/distilbert.py | ritwik12/lightwood | 7975688355fba8b0f8349dd55a1b6cb625c3efd0 | [
"MIT"
]
| null | null | null | lightwood/encoders/text/distilbert.py | ritwik12/lightwood | 7975688355fba8b0f8349dd55a1b6cb625c3efd0 | [
"MIT"
]
| null | null | null | import time
import copy
import random
import logging
from functools import partial
import numpy as np
import torch
from torch.utils.data import DataLoader
from transformers import DistilBertModel, DistilBertForSequenceClassification, DistilBertTokenizer, AlbertModel, AlbertForSequenceClassification, DistilBertTokenizer, AlbertTokenizer, AdamW, get_linear_schedule_with_warmup
from lightwood.config.config import CONFIG
from lightwood.constants.lightwood import COLUMN_DATA_TYPES, ENCODER_AIM
from lightwood.mixers.helpers.default_net import DefaultNet
from lightwood.mixers.helpers.ranger import Ranger
from lightwood.mixers.helpers.shapes import *
from lightwood.mixers.helpers.transformer import Transformer
from lightwood.api.gym import Gym
class DistilBertEncoder:
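    # Encodes text with a DistilBERT/ALBERT backbone. Depending on the training
    # targets it fine-tunes a classifier head, trains a small feed-forward head
    # on top of the (eval-mode) backbone, or simply emits raw [CLS] embeddings.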
def __init__(self, is_target=False, aim=ENCODER_AIM.BALANCE):
self.name = 'Text Transformer Encoder'
self._tokenizer = None
self._model = None
self._pad_id = None
self._pytorch_wrapper = torch.FloatTensor
self._max_len = None
self._max_ele = None
self._prepared = False
self._model_type = None
self.desired_error = 0.01
self.max_training_time = CONFIG.MAX_ENCODER_TRAINING_TIME
self._head = None
# Possible: speed, balance, accuracy
self.aim = aim
if self.aim == ENCODER_AIM.SPEED:
# uses more memory, takes very long to train and outputs weird debugging statements to the command line, consider waiting until it gets better or try to investigate why this happens (changing the pretrained model doesn't seem to help)
self._classifier_model_class = AlbertForSequenceClassification
self._embeddings_model_class = AlbertModel
self._tokenizer_class = AlbertTokenizer
self._pretrained_model_name = 'albert-base-v2'
self._model_max_len = 768
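            # note: 768 mirrors the hidden size; these backbones attend over at most 512 positions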
if self.aim == ENCODER_AIM.BALANCE:
self._classifier_model_class = DistilBertForSequenceClassification
self._embeddings_model_class = DistilBertModel
self._tokenizer_class = DistilBertTokenizer
self._pretrained_model_name = 'distilbert-base-uncased'
self._model_max_len = 768
if self.aim == ENCODER_AIM.ACCURACY:
self._classifier_model_class = DistilBertForSequenceClassification
self._embeddings_model_class = DistilBertModel
self._tokenizer_class = DistilBertTokenizer
self._pretrained_model_name = 'distilbert-base-uncased'
self._model_max_len = 768
device_str = "cuda" if CONFIG.USE_CUDA else "cpu"
if CONFIG.USE_DEVICE is not None:
device_str = CONFIG.USE_DEVICE
self.device = torch.device(device_str)
def _train_callback(self, error, real_buff, predicted_buff):
logging.info(f'{self.name} reached a loss of {error} while training !')
@staticmethod
def categorical_train_function(model, data, gym, test=False):
input, real = data
input = input.to(gym.device)
labels = torch.tensor([torch.argmax(x) for x in real]).to(gym.device)
outputs = gym.model(input, labels=labels)
loss, logits = outputs[:2]
if not test:
loss.backward()
gym.optimizer.step()
gym.scheduler.step()
gym.optimizer.zero_grad()
return loss
@staticmethod
def numerical_train_function(model, data, gym, backbone, test=False):
input, real = data
input = input.to(gym.device)
real = real.to(gym.device)
embeddings = backbone(input)[0][:,0,:]
outputs = gym.model(embeddings)
loss = gym.loss_criterion(outputs, real)
if not test:
loss.backward()
gym.optimizer.step()
gym.scheduler.step()
gym.optimizer.zero_grad()
return loss
def prepare_encoder(self, priming_data, training_data=None):
if self._prepared:
raise Exception('You can only call "prepare_encoder" once for a given encoder.')
priming_data = [x if x is not None else '' for x in priming_data]
self._max_len = min(max([len(x) for x in priming_data]),self._model_max_len)
self._tokenizer = self._tokenizer_class.from_pretrained(self._pretrained_model_name)
self._pad_id = self._tokenizer.convert_tokens_to_ids([self._tokenizer.pad_token])[0]
# @TODO: Support multiple targets if they are all categorical or train for the categorical target if it's a mix (maybe ?)
# @TODO: Attach a language modeling head and/or use GPT2 and/or provide outputs better suited to a LM head (which will be the mixer) if the output if text
if training_data is not None and 'targets' in training_data and len(training_data['targets']) ==1 and training_data['targets'][0]['output_type'] == COLUMN_DATA_TYPES.CATEGORICAL and CONFIG.TRAIN_TO_PREDICT_TARGET:
self._model_type = 'classifier'
self._model = self._classifier_model_class.from_pretrained(self._pretrained_model_name, num_labels=len(set(training_data['targets'][0]['unencoded_output'])) + 1).to(self.device)
batch_size = 10
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self._model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.000001},
{'params': [p for n, p in self._model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=len(priming_data) * 15/20)
gym = Gym(model=self._model, optimizer=optimizer, scheduler=scheduler, loss_criterion=None, device=self.device, name=self.name)
input = [self._tokenizer.encode(x[:self._max_len], add_special_tokens=True) for x in priming_data]
tokenized_max_len = max([len(x) for x in input])
input = torch.tensor([x + [self._pad_id] * (tokenized_max_len - len(x)) for x in input])
real = training_data['targets'][0]['encoded_output']
merged_data = list(zip(input,real))
train_data_loader = DataLoader(merged_data[:int(len(merged_data)*9/10)], batch_size=batch_size, shuffle=True)
test_data_loader = DataLoader(merged_data[int(len(merged_data)*9/10):], batch_size=batch_size, shuffle=True)
best_model, error, training_time = gym.fit(train_data_loader=train_data_loader, test_data_loader=test_data_loader, desired_error=self.desired_error, max_time=self.max_training_time, callback=self._train_callback, eval_every_x_epochs=1, max_unimproving_models=10, custom_train_func=partial(self.categorical_train_function,test=False), custom_test_func=partial(self.categorical_train_function,test=True))
self._model = best_model.to(self.device)
        elif training_data is not None and 'targets' in training_data and all([x['output_type'] == COLUMN_DATA_TYPES.NUMERIC or x['output_type'] == COLUMN_DATA_TYPES.CATEGORICAL for x in training_data['targets']]) and CONFIG.TRAIN_TO_PREDICT_TARGET:
self.desired_error = 0.01
self._model_type = 'generic_target_predictor'
self._model = self._embeddings_model_class.from_pretrained(self._pretrained_model_name).to(self.device)
batch_size = 10
self._head = DefaultNet(ds=None, dynamic_parameters={},shape=funnel(768, sum( [ len(x['encoded_output'][0]) for x in training_data['targets'] ] ), depth=5), selfaware=False)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self._head.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.000001},
{'params': [p for n, p in self._head.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
#optimizer = Ranger(self._head.parameters(),lr=5e-5)
# num_training_steps is kind of an estimation
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=len(priming_data) * 15/20)
criterion = torch.nn.MSELoss()
gym = Gym(model=self._head, optimizer=optimizer, scheduler=scheduler, loss_criterion=criterion, device=self.device, name=self.name)
input = [self._tokenizer.encode(x[:self._max_len], add_special_tokens=True) for x in priming_data]
tokenized_max_len = max([len(x) for x in input])
input = torch.tensor([x + [self._pad_id] * (tokenized_max_len - len(x)) for x in input])
            real = [[] for _ in range(len(training_data['targets'][0]['encoded_output']))]
for i in range(len(real)):
for target in training_data['targets']:
real[i] = real[i] + target['encoded_output'][i]
real = torch.tensor(real)
merged_data = list(zip(input,real))
train_data_loader = DataLoader(merged_data[:int(len(merged_data)*9/10)], batch_size=batch_size, shuffle=True)
test_data_loader = DataLoader(merged_data[int(len(merged_data)*9/10):], batch_size=batch_size, shuffle=True)
self._model.eval()
best_model, error, training_time = gym.fit(train_data_loader=train_data_loader, test_data_loader=test_data_loader, desired_error=self.desired_error, max_time=self.max_training_time, callback=self._train_callback, eval_every_x_epochs=1, max_unimproving_models=10, custom_train_func=partial(self.numerical_train_function, backbone=self._model, test=False), custom_test_func=partial(self.numerical_train_function, backbone=self._model, test=True))
self._head = best_model.to(self.device)
else:
self._model_type = 'embeddings_generator'
self._model = self._embeddings_model_class.from_pretrained(self._pretrained_model_name).to(self.device)
self._prepared = True
def encode(self, column_data):
encoded_representation = []
self._model.eval()
with torch.no_grad():
for text in column_data:
if text is None:
text = ''
input = torch.tensor(self._tokenizer.encode(text[:self._max_len], add_special_tokens=True)).to(self.device).unsqueeze(0)
if self._model_type == 'generic_target_predictor':
embeddings = self._model(input)
output = self._head(embeddings[0][:,0,:])
encoded_representation.append(output.tolist()[0])
elif self._model_type == 'classifier':
output = self._model(input)
logits = output[0]
predicted_targets = logits[0].tolist()
encoded_representation.append(predicted_targets)
else:
output = self._model(input)
embeddings = output[0][:,0,:].cpu().numpy()[0]
encoded_representation.append(embeddings)
return self._pytorch_wrapper(encoded_representation)
def decode(self, encoded_values_tensor, max_length = 100):
# When test is an output... a bit trickier to handle this case, thinking on it
pass
if __name__ == "__main__":
# Generate some tests data
import random
from sklearn.metrics import r2_score
import logging
from lightwood.encoders.numeric import NumericEncoder
logging.basicConfig(level=logging.DEBUG)
random.seed(2)
priming_data = []
primting_target = []
test_data = []
test_target = []
for i in range(0,300):
if random.randint(1,5) == 3:
test_data.append(str(i) + ''.join(['n'] * i))
#test_data.append(str(i))
test_target.append(i)
#else:
priming_data.append(str(i) + ''.join(['n'] * i))
#priming_data.append(str(i))
primting_target.append(i)
output_1_encoder = NumericEncoder()
output_1_encoder.prepare_encoder(primting_target)
encoded_data_1 = output_1_encoder.encode(primting_target)
encoded_data_1 = encoded_data_1.tolist()
enc = DistilBertEncoder()
enc.prepare_encoder(priming_data, training_data={'targets': [{'output_type': COLUMN_DATA_TYPES.NUMERIC, 'encoded_output': encoded_data_1}, {'output_type': COLUMN_DATA_TYPES.NUMERIC, 'encoded_output': encoded_data_1}]})
encoded_predicted_target = enc.encode(test_data).tolist()
predicted_targets_1 = output_1_encoder.decode(torch.tensor([x[:4] for x in encoded_predicted_target]))
predicted_targets_2 = output_1_encoder.decode(torch.tensor([x[4:] for x in encoded_predicted_target]))
for predicted_targets in [predicted_targets_1, predicted_targets_2]:
real = list(test_target)
pred = list(predicted_targets)
# handle nan
for i in range(len(pred)):
try:
float(pred[i])
except:
pred[i] = 0
print(real[0:25], '\n', pred[0:25])
encoder_accuracy = r2_score(real, pred)
print(f'Categorial encoder accuracy for: {encoder_accuracy} on testing dataset')
#assert(encoder_accuracy > 0.5)
| 46.993031 | 456 | 0.671091 | 10,790 | 0.80003 | 0 | 0 | 981 | 0.072737 | 0 | 0 | 1,675 | 0.124194 |
de775456d4d41592b9970922b77c527e29122163 | 4,542 | py | Python | scripts/scopdominfo.py | stivalaa/cuda_satabsearch | b947fb711f8b138e5a50c81e7331727c372eb87d | [
"MIT"
]
| null | null | null | scripts/scopdominfo.py | stivalaa/cuda_satabsearch | b947fb711f8b138e5a50c81e7331727c372eb87d | [
"MIT"
]
| null | null | null | scripts/scopdominfo.py | stivalaa/cuda_satabsearch | b947fb711f8b138e5a50c81e7331727c372eb87d | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
###############################################################################
#
# scopdominfo.py - Report information on the folds and classes of a list of SCOP sids
#
# File: scopdominfo.py
# Author: Alex Stivala
# Created: November 2008
#
# $Id: scopdominfo.py 3009 2009-12-08 03:01:48Z alexs $
#
###############################################################################
"""
Report information on the folds, superfamilies and classes of a list
of SCOP domain identifiers (sids).
See usage in docstring for main()
SCOP and ASTRAL data is obtained using the Bio.SCOP library (Casbon et
al 2006 'A high level interface to SCOP and ASTRAL implemented in
Python' BMC Bioinformatics 7:10) and depends on having the data
downloaded, in SCOP_DIR (defined below).
Downloaded SCOP files from
http://scop.mrc-lmb.cam.ac.uk/scop/parse/index.html
and ASTRAL files (in scopseq-1.73) from
http://astral.berkeley.edu/scopseq-1.73.html
The files downloaded are:
/local/charikar/SCOP/:
dir.cla.scop.txt_1.73
dir.des.scop.txt_1.73
dir.hie.scop.txt_1.73
/local/charikar/SCOP/scopseq-1.73:
astral-scopdom-seqres-all-1.73.fa
astral-scopdom-seqres-sel-gs-bib-95-1.73.id
Other files there are indices built by Bio.SCOP when first used.
"""
import sys,os
from Bio.SCOP import *
from pathdefs import SCOP_DIR,SCOP_VERSION
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def write_scopdom_info(scopsid_list, fh, scop):
"""
Write information about the list of SCOP sids (domain identifiers)
in the scopsid_list to fh. For each domain write the fold and class,
then write stats about number of different folds represented
and the number of domains in each class.
Parameters:
scopsid_list - list of SCOP sids (domain ids)
fh - open (write) filehandle to write to
scop - previously built Bio.SCOP Scop instance
Return value:
None.
"""
superfamily_count = {} # dict of {sf_sunid : count} counting domains in eac superfamily
fold_count= {} # dict of {fold_sunid : count} counting domains in each fold
class_count={} # dict of {class_sunid : count} counting domains in each class
for sid in scopsid_list:
scop_dom = scop.getDomainBySid(sid)
scop_superfamily = scop_dom.getAscendent('superfamily')
scop_fold = scop_dom.getAscendent('fold')
scop_class = scop_dom.getAscendent('class')
if superfamily_count.has_key(scop_superfamily.sunid):
superfamily_count[scop_superfamily.sunid] += 1
else:
superfamily_count[scop_superfamily.sunid] = 1
if fold_count.has_key(scop_fold.sunid):
fold_count[scop_fold.sunid] += 1
else:
fold_count[scop_fold.sunid] = 1
if class_count.has_key(scop_class.sunid):
class_count[scop_class.sunid] += 1
else:
class_count[scop_class.sunid] = 1
fh.write('%s\t(%s) %s\t%s\t%s\n' % (sid, scop_superfamily.sccs,scop_superfamily.description, scop_fold.description, scop_class.description))
num_domains = len(scopsid_list)
num_superfamilies = len(superfamily_count)
num_folds = len(fold_count)
num_classes = len(class_count)
fh.write('Totals: %d domains\t%d superfamilies\t%d folds\t%d classes\n' %
(num_domains, num_superfamilies, num_folds, num_classes))
fh.write('Class distribution:\n')
for (class_sunid, count) in class_count.iteritems():
fh.write('\t%s:\t%d\n' % (scop.getNodeBySunid(class_sunid).description,
count))
#-----------------------------------------------------------------------------
#
# Main
#
#-----------------------------------------------------------------------------
def usage(progname):
"""
Print usage message and exit
"""
sys.stderr.write("Usage: " +progname +
" < domainidlist\n")
sys.exit(1)
def main():
"""
    main for scopdominfo.py
    Usage: scopdominfo.py < domainidlist
The list of SCOP domain ids (sids) is read from stdin
Output is written to stdout.
"""
if len(sys.argv) != 1:
usage(os.path.basename(sys.argv[0]))
# read SCOP data
scop = Scop(dir_path=SCOP_DIR,version=SCOP_VERSION)
scopsid_list = sys.stdin.read().split('\n')[:-1]
write_scopdom_info(scopsid_list, sys.stdout, scop)
if __name__ == "__main__":
main()
| 30.689189 | 148 | 0.610524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,660 | 0.585645 |
de79c16d6df471bd5320f3fc4154354634f400a7 | 1,334 | py | Python | serverless/pytorch/foolwood/siammask/nuclio/model_handler.py | arthurtibame/cvat | 0062ecdec34a9ffcad33e1664a7cac663bec4ecf | [
"MIT"
]
| null | null | null | serverless/pytorch/foolwood/siammask/nuclio/model_handler.py | arthurtibame/cvat | 0062ecdec34a9ffcad33e1664a7cac663bec4ecf | [
"MIT"
]
| null | null | null | serverless/pytorch/foolwood/siammask/nuclio/model_handler.py | arthurtibame/cvat | 0062ecdec34a9ffcad33e1664a7cac663bec4ecf | [
"MIT"
]
| 1 | 2021-09-17T10:19:30.000Z | 2021-09-17T10:19:30.000Z | # Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from tools.test import *
import os
class ModelHandler:
def __init__(self):
# Setup device
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.backends.cudnn.benchmark = True
base_dir = "/opt/nuclio/SiamMask/experiments/siammask_sharp"
class configPath:
config = os.path.join(base_dir, "config_davis.json")
self.config = load_config(configPath)
from custom import Custom
siammask = Custom(anchors=self.config['anchors'])
self.siammask = load_pretrain(siammask, os.path.join(base_dir, "SiamMask_DAVIS.pth"))
self.siammask.eval().to(self.device)
def infer(self, image, shape, state):
if state is None: # init tracking
x, y, w, h = shape
target_pos = np.array([x + w / 2, y + h / 2])
target_sz = np.array([w, h])
state = siamese_init(image, target_pos, target_sz, self.siammask,
self.config['hp'], device=self.device)
else: # track
state = siamese_track(state, image, mask_enable=True, refine_enable=True,
device=self.device)
shape = state['ploygon'].flatten()
return {"shape": shape, "state": state}
| 34.205128 | 93 | 0.614693 | 1,223 | 0.916792 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.17991 |
de79c50bcf2db093ce388c48ecf4f5cdef4ddb45 | 10,842 | py | Python | pynmt/__init__.py | obrmmk/demo | b5deb85b2b2bf118b850f93c255ee88d055156a8 | [
"MIT"
]
| null | null | null | pynmt/__init__.py | obrmmk/demo | b5deb85b2b2bf118b850f93c255ee88d055156a8 | [
"MIT"
]
| null | null | null | pynmt/__init__.py | obrmmk/demo | b5deb85b2b2bf118b850f93c255ee88d055156a8 | [
"MIT"
]
| 1 | 2021-11-23T14:04:36.000Z | 2021-11-23T14:04:36.000Z | import torch
import torch.nn as nn
from torch.nn import (TransformerEncoder, TransformerDecoder,
TransformerEncoderLayer, TransformerDecoderLayer)
from torch import Tensor
from typing import Iterable, List
import math
import os
import numpy as np
try:
from janome.tokenizer import Tokenizer
except ModuleNotFoundError:
import os
os.system('pip install janome')
from janome.tokenizer import Tokenizer
from google_drive_downloader import GoogleDriveDownloader
# Select the compute device
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('DEVICE :', DEVICE)
# SRC (source) : the source (Japanese) text
SRC_LANGUAGE = 'jpn'
# TGT (target) : the target (Python) text
TGT_LANGUAGE = 'py'
# special_token IDX
UNK_IDX, PAD_IDX, SOS_IDX, EOS_IDX = 0, 1, 2, 3
tokenizer = Tokenizer(os.path.join(os.path.dirname(
__file__), 'janomedic.csv'), udic_type="simpledic", udic_enc="utf8", wakati=True)
def jpn_tokenizer(text):
return [token for token in tokenizer.tokenize(text) if token != " " and len(token) != 0]
class Seq2SeqTransformer(nn.Module):
def __init__(self,
num_encoder_layers: int,
num_decoder_layers: int,
emb_size: int,
nhead: int,
src_vocab_size: int,
tgt_vocab_size: int,
dim_feedforward: int = 512,
dropout: float = 0.1):
super(Seq2SeqTransformer, self).__init__()
encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_encoder = TransformerEncoder(
encoder_layer, num_layers=num_encoder_layers)
decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_decoder = TransformerDecoder(
decoder_layer, num_layers=num_decoder_layers)
self.generator = nn.Linear(emb_size, tgt_vocab_size)
self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
self.positional_encoding = PositionalEncoding(
emb_size, dropout=dropout)
def forward(self,
src: Tensor,
tgt: Tensor,
src_mask: Tensor,
tgt_mask: Tensor,
src_padding_mask: Tensor,
tgt_padding_mask: Tensor,
memory_key_padding_mask: Tensor):
src_emb = self.positional_encoding(self.src_tok_emb(src))
tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))
memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)
outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,
tgt_padding_mask, memory_key_padding_mask)
return self.generator(outs)
def encode(self, src: Tensor, src_mask: Tensor):
return self.transformer_encoder(self.positional_encoding(
self.src_tok_emb(src)), src_mask)
def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
return self.transformer_decoder(self.positional_encoding(
self.tgt_tok_emb(tgt)), memory,
tgt_mask)
class PositionalEncoding(nn.Module):
def __init__(self,
emb_size: int,
dropout: float,
maxlen: int = 5000):
super(PositionalEncoding, self).__init__()
den = torch.exp(- torch.arange(0, emb_size, 2)
* math.log(10000) / emb_size)
pos = torch.arange(0, maxlen).reshape(maxlen, 1)
pos_embedding = torch.zeros((maxlen, emb_size))
pos_embedding[:, 0::2] = torch.sin(pos * den)
pos_embedding[:, 1::2] = torch.cos(pos * den)
pos_embedding = pos_embedding.unsqueeze(-2)
self.dropout = nn.Dropout(dropout)
self.register_buffer('pos_embedding', pos_embedding)
def forward(self, token_embedding: Tensor):
return self.dropout(token_embedding +
self.pos_embedding[:token_embedding.size(0), :])
class TokenEmbedding(nn.Module):
def __init__(self, vocab_size: int, emb_size):
super(TokenEmbedding, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.emb_size = emb_size
def forward(self, tokens: Tensor):
return self.embedding(tokens.long()) * math.sqrt(self.emb_size)
# Mask that prevents the model from attending to future tokens during prediction
def generate_square_subsequent_mask(sz):
mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float(
'-inf')).masked_fill(mask == 1, float(0.0))
return mask
def sequential_transforms(*transforms):
def func(txt_input):
for transform in transforms:
txt_input = transform(txt_input)
return txt_input
return func
def tensor_transform(token_ids: List[int]):
return torch.cat((torch.tensor([SOS_IDX]),
torch.tensor(token_ids),
torch.tensor([EOS_IDX])))
def beam_topk(model, ys, memory, beamsize):
ys = ys.to(DEVICE)
tgt_mask = (generate_square_subsequent_mask(
ys.size(0)).type(torch.bool)).to(DEVICE)
out = model.decode(ys, memory, tgt_mask)
out = out.transpose(0, 1)
prob = model.generator(out[:, -1])
next_prob, next_word = prob.topk(k=beamsize, dim=1)
return next_prob, next_word
# Generate the translated sequence using beam search
def beam_decode(model, src, src_mask, max_len, beamsize, start_symbol):
src = src.to(DEVICE)
src_mask = src_mask.to(DEVICE)
ys_result = {}
    memory = model.encode(src, src_mask).to(DEVICE) # encoder output (context vectors)
    # initial hypothesis (expanded to beamsize candidates below)
ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)
next_prob, next_word = beam_topk(model, ys, memory, beamsize)
next_prob = next_prob[0].tolist()
    # candidates for <sos> + the first token (the list length equals beamsize)
ys = [torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(
next_word[:, idx].item())], dim=0) for idx in range(beamsize)]
for i in range(max_len-1):
prob_list = []
ys_list = []
        # for each candidate, compute the next predicted tokens and their probabilities
for ys_token in ys:
next_prob, next_word = beam_topk(model, ys_token, memory, len(ys))
            # collect the prediction probabilities into the list next_prob
            next_prob = next_prob[0].tolist()
            # merge into a single list
prob_list.extend(next_prob)
ys = [torch.cat([ys_token, torch.ones(1, 1).type_as(src.data).fill_(
next_word[:, idx].item())], dim=0) for idx in range(len(ys))]
ys_list.extend(ys)
        # keep the indices of prob_list's top-k entries in prob_topk_idx
prob_topk_idx = list(reversed(np.argsort(prob_list).tolist()))
prob_topk_idx = prob_topk_idx[:len(ys)]
# print('@@', prob_topk_idx)
        # assign the new top-k candidates to ys
ys = [ys_list[idx] for idx in prob_topk_idx]
next_prob = [prob_list[idx] for idx in prob_topk_idx]
# print('@@orig', prob_list)
# print('@@next', next_prob)
pop_list = []
for j in range(len(ys)):
            # if the EOS token is at the end, append the hypothesis to ys_result (the return value)
if ys[j][-1].item() == EOS_IDX:
ys_result[ys[j]] = next_prob[j]
pop_list.append(j)
        # once a hypothesis is in ys_result, remove it from ys
        # (len(ys) shrinks, which is why len(ys) is used instead of beamsize in places)
for l in sorted(pop_list, reverse=True):
del ys[l]
        # finish once ys_result holds at least beamsize hypotheses
if len(ys_result) >= beamsize:
break
return ys_result
class NMT(object):
vocab: object
def __init__(self, vocab_file):
self.vocab = torch.load(vocab_file)
self.SRC_VOCAB_SIZE = len(self.vocab[SRC_LANGUAGE])
self.TGT_VOCAB_SIZE = len(self.vocab[TGT_LANGUAGE])
self.src_transform = sequential_transforms(jpn_tokenizer, # Tokenization
# Numericalization
self.vocab[SRC_LANGUAGE],
tensor_transform) # Add SOS/EOS and create tensor
self.EMB_SIZE = 512
self.NHEAD = 8
self.FFN_HID_DIM = 512
self.BATCH_SIZE = 128
self.NUM_ENCODER_LAYERS = 3
self.NUM_DECODER_LAYERS = 3
self.transformer = Seq2SeqTransformer(self.NUM_ENCODER_LAYERS, self.NUM_DECODER_LAYERS,
self.EMB_SIZE, self.NHEAD, self.SRC_VOCAB_SIZE, self.TGT_VOCAB_SIZE,
self.FFN_HID_DIM)
for p in self.transformer.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self.transformer = self.transformer.to(DEVICE)
def load(self, trained_model):
self.transformer.load_state_dict(torch.load(trained_model))
def translate_beam(self, src_sentence: str, beamsize=5):
"""
        Return multiple translation candidates as a list.
"""
pred_list = []
self.transformer.eval()
src = self.src_transform(src_sentence).view(-1, 1)
num_tokens = src.shape[0]
src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)
tgt_tokens = beam_decode(
self.transformer, src, src_mask, max_len=num_tokens + 5, beamsize=beamsize, start_symbol=SOS_IDX)
prob_list = list(tgt_tokens.values())
tgt_tokens = list(tgt_tokens.keys())
for idx in list(reversed(np.argsort(prob_list).tolist())):
pred_list.append(" ".join(self.vocab[TGT_LANGUAGE].lookup_tokens(
list(tgt_tokens[idx].cpu().numpy()))).replace("<sos>", "").replace("<eos>", ""))
return pred_list, sorted(prob_list, reverse=True)
special_token = ['<A>', '<B>', '<C>', '<D>', '<E>']
def make_pynmt(model_id='1zMTrsmcyF2oXpWKe0bIZ7Ej1JBjVq7np', vocab_id='13C39jfdkkmE2mx-1K9PFXqGST84j-mz8', model_file='./model_DS.pt', vocab_file="./vocab_obj_DS.pth"):
GoogleDriveDownloader.download_file_from_google_drive(
file_id=model_id, dest_path=model_file, unzip=False)
GoogleDriveDownloader.download_file_from_google_drive(
file_id=vocab_id, dest_path=vocab_file, unzip=False)
nmt = NMT(vocab_file)
nmt.load(model_file)
def pynmt(sentence):
# candidate = re.findall(r'[a-zA-Z"\']+', sentence)
# for idx in range(len(candidate)):
# sentence = sentence.replace(candidate[idx], special_token[idx])
# print(sentence)
pred, prob = nmt.translate_beam(sentence)
return pred, prob
# print(pred)
# print(prob)
return pynmt
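# Usage sketch (hypothetical input; the model/vocab are fetched from the Google
# Drive IDs above on first call):
#     pynmt = make_pynmt()
#     predictions, probabilities = pynmt(japanese_sentence)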
| 36.14 | 168 | 0.620365 | 5,721 | 0.504053 | 0 | 0 | 0 | 0 | 0 | 0 | 1,673 | 0.147401 |
de7a78e426a815b7bd976727be3160a469af797a | 9,185 | py | Python | probedb/certs/builddb.py | dingdang2012/tlsprober | 927f6177939470235bf336bca27096369932fc66 | [
"Apache-2.0"
]
| 1 | 2019-01-30T13:18:02.000Z | 2019-01-30T13:18:02.000Z | probedb/certs/builddb.py | dingdang2012/tlsprober | 927f6177939470235bf336bca27096369932fc66 | [
"Apache-2.0"
]
| null | null | null | probedb/certs/builddb.py | dingdang2012/tlsprober | 927f6177939470235bf336bca27096369932fc66 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2010-2012 Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import standalone
import probedb.probedata2.models as Prober
import probedb.certs.models as Certs
import probedb.resultdb2.models as Results
from django.db.models import Q
import certhandler
import threading
import Queue
"""
Update the database so that the certificate attributes are set for all certificates
Used in case there is a failure in the automatic registration of certificates and
setting of attributes
"""
keys = {}
selfsigned_keys ={}
failed=0
upgraded = 0
EV_conditions = set([
Certs.CertificateConditions.CERTC_EXTENDED_VALIDATION_CERT,
Certs.CertificateConditions.CERTC_NOT_EXTENDED_VALIDATION_CERT
])
summaries = dict([(x.id, x) for x in Results.ResultSummaryList.objects.all()])
i=0;
for x in summaries.itervalues():
x.start()
if 0:
for c,d in Results.ResultCondition.RESULTC_VALUES:
x.check_condition(c)
if 0:
i=0;
for certificate in Prober.Certificate.objects.filter(issuer_b64=None).iterator():
cert = certhandler.Certificate(certificate.certificate_b64)
if not cert:
continue
certificate.issuer_b64 = cert.IssuerNameDER()
certificate.subject_b64 = cert.SubjectNameDER()
certificate.save()
i+=1
if i%100 == 0:
print i
print "finished issuer"
if 0:
i=0
for certificate in Prober.Certificate.objects.filter(subject_b64=None).iterator():
cert = certhandler.Certificate(certificate.certificate_b64)
if not cert:
continue
certificate.issuer_b64 = cert.IssuerNameDER()
certificate.subject_b64 = cert.SubjectNameDER()
certificate.save()
i+=1
if i%100 == 0:
print i
print "finished subject"
if 0:
i=0
for certificate in Certs.CertAttributes.objects.filter(serial_number=None).iterator():
cert = certhandler.Certificate(certificate.cert.certificate_b64)
if not cert:
continue
serial = str(cert.GetSerialNumber())
if len(serial) >100:
serial = "NaN"
certificate.serial_number = serial
certificate.save()
i+=1
if i%100 == 0:
print i
print "finished serial numbers"
if 1:
print "building database"
def update_cert(x):
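        # Rebuilds the CertAttributes row for one certificate, then refreshes the
        # EV-related condition groups on every result referencing it.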
#try:
if True:
attr = Certs.CertAttributes()
attr.Construct()
attr.SetUpFromCert(x)
condition_list = attr.GetConditions() & EV_conditions
for z in x.proberesult_set.filter(server_cert = x):
if z.part_of_run_id not in summaries:
continue
summary = summaries.get(z.part_of_run_id)
result_cond = z.GetConditions()
result_cond1 = set(result_cond)
result_cond -= EV_conditions
result_cond.update(condition_list)
if result_cond1 != result_cond:
z.result_summary_group = summary.get_condition_group(summary.part_of_run, result_cond)
z.save()
for y in z.resultentry_set.all():
result_cond = y.GetConditions()
result_cond1 = set(result_cond)
result_cond -= EV_conditions
result_cond.update(condition_list)
if result_cond1 != result_cond:
y.result_summary_group = summary.get_condition_group(summary.part_of_run, result_cond)
y.save()
#except:
# raise
# pass
def do_update_cert(queue, progress_queue, i):
while True:
k = queue.get()
try:
x = Prober.Certificate.objects.get(id=k)
update_cert(x)
except:
pass
progress_queue.put(True)
queue.task_done()
def __ProgressCounter(queue):
i=0
while True:
queue.get()
i += 1
if i%100 == 0:
print "Processed ", i, "servers so far"
queue.task_done()
update_queue = Queue.Queue()
finished_queue = Queue.Queue()
num_probers = 100
threads = []
for i in range(num_probers):
new_thread = threading.Thread(target=do_update_cert, args=(update_queue,finished_queue, i))
new_thread.daemon = True
new_thread.start()
threads.append(new_thread)
progress_thread = threading.Thread(target=__ProgressCounter, args=(finished_queue,))
progress_thread.daemon = True
progress_thread.start()
i=0;
c_ids = list(Prober.Certificate.objects.filter(certattributes=None).values_list("id", flat=True))
print len(c_ids)
for k in c_ids:
#for x in Prober.Certificate.objects.iterator():
i+=1
if i % 100 == 0:
print i
update_queue.put(k)
update_queue.join()
finished_queue.join()
if 0:
print "Marking site certificates"
i=0;
#for k in list(Certs.CertAttributes.objects.filter(cert__server_cert__id__gt =0).distinct().values_list("id", flat=True)):
c_ids = Certs.CertAttributes.objects.filter(cert_kind = Certs.CertAttributes.CERT_UNKNOWN).values_list("cert_id", flat=True)
c_cids = list(Prober.ProbeResult.objects.exclude(server_cert__id__in = c_ids).filter(server_cert__id__gt =0).distinct().values_list("server_cert__id", flat=True))
for k in c_ids :
i+=1
if i % 100 == 0:
print i
try:
x = Certs.CertAttributes.objects.get(cert__id = k)
if x.cert_kind == Certs.CertAttributes.CERT_SELFSIGNED:
x.cert_kind = Certs.CertAttributes.CERT_SELFSIGNED_SERVER
else:
x.cert_kind = Certs.CertAttributes.CERT_SERVER
x.save()
except:
pass
if 0:
print "Locating intermediates"
i=0;
already_fixed = set()
#for k in list(Certs.CertAttributes.objects.filter(cert__server_cert__id__gt =0).distinct().values_list("id", flat=True)):
for k in list(Certs.CertAttributes.objects.exclude(cert_kind__in =[Certs.CertAttributes.CERT_SELFSIGNED,
Certs.CertAttributes.CERT_SELFSIGNED_SERVER,
Certs.CertAttributes.CERT_INTERMEDIATE_CA,
Certs.CertAttributes.CERT_XSIGN_CA,
Certs.CertAttributes.CERT_SERVER,] ).
filter(cert__proberesult__server_cert__id__gt =0).
distinct().
values_list("id", flat=True)):
i+=1
if i % 100 == 0:
print i
if k in already_fixed:
continue;
x = Certs.CertAttributes.objects.get(id = k)
for y in x.cert.proberesult_set.filter(server_cert__id__gt =0):
certs0 = [(z, certhandler.Certificate(z.certificate_b64)) for z in y.certificates.all()
if z.certattributes.cert_kind not in [Certs.CertAttributes.CERT_SELFSIGNED_SERVER, Certs.CertAttributes.CERT_SERVER]]
if not certs0:
continue;
certs = {}
for (z, c) in certs0:
if not c:
continue
subject = c.SubjectNameLine()
certs.setdefault(subject,[]).append((z,c))
if not certs:
continue
site = certhandler.Certificate(y.server_cert.certificate_b64)
if not site:
continue
last = site
while True:
issuer = last.IssuerNameLine()
if issuer not in certs:
break;
signer = None
cert = None
for (z,c) in certs[issuer]:
if last.IsSignedBy(c):
signer = z
cert = c
break;
del certs[issuer] # prevent infinite loop
if not signer:
break;
if signer.certattributes.cert_kind in [Certs.CertAttributes.CERT_SELFSIGNED, Certs.CertAttributes.CERT_TRUSTED_ROOT, ]:
break; # Root, already set
if signer.certattributes.cert_kind == Certs.CertAttributes.CERT_UNKNOWN or signer.certattributes.cert_kind =="":
signer.certattributes.cert_kind = Certs.CertAttributes.CERT_INTERMEDIATE_CA
signer.certattributes.save()
already_fixed.add(signer.id)
last = cert
break;
if 0:
print "Locating intermediates #2"
i=0;
already_fixed = set()
name_matches = 0
signed_by = 0
#for k in list(Certs.CertAttributes.objects.filter(cert__server_cert__id__gt =0).distinct().values_list("id", flat=True)):
for k in list(Certs.CertAttributes.objects.exclude(cert_kind__in =[Certs.CertAttributes.CERT_SELFSIGNED,
Certs.CertAttributes.CERT_SELFSIGNED_SERVER,
Certs.CertAttributes.CERT_INTERMEDIATE_CA,
Certs.CertAttributes.CERT_XSIGN_CA,
Certs.CertAttributes.CERT_SERVER,] ).
distinct().
values_list("id", flat=True)):
i+=1
if i % 100 == 0:
print i
if k in already_fixed:
continue;
x = Certs.CertAttributes.objects.get(id = k)
cert = certhandler.Certificate(x.cert.certificate_b64)
if not cert:
continue
assert not cert.IsSelfSigned()
subject = x.subject_oneline
for y in Certs.CertAttributes.objects.filter(issuer_oneline=subject):
name_matches += 1
cert_cand = certhandler.Certificate(y.cert.certificate_b64)
if not cert_cand:
continue;
if cert_cand.IsSignedBy(cert):
signed_by += 1
if x.cert_kind in [Certs.CertAttributes.CERT_UNKNOWN, ""]:
x.cert_kind = Certs.CertAttributes.CERT_INTERMEDIATE_CA
x.save()
already_fixed.add(x.id)
break
print "Name matches: ", name_matches
print "Signed by: ",signed_by
print "completed" | 27.665663 | 163 | 0.706369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,547 | 0.168427 |
de7b6b105a985b6f30d92b604636b34c85675300 | 498 | py | Python | Alp/cap_alp/E.py | Ashokkommi0001/patterns | daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2 | [
"MIT"
]
| 2 | 2021-03-17T12:08:22.000Z | 2021-03-17T12:11:10.000Z | Alp/cap_alp/E.py | Ashokkommi0001/patterns | daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2 | [
"MIT"
]
| null | null | null | Alp/cap_alp/E.py | Ashokkommi0001/patterns | daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2 | [
"MIT"
]
| 1 | 2021-03-17T11:49:39.000Z | 2021-03-17T11:49:39.000Z | def for_E():
for row in range(7):
for col in range(5):
if (col==0 ) or (row==0 or row==3 or row==6):
print("*",end=" ")
else:
print(end=" ")
print()
def while_E():
i=0
while i<7:
j=0
while j<5:
if (j==0 ) or (i==0 or i==3 or i==6):
print("*",end=" ")
else:
print(end=" ")
j+=1
i+=1
print()
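# Example driver (addition, not in the original): either call prints a 7x5 "E".
# for_E()
# while_E()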
| 22.636364 | 58 | 0.317269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.040161 |
de7c4534ed26f1d3158aaf6b53415fa79e0c249d | 574 | py | Python | patron/__init__.py | rafaelaraujobsb/patron | b2d23d4149a5f48156a4a2b0638daac33a66cc6a | [
"MIT"
]
| null | null | null | patron/__init__.py | rafaelaraujobsb/patron | b2d23d4149a5f48156a4a2b0638daac33a66cc6a | [
"MIT"
]
| null | null | null | patron/__init__.py | rafaelaraujobsb/patron | b2d23d4149a5f48156a4a2b0638daac33a66cc6a | [
"MIT"
]
| null | null | null | from flask import Flask
from loguru import logger
from flasgger import Swagger
from patron.api import api_bp
logger.add("api.log", format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}", rotation="500 MB")
template = {
"swagger": "2.0",
"info": {
"title": "PATRON",
"description": "",
"version": "0.0.1"
},
"consumes": [
"application/json"
],
"produces": [
"application/json"
]
}
app = Flask(__name__)
swagger = Swagger(app, template=template)
app.register_blueprint(api_bp, url_prefix='/api')
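# Run sketch (assumption: entry point depends on how the package is launched):
#     FLASK_APP=patron flask run
# flasgger then serves the Swagger UI at /apidocs by default.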
| 19.793103 | 102 | 0.602787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.344948 |
de7dc549a1952d8dda02b33f493f1bb859b37917 | 735 | py | Python | src/perceptron.py | tomoki/deep-learning-from-scratch | 0b6144806b6b79462d6d65616a64b1774f876973 | [
"MIT"
]
| 1 | 2018-08-31T09:39:11.000Z | 2018-08-31T09:39:11.000Z | src/perceptron.py | tomoki/deep-learning-from-scratch | 0b6144806b6b79462d6d65616a64b1774f876973 | [
"MIT"
]
| null | null | null | src/perceptron.py | tomoki/deep-learning-from-scratch | 0b6144806b6b79462d6d65616a64b1774f876973 | [
"MIT"
]
| null | null | null | import numpy as np
import matplotlib.pylab as plt
def step_function(x):
y = x > 0
return y.astype(np.int)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(0, x)
def AND(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def NAND(x1, x2):
x = np.array([x1, x2])
w = np.array([-0.5, -0.5])
b = 0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def OR(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.2
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
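# Example (addition, not in the original): a single perceptron cannot represent
# XOR, but composing the gates above does.
def XOR(x1, x2):
    return AND(NAND(x1, x2), OR(x1, x2))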
| 17.093023 | 31 | 0.469388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
de8007cfcf1b7fa53b4609e54f0ca14a7d5ba1bb | 210 | py | Python | notebooks/_solutions/case4_air_quality_analysis18.py | jorisvandenbossche/2018-Bordeaux-pandas-course | 3f6b9fe6f02c2ab484c3f9744d7d39b926438dd6 | [
"BSD-3-Clause"
]
| 3 | 2019-07-23T15:14:03.000Z | 2020-11-10T06:12:18.000Z | notebooks/_solutions/case4_air_quality_analysis18.py | jorisvandenbossche/2018-Bordeaux-pandas-course | 3f6b9fe6f02c2ab484c3f9744d7d39b926438dd6 | [
"BSD-3-Clause"
]
| null | null | null | notebooks/_solutions/case4_air_quality_analysis18.py | jorisvandenbossche/2018-Bordeaux-pandas-course | 3f6b9fe6f02c2ab484c3f9744d7d39b926438dd6 | [
"BSD-3-Clause"
]
| 3 | 2020-03-04T23:40:20.000Z | 2021-11-04T16:41:10.000Z | # with tidy long table
fig, ax = plt.subplots()
sns.violinplot(x='station', y='no2', data=data_tidy[data_tidy['datetime'].dt.year == 2011], palette="GnBu_d", ax=ax)
ax.set_ylabel("NO$_2$ concentration (µg/m³)") | 52.5 | 116 | 0.704762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.40566 |
de82bbe06365e1885857bfec2f5eb9144e01b08c | 1,729 | py | Python | dncnn/dncnn.py | kTonpa/DnCNN | aca7e07ccbe6b75bee7d4763958dade4a8eee609 | [
"MIT"
]
| null | null | null | dncnn/dncnn.py | kTonpa/DnCNN | aca7e07ccbe6b75bee7d4763958dade4a8eee609 | [
"MIT"
]
| null | null | null | dncnn/dncnn.py | kTonpa/DnCNN | aca7e07ccbe6b75bee7d4763958dade4a8eee609 | [
"MIT"
]
| null | null | null | """
Project: dncnn
Author: khalil MEFTAH
Date: 2021-11-26
DnCNN: Denoising Convolutional Neural Network model implementation for image denoising
"""
import torch
from torch import nn
import torch.nn.functional as F
# helper functions
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# main class
class DnCNN(nn.Module):
def __init__(
self,
num_layers=17,
num_features=64,
kernel_size=3,
padding=1,
image_channels=1,
image_size=64
):
super(DnCNN, self).__init__()
layers = []
layers.append(nn.Conv2d(in_channels=image_channels, out_channels=num_features, kernel_size=kernel_size, padding=padding, bias=True))
layers.append(nn.ReLU(inplace=True))
for _ in range(num_layers - 2):
layers.append(nn.Conv2d(in_channels=num_features, out_channels=num_features, kernel_size=kernel_size, padding=padding, bias=True))
layers.append(nn.BatchNorm2d(num_features))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(in_channels=num_features, out_channels=image_channels, kernel_size=kernel_size, padding=padding, bias=True))
self.dncnn = nn.Sequential(*layers)
@torch.no_grad()
@eval_decorator
def denoise(self, y):
return self(y)
def forward(self, y, return_loss=False, x=None):
n = self.dncnn(y)
if not return_loss:
return y-n
# calculate the L2 loss
return F.mse_loss(n, y-x)
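# Minimal usage sketch (assumes single-channel tensors; `noisy`/`clean` are
# hypothetical batches of shape [N, 1, H, W]):
#     model = DnCNN()
#     denoised = model.denoise(noisy)                # inference, no gradients
#     loss = model(noisy, return_loss=True, x=clean) # residual MSE for training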
| 25.80597 | 142 | 0.638519 | 1,246 | 0.720648 | 0 | 0 | 85 | 0.049161 | 0 | 0 | 197 | 0.113939 |
de848d1a58c8622dd6042ce58386b34d78eaa285 | 41,886 | py | Python | scripts/fabfile/tasks.py | Alchem-Lab/deneva | 5201ef12fd8235fea7833709b8bffe45f53877eb | [
"Apache-2.0"
]
| 88 | 2017-01-19T03:15:24.000Z | 2022-03-30T16:22:19.000Z | scripts/fabfile/tasks.py | Alchem-Lab/deneva | 5201ef12fd8235fea7833709b8bffe45f53877eb | [
"Apache-2.0"
]
| null | null | null | scripts/fabfile/tasks.py | Alchem-Lab/deneva | 5201ef12fd8235fea7833709b8bffe45f53877eb | [
"Apache-2.0"
]
| 22 | 2017-01-20T10:22:31.000Z | 2022-02-10T18:55:36.000Z | #!/usr/bin/python
from __future__ import print_function
import logging
from fabric.api import task,run,local,put,get,execute,settings
from fabric.decorators import *
from fabric.context_managers import shell_env,quiet
from fabric.exceptions import *
from fabric.utils import puts,fastprint
from time import sleep
from contextlib import contextmanager
import traceback
import os,sys,datetime,re,ast
import itertools
import glob,shlex,subprocess
import pprint
sys.path.append('..')
from environment import *
from experiments import *
from experiments import configs
from helper import get_cfgs,get_outfile_name,get_execfile_name,get_args,CONFIG_PARAMS,FLAG
# (see https://github.com/fabric/fabric/issues/51#issuecomment-96341022)
logging.basicConfig()
paramiko_logger = logging.getLogger("paramiko.transport")
paramiko_logger.disabled = True
COLORS = {
"info" : 32, #green
"warn" : 33, #yellow
"error" : 31, #red
"debug" : 36, #cyan
}
#OUT_FMT = "[{h}] {p}: {fn}:".format
PP = pprint.PrettyPrinter(indent=4)
NOW=datetime.datetime.now()
STRNOW=NOW.strftime("%Y%m%d-%H%M%S")
os.chdir('../..')
#MAX_TIME_PER_EXP = 60 * 2 # in seconds
MAX_TIME_PER_EXP = 60 * 10 # in seconds
EXECUTE_EXPS = True
SKIP = False
CC_ALG = ""
set_env()
@task
@hosts('localhost')
def using_vcloud():
set_env_vcloud()
@task
@hosts('localhost')
def using_istc():
set_env_istc()
@task
@hosts('localhost')
def using_ec2():
set_env_ec2()
@task
@hosts('localhost')
def using_local():
set_env_local()
## Basic usage:
## fab using_vcloud run_exps:experiment_1
## fab using_local run_exps:experiment_1
## fab using_istc run_exps:experiment_1
@task
@hosts('localhost')
def run_exps(exps,skip_completed='False',exec_exps='True',dry_run='False',iterations='1',check='True',delay='',same_node='False',overlap='False',shmem='True',cram='False'):
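    # fab passes task arguments as strings, hence the explicit == 'True'
    # comparisons for the boolean-ish flags below.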
global SKIP, EXECUTE_EXPS,NOW,STRNOW
ITERS = int(iterations)
SKIP = skip_completed == 'True'
EXECUTE_EXPS = exec_exps == 'True'
CHECK = check == 'True'
env.dry_run = dry_run == 'True'
env.same_node = same_node == 'True'
env.overlap = overlap == 'True'
env.cram = cram == 'True'
if env.cluster != "ec2":
env.shmem = shmem == 'True'
if env.dry_run:
with color(level="warn"):
puts("this will be a dry run!",show_prefix=True)
with color():
puts("running experiment set:{}".format(exps),show_prefix=True)
# Make sure all experiment binaries exist
if CHECK:
execute(check_binaries,exps)
# Run experiments
for i in range(ITERS):
NOW=datetime.datetime.now()
STRNOW=NOW.strftime("%Y%m%d-%H%M%S")
execute(run_exp_old,exps,delay=delay)
# execute(run_exp,exps,delay=delay)
## Basic usage:
## fab using_vcloud network_test
## fab using_istc network_test:4
@task
@hosts(['localhost'])
def network_test(num_nodes=16,exps="network_experiment",skip_completed='False',exec_exps='True'):
env.batch_mode = False
global SKIP, EXECUTE_EXPS, MAX_TIME_PER_EXP
SKIP = skip_completed == 'True'
EXECUTE_EXPS = exec_exps == 'True'
MAX_TIME_PER_EXP = 60
num_nodes = int(num_nodes)
execute(check_binaries,exps)
if num_nodes < 2 or len(env.hosts) < num_nodes:
with color(level="error"):
puts("not enough hosts in ifconfig!",show_prefix=True)
abort()
exp_hosts=env.hosts[0:num_nodes]
pairs = list(itertools.combinations(exp_hosts,2))
for pair in pairs:
set_hosts(list(pair))
execute(run_exp,exps,network_test=True)
@task
@parallel
def check_cpu():
put("test_cpu.out",env.rem_homedir)
run("chmod a+x test_cpu.out; time ./test_cpu.out")
@task
@hosts('localhost')
def delete_local_results():
local("rm -f results/*");
@task
#@hosts('localhost')
@parallel
def delete_remote_results():
if env.cluster == "istc":
if env.shmem:
run("rm -f /dev/shm/results*.out")
else:
run("rm -f /home/%s/results*.out" % env.user)
else:
run("rm -f /home/ubuntu/results*.out")
@task
@parallel
def copy_schema():
if env.dry_run:
return
schemas = ["benchmarks/TPCC_full_schema.txt","benchmarks/YCSB_schema.txt","benchmarks/PPS_schema.txt"]
# Copying regular files should always succeed unless node is down
for schema in schemas:
if env.shmem:
put(schema,"/dev/shm/")
else:
put(schema,env.rem_homedir)
@task
@parallel
def copy_binaries(exp_fname):
if env.dry_run:
return
executable_files = ["rundb","runcl"]
succeeded = True
# Copying executable files may fail if a process is running the executable
with settings(warn_only=True):
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
if env.shmem:
remote_fpath = os.path.join("/dev/shm/","{}{}".format(exp_fname,f))
else:
remote_fpath = os.path.join(env.rem_homedir,"{}{}".format(exp_fname,f))
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("warn"):
puts("WARN: put: {} -> {} failed!".format(f,env.rem_homedir),show_prefix=True)
succeeded = False
break
if not succeeded:
with color("warn"):
puts("WARN: killing all executables and retrying...",show_prefix=True)
killall()
# If this fails again then we abort
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
            # keep the exp_fname prefix so the retry matches the first attempt
            if env.shmem:
                remote_fpath = os.path.join("/dev/shm/","{}{}".format(exp_fname,f))
            else:
                remote_fpath = os.path.join(env.rem_homedir,"{}{}".format(exp_fname,f))
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("error"):
puts("ERROR: put: {} -> {} failed! (2nd attempt)... Aborting".format(f,env.rem_homedir),show_prefix=True)
abort()
@task
@parallel
def copy_ifconfig():
files = ["ifconfig.txt"]
# Copying regular files should always succeed unless node is down
for f in files:
if env.shmem:
put(f,"/dev/shm/")
else:
put(f,env.rem_homedir)
@task
@parallel
def copy_files(schema,exp_fname):
if env.dry_run:
return
executable_files = ["rundb","runcl"]
# if CC_ALG == "CALVIN":
# executable_files.append("runsq")
files = ["ifconfig.txt"]
files.append(schema)
succeeded = True
# Copying regular files should always succeed unless node is down
for f in files:
if env.shmem:
put(f,"/dev/shm/")
else:
put(f,env.rem_homedir)
# Copying executable files may fail if a process is running the executable
with settings(warn_only=True):
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
if env.shmem:
remote_fpath = os.path.join("/dev/shm/",f)
else:
remote_fpath = os.path.join(env.rem_homedir,f)
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("warn"):
puts("WARN: put: {} -> {} failed!".format(f,env.rem_homedir),show_prefix=True)
succeeded = False
break
if not succeeded:
with color("warn"):
puts("WARN: killing all executables and retrying...",show_prefix=True)
killall()
# If this fails again then we abort
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
if env.shmem:
remote_fpath = os.path.join("/dev/shm",f)
else:
remote_fpath = os.path.join(env.rem_homedir,f)
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("error"):
puts("ERROR: put: {} -> {} failed! (2nd attempt)... Aborting".format(f,env.rem_homedir),show_prefix=True)
abort()
#delay is in ms
@task
@parallel
def set_delay(delay='10'):
run("sudo tc qdisc add dev eth0 root netem delay {}ms".format(delay))
@task
@parallel
def reset_delay():
run("sudo tc qdisc del dev eth0 root")
@task
@parallel
def sync_clocks(max_offset=0.01,max_attempts=1,delay=15):
if env.dry_run:
return True
offset = sys.float_info.max
attempts = 0
while attempts < max_attempts:
if env.cluster == "ec2":
res = run("ntpdate -q 0.amazon.pool.ntp.org")
else:
res = run("ntpdate -q clock-2.cs.cmu.edu")
offset = float(res.stdout.split(",")[-2].split()[-1])
#print "Host ",env.host,": offset = ",offset
if abs(offset) < max_offset:
break
sleep(delay)
if env.cluster == "ec2":
res = run("sudo ntpdate -b 0.amazon.pool.ntp.org")
else:
res = run("sudo ntpdate -b clock-2.cs.cmu.edu")
sleep(delay)
attempts += 1
return attempts < max_attempts
@task
@hosts('localhost')
def compile():
compiled = False
with quiet():
compiled = local("make clean; make -j8",capture=True).succeeded
if not compiled:
with settings(warn_only=True):
compiled = local("make -j8") # Print compilation errors
if not compiled:
with color("error"):
puts("ERROR: cannot compile code!",show_prefix=True)
@task
@parallel
def killall():
with settings(warn_only=True):
if not env.dry_run:
run("pkill -f rundb")
run("pkill -f runcl")
# run("pkill -f runsq")
@task
@parallel
def run_cmd(cmd):
run(cmd)
@task
@parallel
def put_cmd(cmd):
put(cmd,env.rem_homedir,mirror_local_mode=True)
@task
@parallel
def deploy(schema_path,nids,exps,runfiles,fmt):
nid = iter(nids[env.host])
exp = iter(exps[env.host])
runfile = iter(runfiles[env.host])
succeeded = True
with shell_env(SCHEMA_PATH=schema_path):
with settings(warn_only=True,command_timeout=MAX_TIME_PER_EXP):
# if env.same_node:
cmd = ''
for r in env.roledefs["servers"]:
if r == env.host:
nn = nid.next()
rfile = runfile.next()
args = get_args(fmt,exp.next())
if env.shmem:
cmd += "(/dev/shm/{}rundb -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(rfile,nn,args,nn)
# cmd += "(/dev/shm/rundb -nid{} >> /dev/shm/results{}.out 2>&1 &);".format(nn,nn)
else:
cmd += "(./{}rundb -nid{} {}>> results{}.out 2>&1 &);".format(rfile,nn,args,nn)
for r in env.roledefs["clients"]:
if r == env.host:
nn = nid.next()
rfile = runfile.next()
args = get_args(fmt,exp.next())
if env.shmem:
cmd += "(/dev/shm/{}runcl -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(rfile,nn,args,nn)
else:
cmd += "(./{}runcl -nid{} {}>> results{}.out 2>&1 &);".format(rfile,nn,args,nn)
# for r in env.roledefs["sequencer"]:
# if r == env.host:
# nn = nid.next()
# args = get_args(fmt,exp.next())
# if env.shmem:
# cmd += "(/dev/shm/runsq -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(nn,args,nn)
# else:
# cmd += "(./runsq -nid{} {}>> results{}.out 2>&1 &);".format(nn,args,nn)
cmd = cmd[:-3]
cmd += ")"
try:
res = run("echo $SCHEMA_PATH")
if not env.dry_run:
run(cmd)
else:
print(cmd)
except CommandTimeout:
pass
except NetworkError:
pass
# else:
# if env.host in env.roledefs["servers"]:
# nn = nid.next();
# cmd = "./rundb -nid{} >> results{}.out 2>&1".format(nn,nn)
# elif env.host in env.roledefs["clients"]:
# nn = nid.next();
# cmd = "./runcl -nid{} >> results{}.out 2>&1".format(nn,nn)
# elif "sequencer" in env.roledefs and env.host in env.roledefs["sequencer"]:
# nn = nid.next();
# cmd = "./runsq -nid{} >> results{}.out 2>&1".format(nn,nn)
# else:
# with color('error'):
# puts("host does not belong to any roles",show_prefix=True)
# puts("current roles:",show_prefix=True)
# puts(pprint.pformat(env.roledefs,depth=3),show_prefix=False)
#
# try:
# res = run("echo $SCHEMA_PATH")
# if not env.dry_run:
# run(cmd)
# except CommandTimeout:
# pass
# except NetworkError:
# pass
return True
@task
@parallel
def get_results(outfiles,nids):
succeeded = True
# if env.same_node:
for n in nids[env.host]:
if env.shmem:
rem_path=os.path.join(env.rem_homedir,"/dev/shm/results{}.out".format(n))
else:
rem_path=os.path.join(env.rem_homedir,"results{}.out".format(n))
loc_path=os.path.join(env.result_dir, "{}_{}".format(n,outfiles[env.host]))
with settings(warn_only=True):
if not env.dry_run:
res1 = get(remote_path=rem_path, local_path=loc_path)
succeeded = succeeded and res1.succeeded
with settings(warn_only=True):
if not env.dry_run:
if env.shmem:
res2 = run("rm -f /dev/shm/results*.out")
else:
res2 = run("rm -f results*.out")
succeeded = succeeded and res2.succeeded
# else:
# nid = env.hosts.index(env.host)
# rem_path=os.path.join(env.rem_homedir,"results.out")
# loc_path=os.path.join(env.result_dir, outfiles[env.host])
# with settings(warn_only=True):
# if not env.dry_run:
# res1 = get(remote_path=rem_path, local_path=loc_path)
# res2 = run("rm -f results.out")
# succeeded = res1.succeeded and res2.succeeded
return succeeded
@task
@hosts('localhost')
def write_config(cfgs):
dbx_cfg = os.path.join(env.local_path,"config.h")
    f = open(dbx_cfg,'r')
lines = f.readlines()
f.close()
with open(dbx_cfg,'w') as f_cfg:
for line in lines:
found_cfg = False
for c in cfgs:
                found_cfg = re.search("#define "+c + "\t",line) or re.search("#define "+c + " ",line)
if found_cfg:
f_cfg.write("#define " + c + " " + str(cfgs[c]) + "\n")
break
if not found_cfg: f_cfg.write(line)
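# Sketch of the rewrite performed by write_config (values illustrative):
#   line in config.h : #define NODE_CNT 2
#   cfgs             : {"NODE_CNT": 4}
#   rewritten line   : #define NODE_CNT 4
# Lines whose #define key is not in cfgs are copied through unchanged.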
@task
@hosts('localhost')
def write_ifconfig(roles,exp,rfile):
with color():
puts("writing roles to the ifconfig file:",show_prefix=True)
puts(pprint.pformat(roles,depth=3),show_prefix=False)
nids = {}
exps = {}
rfiles = {}
nid = 0
print(roles)
with open("ifconfig.txt",'w') as f:
for server in roles['servers']:
f.write(server + "\n")
if server not in nids:
nids[server] = [nid]
exps[server] = [exp]
rfiles[server] = [rfile]
else:
nids[server].append(nid)
exps[server].append(exp)
rfiles[server].append(rfile)
nid += 1
for client in roles['clients']:
f.write(client + "\n")
if client not in nids:
nids[client] = [nid]
exps[client] = [exp]
rfiles[client] = [rfile]
else:
nids[client].append(nid)
exps[client].append(exp)
rfiles[client].append(rfile)
nid += 1
# if "sequencer" in roles:
# assert CC_ALG == "CALVIN"
# sequencer = roles['sequencer'][0]
# f.write(sequencer + "\n")
# nids[sequencer] = [nid]
# exps[sequencer] = [exp]
# nid += 1
return nids,exps,rfiles
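# ifconfig.txt layout produced above (illustrative, 2 servers + 1 client):
#   10.0.0.1    <- nid 0 (server)
#   10.0.0.2    <- nid 1 (server)
#   10.0.0.3    <- nid 2 (client)
# nids/exps/rfiles map each host to the node ids, experiments and runfile
# prefixes it was assigned.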
@task
@hosts('localhost')
def assign_roles(server_cnt,client_cnt,append=False):
if env.same_node:
servers=[env.hosts[0]] * server_cnt
clients=[env.hosts[0]] * client_cnt
elif env.cram:
ncnt = max(max(server_cnt,client_cnt) / 8,1)
servers = []
clients = []
for r in range(server_cnt):
servers.append(env.hosts[r%ncnt])
for r in range(client_cnt):
clients.append(env.hosts[r%ncnt])
else:
# if len(env.hosts) < server_cnt+client_cnt:
# with color("error"):
# puts("ERROR: not enough hosts to run experiment",show_prefix=True)
# puts("\tHosts required: {}".format(server_cnt+client_cnt))
# puts("\tHosts available: {} ({})".format(len(env.hosts),pprint.pformat(env.hosts,depth=3)))
# assert len(env.hosts) >= server_cnt+client_cnt
servers=env.hosts[0:server_cnt]
if env.overlap:
clients=env.hosts[0:client_cnt]
else:
clients=env.hosts[server_cnt:server_cnt+client_cnt]
new_roles = {}
# if CC_ALG == 'CALVIN':
# sequencer = env.hosts[server_cnt+client_cnt:server_cnt+client_cnt+1]
if env.roledefs is None or len(env.roledefs) == 0:
env.roledefs={}
env.roledefs['clients']=[]
env.roledefs['servers']=[]
env.roledefs['sequencer']=[]
if append:
env.roledefs['clients'].extend(clients)
env.roledefs['servers'].extend(servers)
# if CC_ALG == 'CALVIN':
# env.roledefs['sequencer'].extend(sequencer)
else:
env.roledefs['clients']=clients
env.roledefs['servers']=servers
# if CC_ALG == 'CALVIN':
# env.roledefs['sequencer']=sequencer
new_roles['clients']=clients
new_roles['servers']=servers
# if CC_ALG == 'CALVIN':
# new_roles['sequencer']=sequencer
with color():
puts("Assigned the following roles:",show_prefix=True)
puts(pprint.pformat(new_roles,depth=3) + "\n",show_prefix=False)
puts("Updated env roles:",show_prefix=True)
puts(pprint.pformat(env.roledefs,depth=3) + "\n",show_prefix=False)
return new_roles
def get_good_hosts():
# good_hosts = []
set_hosts()
good_hosts = env.hosts
# Find and skip bad hosts
ping_results = execute(ping)
for host in ping_results:
if ping_results[host] == 0:
# good_hosts.append(host)
continue
else:
with color("warn"):
puts("Skipping non-responsive host {}".format(host),show_prefix=True)
good_hosts.remove(host)
return good_hosts
@task
@hosts('localhost')
def compile_binary(fmt,e):
ecfgs = get_cfgs(fmt,e)
cfgs = dict(configs)
for c in dict(ecfgs):
if c not in CONFIG_PARAMS and c in FLAG:
del ecfgs[c]
cfgs.update(ecfgs)
# if env.remote and not env.same_node:
if env.cluster == "ec2":
cfgs["ENVIRONMENT_EC2"]="true"
else:
cfgs["ENVIRONMENT_EC2"]="false"
if env.cluster == "istc":
cfgs["CORE_CNT"]=64
else:
cfgs["CORE_CNT"]=8
if env.remote:
cfgs["TPORT_TYPE"]="TCP"
if env.shmem:
cfgs["SHMEM_ENV"]="true"
else:
cfgs["SHMEM_ENV"]="false"
execute(write_config,cfgs)
execute(compile)
# output_f = get_outfile_name(cfgs,fmt,env.hosts)
output_f = get_execfile_name(cfgs,fmt,env.hosts)
local("cp rundb binaries/{}rundb".format(output_f))
local("cp runcl binaries/{}runcl".format(output_f))
# local("cp runsq binaries/{}runsq".format(output_f))
local("cp config.h binaries/{}cfg".format(output_f))
if EXECUTE_EXPS:
cmd = "mkdir -p {}".format(env.result_dir)
local(cmd)
        set_hosts() # TODO: verify this host re-initialization is needed before copy_binaries
execute(copy_binaries,output_f)
#cmd = "cp config.h {}.cfg".format(os.path.join(env.result_dir,output_f))
#local(cmd)
@task
@hosts('localhost')
def compile_binaries(exps):
local("mkdir -p binaries")
local("rm -rf binaries/*")
fmt,experiments = experiment_map[exps]()
# for e in experiments:
# execute(compile_binary,fmt,e)
@task
@hosts('localhost')
def check_binaries(exps):
# if not os.path.isdir("binaries"):
# execute(compile_binaries,exps)
# return
# if len(glob.glob("binaries/*")) == 0:
# execute(compile_binaries,exps)
# return
if not os.path.isdir("binaries") or len(glob.glob("binaries/*")) == 0:
local("mkdir -p binaries")
local("rm -rf binaries/*")
fmt,experiments = experiment_map[exps]()
for e in experiments:
cfgs = get_cfgs(fmt,e)
# if env.remote and not env.same_node:
if env.cluster == "ec2":
cfgs["ENVIRONMENT_EC2"]="true"
else:
cfgs["ENVIRONMENT_EC2"]="false"
if env.cluster == "istc":
cfgs["CORE_CNT"]=64
else:
cfgs["CORE_CNT"]=8
if env.remote:
cfgs["TPORT_TYPE"]="TCP"
if env.shmem:
cfgs["SHMEM_ENV"]="true"
else:
cfgs["SHMEM_ENV"]="false"
# output_f = get_outfile_name(cfgs,fmt,env.hosts)
output_f = get_execfile_name(cfgs,fmt,env.hosts)
executables = glob.glob("{}*".format(os.path.join("binaries",output_f)))
has_rundb,has_runcl,has_config=False,False,False
# has_rundb,has_runcl,has_runsq,has_config=False,False,False,False
for executable in executables:
if executable.endswith("rundb"):
has_rundb = True
elif executable.endswith("runcl"):
has_runcl = True
# elif executable.endswith("runsq"):
# has_runsq = True
elif executable.endswith("cfg"):
has_config = True
# if not has_rundb or not has_runcl or not has_runsq or not has_config:
if not has_rundb or not has_runcl or not has_config:
execute(compile_binary,fmt,e)
@task
@hosts(['localhost'])
def run_exp_old(exps,network_test=False,delay=''):
if env.shmem:
schema_path = "/dev/shm/"
else:
schema_path = "{}/".format(env.rem_homedir)
good_hosts = []
if not network_test and EXECUTE_EXPS:
good_hosts = get_good_hosts()
with color():
puts("good host list =\n{}".format(pprint.pformat(good_hosts,depth=3)),show_prefix=True)
execute(copy_schema)
fmt,experiments = experiment_map[exps]()
batch_size = 0
nids = {}
outfiles = {}
exps = {}
runfiles = {}
for e in experiments:
print(e)
cfgs = get_cfgs(fmt,e)
output_fbase = get_outfile_name(cfgs,fmt,env.hosts)
output_exec_fname = get_execfile_name(cfgs,fmt,env.hosts)
output_f = output_fbase + STRNOW
last_exp = experiments.index(e) == len(experiments) - 1
skip_exp = False
        # Check whether experiment has already been run in this batch
if SKIP:
if len(glob.glob('{}*{}*.out'.format(env.result_dir,output_fbase))) > 0:
with color("warn"):
puts("experiment exists in results folder... skipping",show_prefix=True)
if last_exp:
skip_exp = True
else:
continue
global CC_ALG
CC_ALG = cfgs["CC_ALG"]
if EXECUTE_EXPS:
cfg_srcpath = "{}cfg".format(os.path.join("binaries",output_exec_fname))
cfg_destpath = "{}.cfg".format(os.path.join(env.result_dir,output_exec_fname+STRNOW))
local("cp {} {}".format(cfg_srcpath,cfg_destpath))
nnodes = cfgs["NODE_CNT"]
nclnodes = cfgs["CLIENT_NODE_CNT"]
try:
ntotal = nnodes + nclnodes
except TypeError:
nclnodes = cfgs[cfgs["CLIENT_NODE_CNT"]]
ntotal = nnodes + nclnodes
# if CC_ALG == 'CALVIN':
# ntotal += 1
if env.same_node:
ntotal = 1
if env.overlap:
ntotal = max(nnodes,nclnodes)
if env.cram:
ntotal = max(max(nnodes,nclnodes)/8,1)
if env.remote:
if not network_test:
set_hosts(good_hosts)
# if ntotal > len(env.hosts):
# msg = "Not enough nodes to run experiment!\n"
# msg += "\tRequired nodes: {}, ".format(ntotal)
# msg += "Actual nodes: {}".format(len(env.hosts))
# with color():
# puts(msg,show_prefix=True)
# cmd = "rm -f config.h {}".format(cfg_destpath)
# local(cmd)
# continue
if not skip_exp:
if env.batch_mode:
# If full, execute all exps in batch and reset everything
full = (batch_size + ntotal) > len(env.hosts)
if full:
if env.cluster != 'istc' and not env.dry_run:
# Sync clocks before each experiment
execute(sync_clocks)
with color():
puts("Batch is full, deploying batch...{}/{}".format(batch_size,len(good_hosts)),show_prefix=True)
with color("debug"):
puts(pprint.pformat(outfiles,depth=3),show_prefix=False)
set_hosts(env.hosts[:batch_size])
with color():
puts("Starttime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(deploy,schema_path,nids,exps,runfiles,fmt)
with color():
puts("Endtime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(get_results,outfiles,nids)
if not env.dry_run:
good_hosts = get_good_hosts()
env.roledefs = None
batch_size = 0
nids = {}
exps = {}
runfiles = {}
outfiles = {}
set_hosts(good_hosts)
else:
with color():
puts("Adding experiment to current batch: {}".format(output_f), show_prefix=True)
machines = env.hosts[batch_size : batch_size + ntotal]
batch_size += ntotal
else:
machines = env.hosts[:ntotal]
set_hosts(machines)
new_roles=execute(assign_roles,nnodes,nclnodes,append=env.batch_mode)[env.host]
new_nids,new_exps,new_runfiles = execute(write_ifconfig,new_roles,e,output_exec_fname)[env.host]
nids.update(new_nids)
exps.update(new_exps)
runfiles.update(new_runfiles)
for host,nid in new_nids.iteritems():
outfiles[host] = "{}.out".format(output_f)
# if env.same_node:
# outfiles[host] = "{}.out".format(output_f)
# else:
# outfiles[host] = "{}_{}.out".format(nid[0],output_f)
print(nids)
if cfgs["WORKLOAD"] == "TPCC":
schema = "benchmarks/TPCC_full_schema.txt"
# schema = "benchmarks/TPCC_short_schema.txt"
elif cfgs["WORKLOAD"] == "YCSB":
schema = "benchmarks/YCSB_schema.txt"
elif cfgs["WORKLOAD"] == "PPS":
schema = "benchmarks/PPS_schema.txt"
# NOTE: copy_files will fail if any (possibly) stray processes
                    # are still running one of the executables. Set the 'kill'
# flag in environment.py to true to kill these processes. This
# is useful for running real experiments but dangerous when both
# of us are debugging...
# execute(copy_files,schema,output_exec_fname)
execute(copy_ifconfig)
if not env.batch_mode or last_exp and len(exps) > 0:
if env.batch_mode:
set_hosts(good_hosts[:batch_size])
puts("Deploying last batch...{}/{}".format(batch_size,len(good_hosts)),show_prefix=True)
else:
print("Deploying: {}".format(output_f))
if env.cluster != 'istc':
# Sync clocks before each experiment
print("Syncing Clocks...")
execute(sync_clocks)
if delay != '':
execute(set_delay,delay=delay)
with color():
puts("Starttime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(deploy,schema_path,nids,exps,runfiles,fmt)
with color():
puts("Endtime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
if delay != '':
execute(reset_delay)
execute(get_results,outfiles,nids)
if not env.dry_run:
good_hosts = get_good_hosts()
set_hosts(good_hosts)
batch_size = 0
nids = {}
exps = {}
outfiles = {}
env.roledefs = None
else:
pids = []
print("Deploying: {}".format(output_f))
for n in range(ntotal):
if n < nnodes:
cmd = "./rundb -nid{}".format(n)
elif n < nnodes+nclnodes:
cmd = "./runcl -nid{}".format(n)
# elif n == nnodes+nclnodes:
# assert(CC_ALG == 'CALVIN')
# cmd = "./runsq -nid{}".format(n)
else:
                    assert(False)
print(cmd)
cmd = shlex.split(cmd)
ofile_n = "{}{}_{}.out".format(env.result_dir,n,output_f)
ofile = open(ofile_n,'w')
p = subprocess.Popen(cmd,stdout=ofile,stderr=ofile)
pids.insert(0,p)
for n in range(ntotal):
pids[n].wait()
def succeeded(outcomes):
for host,outcome in outcomes.iteritems():
if not outcome:
return False
return True
@task
@parallel
def ping():
with settings(warn_only=True):
res=local("ping -w8 -c1 {}".format(env.host),capture=True)
        assert res is not None
return res.return_code
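# ping exit status semantics: 0 = host responded, nonzero = unreachable;
# get_good_hosts() keeps only hosts whose ping task returns 0.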
@task
@hosts('localhost')
def ec2_run_instances(
dry_run="False",
image_id="ami-d05e75b8",
count="12",
security_group="dist-sg",
instance_type="m4.2xlarge",
# instance_type="m4.xlarge",
key_name="devenv-key",
):
opt = "--{k} {v} ".format
cmd = "aws ec2 run-instances "
if dry_run == "True":
cmd += "--dry-run "
cmd += opt(k="image-id",v=image_id)
cmd += opt(k="count",v=count)
cmd += opt(k="security-groups",v=security_group)
cmd += opt(k="instance-type",v=instance_type)
cmd += opt(k="key-name",v=key_name)
local(cmd)
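# Resulting command with the defaults above (illustrative):
#   aws ec2 run-instances --image-id ami-d05e75b8 --count 12 \
#     --security-groups dist-sg --instance-type m4.2xlarge --key-name devenv-key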
@task
@hosts('localhost')
def ec2_run_spot_instances(
dry_run="False",
image_id="ami-d05e75b8",
price="0.10",
count="12",
security_group="dist-sg",
instance_type="m4.2xlarge",
# instance_type="m4.xlarge",
key_name="devenv-key",
):
opt = "--{k} {v} ".format
cmd = "aws ec2 request-spot-instances "
if dry_run == "True":
cmd += "--dry-run "
# cmd += opt(k="ami-id",v=image_id)
cmd += opt(k="spot-price",v=price)
cmd += opt(k="instance-count",v=count)
# cmd += opt(k="instance-type",v=instance_type)
# cmd += opt(k="group",v=security_group)
# cmd += opt(k="key",v=key_name)
cmd += opt(k="launch-specification",v="file://ec2_specification.json")
local(cmd)
@task
@hosts('localhost')
def ec2_get_status():
cmd = "aws ec2 describe-instance-status --query 'InstanceStatuses[*].{InstanceId:InstanceId,SystemStatus:SystemStatus.Status,InstanceStatus:InstanceStatus.Status}'"
res = local(cmd,capture=True)
statuses = ast.literal_eval(res)
for status in statuses:
if status['SystemStatus'] != "ok":
print("{}: ERROR: bad system status {}".format(status['InstanceId'],status['SystemStatus']))
sys.exit(1)
elif status['InstanceStatus'] == "initializing":
print("{}: ERROR: still initializing...".format(status['InstanceId']))
sys.exit(1)
elif status['InstanceStatus'] != "ok":
print("{}: ERROR: bad instance status {}".format(status['InstanceId'],status['InstanceStatus']))
sys.exit(1)
print("READY!")
return 0
@task
@hosts('localhost')
def ec2_write_ifconfig():
cmd = "aws ec2 describe-instances --query 'Reservations[*].Instances[*].{ID:InstanceId,IP:PublicIpAddress,TYPE:InstanceType}'"
res = local(cmd,capture=True)
    # Skip any previously terminated VMs (terminated-VM state remains for 1 hour)
res = res.replace("null","\"\"")
ip_info = ast.literal_eval(res)
with open("ec2_ifconfig.txt","w") as f:
for entry in ip_info:
for ip in entry:
if ip["IP"] != "":
f.write(ip["IP"] + "\n")
@task
@hosts('localhost')
def ec2_terminate_instances():
cmd = "aws ec2 describe-instances --query 'Reservations[*].Instances[*].InstanceId'"
res = local(cmd,capture=True)
ids = ast.literal_eval(res)
id_list = []
for id_entry in ids:
for id in id_entry:
id_list.append(id)
cmd = "aws ec2 terminate-instances --instance-ids {}".format(" ".join(id_list))
res = local(cmd,capture=True)
print(res)
@contextmanager
def color(level="info"):
    if level not in COLORS:
level = "info"
print("\033[%sm" % COLORS[level],end="")
yield
print("\033[0m",end="")
@task
@hosts(['localhost'])
def run_exp(exps,network_test=False,delay=''):
if env.shmem:
schema_path = "/dev/shm/"
else:
schema_path = "{}/".format(env.rem_homedir)
good_hosts = []
if not network_test and EXECUTE_EXPS:
good_hosts = get_good_hosts()
with color():
puts("good host list =\n{}".format(pprint.pformat(good_hosts,depth=3)),show_prefix=True)
fmt,experiments = experiment_map[exps]()
batch_size = 0
nids = {}
outfiles = {}
    exps = {}
    runfiles = {}
if SKIP:
for e in experiments[:]:
cfgs = get_cfgs(fmt,e)
output_fbase = get_outfile_name(cfgs,fmt,env.hosts)
if len(glob.glob('{}*{}*.out'.format(env.result_dir,output_fbase))) > 0:
with color("warn"):
puts("experiment exists in results folder... skipping",show_prefix=True)
experiments.remove(e)
experiments.sort(key=lambda x: x[fmt.index("NODE_CNT")] + x[fmt.index("CLIENT_NODE_CNT")],reverse=True)
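    # Sorting largest-first makes the greedy packing below behave like
    # first-fit-decreasing bin packing of experiments onto the host pool.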
# Fill experiment pool
while len(experiments) > 0 :
round_exps = []
batch_total = 0
for e in experiments[:]:
cfgs = get_cfgs(fmt,e)
nnodes = cfgs["NODE_CNT"]
nclnodes = cfgs["CLIENT_NODE_CNT"]
ccalg = cfgs["CC_ALG"]
ntotal = cfgs["NODE_CNT"] + cfgs["CLIENT_NODE_CNT"]
# if ccalg == 'CALVIN':
# ntotal += 1
if env.same_node:
ntotal = 1
if env.overlap:
ntotal = max(nnodes,nclnodes)
if env.cram:
ntotal = max(max(nnodes,nclnodes)/8,1)
if ntotal > len(env.hosts):
msg = "Not enough nodes to run experiment!\n"
msg += "\tRequired nodes: {}, ".format(ntotal)
msg += "Actual nodes: {}".format(len(env.hosts))
with color():
puts(msg,show_prefix=True)
experiments.remove(e)
continue
if (batch_total + ntotal) > len(env.hosts):
continue
batch_total += ntotal
round_exps.append(e)
experiments.remove(e)
if not EXECUTE_EXPS: continue
batch_size = 0
for e in round_exps:
set_hosts(good_hosts)
cfgs = get_cfgs(fmt,e)
global CC_ALG
nnodes = cfgs["NODE_CNT"]
nclnodes = cfgs["CLIENT_NODE_CNT"]
CC_ALG = cfgs["CC_ALG"]
ntotal = cfgs["NODE_CNT"] + cfgs["CLIENT_NODE_CNT"]
# if ccalg == 'CALVIN':
# ntotal += 1
if env.same_node:
ntotal = 1
if env.overlap:
ntotal = max(nnodes,nclnodes)
if env.cram:
ntotal = max(max(nnodes,nclnodes)/8,1)
output_fbase = get_outfile_name(cfgs,fmt,env.hosts)
output_exec_fname = get_execfile_name(cfgs,fmt,env.hosts)
output_f = output_fbase + STRNOW
cfg_srcpath = "{}cfg".format(os.path.join("binaries",output_exec_fname))
cfg_destpath = "{}.cfg".format(os.path.join(env.result_dir,output_exec_fname+STRNOW))
local("cp {} {}".format(cfg_srcpath,cfg_destpath))
with color():
puts("Adding experiment to current batch: {}".format(output_f), show_prefix=True)
machines = env.hosts[batch_size : batch_size + ntotal]
batch_size += ntotal
set_hosts(machines)
new_roles=execute(assign_roles,nnodes,nclnodes,append=env.batch_mode)[env.host]
        new_nids,new_exps,new_runfiles = execute(write_ifconfig,new_roles,e,output_exec_fname)[env.host]
nids.update(new_nids)
        exps.update(new_exps)
        runfiles.update(new_runfiles)
for host,nid in new_nids.iteritems():
outfiles[host] = "{}.out".format(output_f)
if cfgs["WORKLOAD"] == "TPCC":
schema = "benchmarks/TPCC_full_schema.txt"
# schema = "benchmarks/TPCC_short_schema.txt"
elif cfgs["WORKLOAD"] == "YCSB":
schema = "benchmarks/YCSB_schema.txt"
elif cfgs["WORKLOAD"] == "PPS":
schema = "benchmarks/PPS_schema.txt"
# NOTE: copy_files will fail if any (possibly) stray processes
        # are still running one of the executables. Set the 'kill'
# flag in environment.py to true to kill these processes. This
# is useful for running real experiments but dangerous when both
# of us are debugging...
# execute(copy_files,schema,output_exec_fname)
execute(copy_ifconfig)
if env.remote:
set_hosts(good_hosts[:batch_size])
if env.cluster != 'istc' and not env.dry_run:
# Sync clocks before each experiment
execute(sync_clocks)
with color():
puts("Batch is full, deploying batch...{}/{}".format(batch_size,len(good_hosts)),show_prefix=True)
with color("debug"):
puts(pprint.pformat(outfiles,depth=3),show_prefix=False)
with color():
puts("Starttime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(deploy,schema_path,nids,exps,runfiles,fmt)
with color():
puts("Endtime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(get_results,outfiles,nids)
good_hosts = get_good_hosts()
batch_size = 0
nids = {}
            exps = {}
            runfiles = {}
outfiles = {}
set_hosts(good_hosts)
env.roledefs = None
| 36.549738 | 172 | 0.542138 | 0 | 0 | 158 | 0.003772 | 39,477 | 0.942487 | 0 | 0 | 13,129 | 0.313446 |
de852461942a9c2a911b8c95e145d87c827bf61c | 651 | py | Python | mezzanine_recipes/forms.py | tjetzinger/mezzanine-recipes | f00be89ae5b93fdb2cf2771270efb4ecfa30e313 | [
"MIT"
]
| 6 | 2015-02-01T18:08:41.000Z | 2021-06-20T16:24:11.000Z | mezzanine_recipes/forms.py | tjetzinger/mezzanine-recipes | f00be89ae5b93fdb2cf2771270efb4ecfa30e313 | [
"MIT"
]
| 2 | 2020-02-11T21:19:13.000Z | 2020-06-05T16:38:44.000Z | mezzanine_recipes/forms.py | tjetzinger/mezzanine-recipes | f00be89ae5b93fdb2cf2771270efb4ecfa30e313 | [
"MIT"
]
| 1 | 2016-05-17T20:16:25.000Z | 2016-05-17T20:16:25.000Z |
from django import forms
from mezzanine.blog.forms import BlogPostForm
from .models import BlogPost
# These fields need to be in the form, hidden, with default values,
# since it posts to the blog post admin, which includes these fields
# and will use empty values instead of the model defaults, without
# these specified.
hidden_field_defaults = ("status", "gen_description", "allow_comments")
class BlogPostForm(BlogPostForm):
"""
Model form for ``BlogPost`` that provides the quick blog panel in the
admin dashboard.
"""
class Meta:
model = BlogPost
fields = ("title", "content") + hidden_field_defaults
| 26.04 | 73 | 0.723502 | 248 | 0.380952 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.58679 |
de86c719ac9ffce9e1f273be9d0dc93bbd224576 | 14,533 | py | Python | reviews/migrations/0022_auto_20190302_1556.py | UrbanBogger/horrorexplosion | 3698e00a6899a5e8b224cd3d1259c3deb3a2ca80 | [
"MIT"
]
| null | null | null | reviews/migrations/0022_auto_20190302_1556.py | UrbanBogger/horrorexplosion | 3698e00a6899a5e8b224cd3d1259c3deb3a2ca80 | [
"MIT"
]
| 4 | 2020-06-05T18:21:18.000Z | 2021-06-10T20:17:31.000Z | reviews/migrations/0022_auto_20190302_1556.py | UrbanBogger/horrorexplosion | 3698e00a6899a5e8b224cd3d1259c3deb3a2ca80 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2019-03-02 15:56
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reviews', '0021_auto_20190302_1514'),
]
operations = [
migrations.CreateModel(
name='TelevisionEpisode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('episode_title', models.CharField(default='Episode', help_text='Enter the title of the television episode', max_length=50)),
('episode_number', models.IntegerField(default=1, help_text='Enter the TV episode\'s chronological position in the TV seasonas an integer, e.g. "1" for the first episode in the TV season, "2" for the second one, etc.')),
('poster', models.ImageField(blank=True, help_text='Upload the poster of the movie', null=True, upload_to='images/')),
('poster_thumbnail', models.ImageField(blank=True, help_text='Upload the poster thumbnail', null=True, upload_to='images/')),
('duration', models.IntegerField(blank=True, help_text='Enter the duration of the TV episode in minutes [OPTIONAL]', null=True)),
('genre', models.ManyToManyField(blank=True, help_text="Enter the TV episode's genre(s) [OPTIONAL]", to='reviews.Genre')),
('keyword', models.ManyToManyField(blank=True, help_text='Enter the keyword(s) that best describe the TV episode [OPTIONAL]', to='reviews.Keyword')),
('microgenre', models.ManyToManyField(blank=True, help_text="Enter the TV episode's microgenre [OPTIONAL]", to='reviews.Microgenre')),
('movie_participation', models.ManyToManyField(blank=True, help_text='Add the name of the TV episode creator, their role and the position you want them to appear in the credits', to='reviews.MovieParticipation')),
('subgenre', models.ManyToManyField(blank=True, help_text="Enter the TV episode's subgenre [OPTIONAL]", to='reviews.Subgenre')),
],
options={
'ordering': ['tv_season', 'episode_number'],
},
),
migrations.CreateModel(
name='TelevisionEpisodeReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review_text', ckeditor.fields.RichTextField(help_text='Enter the text', verbose_name='Text')),
('date_created', models.DateField(help_text='Enter the original date of the text creation')),
('last_modified', models.DateField(auto_now=True)),
('first_created', models.DateField(auto_now_add=True, null=True)),
('mov_review_page_description', models.CharField(default='Click on the link to see what we have to say about this flick.', max_length=155)),
('human_readable_url', models.SlugField(help_text='Enter the "slug",i.e., the human-readable URL for the TV episode review', null=True)),
('grade', models.ForeignKey(help_text="Choose the motion picture's grade", null=True, on_delete=django.db.models.deletion.SET_NULL, to='reviews.Grade')),
('review_author', models.ForeignKey(help_text='Enter the name of the author', null=True, on_delete=django.db.models.deletion.SET_NULL, to='reviews.Reviewer', verbose_name='Text Author')),
('reviewed_tv_episode', models.ForeignKey(help_text="Enter the TV episode that you're reviewing", null=True, on_delete=django.db.models.deletion.CASCADE, to='reviews.TelevisionEpisode')),
],
options={
'ordering': ['reviewed_tv_episode'],
},
),
migrations.CreateModel(
name='TelevisionSeason',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('season_title', models.CharField(default='Season', help_text='Enter the title of the television season', max_length=50)),
('season_number', models.IntegerField(default=1, help_text='Enter the TV season\'s chronological position in the TV seriesas an integer, e.g. "1" for the first season in the TV series, "2" for the second one, etc.')),
('year_of_release', models.IntegerField(choices=[(1895, 1895), (1896, 1896), (1897, 1897), (1898, 1898), (1899, 1899), (1900, 1900), (1901, 1901), (1902, 1902), (1903, 1903), (1904, 1904), (1905, 1905), (1906, 1906), (1907, 1907), (1908, 1908), (1909, 1909), (1910, 1910), (1911, 1911), (1912, 1912), (1913, 1913), (1914, 1914), (1915, 1915), (1916, 1916), (1917, 1917), (1918, 1918), (1919, 1919), (1920, 1920), (1921, 1921), (1922, 1922), (1923, 1923), (1924, 1924), (1925, 1925), (1926, 1926), (1927, 1927), (1928, 1928), (1929, 1929), (1930, 1930), (1931, 1931), (1932, 1932), (1933, 1933), (1934, 1934), (1935, 1935), (1936, 1936), (1937, 1937), (1938, 1938), (1939, 1939), (1940, 1940), (1941, 1941), (1942, 1942), (1943, 1943), (1944, 1944), (1945, 1945), (1946, 1946), (1947, 1947), (1948, 1948), (1949, 1949), (1950, 1950), (1951, 1951), (1952, 1952), (1953, 1953), (1954, 1954), (1955, 1955), (1956, 1956), (1957, 1957), (1958, 1958), (1959, 1959), (1960, 1960), (1961, 1961), (1962, 1962), (1963, 1963), (1964, 1964), (1965, 1965), (1966, 1966), (1967, 1967), (1968, 1968), (1969, 1969), (1970, 1970), (1971, 1971), (1972, 1972), (1973, 1973), (1974, 1974), (1975, 1975), (1976, 1976), (1977, 1977), (1978, 1978), (1979, 1979), (1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019)], help_text="Choose the TV season's release year")),
('season_end_year', models.IntegerField(blank=True, choices=[(1895, 1895), (1896, 1896), (1897, 1897), (1898, 1898), (1899, 1899), (1900, 1900), (1901, 1901), (1902, 1902), (1903, 1903), (1904, 1904), (1905, 1905), (1906, 1906), (1907, 1907), (1908, 1908), (1909, 1909), (1910, 1910), (1911, 1911), (1912, 1912), (1913, 1913), (1914, 1914), (1915, 1915), (1916, 1916), (1917, 1917), (1918, 1918), (1919, 1919), (1920, 1920), (1921, 1921), (1922, 1922), (1923, 1923), (1924, 1924), (1925, 1925), (1926, 1926), (1927, 1927), (1928, 1928), (1929, 1929), (1930, 1930), (1931, 1931), (1932, 1932), (1933, 1933), (1934, 1934), (1935, 1935), (1936, 1936), (1937, 1937), (1938, 1938), (1939, 1939), (1940, 1940), (1941, 1941), (1942, 1942), (1943, 1943), (1944, 1944), (1945, 1945), (1946, 1946), (1947, 1947), (1948, 1948), (1949, 1949), (1950, 1950), (1951, 1951), (1952, 1952), (1953, 1953), (1954, 1954), (1955, 1955), (1956, 1956), (1957, 1957), (1958, 1958), (1959, 1959), (1960, 1960), (1961, 1961), (1962, 1962), (1963, 1963), (1964, 1964), (1965, 1965), (1966, 1966), (1967, 1967), (1968, 1968), (1969, 1969), (1970, 1970), (1971, 1971), (1972, 1972), (1973, 1973), (1974, 1974), (1975, 1975), (1976, 1976), (1977, 1977), (1978, 1978), (1979, 1979), (1980, 1980), (1981, 1981), (1982, 1982), (1983, 1983), (1984, 1984), (1985, 1985), (1986, 1986), (1987, 1987), (1988, 1988), (1989, 1989), (1990, 1990), (1991, 1991), (1992, 1992), (1993, 1993), (1994, 1994), (1995, 1995), (1996, 1996), (1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019)], help_text='Choose the year when the TV season stopped being aired', null=True)),
('poster', models.ImageField(blank=True, help_text='Upload a poster for the TV season if applicable', null=True, upload_to='images/')),
('poster_thumbnail', models.ImageField(blank=True, help_text='Upload a poster thumbnail for the TV season if applicable', null=True, upload_to='images/')),
('duration', models.IntegerField(blank=True, help_text='Enter the duration of the TV Mini-Series in minutes [OPTIONAL]', null=True)),
('description', ckeditor.fields.RichTextField(blank=True, help_text='Provide background info on this TV season [OPTIONAL]')),
('human_readable_url', models.SlugField(help_text="Enter the 'slug',i.e., the human-readable URL for the TV serie's season", null=True)),
('first_created', models.DateField(auto_now_add=True, null=True)),
('country_of_origin', models.ManyToManyField(help_text='Enter the country of origin', to='reviews.Country')),
('genre', models.ManyToManyField(blank=True, help_text="Enter the TV season's genre(s) [OPTIONAL]", to='reviews.Genre')),
('keyword', models.ManyToManyField(blank=True, help_text='Enter the keyword(s) that best describe the TV season [OPTIONAL]', to='reviews.Keyword')),
('microgenre', models.ManyToManyField(blank=True, help_text="Enter the TV season's microgenre [OPTIONAL]", to='reviews.Microgenre')),
('movie_participation', models.ManyToManyField(blank=True, help_text="Add the name of the TV season's creator, their role and the position you want them to appear in the credits", to='reviews.MovieParticipation')),
('subgenre', models.ManyToManyField(blank=True, help_text="Enter the TV season's subgenre [OPTIONAL]", to='reviews.Subgenre')),
],
options={
'ordering': ['tv_series', 'season_number'],
},
),
migrations.CreateModel(
name='TelevisionSeasonReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('review_text', ckeditor.fields.RichTextField(help_text='Enter the text', verbose_name='Text')),
('date_created', models.DateField(help_text='Enter the original date of the text creation')),
('last_modified', models.DateField(auto_now=True)),
('first_created', models.DateField(auto_now_add=True, null=True)),
('mov_review_page_description', models.CharField(default='Click on the link to see what we have to say about this flick.', max_length=155)),
('human_readable_url', models.SlugField(help_text="Enter the 'slug',i.e., the human-readable URL for the TV season review", null=True)),
('grade', models.ForeignKey(help_text="Choose the motion picture's grade", null=True, on_delete=django.db.models.deletion.SET_NULL, to='reviews.Grade')),
('review_author', models.ForeignKey(help_text='Enter the name of the author', null=True, on_delete=django.db.models.deletion.SET_NULL, to='reviews.Reviewer', verbose_name='Text Author')),
('reviewed_tv_season', models.ForeignKey(help_text="Enter the TV season that you're reviewing", null=True, on_delete=django.db.models.deletion.CASCADE, to='reviews.TelevisionSeason')),
],
options={
'ordering': ['reviewed_tv_season'],
},
),
migrations.CreateModel(
name='TelevisionSeries',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title_for_sorting', models.CharField(help_text='Enter the title for sorting: Remove all stop words such as "A", "An" and "The" and word all numbers', max_length=250, null=True)),
('is_still_running', models.NullBooleanField(default=False, help_text='Is TV series still ongoing?')),
('poster', models.ImageField(help_text='Upload the top-level poster for the TV series if applicable [OPTIONAL]', null=True, upload_to='images/')),
('poster_thumbnail', models.ImageField(help_text='Upload the top-level poster thumbnail for the TV series if applicable [OPTIONAL]', null=True, upload_to='images/')),
('description', ckeditor.fields.RichTextField(blank=True, help_text='Provide background info on the TV series [OPTIONAL]')),
('tv_series_type', models.CharField(choices=[('Mini-Series', 'TV Mini-Series'), ('Anthology', 'Anthology (Episodic) TV Series'), ('Serial', 'Serial TV Series')], max_length=25)),
('first_created', models.DateField(auto_now_add=True, null=True)),
('human_readable_url', models.SlugField(help_text="Enter the 'slug',i.e., the human-readable URL for the TV series", null=True, unique=True)),
('alternative_title', models.ManyToManyField(blank=True, help_text="Enter the TV serie's alternative_title(s) [OPTIONAL]", related_name='tv_series_alternative_title_set', to='reviews.Title')),
('main_title', models.ForeignKey(help_text="Enter the TV serie's main title", null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tv_series_main_title_set', to='reviews.Title')),
('original_title', models.OneToOneField(blank=True, help_text="Enter the TV serie's original title [OPTIONAL]", null=True, on_delete=django.db.models.deletion.SET_NULL, to='reviews.Title')),
],
options={
'ordering': ['title_for_sorting'],
},
),
migrations.AddField(
model_name='televisionseason',
name='tv_series',
field=models.ForeignKey(help_text='Enter the TV series', null=True, on_delete=django.db.models.deletion.SET_NULL, to='reviews.TelevisionSeries'),
),
migrations.AddField(
model_name='televisionepisode',
name='tv_season',
field=models.ForeignKey(help_text='Enter the TV Season this episode belongs to', null=True, on_delete=django.db.models.deletion.SET_NULL, to='reviews.TelevisionSeason'),
),
]
| 113.539063 | 1,908 | 0.637515 | 14,319 | 0.985275 | 0 | 0 | 0 | 0 | 0 | 0 | 4,759 | 0.327462 |
de876b3ed14bbdc7196b4d80c31ffed86152546c | 1,414 | py | Python | setup.py | imdaveho/intermezzo | 3fe4824a747face996e301ca5190caec0cb0a6fd | [
"MIT"
]
| 8 | 2018-02-26T16:24:07.000Z | 2021-06-30T07:40:52.000Z | setup.py | imdaveho/intermezzo | 3fe4824a747face996e301ca5190caec0cb0a6fd | [
"MIT"
]
| null | null | null | setup.py | imdaveho/intermezzo | 3fe4824a747face996e301ca5190caec0cb0a6fd | [
"MIT"
]
| null | null | null | import platform
from setuptools import setup
if platform.system() == "Windows":
setup(
name="intermezzo",
version="0.1.0",
description="A library for creating cross-platform text-based interfaces using termbox-go.",
long_description="",
url="https://github.com/imdaveho/intermezzo",
author="Dave Ho",
author_email="[email protected]",
license="MIT",
classifiers=[],
packages=["intermezzo"],
package_data={"intermezzo": ["build/*/*.dll"]},
keywords="termbox tui terminal command-line",
install_requires=["cffi>=1.10.0"],
cffi_modules=["intermezzo/build/build_ffi_win.py:ffi"],
setup_requires=["cffi>=1.10.0"],
)
else:
setup(
name="intermezzo",
version="0.1.0",
description="A library for creating cross-platform text-based interfaces using termbox-go.",
long_description="",
url="https://github.com/imdaveho/intermezzo",
author="Dave Ho",
author_email="[email protected]",
license="MIT",
classifiers=[],
packages=["intermezzo"],
package_data={"intermezzo": ["build/*/*.so"]},
keywords="termbox tui terminal command-line",
install_requires=["cffi>=1.10.0"],
cffi_modules=["intermezzo/build/build_ffi_nix.py:ffi"],
setup_requires=["cffi>=1.10.0"],
)
| 35.35 | 100 | 0.601839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 638 | 0.451202 |
de87df11dbf3b3a221e585a21372627cd71cbf40 | 173 | py | Python | aula3/ola/urls.py | Danilo-Xaxa/django_cs50w | 5ae2e076f35a8c32a4e445f8cfd1c66500fbc496 | [
"MIT"
]
| null | null | null | aula3/ola/urls.py | Danilo-Xaxa/django_cs50w | 5ae2e076f35a8c32a4e445f8cfd1c66500fbc496 | [
"MIT"
]
| null | null | null | aula3/ola/urls.py | Danilo-Xaxa/django_cs50w | 5ae2e076f35a8c32a4e445f8cfd1c66500fbc496 | [
"MIT"
]
| null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('<str:nome>', views.cumprimentar, name='cumprimentar'),
]
| 24.714286 | 64 | 0.676301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.202312
de88715741307a44df748cb0254417ebbcf130e6 | 70,016 | py | Python | MOTION.py | catubc/MOTION | 528ce8a860e4f1f1075b85d3bcb162fb78bdad81 | [
"MIT"
]
| null | null | null | MOTION.py | catubc/MOTION | 528ce8a860e4f1f1075b85d3bcb162fb78bdad81 | [
"MIT"
]
| null | null | null | MOTION.py | catubc/MOTION | 528ce8a860e4f1f1075b85d3bcb162fb78bdad81 | [
"MIT"
]
| null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import cv2, os, sys, glob
import scipy
import sklearn
import imageio
import matplotlib.cm as cm
import matplotlib
import time
from sklearn import decomposition, metrics, manifold, svm
from tsne import bh_sne
from matplotlib.path import Path
from numpy import linalg as LA
from scipy.signal import butter, filtfilt, cheby1
from scipy.spatial import distance
#**************************************************************************************************************************
#*************************************************CODE START***************************************************************
#**************************************************************************************************************************
class MOTION(object):
''' Class to detect motion in behaviour video;
self.crop() to select only part of video (speeds up analysis)
        self.binarize_frames() to reduce dimensionality of video and increase SNR
        self.detect_movement() computes euclidean distance between frames and plots the timecourse
'''
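    # Minimal usage sketch (hypothetical path and area name; self.area and
    # self.mode are assumed to be set by the caller before binarize_frames()):
    #   session = MOTION('/path/to/session.avi')
    #   session.area = 'mouth'; session.mode = 'all'
    #   session.crop()              # pick registration + analysis FOVs, convert .avi -> .npy
    #   session.binarize_frames()   # decimate/binarize the cropped stack
    #   session.detect_movement()   # frame-to-frame euclidean distance + plot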
def __init__(self,filename):
print "...current session: ", filename
self.filename = filename
def crop(self):
''' Function crops the FOV for image registration (stable region) and area of interest
Also converts .avi -> .npy format for both stacks + entire original movie.
            Currently only the green (G of RGB) channel is saved; could be improved by averaging all channels
'''
#**************** SELECT CROPPED AREA TO TRACK MOTION (smaller is faster) **********************
#Load and save to disk # frames, frame rate and sample frame for cropping
if os.path.exists(os.path.split(self.filename)[0]+"/nframes.txt")==False:
camera = cv2.VideoCapture(self.filename)
self.frame_rate = camera.get(5)
ctr=0
print "reading frames"
while True:
print (ctr)
(grabbed, frame) = camera.read()
if not grabbed: break
ctr+=1
if ctr==100:
image_original = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
np.save(os.path.split(self.filename)[0]+"/original_image.npy", image_original)
self.n_frames=ctr
np.savetxt(os.path.split(self.filename)[0]+"/nframes.txt",[self.n_frames])
np.savetxt(os.path.split(self.filename)[0]+"/frame_rate.txt",[self.frame_rate])
cv2.destroyAllWindows()
camera.release()
else:
image_original = np.load(os.path.split(self.filename)[0]+"/original_image.npy")
self.n_frames = np.loadtxt(os.path.split(self.filename)[0]+"/nframes.txt",dtype='int32')
self.frame_rate = np.loadtxt(os.path.split(self.filename)[0]+"/frame_rate.txt",dtype='float32')
self.frame_xwidth = len(image_original); self.frame_ywidth = len(image_original[0])
#Run cropping functions on sample frame
self.crop_frame_box(image_original, motion_correct_flag=True) #DEFINE BOX AREAS FOR CROPPING; first define area for register
self.crop_frame_box(image_original, motion_correct_flag=False) #DEFINE BOX AREAS FOR CROPPING; first define area for register
#Convert original file and cropped to .npy
crop_registry = np.load(self.filename[:-4]+'_registry_cropped.npz')
self.x1_registry = crop_registry['x1']; self.x2_registry = crop_registry['x2']
self.y1_registry = crop_registry['y1']; self.y2_registry = crop_registry['y2']
crop_area = np.load(self.filename[:-4]+'_'+self.area+'_cropped.npz')
self.x1 = crop_area['x1']; self.x2 = crop_area['x2']
self.y1 = crop_area['y1']; self.y2 = crop_area['y2']
if os.path.exists(self.filename[:-4]+'_'+self.area+'_cropped.npy')==False:
print "... converting .avi -> .npy files (only Green channel) ..."
if os.path.exists(self.filename[:-4]+'.npy')==False:
original_frames = np.zeros((self.n_frames, self.frame_xwidth,self.frame_ywidth),dtype=np.uint8)
cropped_frames = np.zeros((self.n_frames, self.x2-self.x1,self.y2-self.y1),dtype=np.uint8)
registry_frames = np.zeros((self.n_frames, self.x2_registry-self.x1_registry,self.y2_registry-self.y1_registry),dtype=np.uint8)
camera = cv2.VideoCapture(self.filename)
ctr = 0
while True:
if ctr%1000==0: print " loading frame: ", ctr
if 'luis' in self.filename:
if ctr>15000:
print "...************ too many frames, exiting on 15000..."
break
(grabbed, frame) = camera.read()
if not grabbed: break
#Save copy of frame for .npy file
if os.path.exists(self.filename[:-4]+'.npy')==False:
original_frames[ctr]=frame[:,:,1] #Save green ch only
#original_frames.append(np.uint8(np.mean(frame, axis=2))) #Save average of RGB chans
#Crop FOV for analysis
cropped_frames[ctr]=frame[:,:,1][self.x1:self.x2,self.y1:self.y2]
#cropped_frames.append(np.uint8(np.mean(frame[self.x1:self.x2,self.y1:self.y2],axis=2)))
#Crop FOV for registry
registry_frames[ctr]=frame[:,:,1][self.x1_registry:self.x2_registry,self.y1_registry:self.y2_registry]
#registry_frames.append(np.uint8(np.mean(frame[self.x1_registry:self.x2_registry,self.y1_registry:self.y2_registry],axis=2)))
ctr+=1
#Save original movie in .npy format
if os.path.exists(self.filename[:-4]+'.npy')==False:
np.save(self.filename[:-4]+'.npy', original_frames) #This is the entire movie converted to .npy
#Save cropped movie area and registry area
np.save(self.filename[:-4]+'_'+self.area+'_cropped', cropped_frames) #just cropped movie
np.save(self.filename[:-4]+'_registry_cropped', registry_frames) #just cropped movie
def binarize_frames(self):
''' Reduce the size/dimensionality of the sample/frame by calling various functions
            This also binarizes the frames (i.e. all vals are 0/1)
            TODO: Check this step, investigate if preserving more information in the frames would be helpful
'''
        #area_filename = self.filename[:-4]+"_"+self.area+"_"+self.mode+".npy"
area_filename = self.filename[:-4]+"_"+self.area+"_cropped_registered_"+self.mode+".npy"
self.movie_filename = self.filename[:-4]+'.npy'
if os.path.exists(area_filename)==False:
frames = np.load(self.filename[:-4]+"_"+self.area+"_cropped_registered.npy")
rLow=100; rHigh=255
reduced_frames = []
contour_frames = []
edge_frames = []
frame_count = 0
for frame in frames:
if (frame_count%1000)==0: print " reducing frame: ", frame_count
#Crop frame before processing
#frame = frame[self.x1:self.x2,self.y1:self.y2]
if self.mode=='all':
reduced_frames.append(self.decimate(frame, frame_count, rLow, rHigh))
elif self.mode == 'contours':
contour_frames.append(self.find_contours(frame, frame_count, rLow, rHigh))
elif self.mode=='edges':
edge_frames.append(self.find_edges(frame, frame_count, rLow, rHigh))
frame_count += 1
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
if self.mode=='all':
np.save(area_filename, np.nan_to_num(reduced_frames))
self.decimated_frames = np.nan_to_num(reduced_frames)
elif self.mode=='contours':
np.save(area_filename, np.nan_to_num(contour_frames))
self.decimated_frames = np.nan_to_num(contour_frames)
elif self.mode=='edges':
np.save(area_filename, np.nan_to_num(edge_frames))
self.decimated_frames = np.nan_to_num(edge_frames)
else:
self.decimated_frames = np.load(area_filename,mmap_mode='c')
def detect_movement(self):
''' Detect movement as euclidean distance between frames
'''
print "... detecting movement ..."
if os.path.exists(self.filename[:-4]+"_diff_array.npy")==False:
self.compute_diff()
#Plot data
t = np.arange(len(self.diff_array))/(self.frame_rate)
plt.plot(t, self.diff_array)
#Plotting parameters
plt.xlim(0,t[-1])
plt.yticks([])
font_size = 20
plt.xlabel("Time (sec)", fontsize = font_size)
plt.ylabel("Movement index (a.u.)", fontsize = font_size)
plt.tick_params(axis='both', which='both', labelsize=font_size)
plt.title(self.filename, fontsize = font_size)
plt.show(block=True)
else:
self.diff_array = np.load(self.filename[:-4]+"_diff_array.npy")
def read_metadata(self, output):
decimated_filename = self.filename[:-4]+"_"+self.area+"_cropped_registered_"+self.mode+".npy"
n_frames = len(np.load(decimated_filename,mmap_mode='c'))
names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusternames.txt",dtype='str')
print names
indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusterindexes.npy")
#Licking
idx = np.where(names=='lick')[0]
if len(idx)!=0: output.lick_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=len(indexes[idx][0])/float(n_frames)
else: output.lick_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=0
#Pawing
idx = np.where(names=='paw')[0]
if len(idx)!=0: output.paw_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=len(indexes[idx][0])/float(n_frames)
else: output.paw_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=0
#Add scratching to pawing
idx = np.where(names=='scratch')[0]
if len(idx)!=0: output.scratch_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=len(indexes[idx][0])/float(n_frames)
else: output.scratch_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=0
data = np.load(glob.glob(os.path.split(self.filename)[0]+'/*_metadata.npz')[0])
if data['drift']=='y': self.drift=1
elif data['drift']=='n': self.drift=0
else: print "...exception..."; quit()
if data['spout_moved']=='y': self.spout_moved=1
elif data['spout_moved']=='n': self.spout_moved=0
else: print "...exception..."; quit()
if data['hand_inview']=='y': self.hand_inview=1
elif data['hand_inview']=='n': self.hand_inview=0
else: print "...exception..."; quit()
if data['camera_moved']=='y': self.camera_moved=1
elif data['camera_moved']=='n': self.camera_moved=0
else: print "...exception..."; quit()
output.drift_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=self.drift
output.spout_matrix[self.rt_ctr*output.scale:(self.rt_ctr+1)*output.scale,self.ses_ctr*output.scale:(self.ses_ctr+1)*output.scale]=self.spout_moved
self.other_exclusion=data['other_exclusion']
return output
def load_frames(self, cluster_name):
names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusternames.txt",dtype='str')
print names
indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+str(self.methods[self.method])+"_clusterindexes.npy")
cluster_index = np.where(names ==cluster_name)[0]
if len(cluster_index)==0:
return None
cluster_indexes = indexes[cluster_index][0] #List of indexes for selected behaviour
#Load movie
self.movie_filename = self.filename[:-4]+'.npy'
        enlarge = 100 #Make movie FOV larger than original cropping rectangle by ~100 pixels; otherwise difficult to see what's going on
movie_array = np.load(self.movie_filename, mmap_mode='c')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
print movie_array.shape
if len(cluster_index)==0:
return movie_array[0]*0
#Randomly return one of these images;
return movie_array[np.random.choice(cluster_indexes)]
def save_metadata(self):
print self.filename[:-4]
metadata = []
drift = raw_input("Did camera drift ? (y/n) " )
spout_moved = raw_input("Was spout readjusted ? (y/n) " )
hand_inview = raw_input("Did hand enter the screen ? (y/n) " )
camera_moved = raw_input("Did camera move or otherwise jump ? (y/n) " )
other_exclusion = raw_input("Any thing else to note (y/n or type out) ")
np.savez(self.filename[:-4]+"_metadata.npz", drift=drift, spout_moved=spout_moved, hand_inview=hand_inview, camera_moved=camera_moved, other_exclusion=other_exclusion)
def annotate_frames(self):
''' Function to annotate frames in partially supervised fashion
            Calls multiple functions
'''
#Subsample frames to further reduce dimensionality and speed up processing
if True: self.subsample_frames()
else: self.data_subsampled = self.decimated_frames
#Scale the frame information by some coefficient of movement
if False: self.scale_moving_frames()
#Run dim reduction
self.dimension_reduction()
#Filter transformed distributions to remove camera drift (usually)
if True:
self.filter_PCA(self.data_dim_reduction, filtering=True, plotting=True)
#Cluster data
self.cluster_methods = ['KMeans', 'MeanShift', 'DBSCAN', 'manual']
self.cluster_method = 3
self.cluster_data()
#Review clusters and re/cut them
#self.review_clusters()
self.export_clusters(recluster_flag=False)
def resplit_cluster(self, cluster):
''' Recluster previously split clusters...
'''
print "... resplitting cluster: ", cluster
#THIS NEEDS TO BE SIMPLIFIED
#Subsample frames to further reduce dimensionality and speed up processing
if True: self.subsample_frames()
else: self.data_subsampled = self.decimated_frames
#Scale the frame information by some coefficient of movement
if False: self.scale_moving_frames()
#Run dim reduction
self.dimension_reduction()
#Filter transformed distributions to remove camera drift (usually)
if True:
self.filter_PCA(self.data_dim_reduction, filtering=True, plotting=False)
self.load_clusters()
#Load clustered info
cluster_names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames.txt", dtype='str')
cluster_indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes.npy")
#Assign clusters to unique ids
cumulative_indexes=[]
unique_names = np.unique(self.cluster_names)
print self.cluster_names
print unique_names
unique_indexes = []
for ctr1, unique_name in enumerate(unique_names):
unique_indexes.append([])
for ctr, cluster_name in enumerate(self.cluster_names):
if unique_name==cluster_name:
unique_indexes[ctr1].extend(self.cluster_indexes[ctr])
cluster_id = np.where(unique_names==cluster)[0]
print "... cluster_id: ", cluster_id
self.unique_indexes = unique_indexes[cluster_id]
#Cluster data
self.cluster_methods = ['KMeans', 'MeanShift', 'DBSCAN', 'manual']
self.cluster_method = 3
self.cluster_data(indexes=unique_indexes[cluster_id]) #Send indexes for the selected cluster after collapsing over unique valus
def resave_clusters(self,indexes):
''' Load original cluster labels and re-adjust based on resplit cluster
'''
reclustered_id_indexes = np.int32(indexes)
print "... reclustered id indexes: ", len(reclustered_id_indexes)
#Load clustered info
original_cluster_names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames.txt", dtype='str')
original_cluster_indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes.npy")
#Delete the cluster that was just resplit
temp_index = np.where(original_cluster_names==self.recluster_id)[0]
#print "... reclustered id : ", temp_index
original_cluster_names = np.delete(original_cluster_names, temp_index,0)
original_cluster_indexes = np.delete(original_cluster_indexes, temp_index,0)
#Append new labels back in; first convert to lists, easier to work with due to variable length
cluster_names_array = []
for k in range(len(original_cluster_names)):
cluster_names_array.append(original_cluster_names[k])
#Add new labels back in from newly identified self.cluster_names
for k in range(len(self.cluster_names)):
cluster_names_array.append(self.cluster_names[k])
self.cluster_names = cluster_names_array
#Do the same with cluster indexes
cluster_indexes_array = []
for k in range(len(original_cluster_indexes)):
cluster_indexes_array.append(original_cluster_indexes[k])
#Add new labels back in ******************* NOTE: Indexes will be relative to the previously clustered indexes not 0
for k in range(len(self.cluster_indexes)):
print k, len(self.cluster_indexes[k]), len(reclustered_id_indexes)
print self.cluster_indexes[k]
cluster_indexes_array.append(reclustered_id_indexes[np.int32(self.cluster_indexes[k])])
self.cluster_indexes = cluster_indexes_array
print ".... check that all frames have been saved..."
print len(self.cluster_indexes)
#print np.unique(np.array(self.cluster_indexes))
#*****Re-assign clusters to unique ids after adding the new split cluster labels back in
cumulative_indexes=[]
unique_names = np.unique(self.cluster_names)
print "...reclustered data..."
print self.cluster_names
for k in range(len(self.cluster_indexes)):
print len(self.cluster_indexes[k])
print "\n\n... unique data..."
print unique_names
unique_indexes = []
for ctr1, unique_name in enumerate(unique_names):
unique_indexes.append([])
for ctr, cluster_name in enumerate(self.cluster_names):
if unique_name==cluster_name:
unique_indexes[ctr1].extend(self.cluster_indexes[ctr])
print len(unique_indexes[ctr1])
#cluster_id = np.where(unique_names==cluster)[0]
#print "... cluster_id: ", cluster_id
np.savetxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames_new.txt", unique_names,fmt='%s')
np.save(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes_new.npy", unique_indexes)
self.export_clusters(recluster_flag=True)
def manual_label(self):
filename_manuallabel = self.filename[:-4]+"_"+self.area+"_manuallabels.npz"
if os.path.exists(filename_manuallabel)==False:
plt.plot(self.diff_array)
mean = np.mean(self.diff_array)
top_cutoff = np.max(self.diff_array)*.55
bottom_cutoff = np.mean(self.diff_array)*.05
plt.plot([0,len(self.diff_array)],[top_cutoff,top_cutoff])
plt.plot([0,len(self.diff_array)],[bottom_cutoff,bottom_cutoff])
plt.show(block=True)
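            #Sample up to 50 random frames above the high-motion cutoff and 50 below the low-motion cutoff for labelling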
print "... limitting annotation to 50 events max..."
indexes = np.where(self.diff_array>top_cutoff)[0]
indexes = indexes[np.random.randint(len(indexes),size=50)]
print "... # frames: ", len(indexes)
indexes2 = np.where(self.diff_array<bottom_cutoff)[0]
indexes2 = indexes2[np.random.randint(len(indexes2),size=50)]
print "... # frames: ", len(indexes2)
indexes = np.hstack((indexes,indexes2))
print "... # total frames to annotate: ", len(indexes)
enlarge=100
movie_array = np.load(self.movie_filename, mmap_mode='c')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
#Select most active frames
data_ = movie_array[indexes]
border = 30
fontsize=15
classifier = np.array([0,0,0,0])
classifier_list = []
self.complete=False
for k,frame in enumerate(data_):
if self.complete==True: break #Exit by clicking outside the annotation box
#Make nice box around each frame to use for annotation
temp = np.zeros((frame.shape[0]+border,frame.shape[1]+border))
temp[:, :border/2]=100
temp[:, frame.shape[1]+border/2:]=150
temp[:border/2]=50
temp[frame.shape[0]+border/2:]=200
temp[border/2:frame.shape[0]+border/2,border/2:frame.shape[1]+border/2]=frame[:,:,1]
#Make plots
fig, ax = plt.subplots()
ax.imshow(temp)
self.cid = fig.canvas.mpl_connect('button_press_event', self.on_click_classify)
plt.suptitle("frame: "+str(k)+" / "+str(len(data_)),fontsize=fontsize)
plt.title("Lick: "+str(classifier[0]),fontsize=fontsize)
plt.xlabel("Stationary: "+str(classifier[1]),fontsize=fontsize)
plt.ylabel("Paw :"+str(classifier[2]),fontsize=fontsize)
plt.show(block=True)
y = self.coords[0]
x = self.coords[1]
if (y<(border/2)):
classifier[0]+=1
classifier_list.append(0)
elif (y>(frame.shape[0]+border/2)):
classifier[1]+=1
classifier_list.append(1)
elif (x<(border/2)):
classifier[2]+=1
classifier_list.append(2)
else:
classifier[3]+=1
classifier_list.append(3)
np.savez(self.filename[:-4]+"_"+self.area+"_manuallabels",indexes=indexes[:k],classification=classifier_list[:k])
def classify_frames(self):
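        #Manually label a subset of frames, train classifiers (SVM variants, Gaussian process) on them, then predict over the whole movie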
self.manual_label()
#Load classified data
data = np.load(self.filename[:-4]+"_"+self.area+"_manuallabels.npz")
indexes=data['indexes']
class_id=data['classification']
#if True: self.subsample_frames()
if True:
self.dimension_reduction()
self.data = self.data_dim_reduction
if True: self.filter_data(self.data_dim_reduction)
#Load original movie and
        enlarge = 50 #Make movie FOV larger than original cropping rectangle by 50 pixels or so; otherwise difficult to see what's going on;
movie_array = np.load(self.movie_filename, mmap_mode='c')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
#Delete last class
for k in [3]:
temp_indexes = np.where(class_id==k)[0]
indexes = np.delete(indexes,temp_indexes,0)
class_id = np.delete(class_id,temp_indexes,0)
print indexes
print class_id
#Load original data
#Select training data and test data
#filename = self.filename[:-4]+"_"+self.area+"_"+self.mode+".npy"
#data_movie = np.load(filename)
#data_movie = np.float32(self.decimated_frames)
data_movie = np.float32(self.data)
print data_movie.shape
#Convert data to 1D
X=data_movie[indexes]
X_1D = []
for frame in X:
X_1D.append(np.ravel(frame))
X_1D=np.array(X_1D)
print X_1D.shape
X=X_1D
y = class_id
test_flag = True
if test_flag:
X_test = data_movie#[:2000]
print X_test.shape
X_1D = []
for frame in X_test:
X_1D.append(np.ravel(frame))
X_1D=np.array(X_1D)
print X_1D.shape
X_test=X_1D
C = 1.0
methods = []
titles=[]
#Run SVM
if True:
# SVC with linear kernel
print "...computing svm..."
classifier = svm.SVC(kernel='linear', C=C, probability=True)
svc = classifier.fit(X, y)
#svc_test= classifier.predict_proba(X_test)
#print svc_test
titles.append('SVC with linear kernel')
methods.append(svc)
if False:
# LinearSVC (linear kernel)
print "...computing svm..."
classifier = svm.LinearSVC(C=C)
lin_svc = classifier.fit(X, y)
#svc_test= classifier.decision_function(X_test)
#print svc_test
titles.append('LinearSVC (linear kernel)')
methods.append(lin_svc)
if True:
# LinearSVC (linear kernel)
print "...computing svm..."
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
classifier = GaussianProcessClassifier(1.0 * RBF(1.0))
gaussian = classifier.fit(X, y)
#svc_test= classifier.decision_function(X_test)
#print svc_test
titles.append('Gaussian Process')
methods.append(gaussian)
if False:
# SVC with RBF kernel
print "...computing svm..."
classifier = svm.SVC(kernel='rbf', gamma=0.7, C=C)
rbf_svc = classifier.fit(X, y)
titles.append('SVC with RBF kernel')
methods.append(rbf_svc)
if True:
# SVC with polynomial (degree 3) kernel
print "...computing svm..."
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
titles.append('SVC with polynomial (degree 3) kernel')
methods.append(poly_svc)
colors=['red','blue','green','lightsalmon','dodgerblue','indianred','mediumvioletred','pink', 'brown', 'magenta']
y_clr = []
for k in range(len(y)): y_clr.append(colors[y[k]])
for i, clf in enumerate(methods):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
#plt.scatter(X_test[:,0],X_test[:,1],c=Z, cmap=plt.cm.coolwarm)
## Put the result into a color plot
if test_flag:
Z = clf.predict(X_test)
Z_unique = np.unique(Z)
for p in Z_unique:
                    indexes = np.where(Z==p)[0]     #p is a label value from Z_unique, not a positional index
imageio.mimwrite(self.filename[:-4]+'_'+self.area+'_'+titles[i]+"_cluster"+str(p)+".mp4", movie_array[indexes], fps = self.frame_rate)
Z_clr = []
for k in range(len(Z)): Z_clr.append(colors[Z[k]])
plt.scatter(X_test[:,0], X_test[:,1], c=Z_clr, alpha=0.3)#, cmap=plt.cm.coolwarm, alpha=0.8)
# Plot also the training points
plt.scatter(X[:,0], X[:,1], c=y_clr)#, cmap=plt.cm.coolwarm)
plt.xlabel('length')
plt.ylabel('width')
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show(block=True)
def load_cluster(self, cluster):
print "...loading cluster indexes: ", cluster
filename = self.filename[:-4]+'_'+self.area+'_'+str(cluster)+".txt"
self.indexes = np.loadtxt(filename,dtype=np.int32)
self.data_subsampled = self.data_subsampled[self.indexes]
print self.data_subsampled.shape
def subsample_frames(self):
''' Further reduce dimensionality by subsampling frames
'''
print "...subsampling frames..."
subsample_filename = self.filename[:-4]+"_"+self.area+"_"+self.mode+"_subsampled.npy"
decimated_filename = self.filename[:-4]+"_"+self.area+"_cropped_registered_"+self.mode+".npy"
self.decimated_frames = np.load(decimated_filename)
if os.path.exists(subsample_filename)==False:
subsampled_array = []
print "... subsampling ..."
#print self.decimated_frames
for k in range(len(self.decimated_frames)):
#print self.decimated_frames[k].shape
#subsampled_array.append(scipy.misc.imresize(data_2D_array[k], 0.2, interp='bilinear', mode=None))
subsampled_array.append(scipy.misc.imresize(self.decimated_frames[k], 0.1, interp='bilinear', mode=None))
self.data_subsampled = np.array(subsampled_array)
np.save(subsample_filename, self.data_subsampled)
else:
self.data_subsampled = np.load(subsample_filename)
def scale_moving_frames(self):
#Set first value in diff_array to second value (otherwise very large)
print len(self.diff_array)
#self.diff_array[0:10]=self.diff_array[10]
self.diff_array = np.insert(self.diff_array,0,self.diff_array[0])
self.diff_array-=np.min(self.diff_array) #Translate to min val=0
print self.data_subsampled.T.shape
#Scale the
self.data_subsampled = (self.data_subsampled.T * self.diff_array).T
print self.data_subsampled.shape
def load_clusters(self):
self.cluster_names = np.loadtxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames.txt" ,dtype='str')
self.cluster_indexes = np.load(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes.npy")
def export_clusters(self, recluster_flag=False):
if recluster_flag==True:
text_append="_new"
else:
text_append=''
##THIS NEEDS TO BE SIMPLIFIED
##Subsample frames to further reduce dimensionality and speed up processing
#if True: self.subsample_frames()
#else: self.data_subsampled = self.decimated_frames
##Scale the frame information by some coefficient of movement
#if False: self.scale_moving_frames()
##Run dim reduction
#self.dimension_reduction()
##Filter transformed distributions to remove camera drift (usually)
#if True:
#self.filter_PCA(self.data_dim_reduction, filtering=True, plotting=False)
#self.load_clusters()
##Legacy variables need to be assigned to object attributes
##clustered_pts = self.clustered_pts
#n_frames= len(self.data_dim_reduction)
#X = self.data_dim_reduction
#pathname = self.filename[:-4]
#area = self.area
#print "... n frames: ", n_frames
##Initialize the color_array to all black
#colors=['red','blue','green','lightsalmon','dodgerblue','indianred','mediumvioletred','pink', 'brown', 'magenta']
#color_array=[]
##Initialize a list of clrs; NB: crappy way to initialize list of colours; REDO
#for k in range(n_frames):
#color_array.append('mediumvioletred')
#color_array = np.array(color_array, dtype=str)
##Enumerate all indexes from 0.. # frames
#all_frames = np.arange(n_frames)
#try:
#self.indexes
#except:
#self.indexes=np.arange(n_frames)
#List of behaviors:
cumulative_indexes=[]
unique_names = np.unique(self.cluster_names)
print self.cluster_names
print unique_names
unique_indexes = []
for ctr1, unique_name in enumerate(unique_names):
unique_indexes.append([])
for ctr, cluster_name in enumerate(self.cluster_names):
if unique_name==cluster_name:
unique_indexes[ctr1].extend(self.cluster_indexes[ctr])
cumulative_indexes = unique_indexes
#Review the clustering in original PCA space - no point if PCA was done recursively
#if False:
#plt.scatter(X[:,0],X[:,1],color=color_array)
#plt.show(block=True)
#************************************************************************************
#******************************** GENERATE MOVIES & FIGS ****************************
#************************************************************************************
#Load movie
        enlarge = 50 #Make movie FOV larger than original cropping rectangle by 50 pixels or so; otherwise difficult to see what's going on;
print self.x1, self.x2, self.y1, self.y2
print max(0,self.x1-enlarge), self.x2+enlarge, max(0,self.y1-enlarge),self.y2+enlarge
movie_array = np.load(self.movie_filename, mmap_mode='c')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
#movie_array = movie_array[self.indexes] #Load all or just part of moview --- NOTE IMPLEMNETED
print movie_array.shape
#Compute membership in each cluster and save examples to file:
cluster_ids = []
dim=4
frame_indexes = np.arange(len(movie_array)) #Make original indexes and remove them as they are removed from the datasets
for k in range(len(cumulative_indexes)):
if len(cumulative_indexes[k])==0:
print "... empty cluster..."
continue
img_indexes = np.int32(np.random.choice(cumulative_indexes[k], min(len(cumulative_indexes[k]), dim*dim))) #Chose random examples from cluster
#Plot individual frames
gs = gridspec.GridSpec(dim,dim)
for d in range(len(img_indexes)):
ax = plt.subplot(gs[int(d/dim), d%dim])
plt.imshow(movie_array[img_indexes[d]])#, cmap='Greys_r')
ax.set_xticks([]); ax.set_yticks([])
plt.suptitle("Cluster: " + unique_names[k] + "/" + str(len(cumulative_indexes))+" # frames: "+str(len(cumulative_indexes[k])), fontsize = 10)
plt.savefig(self.filename[:-4]+'_'+self.area+'_cluster_'+unique_names[k]+text_append+'.png') # save the figure to file
plt.close()
#cluster_name = k
np.savetxt(self.filename[:-4]+'_'+self.area+'_cluster_'+unique_names[k]+text_append+".txt", cumulative_indexes[k], fmt='%i')
print "... making movie: ", unique_names[k], " # frames: ", len(cumulative_indexes[k])
imageio.mimwrite(self.filename[:-4]+'_'+self.area+'_cluster_'+unique_names[k]+text_append+".mp4", movie_array[cumulative_indexes[k]], fps = self.frame_rate)
#************************************************************************************************************************************
#*************************************************UTILTY FUNCTIONS ***************************************************************
#************************************************************************************************************************************
#@jit
def decimate(self, frame, frame_count, rLow, rHigh):
''' Binarize, erode, dilate and blur each frame
TODO: check if alternative approaches work better: e.g. keeping more data in frame
'''
#print frame.shape
#lower = np.array([0, 0, rLow], dtype = "uint8")
#upper = np.array([255, 255, rHigh], dtype = "uint8")
#lower = np.array([0, rLow, 0], dtype = "uint8")
#upper = np.array([255, rHigh, 255], dtype = "uint8")
lower = np.array([rLow], dtype = "uint8")
upper = np.array([rHigh], dtype = "uint8")
# apply a series of erosions and dilations to the mask
# using an rectangular kernel
skinMask = cv2.inRange(frame, lower, upper)
#kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
#skinMask = cv2.erode(skinMask, kernel, iterations = 1)
skinMask = cv2.dilate(skinMask, kernel, iterations = 1)
# blur the mask to help remove noise, then apply the
# mask to the frame
skinMask = cv2.GaussianBlur(skinMask, (5, 5), 0)
if self.show_vid:
cv2.imshow('original', frame)
cv2.imshow('filtered', skinMask)
cv2.waitKey(1)
#return skinMask
return frame
def find_contours(self, frame, frame_count, rLow, rHigh):
lower = np.array([100, 100, rLow], dtype = "uint8")
upper = np.array([255, 255, rHigh], dtype = "uint8")
frame = cv2.inRange(frame, lower, upper)
#blurred = cv2.pyrMeanShiftFiltering(frame, 31,91)
#gray = cv2.cvtColor(blurred,cv2.COLOR_BGR2GRAY)
#ret, threshold = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
ret, threshold = cv2.threshold(frame,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
contours,_=cv2.findContours(threshold,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
print "... # contours detected: ", len(contours)
cv2.drawContours(frame,contours,0,(0,0,255),-1)
cv2.namedWindow('Display',cv2.WINDOW_NORMAL)
cv2.imshow('Display',frame)
cv2.waitKey(1)
#imgray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY);
#ret,thresh = cv2.threshold(imgray,127,255,0);
#contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE);
##draw a three pixel wide outline
#cv2.drawContours(img,contours,-1,(0,255,0),3);
def find_edges(self, frame, frame_count, rLow, rHigh):
image = frame
skinMask = image
#image = cv2.imread(imagePath)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
skinMask = cv2.erode(skinMask, kernel, iterations = 1)
skinMask = cv2.dilate(skinMask, kernel, iterations = 3)
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#blurred = cv2.GaussianBlur(gray, (3, 3), 0)
blurred = skinMask
image=skinMask
# apply Canny edge detection using a wide threshold, tight
# threshold, and automatically determined threshold
wide = cv2.Canny(blurred, 10, 200)
tight = cv2.Canny(blurred, 225, 250)
#auto = auto_canny(blurred)
sigma=0.33
if True:
v = np.median(blurred)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
auto = cv2.Canny(image, lower, upper)
auto = cv2.dilate(auto, kernel, iterations = 2)
#auto = cv2.erode(auto, kernel, iterations = 3)
# show the images
#cv2.imshow("Original", image)
#cv2.imshow("Edges", np.hstack([wide, tight, auto]))
cv2.imshow("Edges", auto)
cv2.waitKey(1)
#ax=plt.subplot(1,2,1)
#ax.imshow(auto)
#print np.min(auto), np.max(auto)
#auto=np.nan_to_num(auto)
#ax=plt.subplot(1,2,2)
#ax.imshow(auto)
#print np.min(auto), np.max(auto)
#print auto
#plt.show(block=True)
return auto
#@jit
def compute_diff(self):
#filename = self.filename[:-4]+"_"+self.area+"_"+"cropped_movie.npy"
#filename = self.filename[:-4]+"_"+self.area+"_"+self.mode+".npy"
filename = self.filename[:-4]+"_"+self.area+"_"+self.mode+".npy"
data = np.load(filename, mmap_mode='c')
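        #Motion-energy signal: L2 norm of each consecutive-frame difference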
self.diff_array = []
for k in range(len(data)-1):
print "... computing difference for frame: ", k
self.diff_array.append(LA.norm(data[k+1]-data[k]))
#Filter video motion to remove any artifact jitters.
if False:
#filter_val = self.frame_rate*0.49 #Hz
filter_val = 0.2 #Fixed threshold filter #Hz
self.diff_array = self.butter_lowpass_filter(self.diff_array, filter_val, self.frame_rate, 5)
np.save(self.filename[:-4]+"_diff_array", self.diff_array)
def butter_lowpass(self, cutoff, fs, order=5):
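        #Standard Butterworth low-pass design; cutoff is normalized by the Nyquist frequency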
nyq = 0.5 * fs
normal_cutoff = cutoff/nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(self, data, cutoff, fs, order=5):
b, a = self.butter_lowpass(cutoff, fs, order=order)
y = filtfilt(b, a, data)
return y
def crop_frame_box(self, image, motion_correct_flag = False):
''' Function to crop field-of-view of video
'''
#global coords, image_temp, ax, fig, cid, img_height, img_width
self.image_temp = image.copy()
self.img_height, self.img_width = self.image_temp.shape[:2]
if motion_correct_flag:
crop_filename = self.filename[:-4]+'_registry_cropped.npz'
else:
crop_filename = self.filename[:-4]+'_'+self.area+'_cropped.npz'
if (os.path.exists(crop_filename)==False):
self.fig, self.ax = plt.subplots()
self.coords=[]
self.ax.imshow(image)#, vmin=0.0, vmax=0.02)
if motion_correct_flag:
self.ax.set_title("Define area to be used for motion registry\n (Click top left + bottom right corner of FOV)")
else:
self.ax.set_title("Define area to be used for image analysis\n (Click top left + bottom right corner of FOV)")
#figManager = plt.get_current_fig_manager()
#figManager.window.showMaximized()
self.cid = self.fig.canvas.mpl_connect('button_press_event', self.on_click)
plt.show(block=True)
x_coords = np.int32(np.sort([self.coords[0][0], self.coords[1][0]]))
y_coords = np.int32(np.sort([self.coords[0][1], self.coords[1][1]]))
#return x_coords[0],x_coords[1],y_coords[0],y_coords[1]
#if motion_correct_flag==False:
self.x1=x_coords[0]; self.x2=x_coords[1]; self.y1=y_coords[0]; self.y2=y_coords[1]
np.savez(crop_filename, x1=x_coords[0], x2=x_coords[1], y1=y_coords[0], y2=y_coords[1]) #Save both FOV and registry cropped corners
print x_coords, y_coords
if False:
self.fig, self.ax = plt.subplots()
self.ax.imshow(image[self.x1:self.x2, self.y1:self.y2])
plt.title(self.area +"Field-of-view selected\n(close figure to start)")
plt.show(block=True)
def motion_correct_caiman(self):
''' Imported rigid motion correction toolbox from caiman
'''
fname_registry = self.filename[:-4]+"_registry_cropped.npy" #This is the cropped FOV specifically for registration
fname_original = self.filename[:-4]+".npy" #This is the entire movie
fname_cropped = self.filename[:-4]+"_"+self.area+"_cropped.npy" #This is the area FOV
#original_mov = np.load(fname_original,mmap_mode='c')
all_mov = np.load(fname_registry)
crop_mov = np.load(fname_cropped)
#Check to see if shift_rig file has already been saved
if os.path.exists(fname_registry[:-4]+"_shifts_rig.npy")==False:
# motion correction parameters
niter_rig = 1 # number of iterations for rigid motion correction
            max_shifts = (6, 6)         # maximum allowed rigid shift
            splits_rig = 56             # for parallelization split the movies in num_splits chunks across time
            strides = (48, 48)          # start a new patch for pw-rigid motion correction every x pixels
            overlaps = (24, 24)         # overlap between patches (size of patch strides+overlaps)
            splits_els = 30             # for parallelization split the movies in num_splits chunks across time
upsample_factor_grid = 4 # upsample factor to avoid smearing when merging patches
max_deviation_rigid = 3 # maximum deviation allowed for patch with respect to rigid shifts
#%% start a cluster for parallel processing
#caiman_path = np.loadtxt('caiman_folder_location.txt', dtype=str) #<------------ is this necessary still?
caiman_path = '/home/cat/code/CaImAn' #<------------ is this necessary still?
sys.path.append(str(caiman_path)+'/')
print (caiman_path)
import caiman as cm
c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=None, single_thread=False)
#Load cropped image and use it to align larger movie
#original_mov = np.load(fname_original,mmap_mode='c')
all_mov = np.load(fname_registry)
crop_mov = np.load(fname_cropped)
print "...cropped movie shape...", all_mov.shape
#return
min_mov = all_mov.min()
# this will be subtracted from the movie to make it non-negative
from caiman.motion_correction import MotionCorrect
mc = MotionCorrect(fname_registry, min_mov,
dview=dview, max_shifts=max_shifts, niter_rig=niter_rig,
splits_rig=splits_rig,
strides= strides, overlaps= overlaps, splits_els=splits_els,
upsample_factor_grid=upsample_factor_grid,
max_deviation_rigid=max_deviation_rigid,
shifts_opencv = True, nonneg_movie = True)
mc.motion_correct_rigid(save_movie=False,template = None)
dview.close()
dview.terminate()
#matplotlib.use('Agg')
np.save(fname_registry[:-4]+"_shifts_rig.npy", mc.shifts_rig)
#Save registered original movie and the registry FOV
reg_mov = np.zeros(all_mov.shape, dtype=np.uint8)
#reg_original_mov = np.zeros(original_mov.shape, dtype=np.uint8)
for k in range(len(all_mov)):
if k%1000==0: print "... shifting frame: ",k
reg_mov[k] = np.roll(np.roll(all_mov[k], int(mc.shifts_rig[k][0]), axis=0), int(mc.shifts_rig[k][1]), axis=1)
#reg_original_mov[k] = np.roll(np.roll(original_mov[k], int(mc.shifts_rig[k][0]), axis=0), int(mc.shifts_rig[k][1]), axis=1)
#tiff.imsave(fname[:-4]+"_registered.tif", reg_mov)
#np.save(fname_original[:-4]+"_registered.npy", reg_original_mov)
np.save(fname_registry[:-4]+"_registered.npy", reg_mov)
if os.path.exists(fname_cropped[:-4]+"_registered.npy")==False:
shifts_rig = np.load(fname_registry[:-4]+"_shifts_rig.npy")
print ("... shifting image stack based on motion correction...")
reg_cropped_mov = np.zeros(crop_mov.shape, dtype=np.uint8)
for k in range(len(all_mov)):
if k%1000==0: print "... shifting frame: ",k
reg_cropped_mov[k] = np.roll(np.roll(crop_mov[k], int(shifts_rig[k][0]), axis=0), int(shifts_rig[k][1]), axis=1)
np.save(fname_cropped[:-4]+"_registered.npy", reg_cropped_mov)
imageio.mimwrite(fname_registry[:-4]+"_registered.mp4", reg_cropped_mov, fps = self.frame_rate)
if False:
print "...saving movies..."
imageio.mimwrite(fname_original[:-4]+"_registered.mp4", reg_original_mov, fps = self.frame_rate)
imageio.mimwrite(fname_registry[:-4]+"_registered.mp4", reg_mov, fps = self.frame_rate)
imageio.mimwrite(fname_cropped[:-4]+"_registered.mp4", reg_cropped_mov, fps = self.frame_rate)
def register(self):
import imreg_dft as ird
enlarge=0
#data = np.load(filename[:-4]+"_movie.npy",mmap_mode='c')[:,:,:,1]
#Convert original file to .npy first
if True: #Use binarized
#self.movie_filename = self.filename[:-4]+"_"+self.area+"_"+self.mode+".npy" #This is cropped + binarized image
self.movie_filename = self.filename[:-4]+'_'+self.area+'_cropped_movie.npy' #This is cropped original image
data = np.load(self.movie_filename, mmap_mode='c')[:,:,:,1] #pick green channel
else:
self.movie_filename = self.filename[:-4]+'.npy' #Check if original video file was converted to .npy format
data = np.load(self.movie_filename, mmap_mode='c')[:,:,:,1]
original_data = np.load(self.filename[:-4]+'.npy', mmap_mode='c')[:,:,:,1]
#Register the original data
#Note: using either cropped image or entire video frame need different step
registered_filename = self.filename[:-4]+"_registered.npy"
if os.path.exists(registered_filename)==False:
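            #Rigid registration: use frame 0 as the template and translate every frame onto it with imreg_dft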
#data = self.movie[:,:,:,1]
im0=data[0]
result_array=[]
result_array.append(original_data[0])
#for k in range(len(data[:100])):
for k in range(len(data)):
if k%100==0: print "...registering frame: ", k
im1= data[k]
tvec = ird.translation(im0,im1)["tvec"].round(4)
result_array.append(np.uint8(ird.transform_img(original_data[k], tvec=tvec))) #Perform transformation on entire recording
#print result_array
#result_array=np.uint8(result_array)
#print result_array.shape
print "...saving registered data ..."
np.save(registered_filename, result_array)
imageio.mimwrite(registered_filename[:-4]+'.mp4', result_array, fps = 10.)
def on_click(self, event):
''' Mouse click function that catches clicks and plots them on top of existing image
'''
#global coords, image_temp, ax, fig, cid
print self.coords
if event.inaxes is not None:
print event.ydata, event.xdata
self.coords.append((event.ydata, event.xdata))
for j in range(len(self.coords)):
for k in range(6):
for l in range(6):
#print self.coords[j][0], self.coords[j][1]
#print image_temp[int(event.ydata)-1+k,int(event.xdata)-1+l]
                        self.image_temp[int(self.coords[j][0])-1+k,int(self.coords[j][1])-1+l]=np.max(self.image_temp)   #mark every saved click, not just the latest
self.ax.imshow(self.image_temp)
self.fig.canvas.draw()
else:
print 'Exiting'
plt.close()
self.fig.canvas.mpl_disconnect(self.cid)
def on_click_classify(self, event):
''' Mouse click function to click on four corners
'''
#Clicks inside the image
if event.inaxes is not None:
print "Clicked inside"
print event.ydata, event.xdata
self.coords = [event.ydata, event.xdata]
plt.close()
            event.canvas.mpl_disconnect(self.cid)
#Clicks outside image
else:
print 'Clicked Outside'
plt.close()
            event.canvas.mpl_disconnect(self.cid)
self.complete = True
def dimension_reduction(self):
        ''' Running dimensionality reduction on the subsampled frame data.
A list of frame indexes can be provided, otherwise all frames will be considered;
'''
print "... computing original dim reduction ..."
#self.subsample_frames()
frames_list = np.arange(len(self.data_subsampled))
#Convert data to 1D vectors before dim reduction
self.data_subsampled_1D= []
for k in frames_list:
self.data_subsampled_1D.append(np.ravel(self.data_subsampled[k]))
filename = self.filename[:-4]+'_'
area = self.area #Correct
method=self.methods[self.method]
matrix_in = self.data_subsampled_1D
print "Computing dim reduction, size of array: ", np.array(matrix_in).shape
if self.method==0:
#MDS Method - SMACOF implementation Nelle Varoquaux
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... MDS-SMACOF..."
print "... pairwise dist ..."
dists = metrics.pairwise.pairwise_distances(matrix_in)
adist = np.array(dists)
amax = np.amax(adist)
adist /= amax
print "... computing MDS ..."
mds_clf = manifold.MDS(n_components=2, metric=True, n_jobs=-1, dissimilarity="precomputed", random_state=6)
results = mds_clf.fit(adist)
Y = results.embedding_
np.save(filename+area+'_'+method, Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==1:
##t-Distributed Stochastic Neighbor Embedding; Laurens van der Maaten
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... tSNE ..."
print "... pairwise dist ..."
dists = sklearn.metrics.pairwise.pairwise_distances(matrix_in)
adist = np.array(dists)
amax = np.amax(adist)
adist /= amax
print "... computing tSNE ..."
model = manifold.TSNE(n_components=3, init='pca', random_state=0)
Y = model.fit_transform(adist)
np.save(filename+area+'_'+method, Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==2:
if os.path.exists(filename+area+'_'+method+'.npy')==False:
Y = self.PCA_reduction(matrix_in, 3)
np.save(filename+area+'_'+method, Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==3:
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... computing Barnes-Hut tSNE..."
Y = bh_sne(np.float64(matrix_in), perplexity=90.)
np.save(filename+area+'_'+method+'.npy', Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==4:
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... computing locally linear embedding ..."
n_neighbors = 30
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
Y = clf.fit_transform(np.float64(matrix_in))
np.save(filename+area+'_'+method+'.npy', Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==5:
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... computing Hessian locally linear embedding ..."
n_neighbors = 30
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
Y = clf.fit_transform(np.float64(matrix_in))
np.save(filename+area+'_'+method+'.npy', Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==6:
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... computing Hessian locally linear embedding ..."
n_neighbors = 30
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
Y = clf.fit_transform(np.float64(matrix_in))
np.save(filename+area+'_'+method+'.npy', Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==7:
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... computing Random Trees embedding locally linear embedding ..."
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
X_transformed = hasher.fit_transform(np.float64(matrix_in))
pca = decomposition.TruncatedSVD(n_components=2)
Y = pca.fit_transform(X_transformed)
np.save(filename+area+'_'+method+'.npy', Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
elif self.method==8:
if os.path.exists(filename+area+'_'+method+'.npy')==False:
print "... computing Spectral embedding ..."
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
Y = embedder.fit_transform(np.float64(matrix_in))
np.save(filename+area+'_'+method+'.npy', Y)
else:
Y = np.load(filename+area+'_'+method+'.npy')
self.data_dim_reduction = Y
def cluster_data(self, indexes='all'): #cluster_data, cluster_method, dim_reduction_method):
#colours = ['blue','red','green','black','orange','magenta','cyan','yellow','brown','pink','blue','red','green','black','orange','magenta','cyan','yellow','brown','pink','blue','red','green','black','orange','magenta','cyan','yellow','brown','pink']
#cluster_method=2
#cluster_method = self.cluster_method
#if indexes=='all':
# data = self.data_dim_reduction
#else:
# data = self.data_dim_reduction[indexes] #Cluster only part of data
#MANUAL
if self.cluster_method == 3:
self.clustered_pts = self.manual_cluster(indexes=indexes) #not sure the self attribute is still required
return
else:
print "... clustering method not implemented ..."
quit()
#labels = np.array(labels)
#clrs = []
#for k in range(len(labels)):
#clrs.append(colours[labels[k]])
#plt.scatter(data[:,0], data[:,1], color=clrs)
#plt.show(block=True)
#Automated clustering not working
#KMEANS
#if cluster_method == 0:
#from sklearn import cluster
#n_clusters = 3
#print "... n_clusters sought: ", 3
#clusters = cluster.KMeans(n_clusters, max_iter=1000, n_jobs=-1, random_state=1032)
#clusters.fit(data)
#labels = clusters.labels_
#clustered_pts = []
##for k in range(len(np.max(labels)):
#print " TODO : FIX THIS TO CORRESPOND TO RECURSIVE CLUSTERING ..."
#return labels, labels
##MEAN SHIFT
#if cluster_method == 1:
#from sklearn.cluster import MeanShift, estimate_bandwidth
#from sklearn.datasets.samples_generator import make_blobs
#quantile = 0.1
#bandwidth = estimate_bandwidth(data, quantile=quantile, n_samples=5000)
#ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
#ms.fit(data)
#labels = ms.labels_
##print labels
##DBSCAN
#if cluster_method == 2:
#from sklearn.cluster import DBSCAN
#from sklearn import metrics
#from sklearn.datasets.samples_generator import make_blobs
#from sklearn.preprocessing import StandardScaler
#X = StandardScaler().fit_transform(data)
#eps = 0.2
#db = DBSCAN(eps=eps, min_samples=10).fit(X)
#core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
#core_samples_mask[db.core_sample_indices_] = True
#labels = db.labels_
def filter_PCA(self, X, filtering, plotting):
print " ... filtering PCA data ..."
#print " ... xshape:" , X.shape
#Filter PCA DATA
plt.close()
for d in range(X.shape[1]):
if plotting:
t = np.linspace(0,len(X[:,0]), len(X[:,0]))/self.frame_rate
ax = plt.subplot(X.shape[1],1,d+1)
plt.plot(t, X[:,d])
plt.title("PCA #"+str(d)+" vs. time (sec)", fontsize = 20)
if filtering:
x = np.hstack(X[:,d])
                x = np.hstack(X[:,d])
                b, a = butter(2, 0.001, 'high') #Order-2 high-pass (normalized cutoff 0.001) to remove slow drift
y = filtfilt(b, a, x)
X[:,d]=y
if plotting:
plt.plot(t, y, color='red')
if plotting:
plt.show(block=True)
self.data_dim_reduction = X.copy() #Need to also copy this data into the separate attribute because filtering is optional
def filter_data(self, X, filtering=True, plotting=True):
print " ... xshape:" , X.shape
#Convert data to 1D vectors before dim reduction
self.data_subsampled_1D= []
for k in range(len(X)):
self.data_subsampled_1D.append(np.ravel(X[k]))
X = np.array(self.data_subsampled_1D)
#Filter PCA DATA
for d in range(X.shape[1]):
if plotting:
t = np.linspace(0,len(X[:,0]), len(X[:,0]))/self.frame_rate
ax = plt.subplot(X.shape[1],1,d+1)
plt.plot(t, X[:,d])
plt.title("PCA #"+str(d)+" vs. time (sec)", fontsize = 20)
if filtering:
x = np.hstack(X[:,d])
                b, a = butter(2, 0.001, 'high') #Order-2 high-pass (normalized cutoff 0.001) to remove slow drift
y = filtfilt(b, a, x)
X[:,d]=y
if plotting:
plt.plot(t, y, color='red')
if plotting:
plt.show(block=True)
self.data_dim_reduction = X.copy() #Need to also copy this data into the separate attribute because filtering is optional
def manual_cluster(self, indexes='all'):
#Load video data to display during clustering
enlarge=50
try:
self.display_frames = np.load(self.filename[:-4]+'_registered.npy')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
except:
self.display_frames = np.load(self.filename[:-4]+'.npy',mmap_mode='c')[:, max(0,self.x1-enlarge):self.x2+enlarge, max(0,self.y1-enlarge):self.y2+enlarge]
print "... missing registered file, loading original..."
#Select all indexes; or just subset for re-clustering
if indexes=='all':
self.data = self.data_dim_reduction
else:
self.data=self.data_dim_reduction[indexes]
self.display_frames = self.display_frames[indexes]
ctr = 0 #Keep track of cluster #
        clustered_pts = [] #keep track of clustered points - NOTE THIS IS RECURSIVE: i.e., the points have relative indexes not absolute
#need to keep track of indexes as they are being deleted
self.frame_indexes = np.arange(len(self.data))
self.cluster_names = []
self.cluster_indexes = []
while True:
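            #Colour points by frame order (later frames brighter) for the viridis scatter below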
cmap_array = np.linspace(len(self.data)/4,len(self.data),len(self.data))/float(len(self.data))#*256.
clustered_pts.append([])
print "... NEW LOOP..."
plt.close('all')
self.fig = plt.figure()
self.ax1 = self.fig.add_subplot(121) #plt.subplot(1,2,1)
self.img = self.ax1.imshow(self.display_frames[0],vmin=0, vmax=255, animated=True)
self.ax2 = self.fig.add_subplot(122) #plt.subplot(1,2,2)
self.coords=[]
#mng = plt.get_current_fig_manager()
#mng.resize(*mng.window.maxsize())
#self.ax2.scatter(data[:,0],data[:,1], color='black', picker=True)
self.ax2.scatter(self.data[:,0],self.data[:,1], color = cm.viridis(cmap_array), alpha=0.5, picker=True)
self.ax2.set_title("Manual select clusters")
#self.cid = self.fig.canvas.mpl_connect('button_press_event', self.on_click_single_frame)
self.cid = self.fig.canvas.mpl_connect('motion_notify_event', self.on_plot_hover)
plt.show(block=True)
self.cluster_names.append(raw_input("Cluster label: "))
print "... COORDS OUT: ", self.coords
print "... data...", self.data
#if len(self.coords)==0:
if self.cluster_names[ctr]=='rest':
print "... DONE MANUAL CLUSTERING ..."
#Save remaining indexes to file
self.cluster_indexes.append(self.frame_indexes)
#Reconcile all the duplicate clusters:
#Assign clusters to unique ids
cumulative_indexes=[]
unique_names = np.unique(self.cluster_names)
print self.cluster_names
print unique_names
unique_indexes = []
for ctr1, unique_name in enumerate(unique_names):
unique_indexes.append([])
for ctr, cluster_name in enumerate(self.cluster_names):
if unique_name==cluster_name:
unique_indexes[ctr1].extend(self.cluster_indexes[ctr])
#cluster_id = np.where(unique_names==cluster)[0]
#print "... cluster_id: ", cluster_id
#Save both names and indexes
if indexes=='all': #If saving all original data
#Reconcile all the duplicate clusters; Assign clusters to unique ids
cumulative_indexes=[]
unique_names = np.unique(self.cluster_names)
print self.cluster_names
print unique_names
unique_indexes = []
for ctr1, unique_name in enumerate(unique_names):
unique_indexes.append([])
for ctr, cluster_name in enumerate(self.cluster_names):
if unique_name==cluster_name:
unique_indexes[ctr1].extend(self.cluster_indexes[ctr])
np.savetxt(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusternames.txt", unique_names,fmt='%s')
np.save(self.filename[:-4]+"_"+self.area+"_"+self.methods[self.method]+"_clusterindexes.npy", unique_indexes)
else: #If making edits to original data
print '... adjusting previous indexes...'
self.resave_clusters(indexes=indexes)
return clustered_pts #Return list of lists of points clustered in each stage;
if len(self.coords)==0:
self.cluster_indexes.append([])
plt.close()
ctr+=1
continue #Skip computation of stuff before and redraw
#FIND points inside polygon
bbPath = Path(np.array(self.coords))
for k,d in enumerate(self.data):
if bbPath.contains_point(d):
clustered_pts[ctr].append(k)
if len(clustered_pts[ctr])==0:
self.cluster_indexes.append([])
plt.close()
ctr+=1
continue #Skip computation of stuff before and redraw
#Delete the clustered_pts indexes from
print self.data.shape
self.data = np.delete(self.data, clustered_pts[ctr], axis=0)
self.cluster_indexes.append(self.frame_indexes[np.array(clustered_pts[ctr])])
self.frame_indexes = np.delete(self.frame_indexes, clustered_pts[ctr], axis=0)
print self.data.shape
methods = ['MDS', 'tSNE', 'PCA', 'BHtSNE']
method = methods[2]
print "... recomputing dim reduction on remaning scatter plot ..."
if self.methods[self.method] !='tSNE':
n_components=2
self.data = self.PCA_reduction(self.data, n_components)
plt.close()
ctr+=1
print ".... WRONG EXIT ********"
#def on_plot_scatter_click(self,event):
##print self.ax2.get_lines()
#ind = event.ind
##print 'onpick3 scatter:', ind, np.take(self.data_dim_reduction[:,0], ind), np.take(self.data_dim_reduction[:,1], ind)
#print 'scatter:', ind[0]
#self.img.set_data(self.display_frames[ind[0]])
#plt.draw()
def on_plot_hover(self,event):
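        #Hovering shows the frame nearest the cursor; left-click adds a polygon vertex, middle-click closes the selection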
print event.xdata, event.ydata
a = self.data[:,:2]
index = distance.cdist([(event.xdata,event.ydata)], a).argmin()
self.img.set_data(self.display_frames[self.frame_indexes[index]])
plt.draw()
if event.button==1:
print "***********"
#print event.xdata, event.ydata
self.coords.append([event.xdata, event.ydata])
self.ax2.scatter(event.xdata, event.ydata, color='red', s=50)
self.fig.canvas.draw()
time.sleep(.1)
#if (event.inaxes is None) and (event.button==1):
if event.button==2:
print 'Exiting'
plt.close()
self.fig.canvas.mpl_disconnect(self.cid)
time.sleep(.1)
def on_click_single_frame(self,event):
#global coords, ax, fig, cid, temp_img
if event.inaxes is not None:
print event.xdata, event.ydata
self.coords.append([event.xdata, event.ydata])
self.ax2.scatter(event.xdata, event.ydata, color='red', s=50)
self.fig.canvas.draw()
else:
print 'Exiting'
plt.close()
self.fig.canvas.mpl_disconnect(self.cid)
def PCA_reduction(self, X, n_components):
        ''' Redundant function, can just use the dim-reduction function above
'''
plt.cla()
#pca = decomposition.SparsePCA(n_components=3, n_jobs=1)
pca = decomposition.PCA(n_components=n_components)
print "... fitting PCA ..."
pca.fit(X)
return pca.transform(X)
def plot_metadata(output):
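    #Render heat-map summaries of per-session annotation metadata (one column per animal, one row per session)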
tot_frames = 925566
#Convert dta to pretty plots
output.drift_matrix[::output.scale]=np.nan
output.drift_matrix[:,::output.scale]=np.nan
output.spout_matrix[::output.scale]=np.nan
output.spout_matrix[:,::output.scale]=np.nan
output.lick_matrix[::output.scale]=np.nan
output.lick_matrix[:,::output.scale]=np.nan
output.paw_matrix[::output.scale]=np.nan
output.paw_matrix[:,::output.scale]=np.nan
output.scratch_matrix[::output.scale]=np.nan
output.scratch_matrix[:,::output.scale]=np.nan
output.lick_matrix*=100
output.paw_matrix*=100
output.scratch_matrix*=100
x_labels = []
for root_dir in output.root_dirs:
x_labels.append(os.path.split(root_dir)[1])
y_labels = []
for k in range(1,22):
y_labels.append(k)
fig = plt.figure()
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fontsize=20
if False:
ax=plt.subplot(1,2,1)
plt.imshow(output.drift_matrix.T)
plt.ylim(0,21*output.scale)
plt.title("Camera Drifts During Recording", fontsize=fontsize)
plt.ylabel("Session #", fontsize=fontsize)
plt.xlabel("Animal ID", fontsize=fontsize)
        plt.xticks(np.int16(range(0,len(output.root_dirs)*output.scale,output.scale))+output.scale/3, x_labels,rotation=70)
plt.yticks(np.int16(range(0,21*output.scale,output.scale))+output.scale/2, y_labels)
plt.tick_params(axis='both', which='both', labelsize=fontsize-5)
ax=plt.subplot(1,2,2)
plt.imshow(output.spout_matrix.T)
plt.ylim(0,21*output.scale)
plt.title("Spout Moves During Recording", fontsize=fontsize)
plt.ylabel("Session #", fontsize=fontsize)
plt.xlabel("Animal ID", fontsize=fontsize)
plt.xticks(np.int16(range(0,len(output.root_dirs)*output.scale,output.scale))+output.scale/3, x_labels,rotation=70)
plt.yticks(np.int16(range(0,21*output.scale,output.scale))+output.scale/2, y_labels)
plt.tick_params(axis='both', which='both', labelsize=fontsize-5)
plt.show()
else:
ax=plt.subplot(1,3,1)
plt.imshow(output.lick_matrix.T)
plt.ylim(0,21*output.scale)
plt.title("% Frames Licking", fontsize=fontsize)
plt.ylabel("Session #", fontsize=fontsize)
plt.xlabel("Animal ID", fontsize=fontsize)
plt.xticks(np.int16(range(0,len(output.root_dirs)*output.scale,output.scale))+output.scale/3, x_labels,rotation=70)
plt.yticks(np.int16(range(0,21*output.scale,output.scale))+output.scale/2, y_labels)
plt.tick_params(axis='both', which='both', labelsize=fontsize-5)
cbaxes = inset_axes(ax, width="8%", height="10%")
plt.colorbar(cax=cbaxes, ticks=[0.,int(np.nanmax(output.lick_matrix))], orientation='vertical')
ax=plt.subplot(1,3,2)
plt.imshow(output.paw_matrix.T)
plt.ylim(0,21*output.scale)
plt.title("% Frames Pawing", fontsize=fontsize)
#plt.ylabel("Session #", fontsize=fontsize)
plt.xlabel("Animal ID", fontsize=fontsize)
plt.xticks(np.int16(range(0,len(output.root_dirs)*output.scale,output.scale))+output.scale/3, x_labels,rotation=70)
plt.yticks(np.int16(range(0,21*output.scale,output.scale))+output.scale/2, y_labels)
plt.tick_params(axis='both', which='both', labelsize=fontsize-5)
cbaxes = inset_axes(ax, width="8%", height="10%")
plt.colorbar(cax=cbaxes, ticks=[0.,int(np.nanmax(output.paw_matrix))], orientation='vertical')
ax=plt.subplot(1,3,3)
plt.imshow(output.scratch_matrix.T)
plt.ylim(0,21*output.scale)
plt.title("% Frames Scratching", fontsize=fontsize)
#plt.ylabel("Session #", fontsize=fontsize)
plt.xlabel("Animal ID", fontsize=fontsize)
plt.xticks(np.int16(range(0,len(output.root_dirs)*output.scale,output.scale))+output.scale/3, x_labels,rotation=70)
plt.yticks(np.int16(range(0,21*output.scale,output.scale))+output.scale/2, y_labels)
plt.tick_params(axis='both', which='both', labelsize=fontsize-5)
cbaxes = inset_axes(ax, width="8%", height="10%")
plt.colorbar(cax=cbaxes, ticks=[0.,int(np.nanmax(output.scratch_matrix))], orientation='vertical')
plt.show()
class emptyObject(object):
pass
| 39.049637 | 257 | 0.638211 | 65,251 | 0.931944 | 0 | 0 | 0 | 0 | 0 | 0 | 23,098 | 0.329896 |
de88b285c3b2ad75dee639aa1cc273972692cd58 | 619 | py | Python | MinersArchers/game/game_data/cells/Cell_pygame.py | ea-evdokimov/MinersArchers | 2e2830d3723b66cbd0e8829092124e30f8b4c854 | [
"MIT"
]
| null | null | null | MinersArchers/game/game_data/cells/Cell_pygame.py | ea-evdokimov/MinersArchers | 2e2830d3723b66cbd0e8829092124e30f8b4c854 | [
"MIT"
]
| null | null | null | MinersArchers/game/game_data/cells/Cell_pygame.py | ea-evdokimov/MinersArchers | 2e2830d3723b66cbd0e8829092124e30f8b4c854 | [
"MIT"
]
| null | null | null | import pygame
from game.game_data.cells.Cell import Cell
from game.pygame_ import PICS_pygame, CELL_SIZE
from game.pygame_.Object import Object
class PyGCell(Object, Cell):
    # TODO: avoid passing redundant arguments
def __init__(self, id__, cell: Cell, x_: int, y_: int):
Object.__init__(self, id__, *self.create_coordinates(x_, y_), CELL_SIZE, CELL_SIZE)
image = pygame.image.load(PICS_pygame["cell2"])
self.load_image(image)
Cell.__init__(self, x=x_, y=y_, relief=cell._relief)
def create_coordinates(self, x: int, y: int) -> (int, int):
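        # Convert grid cell indices into top-left pixel coordinates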
return CELL_SIZE * x, CELL_SIZE * y
| 30.95 | 91 | 0.691438 | 489 | 0.767661 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.081633 |
de8b266bc66642e780d1f515de7639ab0386bd85 | 2,690 | py | Python | scheduler.py | shuaiqi361/a-PyTorch-Tutorial-to-Object-Detection | 5706b82ff67911864967aa72adf7e4a994c7ec89 | [
"MIT"
]
| null | null | null | scheduler.py | shuaiqi361/a-PyTorch-Tutorial-to-Object-Detection | 5706b82ff67911864967aa72adf7e4a994c7ec89 | [
"MIT"
]
| null | null | null | scheduler.py | shuaiqi361/a-PyTorch-Tutorial-to-Object-Detection | 5706b82ff67911864967aa72adf7e4a994c7ec89 | [
"MIT"
]
| null | null | null | import json
import os
import torch
import math
def adjust_learning_rate(optimizer, scale):
"""
Scale learning rate by a specified factor.
:param optimizer: optimizer whose learning rate must be shrunk.
:param scale: factor to multiply learning rate with.
"""
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * scale
print("DECAYING learning rate, the new LR is %f" % (optimizer.param_groups[1]['lr'],))
def warm_up_learning_rate(optimizer, rate=5.):
"""
    Warm up the learning rate by a specified factor.
    :param optimizer: optimizer whose learning rate must be increased.
    :param rate: factor to multiply the learning rate with.
"""
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * rate
print("WARMING up learning rate, the new LR is %f" % (optimizer.param_groups[1]['lr'],))
class WarmUpScheduler(object):
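    # Ramps the optimizer's learning rate up to target_lr over n_steps calls to update()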
def __init__(self, target_lr, n_steps, optimizer, types='exp'):
self.target_lr = target_lr
self.n_steps = n_steps
self.optimizer = optimizer
self.init_scheduler(types)
def init_scheduler(self, types):
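        # 'exp' starts at target_lr / rate**n_steps and doubles each step; any other type ramps linearly from 10% of target_lr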
if types.lower() == 'exp':
self.rate = 2.
self.init_lr = self.target_lr / (self.rate ** self.n_steps)
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr'] / (self.rate ** self.n_steps)
print('EXP Warming up lr from {:.6f}'.format(self.init_lr))
else:
self.init_lr = self.target_lr * 0.1
self.rate = (self.target_lr - self.init_lr) / self.n_steps
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.init_lr
print('Linear Warming up lr from {:.6f}'.format(self.init_lr))
def update(self, types='exp'):
if types.lower() == 'exp':
if self.n_steps > 0:
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr'] * self.rate
# print(self.n_steps, self.target_lr, self.rate)
print('New lr {:.6f}'.format(self.target_lr / (self.rate ** (self.n_steps - 1))))
else:
return
else:
if self.n_steps > 0:
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr'] + (self.target_lr - self.init_lr) / self.n_steps
print('New lr {:.6f}'.format(self.target_lr - self.rate * (self.n_steps - 1)))
else:
return
self.n_steps -= 1
| 36.351351 | 106 | 0.600743 | 1,763 | 0.65539 | 0 | 0 | 0 | 0 | 0 | 0 | 686 | 0.255019 |
de8c74beee9cae08acd3e8037eb35833307f76e4 | 93 | py | Python | app/routing/feeds/feed_type.py | wolfhardfehre/guide-io | cf076bad0634bcaf4ad0be4822539b7c8d254e76 | [
"MIT"
]
| null | null | null | app/routing/feeds/feed_type.py | wolfhardfehre/guide-io | cf076bad0634bcaf4ad0be4822539b7c8d254e76 | [
"MIT"
]
| null | null | null | app/routing/feeds/feed_type.py | wolfhardfehre/guide-io | cf076bad0634bcaf4ad0be4822539b7c8d254e76 | [
"MIT"
]
| null | null | null | from enum import Enum
class FeedType(str, Enum):
OSM = 'osm'
OVERPASS = 'overpass'
| 13.285714 | 26 | 0.645161 | 68 | 0.731183 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.16129 |
de8c915237260239c036a5cbacb8018944e669da | 8,774 | py | Python | lego_sorter.py | bmleedy/lego_sorter | 0164bc0042127f255590d1883b5edadfba781537 | [
"BSD-2-Clause"
]
| null | null | null | lego_sorter.py | bmleedy/lego_sorter | 0164bc0042127f255590d1883b5edadfba781537 | [
"BSD-2-Clause"
]
| null | null | null | lego_sorter.py | bmleedy/lego_sorter | 0164bc0042127f255590d1883b5edadfba781537 | [
"BSD-2-Clause"
]
| null | null | null | #!/bin/python3
"""This is the top-level program to operate the Raspberry Pi based lego sorter."""
# Things I can set myself: AWB, Brightness, crop, exposure_mode,
# exposure_speed,iso (sensitivity), overlays, preview_alpha,
# preview_window, saturation, shutter_speed,
# Thought for future enhancement: at start time, calibrate against
# a background image. Possibly only evaluate pixels which
# deviate significantly in hue from the original background image.
# Thoughts on controlling the air valves:
# I'm going to take the simple approach first, and hopefully it's sufficient:
# 1. Detect different colors in zones in front of their respective valves
# 2. If enough of the first color is detected, puff it into that color's bin
# 3. Otherwise, let it ride through as many detection zones as
# necessary until it's detected or falls off the track
# Upsides:
# 1. It's dead simple and reactive. No state needed to manage
# 2. No timing tuning needed for detect-then-wait method (source of failure)
# 3. No tracking needed (source of failure/flakiness)
# 4. Less memory/CPU intensive
#
# Downsides:
# 1. A multi-color part could slip past without enough "density" of any one color
# 2. More detection zones means more potential variation in the
# lighting - same part could look yellow in one zone and orange
# in the next, causing misses
import os
import json
import time
from datetime import datetime
import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray
import numpy as np
# GPIO Imports
import RPi.GPIO as GPIO
# constants for tweaking
WINDOW_NAME = "Recognition"
SCALE_PERCENT = 20
PIXEL_THRESHOLD = 50
RANGE_PADDING = 10
SHOW_OVERLAY = True
COLOR_COLUMN_WIDTH = 10
OUTPUT_VIDEO = False
VIDEO_NAME = "output.avi"
LEGO_CONFIG_NAME = "legos.config.json"
# setup GPIO (https://pythonhosted.org/RPIO/)
VALVE_PIN = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(VALVE_PIN, GPIO.OUT)
GPIO.output(VALVE_PIN, GPIO.HIGH)
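# Valves are driven active-low: HIGH holds a jet closed, LOW fires it (see the detection loop below)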
# Detection box location
XMIN = 36
XMAX = 85
YMIN = 96
YMAX = 121
SHOW_BOX = True
# todo: fork data to a logfile in /var
class Lego:
"""This is the class for a lego object which we want to detect"""
name = "undefined"
upper_hsv = [0, 0, 0]
lower_hsv = [0, 0, 0]
display_bgr = [0, 0, 0]
recognition_mask = []
recognition_indices = []
pixel_count = 0
jet_number = -1 #default to no jet assigned
recognition_box = [(0, 0), (0, 0)] # (XMIN,YMIN),(XMAX,YMAX)
def __init__(self, lconfig, recognition_box):
self.name = lconfig["name"]
self.upper_hsv = lconfig["upperhsv"]
self.lower_hsv = lconfig["lowerhsv"]
self.display_bgr = lconfig["display_bgr"]
self.recognition_box = recognition_box
self.jet_number = lconfig["jet_number"]
def recognize_at(self, hsv_image, box=None):
""" run recognition over an area of an image to determine how
much lego I think is there"""
if box is None:
box = self.recognition_box
# Super simple approach:
# inside a specific box, count the number of pixels I think are each color
self.recognition_mask = cv2.inRange(
hsv_image,
np.array(self.lower_hsv),
np.array(self.upper_hsv))
# find where the masks found the colors
# (making a trade-off here because I'm doing recognition on the whole image,
# then only paring down here)
self.recognition_indices = np.where(
self.recognition_mask[box[0][0]:box[1][0], # XMIN:XMAX
box[0][1]:box[1][1]] > 0) # YMIN: YMAX
self.pixel_count = self.recognition_indices[0].size
def filter_mask(self, filter_params=None):
""" todo: we should be able to filter out less-contiguous pixels
(this would be a particle filter?)"""
# Setup the display window
if SHOW_OVERLAY:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.resizeWindow(WINDOW_NAME, 800, 800)
# Load jets we want to use
jets = []
with open('jets.config.json') as json_file:
jets = json.load(json_file)
# Load legos we want to recognize
legos = []
with open(LEGO_CONFIG_NAME) as json_file:
config = json.load(json_file)
for lego_config in config:
if((lego_config["jet_number"] >= 0) and
(lego_config["jet_number"] < len(jets))):
legos.append(
Lego(
lconfig=lego_config,
recognition_box=jets[lego_config["jet_number"]]["bounding_box_corners"],
)
)
else:
legoname = lego_config["name"]
print(f"Lego color {legoname} disabled")
# Run the camera
with PiCamera(
camera_num=0, # default
stereo_mode='none', # default
stereo_decimate=False, # default
resolution=(160, 96), # default (10% of full resolution of 1600x900)
framerate=10, # 10 fps, default is 30
sensor_mode=5) as camera: # default=1, 5 is full FOV with 2x2 binning
#camera.awb_mode = 'off' # turn off AWB because I will control lighting
camera.awb_gains = (1.184, 2.969) # Set constant AWB (tuple for red and blue, or constant)
# time.sleep(2)
print("{datetime.now()} Camera setup complete.")
print(f"{datetime.now()} AWB Gains are {camera.awb_gains}")
# time.sleep(3)
# Setup the buffer into which we'll capture the images
cam_image = PiRGBArray(camera)
    if OUTPUT_VIDEO:
        # note: no cv2.VideoCapture needed here; frames come from the PiCamera capture below
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(VIDEO_NAME, fourcc, 10.0, (160, 96))
# start the preview window in the top left corner
camera.start_preview(resolution=(160, 96),
window=(40, 40, 320, 192),
fullscreen=False)
camera.preview_alpha = 200
print("{datetime.now()} Camera preview started")
# continuously capture files
    last_loop_time = int(round(time.time() * 1000))  # milliseconds, to match the loop timer below
for i, filename in enumerate(
camera.capture_continuous(
cam_image,
format='bgr',
use_video_port=True, # faster, but less good images
resize=None # resolution was specified above
)):
# clear the screen
os.system('clear')
# load the image
image = cam_image.array.copy()
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Run recognition on the same image for each lego type
for lego in legos:
lego.recognize_at(image_hsv)
all_pixel_counts = 0
for lego in legos:
all_pixel_counts += lego.pixel_count
print(f"{datetime.now()} {all_pixel_counts} Pixels detected")
print_string = ""
for lego in legos:
print_string += f"{lego.name:^{COLOR_COLUMN_WIDTH}}|"
print(print_string)
print_string = ""
for lego in legos:
print_string += f"{lego.pixel_count:^{COLOR_COLUMN_WIDTH}}|"
print(print_string)
for lego in legos:
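            # Open this colour's valve (drive its GPIO low) while enough of its pixels sit in the jet's detection box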
yxmin = (jets[lego.jet_number]["bounding_box_corners"][0][1],
jets[lego.jet_number]["bounding_box_corners"][0][0])
yxmax = (jets[lego.jet_number]["bounding_box_corners"][1][1],
jets[lego.jet_number]["bounding_box_corners"][1][0])
if lego.pixel_count > PIXEL_THRESHOLD:
GPIO.output(jets[lego.jet_number]["gpio_pin"], GPIO.LOW)
print(f"{lego.name} RECOGNIZED! {lego.pixel_count} pixels")
if SHOW_BOX:
cv2.rectangle(image, yxmin, yxmax, lego.display_bgr, 1)
else:
GPIO.output(jets[lego.jet_number]["gpio_pin"], GPIO.HIGH)
if SHOW_BOX:
cv2.rectangle(image, yxmin, yxmax, (0, 0, 0), 1)
if SHOW_OVERLAY:
for lego in legos:
image[lego.recognition_indices[0]+
jets[lego.jet_number]["bounding_box_corners"][0][0],
lego.recognition_indices[1]+
jets[lego.jet_number]["bounding_box_corners"][0][1]] = lego.display_bgr
cv2.waitKey(1)
cv2.imshow(WINDOW_NAME, image)
if OUTPUT_VIDEO:
out.write(image)
# display the loop speed
now_time = int(round(time.time() * 1000))
print(f"Loop [{i}] completed in {now_time-last_loop_time}ms")
last_loop_time = now_time
# clear the buffers for the image
cam_image.truncate(0)
    camera.stop_preview()
    if OUTPUT_VIDEO:
        out.release()  # `out` only exists when OUTPUT_VIDEO is set
    cv2.destroyAllWindows()
| 35.379032 | 94 | 0.624915 | 1,772 | 0.20196 | 0 | 0 | 0 | 0 | 0 | 0 | 3,677 | 0.419079 |
de8d539f5152c1d0482b8d70ccc7c573352b8f81 | 6,061 | py | Python | tableloader/tableFunctions/types.py | warlof/yamlloader | ff1c1e62ec40787dd77115f6deded8a93e77ebf6 | ["MIT"] | 26 | 2015-07-08T12:55:30.000Z | 2022-01-21T11:44:35.000Z | tableloader/tableFunctions/types.py | warlof/yamlloader | ff1c1e62ec40787dd77115f6deded8a93e77ebf6 | ["MIT"] | 16 | 2016-05-01T17:42:44.000Z | 2021-06-02T04:33:53.000Z | tableloader/tableFunctions/types.py | warlof/yamlloader | ff1c1e62ec40787dd77115f6deded8a93e77ebf6 | ["MIT"] | 17 | 2016-05-01T11:15:00.000Z | 2021-12-02T03:25:04.000Z |
# -*- coding: utf-8 -*-
from yaml import load, dump
try:
from yaml import CSafeLoader as SafeLoader
print "Using CSafeLoader"
except ImportError:
from yaml import SafeLoader
print "Using Python SafeLoader"
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from sqlalchemy import Table
def importyaml(connection,metadata,sourcePath,language='en'):
    """Load fsd/typeIDs.yaml from an EVE SDE dump at sourcePath and fill the
    invTypes, trnTranslations, certMasteries, invTraits and invMetaTypes tables."""
    invTypes = Table('invTypes',metadata)
trnTranslations = Table('trnTranslations',metadata)
certMasteries = Table('certMasteries',metadata)
invTraits = Table('invTraits',metadata)
invMetaTypes = Table('invMetaTypes',metadata)
print "Importing Types"
print "Opening Yaml"
with open(os.path.join(sourcePath,'fsd','typeIDs.yaml'),'r') as yamlstream:
trans = connection.begin()
typeids=load(yamlstream,Loader=SafeLoader)
print "Yaml Processed into memory"
for typeid in typeids:
connection.execute(invTypes.insert(),
typeID=typeid,
groupID=typeids[typeid].get('groupID',0),
typeName=typeids[typeid].get('name',{}).get(language,'').decode('utf-8'),
description=typeids[typeid].get('description',{}).get(language,'').decode('utf-8'),
mass=typeids[typeid].get('mass',0),
volume=typeids[typeid].get('volume',0),
capacity=typeids[typeid].get('capacity',0),
portionSize=typeids[typeid].get('portionSize'),
raceID=typeids[typeid].get('raceID'),
basePrice=typeids[typeid].get('basePrice'),
published=typeids[typeid].get('published',0),
marketGroupID=typeids[typeid].get('marketGroupID'),
graphicID=typeids[typeid].get('graphicID',0),
iconID=typeids[typeid].get('iconID'),
soundID=typeids[typeid].get('soundID'))
if typeids[typeid].has_key("masteries"):
for level in typeids[typeid]["masteries"]:
for cert in typeids[typeid]["masteries"][level]:
connection.execute(certMasteries.insert(),
typeID=typeid,
masteryLevel=level,
certID=cert)
if (typeids[typeid].has_key('name')):
for lang in typeids[typeid]['name']:
connection.execute(trnTranslations.insert(),tcID=8,keyID=typeid,languageID=lang.decode('utf-8'),text=typeids[typeid]['name'][lang].decode('utf-8'))
if (typeids[typeid].has_key('description')):
for lang in typeids[typeid]['description']:
connection.execute(trnTranslations.insert(),tcID=33,keyID=typeid,languageID=lang.decode('utf-8'),text=typeids[typeid]['description'][lang].decode('utf-8'))
if (typeids[typeid].has_key('traits')):
if typeids[typeid]['traits'].has_key('types'):
for skill in typeids[typeid]['traits']['types']:
for trait in typeids[typeid]['traits']['types'][skill]:
result=connection.execute(invTraits.insert(),
typeID=typeid,
skillID=skill,
bonus=trait.get('bonus'),
bonusText=trait.get('bonusText',{}).get(language,''),
unitID=trait.get('unitID'))
traitid=result.inserted_primary_key
for languageid in trait.get('bonusText',{}):
connection.execute(trnTranslations.insert(),tcID=1002,keyID=traitid[0],languageID=languageid.decode('utf-8'),text=trait['bonusText'][languageid].decode('utf-8'))
if typeids[typeid]['traits'].has_key('roleBonuses'):
for trait in typeids[typeid]['traits']['roleBonuses']:
result=connection.execute(invTraits.insert(),
typeID=typeid,
skillID=-1,
bonus=trait.get('bonus'),
bonusText=trait.get('bonusText',{}).get(language,''),
unitID=trait.get('unitID'))
traitid=result.inserted_primary_key
for languageid in trait.get('bonusText',{}):
connection.execute(trnTranslations.insert(),tcID=1002,keyID=traitid[0],languageID=languageid.decode('utf-8'),text=trait['bonusText'][languageid].decode('utf-8'))
if typeids[typeid]['traits'].has_key('miscBonuses'):
for trait in typeids[typeid]['traits']['miscBonuses']:
result=connection.execute(invTraits.insert(),
typeID=typeid,
skillID=-2,
bonus=trait.get('bonus'),
bonusText=trait.get('bonusText',{}).get(language,''),
unitID=trait.get('unitID'))
traitid=result.inserted_primary_key
for languageid in trait.get('bonusText',{}):
connection.execute(trnTranslations.insert(),tcID=1002,keyID=traitid[0],languageID=languageid.decode('utf-8'),text=trait['bonusText'][languageid].decode('utf-8'))
if typeids[typeid].has_key('metaGroupID') or typeids[typeid].has_key('variationParentTypeID'):
connection.execute(invMetaTypes.insert(),typeID=typeid,metaGroupID=typeids[typeid].get('metaGroupID'),parentTypeID=typeids[typeid].get('variationParentTypeID'))
trans.commit()
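# Usage sketch (editor-added; assumes the tables are already defined on the
# SQLAlchemy `metadata`, as the Table(...) lookups above require):
#   engine = create_engine(connection_url)
#   with engine.connect() as connection:
#       importyaml(connection, metadata, sde_path, language='en')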
| 63.135417 | 193 | 0.530606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 901 | 0.148655 |
de8e61ed55aedc48bfff03d78334a493e87826b6 | 242 | py | Python | core/views.py | AlikBerry/countdown_timer | 457f6d499b1fd702d43c348a012ae78780009e3b | ["MIT"] | null | null | null | core/views.py | AlikBerry/countdown_timer | 457f6d499b1fd702d43c348a012ae78780009e3b | ["MIT"] | null | null | null | core/views.py | AlikBerry/countdown_timer | 457f6d499b1fd702d43c348a012ae78780009e3b | ["MIT"] | null | null | null |
from django.shortcuts import render
from core.models import Projects,InfoNotifications,WarningNotifications
from django.http import HttpResponse
from .tasks import sleepy
def index(request):  # was "reuqest" (typo); Django passes the request positionally, so the rename is safe
sleepy(10)
return HttpResponse('Done!')
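# Non-blocking variant (editor-added sketch; assumes `sleepy` is a Celery task,
# which the `.tasks` import suggests):
#   def index(request):
#       sleepy.delay(10)  # enqueue the task instead of running it inline
#       return HttpResponse('Queued!')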
| 22 | 71 | 0.801653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.028926 |
de8e8bcbbb73ed82dfadbb561cfbfe8bb447a711 | 5,017 | py | Python | networks/autoencoder/losses.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | ["MIT"] | null | null | null | networks/autoencoder/losses.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | ["MIT"] | null | null | null | networks/autoencoder/losses.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | ["MIT"] | null | null | null |
import tensorflow as tf
import numpy as np
EPS = 1e-5
def KL_monte_carlo(z, mean, sigma=None, log_sigma=None):
"""Computes the KL divergence at a point, given by z.
Implemented based on https://www.tensorflow.org/tutorials/generative/cvae
    This is the Monte-Carlo term log(q(z|x)) - log(p(z)), where z is sampled
    from q(z|x), matching the return value below.
Parameters
----------
z : (B, N)
mean : (B, N)
sigma : (B, N) | None
log_sigma : (B, N) | None
Returns
-------
KL : (B,)
"""
if log_sigma is None:
log_sigma = tf.math.log(sigma)
zeros = tf.zeros_like(z)
log_p_z = log_multivar_gaussian(z, mean=zeros, log_sigma=zeros)
log_q_z_x = log_multivar_gaussian(z, mean=mean, log_sigma=log_sigma)
return log_q_z_x - log_p_z
def KL(mean, sigma=None, log_sigma=None):
"""KL divergence between a multivariate Gaussian and Multivariate
N(0, I).
Implemented based on
https://mr-easy.github.io/2020-04-16-kl-divergence-between-2-gaussian-distributions/
Parameters
----------
mean : (B, N)
sigma : (B, N) | None
The diagonol of a covariance matrix of a factorized Gaussian
distribution.
log_sigma : (B, N) | None
The log diagonol of a covariance matrix of a factorized
Gaussian distribution.
One of `sigma` and `log_sigma` has to be passed in.
Returns
-------
KL : (B,)
"""
if sigma is None:
sigma = tf.math.exp(log_sigma)
if log_sigma is None:
log_sigma = tf.math.log(sigma)
u = tf.reduce_sum(mean * mean, axis=1) # (B,)
tr = tf.reduce_sum(sigma, axis=1) # (B,)
k = tf.cast(tf.shape(mean)[1], tf.float32) # scalar
lg = tf.reduce_sum(log_sigma, axis=1) # (B,)
return 0.5 * (u + tr - k - lg)
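# Editor-added sanity check of the closed form: for mean=0 and sigma=1 per
# dimension (log_sigma=0), u=0, tr=k, lg=0, so KL = 0.5 * (0 + k - k - 0) = 0,
# i.e. KL(N(0, I) || N(0, I)) = 0, as expected.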
def log_multivar_gaussian(x, mean, sigma=None, log_sigma=None):
"""Computes log pdf at x of a multi-variate Gaussian.
Parameters
----------
x : (B, N)
mean : (B, N)
sigma : (B, N) | None
log_sigma: (B, N) | None
Returns
-------
log_p : (B,)
"""
if sigma is None:
sigma = tf.math.exp(log_sigma)
if log_sigma is None:
log_sigma = tf.math.log(sigma)
x = x - mean
upper = -0.5 * tf.reduce_sum(x * x / (sigma + EPS), axis=-1) # (B,)
k = tf.cast(tf.shape(x)[1], tf.float32)
log_pi = tf.math.log(np.pi * 2)
log_prod_sig = tf.reduce_sum(log_sigma, axis=1) # (B,)
    lower = 0.5 * (k * log_pi + log_prod_sig)  # log of the Gaussian normalizer; the original negated this and then subtracted it, flipping the sign of the result
    return upper - lower
def multivar_gaussian(x, mean, sigma):
"""Computes pdf at x of a multi-variate Gaussian
Parameters
----------
x : (B, N)
mean : (B, N)
sigma : (B, N)
Represents the diagonol of a covariance matrix of a factorized
Gaussian distribution.
Returns
-------
p_x : (B,)
"""
x = x - mean
upper = tf.reduce_sum(x * x / sigma, axis=-1) # (B,)
upper = tf.math.exp(-0.5 * upper) # (B,)
pi_vec = tf.ones_like(x) * np.pi * 2 # (B, N)
lower = pi_vec * sigma
lower = tf.reduce_prod(lower, axis=-1) # (B,)
lower = tf.math.sqrt(lower)
return upper / lower
def reconstruction_cross_entropy(prediction, labels, is_logit=True):
"""Computes reconstruction error using cross entropy.
Parameters
----------
prediction : (B, ...)
labels : (B, ...)
Same dimensions as `prediction`
is_logit : bool
Whether the prediction is logit (pre-softmax / sigmoid)
Returns
-------
recons_error : (B,)
"""
assert is_logit, "Not Implemented"
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(labels, tf.float32),
logits=prediction,
)
batch_size = tf.shape(prediction)[0]
cross_ent = tf.reshape(cross_ent, (batch_size, -1))
return tf.reduce_mean(cross_ent, -1)
def reconstruction_mean_square_error(prediction, labels, is_logit=True):
"""Computes reconstruction error using mean-square-error.
Parameters
----------
prediction : (B, ...)
labels : (B, ...)
Same dimensions as `prediction`
is_logit : bool
Whether the prediciton is logit.
Returns
-------
recons_error : (B,)
"""
if is_logit:
prediction = tf.nn.sigmoid(prediction)
error = prediction - tf.cast(labels, tf.float32)
error = error * error
batch_size = tf.shape(labels)[0]
error = tf.reshape(error, (batch_size, -1))
return tf.reduce_mean(error, axis=1)
def reconstruction_loss(loss_type, prediction, labels, is_logit):
# `is_logit` : whether the input `recons` is logit
if loss_type == 'mse':
loss = reconstruction_mean_square_error(
prediction=prediction,
labels=labels,
is_logit=is_logit,
)
elif loss_type == 'ce':
loss = reconstruction_cross_entropy(
prediction=prediction,
labels=labels,
is_logit=is_logit,
)
else:
raise ValueError()
return loss
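# Usage sketch (editor-added): per-sample loss for logits against 0/1 targets:
#   loss = reconstruction_loss('ce', prediction=logits, labels=images, is_logit=True)  # shape (B,)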
| 24.960199 | 88 | 0.590592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,228 | 0.44409 |
de9037d4a2c6b5fbbf0a5f4e22a9796ae161e5b0 | 4,288 | py | Python | Onderdelen/Hoofdscherm.py | RemcoTaal/IDP | 33959e29235448c38b7936f16c7421a24130e745 | ["MIT"] | null | null | null | Onderdelen/Hoofdscherm.py | RemcoTaal/IDP | 33959e29235448c38b7936f16c7421a24130e745 | ["MIT"] | null | null | null | Onderdelen/Hoofdscherm.py | RemcoTaal/IDP | 33959e29235448c38b7936f16c7421a24130e745 | ["MIT"] | null | null | null |
from tkinter import *
import os, xmltodict, requests
def knop1():
    'Open the current-station GUI'
    global root
    root.destroy()
    os.system('Huidig_Station.py')
def knop2():
    'Open the other-station GUI'
    global root
    root.destroy()
    os.system('Ander_Station.py')
def nl_to_eng():
    'Switch the Dutch interface text to English when the English flag is pressed'
    button1['text'] = 'Departure\ntimes current station'
    button2['text'] = 'Departure\ntimes other station'
    welkomlabel['text'] = 'Welcome to NS'
    photo['file'] = 'afbeeldingen/kaartlezerengels.PNG'  # forward slashes avoid the invalid '\k' escape
def eng_to_nl():
    'Switch the English interface text back to Dutch when the Dutch flag is pressed'
    button1['text'] = 'Actuele vertrektijden\nhuidig station'
    button2['text'] = 'Actuele vertrektijden\nander station'
    welkomlabel['text'] = 'Welkom bij NS'
    photo['file'] = 'afbeeldingen/kaartlezer.PNG'
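# Editor-added sketch: the two switcher functions above could collapse into one
# dict-driven helper (widgets as defined below; assumed structure):
#   TEXTS = {'en': ('Departure\ntimes current station', 'Departure\ntimes other station', 'Welcome to NS', 'afbeeldingen/kaartlezerengels.PNG'),
#            'nl': ('Actuele vertrektijden\nhuidig station', 'Actuele vertrektijden\nander station', 'Welkom bij NS', 'afbeeldingen/kaartlezer.PNG')}
#   def set_language(lang):
#       button1['text'], button2['text'], welkomlabel['text'], photo['file'] = TEXTS[lang]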
root = Tk()  # create the window
root.attributes('-fullscreen', True)  # open fullscreen
hoofdframe = Frame(master=root,  # yellow (top) section of the window
                   background='#FFD720',
                   width=1920,
                   height=980)
hoofdframe.pack(side='top', fill=X)
onderframe = Frame(master=root,  # blue (bottom) section of the window
                   background='#001F6A',
                   width=1920,
                   height=100)
onderframe.pack(side='bottom', fill=X)
welkomlabel = Label(master=hoofdframe,  # "Welkom bij NS" welcome text
                    text='Welkom bij NS',
                    foreground='#001F6A',
                    background='#FFD720',
                    font=('Helvetica', 60, 'bold'),
                    width=14,
                    height=3)
welkomlabel.place(x=615, y=50)
photo = PhotoImage(file='afbeeldingen/kaartlezer.PNG')  # card-reader photo
fotolabel = Label(master=hoofdframe, image=photo, borderwidth=-1)
fotolabel.place(x=745, y=320)
button1 = Button(master=hoofdframe,  # button 1: current station (the original comment said "Knop 2")
                 text="Actuele vertrektijden\nhuidig station",
                 foreground="white",
                 background="#001F6A",
                 font=('arial', 12, 'bold'),
                 width=17,
                 height=3,
                 command=knop1)
button1.place(x=765, y=650)
button2 = Button(master=hoofdframe,  # button 2: other station
                 text="Actuele vertrektijden\nander station",
                 foreground="white",
                 background="#001F6A",
                 font=('arial', 12, 'bold'),
                 width=17,
                 height=3,
                 command=knop2)
button2.place(x=965, y=650)
buttonNL = Button(master=onderframe,  # button to switch from English to Dutch
                  width=10,
                  height=10,
                  command=eng_to_nl)
photoNL = PhotoImage(file='afbeeldingen/kroodwitblauw.png')
buttonNL.config(image=photoNL,  # use the flag image as the button face
                width=48,
                height=25)
buttonNL.place(x=50, y=25)
labelengels = Label(master=onderframe,  # label under the English flag
                    text='English',
                    foreground='white',
                    background='#001F6A',
                    font=('arial', 9))
labelengels.place(x=128, y=55)
buttonENG = Button(master=onderframe,  # button to switch from Dutch to English
                   width=10,
                   height=10,
                   command=nl_to_eng)
photoENG = PhotoImage(file='afbeeldingen/kengenland.png')
buttonENG.config(image=photoENG,  # use the flag image as the button face
                 width=48,
                 height=25)
buttonENG.place(x=125, y=25)
labelnederlands = Label(master=onderframe,  # label under the Dutch flag
                        text='Nederlands',
                        foreground='white',
                        background='#001F6A',
                        font=('arial', 9))
labelnederlands.place(x=42, y=55)
root.mainloop()
| 34.861789 | 117 | 0.541045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,308 | 0.305037 |
de93263b9043812ffa8057bd744f43dfad03bbdf | 27 | py | Python | py2ifttt/__init__.py | moevis/py2ifttt | 99dc2be647c53c9279f2f212528fef7190de7476 | ["MIT"] | 3 | 2018-05-04T12:50:04.000Z | 2020-02-28T03:22:53.000Z | py2ifttt/__init__.py | moevis/py2ifttt | 99dc2be647c53c9279f2f212528fef7190de7476 | ["MIT"] | null | null | null | py2ifttt/__init__.py | moevis/py2ifttt | 99dc2be647c53c9279f2f212528fef7190de7476 | ["MIT"] | null | null | null |
from .py2ifttt import IFTTT
| 27 | 27 | 0.851852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
de9373d0df66278e0b02dc262104db37303b9a61 | 3,806 | py | Python | server-program/clientApplication.py | ezequias2d/projeto-so | 993f3dd12135946fe5b4351e8488b7aa8a18f37e | ["MIT"] | null | null | null | server-program/clientApplication.py | ezequias2d/projeto-so | 993f3dd12135946fe5b4351e8488b7aa8a18f37e | ["MIT"] | null | null | null | server-program/clientApplication.py | ezequias2d/projeto-so | 993f3dd12135946fe5b4351e8488b7aa8a18f37e | ["MIT"] | null | null | null |
import socket
import tokens
import connection
import io
import os
from PIL import Image
from message.literalMessage import LiteralMessage
from baseApplication import BaseApplication
class ClientApplication(BaseApplication):
def __init__(self, host, port):
super().__init__(host, port, tokens.CLIENT_TOKEN)
def show_image_file_from_storage(self):
filename = input("Filename:")
file = self.get_file(filename)
img = Image.open(io.BytesIO(file))
img.show()
def see_files_in_storage(self):
files = self.get_files_from_storage()
for filename in files:
print(filename)
def send_file_to_storage(self):
filename = input("Filename:")
self.send_file(filename)
def send_job(self, token):
filename = input("Filename:")
dstfilename = input("Destination filename:")
self.send_literal(token)
self.send_literal(filename)
self.send_literal(dstfilename)
messageToken = self.receive_message().value
message = self.receive_message().value
if messageToken == tokens.INFO_MESSAGE or messageToken == tokens.ERROR_MESSAGE:
print(message)
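    # Wire protocol of send_job, as implemented above (editor-added note):
    # the client sends [job token, source filename, destination filename] and
    # the server replies [message token, message]; INFO/ERROR texts are printed.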
def remove_file(self):
filename = input("Filename:")
self.send_literal(tokens.REMOVE_FILE)
self.send_literal(filename)
result = self.receive_message(True, 1.0)
if result is not None:
if result.value == tokens.ERROR_MESSAGE or result.value == tokens.INFO_MESSAGE:
message = self.receive_message().value
print(message)
def see_a_logfile(self):
files = [logfile for logfile in self.get_files_from_storage() if os.path.splitext(logfile)[1].lower() == '.log']
count = 0
for logfile in files:
print('{} - {}'.format(count, logfile))
count += 1
index = int(input('Index:'))
filename = files[index]
file = self.get_file(filename)
file = io.BytesIO(file).read()
print('Log:')
print(file.decode('UTF-8'))
def print_commands(self):
print('Commands:')
print('0 - Exit')
print('1 - Flip Image Horizontal')
print('2 - Flip Image Vertical')
print('3 - Rotate Image 90.')
print('4 - Rotate Image 180.')
print('5 - Rotate Image 270.')
print('6 - See Files in Storage.')
print('7 - Send File to Storage.')
print('8 - Show Image File from Storage.')
print('9 - Remove File from Storage.')
print('10 - See a logfile.')
def menu(self):
while not self.is_closed():
self.print_commands()
cmd = int(input("Cmd>"))
if cmd == 0:
self.close()
elif cmd == 1:
self.send_job(tokens.JOB_FLIP_HORIZONTAL)
elif cmd == 2:
self.send_job(tokens.JOB_FLIP_VERTICAL)
elif cmd == 3:
self.send_job(tokens.JOB_ROTATE_90)
elif cmd == 4:
self.send_job(tokens.JOB_ROTATE_180)
elif cmd == 5:
self.send_job(tokens.JOB_ROTATE_270)
elif cmd == 6:
self.see_files_in_storage()
elif cmd == 7:
self.send_file_to_storage()
elif cmd == 8:
self.show_image_file_from_storage()
elif cmd == 9:
self.remove_file()
elif cmd == 10:
self.see_a_logfile()
host = input('Host: ')
client = ClientApplication(host, 50007)
client.menu()  # editor-added: the original never started the menu loop; assuming menu() is the intended entry point
| 34.288288 | 121 | 0.547031 | 3556 | 0.934314 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.104834 |
de949d00cedaeb2c6790aaae5c34a82b7c16d8c5 | 230 | py | Python | ethernet/recv.py | bobbae/pingcap | c573688b42d35cefdbfa0121580807885aae8869 | ["Unlicense"] | null | null | null | ethernet/recv.py | bobbae/pingcap | c573688b42d35cefdbfa0121580807885aae8869 | ["Unlicense"] | 1 | 2019-10-11T16:16:22.000Z | 2019-10-11T16:16:22.000Z | ethernet/recv.py | bobbae/pingcap | c573688b42d35cefdbfa0121580807885aae8869 | ["Unlicense"] | null | null | null |
import sys
import socket
ETH_P_ALL=3 # not defined in socket module, sadly...
s=socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(ETH_P_ALL))
s.bind((sys.argv[1], 0))
r=s.recv(2000)
sys.stdout.write("<%s>\n"%repr(r))
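# Usage (editor-added note): AF_PACKET raw sockets require root privileges:
#   sudo python3 recv.py eth0
# ETH_P_ALL (0x0003) asks the kernel for every ethertype on the given interface.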
| 25.555556 | 75 | 0.726087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.208696 |
de94dc8dcf783cae1964a6addda472d802119e98 | 1,110 | py | Python | legacy/exam.py | wangxinhe2006/xyzzyy | 3267614132a3b9e448b6733f13e8019aa79db922 | ["MIT"] | 1 | 2021-07-16T02:29:35.000Z | 2021-07-16T02:29:35.000Z | legacy/exam.py | wangxinhe2006/xyzzyy | 3267614132a3b9e448b6733f13e8019aa79db922 | ["MIT"] | null | null | null | legacy/exam.py | wangxinhe2006/xyzzyy | 3267614132a3b9e448b6733f13e8019aa79db922 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
from json import loads
from urllib.request import urlopen, Request
SITE = input('Site: ')
COOKIE = 'pj=' + input('pj=')
examList = loads(urlopen(Request(f'{SITE}/data/module/homework/all.asp?sAct=GetHomeworkListByStudent&iIsExam=1&iPageCount=' + loads(urlopen(Request(f'{SITE}/data/module/homework/all.asp?sAct=GetHomeworkListByStudent&iIsExam=1', headers={'Cookie': COOKIE})).read())['iCount'], headers={'Cookie': COOKIE})).read())
assert examList['sRet'] == 'succeeded', examList
for exam in examList['aHomework']:
if not exam['sTimeFlag'] and not int(exam['iFinished']):
examContent = loads(urlopen(Request(f'{SITE}/data/module/exam/all.asp?sAct=GetExamContent&iExamId=' + exam['sQuestionIds'], headers={'Cookie': COOKIE})).read())
assert examContent['sRet'] == 'succeeded', examContent
try:
for content in examContent['aContent']:
print(content['sTitle'])
for process in examContent['aProcess']:
print(process['iOrder'], process['sAnswer'], sep='\t')
except IndexError:
pass
| 48.26087 | 312 | 0.664865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.39009 |
de95cb380efb4a5351375e80063db451dd2899b5 | 3,803 | py | Python | TkPy/module.py | tbor8080/pyprog | 3642b9af2a92f7369d9b6fa138e47ba22df3271c | ["MIT"] | null | null | null | TkPy/module.py | tbor8080/pyprog | 3642b9af2a92f7369d9b6fa138e47ba22df3271c | ["MIT"] | null | null | null | TkPy/module.py | tbor8080/pyprog | 3642b9af2a92f7369d9b6fa138e47ba22df3271c | ["MIT"] | null | null | null |
import sys
import os
import tkinter.filedialog as fd
from time import sleep
import datetime
import tkinter
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
import threading
# New File & Duplicate File Save
def saveasFilePath( filetype=[ ("",".txt"), ("CSV",".csv") ] ):
return fd.asksaveasfilename(filetypes=filetype, initialdir=os.path.abspath(os.path.dirname(__file__)))
# FileSave
def saveFile(file_name, data, encoding='utf-8'):
with open(file_name, "wt", encoding=encoding) as fp:
fp.write(data)
class PyTkTextEditor:
def __init__(self, geometory='800x600'):
# Window Geometory
self.__geometory=geometory
# Application Path
self.__appdir=os.path.abspath(os.path.dirname(__file__))
self.__fileTypes=[("*", ".txt"),("CSV", ".csv")]
# Child Objects
def getWindowSize(self):
return self.__geometory.split('x')
def __OnClick(self, e):
print(e,self)
def __onKeyPress__(self, e):# KeyPressEventHandle
# print(e.state, e.keycode, self.__root.focus_get(), e, self)
if e.state==8 and e.keycode==65651:# command + s current save
# Debug Print
# self.asSave("sample.txt", textWidget.get("1.0","end"))
if self.__root.filename=="":
self.__root.title("Untitled")
self.__root.filename=self.asSavePath(self.__fileTypes)
self.asSave(self.__root.filename, self.widget.get("1.0","end"))
        elif e.state==8 and e.keycode==2949230:  # command + n (new file)
            self.widget.insert("1.0", "Not implemented (command + n)")
        elif e.state==8 and e.keycode==2031727:  # command + o (open file)
            self.asOpen()
        elif e.state==9 and e.keycode==65651:  # command + shift + s (save as)
            self.__root.filename=self.asSavePath(self.__fileTypes)
            self.__root.title(self.__root.filename)
            self.asSave(self.__root.filename, self.widget.get("1.0","end"))
        elif e.state==9 and e.keycode==2031727:  # command + shift + o (open file multi)
            self.widget.insert("1.0", "Not implemented (command + shift + o)")
        elif e.state==64 and e.keycode==7927557:  # fn + F2
            self.widget.insert("1.0", "Not implemented (fn + F2)")
def windows(self):
self.__root=tkinter.Tk()
self.__root.geometry(self.__geometory)
self.__root.filename=''
self.__root.font=''
self.__root.title('Untitled')
self.__root.focus_set()
self.__root.title(self.__root.focus_get())
fonts=('Hiragino,Meiryo',32,'')
width,height=self.getWindowSize()
self.widget=tk.scrolledtext.ScrolledText(self.__root, bg="#fff", width=width, height=height)
self.widget.configure(font=fonts)
self.widget.pack()
self.__root.bind('<Key>', self.__onKeyPress__)
self.__root.mainloop()
return self.__root
def asSave(self, filename, data, encoding='utf-8'):
try:
with open(filename, "wt", encoding=encoding) as f:
f.write(data)
except FileNotFoundError:
print('FileNotFoundError')
def asSavePath(self,filetype=[("",".txt"),("CSV",".csv")]):
return fd.asksaveasfilename(filetypes=filetype, initialdir=self.__appdir)
def asOpenPath(self, filetype=[("*",".txt"),("csv",".csv")]):
return fd.askopenfilename(filetypes=filetype,initialdir=self.__appdir)
def asOpen(self):
self.__root.filename=self.asOpenPath(self.__fileTypes)
self.__root.title(self.__root.filename)
self.__root.focus_set()
text=''
with open(self.__root.filename, 'rt') as fp:
text=fp.read()
self.widget.insert("1.0", text)
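# Minimal usage sketch (editor-added; not part of the original module):
#   editor = PyTkTextEditor(geometory='800x600')
#   editor.windows()  # builds the editor UI and enters the Tk main loop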
| 34.261261 | 106 | 0.616618 | 3,261 | 0.853442 | 0 | 0 | 0 | 0 | 0 | 0 | 719 | 0.188171 |
de97499bd44b3c33d3853cafca12103889273c3c | 6,005 | py | Python | core/polyaxon/cli/components/tuner.py | Ohtar10/polyaxon | 1e41804e4ae6466b6928d06bc6ee6d2d9c7b8931 | ["Apache-2.0"] | null | null | null | core/polyaxon/cli/components/tuner.py | Ohtar10/polyaxon | 1e41804e4ae6466b6928d06bc6ee6d2d9c7b8931 | ["Apache-2.0"] | null | null | null | core/polyaxon/cli/components/tuner.py | Ohtar10/polyaxon | 1e41804e4ae6466b6928d06bc6ee6d2d9c7b8931 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from polyaxon.logger import logger
@click.group()
def tuner():
pass
@tuner.command()
@click.option(
"--matrix",
help="A string representing the matrix configuration for bayesian optimization.",
)
@click.option(
"--join", help="A string representing the join to fetch configs and metrics."
)
@click.option("--iteration", type=int, help="The current iteration.")
def bayes(matrix, join, iteration):
"""Create suggestions based on bayesian optimization."""
from polyaxon.client import RunClient
from polyaxon.polyflow import V1Bayes, V1Join
from polyaxon.polytune.iteration_lineage import (
get_iteration_definition,
handle_iteration,
handle_iteration_failure,
)
from polyaxon.polytune.search_managers.bayesian_optimization.manager import (
BayesSearchManager,
)
matrix = V1Bayes.read(matrix)
join = V1Join.read(join)
client = RunClient()
values = get_iteration_definition(
client=client,
iteration=iteration,
join=join,
optimization_metric=matrix.metric.name,
)
if not values:
return
run_uuids, configs, metrics = values
retry = 1
exp = None
suggestions = None
while retry < 3:
try:
suggestions = BayesSearchManager(
config=matrix,
).get_suggestions(configs=configs, metrics=metrics)
exp = None
break
        except Exception as e:
            exp = e  # keep a binding: the `as` name is unset when the except block exits, so the original `if exp:` below raised NameError
            retry += 1
            logger.warning(e)
if exp:
handle_iteration_failure(client=client, exp=exp)
return
handle_iteration(
client=client,
iteration=iteration,
suggestions=suggestions,
)
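# Example invocation (editor-added; option names as declared above, assuming
# this click group is mounted as `tuner` on the Polyaxon CLI):
#   ... tuner bayes --matrix '<V1Bayes config>' --join '<V1Join config>' --iteration 2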
@tuner.command()
@click.option(
"--matrix", help="A string representing the matrix configuration for hyperband."
)
@click.option(
"--join", help="A string representing the join to fetch configs and metrics."
)
@click.option("--iteration", type=int, help="The current hyperband iteration.")
@click.option(
"--bracket-iteration", type=int, help="The current hyperband bracket iteration."
)
def hyperband(matrix, join, iteration, bracket_iteration):
"""Create suggestions based on hyperband."""
from polyaxon.client import RunClient
from polyaxon.polyflow import V1Hyperband, V1Join
from polyaxon.polytune.iteration_lineage import (
get_iteration_definition,
handle_iteration,
handle_iteration_failure,
)
from polyaxon.polytune.search_managers.hyperband.manager import HyperbandManager
matrix = V1Hyperband.read(matrix)
matrix.set_tuning_params()
join = V1Join.read(join)
client = RunClient()
values = get_iteration_definition(
client=client,
iteration=iteration,
join=join,
optimization_metric=matrix.metric.name,
name="in-iteration-{}-{}".format(iteration, bracket_iteration),
)
if not values:
return
run_uuids, configs, metrics = values
retry = 1
exp = None
suggestions = None
while retry < 3:
try:
suggestions = HyperbandManager(config=matrix).get_suggestions(
configs=configs,
metrics=metrics,
bracket_iteration=bracket_iteration,
iteration=iteration,
)
exp = None
break
        except Exception as e:
            exp = e  # see note in bayes(): the `as` binding is cleared after the block
            retry += 1
            logger.warning(e)
if exp:
handle_iteration_failure(client=client, exp=exp)
return
handle_iteration(
client=client,
iteration=iteration,
suggestions=suggestions,
summary={"bracket_iteration": bracket_iteration},
name="out-iteration-{}-{}".format(iteration, bracket_iteration),
)
@tuner.command()
@click.option(
"--matrix", help="A string representing the matrix configuration for hyperopt."
)
@click.option(
"--join", help="A string representing the join to fetch configs and metrics."
)
@click.option("--iteration", type=int, help="The current iteration.")
def hyperopt(matrix, join, iteration):
"""Create suggestions based on hyperopt."""
from polyaxon.client import RunClient
from polyaxon.polyflow import V1Hyperopt, V1Join
from polyaxon.polytune.iteration_lineage import (
get_iteration_definition,
handle_iteration,
handle_iteration_failure,
)
from polyaxon.polytune.search_managers.hyperopt.manager import HyperoptManager
matrix = V1Hyperopt.read(matrix)
join = V1Join.read(join)
client = RunClient()
values = get_iteration_definition(
client=client,
iteration=iteration,
join=join,
optimization_metric=matrix.metric.name,
)
if not values:
return
run_uuids, configs, metrics = values
retry = 1
exp = None
suggestions = None
while retry < 3:
try:
suggestions = HyperoptManager(config=matrix).get_suggestions(
configs=configs, metrics=metrics
)
exp = None
break
        except Exception as e:
            exp = e  # see note in bayes(): the `as` binding is cleared after the block
            retry += 1
            logger.warning(e)
if exp:
handle_iteration_failure(client=client, exp=exp)
return
handle_iteration(
client=client,
iteration=iteration,
suggestions=suggestions,
)
| 28.732057 | 85 | 0.657119 | 0 | 0 | 0 | 0 | 5,339 | 0.889092 | 0 | 0 | 1,416 | 0.235803 |
de974a6af213636bff804abc1abfb40a31e4354d | 8,810 | py | Python | judge/base/__init__.py | fanzeyi/Vulpix | 9448e968973073c98231b22663bbebb2a452dcd7 | ["BSD-3-Clause"] | 13 | 2015-03-08T11:59:28.000Z | 2021-07-11T11:58:01.000Z | src/tornado/demos/Vulpix-master/judge/base/__init__.py | ptphp/PyLib | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | ["Apache-2.0"] | null | null | null | src/tornado/demos/Vulpix-master/judge/base/__init__.py | ptphp/PyLib | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | ["Apache-2.0"] | 3 | 2015-05-29T16:14:08.000Z | 2016-04-29T07:25:26.000Z |
# -*- coding: utf-8 -*-
# AUTHOR: Zeray Rice <[email protected]>
# FILE: judge/base/__init__.py
# CREATED: 01:49:33 08/03/2012
# MODIFIED: 15:42:49 19/04/2012
# DESCRIPTION: Base handler
import re
import time
import urllib
import hashlib
import httplib
import datetime
import functools
import traceback
import uuid      # used by the token refresh in get_current_user but missing in the original
import binascii  # idem
import simplejson as json
from operator import itemgetter
from pygments import highlight
from pygments.lexers import CLexer
from pygments.lexers import CppLexer
from pygments.lexers import DelphiLexer
from pygments.formatters import HtmlFormatter
from sqlalchemy.exc import StatementError
from sqlalchemy.orm.exc import NoResultFound
import tornado.web
import tornado.escape
import tornado.locale  # used by get_user_locale but not explicitly imported in the original
from tornado.httpclient import AsyncHTTPClient
from judge.db import Auth
from judge.db import Member
from judge.utils import _len
CODE_LEXER = {
1 : DelphiLexer,
2 : CLexer,
3 : CppLexer,
}
CODE_LANG = {
1 : "delphi",
2 : "c",
3 : "cpp",
}
def unauthenticated(method):
"""Decorate methods with this to require that user be NOT logged in"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.current_user:
if self.request.method in ("GET", "HEAD"):
self.redirect("/")
return
            raise tornado.web.HTTPError(403)  # HTTPError was referenced without an import; qualify it via tornado.web
return method(self, *args, **kwargs)
return wrapper
class BaseHandler(tornado.web.RequestHandler):
_ = lambda self, text: self.locale.translate(text) # i18n func
xhtml_escape = lambda self, text: tornado.escape.xhtml_escape(text) if text else text # xhtml escape
def get_page_count(self, count, pre = 10):
'''Return page num by input item num'''
return count / pre + (1 if count % pre else 0)
def get_current_user(self):
        '''Check whether the user is logged in'''
auth = self.get_secure_cookie("auth")
member_id = self.get_secure_cookie("uid")
member = None
if auth and member_id:
try:
auth = self.db.query(Auth).filter_by(secret = auth).filter_by(member_id = member_id).one()
except StatementError:
# for mysql session broken
self.db.rollback()
auth = self.db.query(Auth).filter_by(secret = auth).filter_by(member_id = member_id).one()
if auth:
member = self.db.query(Member).get(auth.member_id)
if member:
                    delta = datetime.datetime.now() - auth.create  # token age; the original subtraction was reversed, so the refresh below never fired
                    if delta.days > 20:
""" Refresh Token """
auth.delete()
self.db.commit()
auth = Auth()
auth.member_id = member_id
auth.secret = binascii.b2a_hex(uuid.uuid4().bytes)
auth.create = datetime.datetime.now()
self.db.add(auth)
self.db.commit()
self.set_cookie('auth', auth.secret)
self.set_cookie('uid', auth.member_id)
else:
self.clear_cookie("auth")
self.clear_cookie("uid")
return member
def get_user_locale(self):
'''Get user locale, first check cookie, then browser'''
result = self.get_cookie('LANG', default = None)
if result == None:
result = self.get_browser_locale()
else:
result = tornado.locale.get(result)
return result
def sendmail(self):
'''Send mail func, send mail to someone'''
pass
    def render(self, tplname, args = None):
        '''Rewrite render func to use jinja2'''
        # avoid the shared mutable default argument and don't mutate the caller's dict
        args = dict(args) if args else {}
        if "self" in args:
            args.pop("self")
tpl = self.jinja2.get_template(tplname)
ren = tpl.render(page = self, _ = self._, user = self.current_user, **args)
self.write(ren)
self.db.close()
self.finish()
def write_error(self, status_code, **kwargs):
'''Rewrite write_error for custom error page'''
if status_code == 404:
self.render("404.html")
return
elif status_code == 500:
error = []
for line in traceback.format_exception(*kwargs['exc_info']):
error.append(line)
error = "\n".join(error)
self.render("500.html", locals())
return
msg = httplib.responses[status_code]
self.render("error.html", locals())
def check_text_value(self, value, valName, required = False, max = 65535, min = 0, regex = None, regex_msg = None, is_num = False, vaild = []):
''' Common Check Text Value Function '''
error = []
if not value:
if required:
error.append(self._("%s is required") % valName)
return error
if is_num:
try:
tmp = int(value)
except ValueError:
return [self._("%s must be a number.") % valName]
else:
if vaild and tmp not in vaild:
return [self._("%s is invalid.") % valName]
return []
if _len(value) > max:
error.append(self._("%s is too long.") % valName)
elif _len(value) < min:
error.append(self._("%s is too short.") % valName)
if regex:
if not regex.match(value):
if regex_msg:
error.append(regex_msg)
else:
error.append(self._("%s is invalid.") % valName)
elif vaild and value not in vaild:
            error.append(self._("%s is invalid.") % valName)
return error
def check_username(self, usr, queryDB = False):
error = []
error.extend(self.check_text_value(usr, self._("Username"), required = True, max = 20, min = 3, \
regex = re.compile(r'^([\w\d]*)$'), \
regex_msg = self._("A username can only contain letters and digits.")))
if not error and queryDB:
try:
query = self.select_member_by_username_lower(usr.lower())
except NoResultFound:
pass
else:
error.append(self._("That username is taken. Please choose another."))
return error
def check_password(self, pwd):
return self.check_text_value(pwd, self._("Password"), required = True, max = 32, min = 6)
def check_email(self, email, queryDB = False):
error = []
error.extend(self.check_text_value(email, self._("E-mail"), required = True, max = 100, min = 3, \
regex = re.compile(r"(?:^|\s)[-a-z0-9_.+]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", re.IGNORECASE), \
regex_msg = self._("Your Email address is invalid.")))
if not error and queryDB:
try:
query = self.select_member_by_email(email)
except NoResultFound:
pass
else:
error.append(self._("That Email is taken. Please choose another."))
return error
def get_gravatar_url(self, email):
gravatar_id = hashlib.md5(email.lower()).hexdigest()
return "http://www.gravatar.com/avatar/%s?d=mm" % (gravatar_id)
def post_to_judger(self, query, judger, callback = None):
query["time"] = time.time()
query["code"] = query["code"].decode("utf-8")
query = dict(sorted(query.iteritems(), key=itemgetter(1)))
jsondump = json.dumps(query)
sign = hashlib.sha1(jsondump + judger.pubkey.strip()).hexdigest()
query["sign"] = sign
http_client = AsyncHTTPClient()
http_client.fetch(judger.path, method = "POST", body = urllib.urlencode({"query" : json.dumps(query)}), callback = callback)
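    # Signing scheme used above (editor-added note): the query is sorted by
    # value, JSON-dumped, and signed with sha1(jsondump + judger.pubkey.strip());
    # a judger can verify by recomputing that digest over the payload minus "sign".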
    def highlight_code(self, code, lang):
        return highlight(code, CODE_LEXER[lang](), HtmlFormatter(linenos = True))
        # NOTE: the early return above makes the handmade table rendering below
        # unreachable; it is kept as-is for reference.
        codestr = highlight(code, CODE_LEXER[lang](), HtmlFormatter(nowrap = True))
table = '<div class="highlight"><table><tr><td class="gutter"><pre class="line-numbers">'
code = ''
lines = codestr.split("\n")
for index, line in zip(range(len(lines)), lines):
table += "<span class='line-number'>%d</span>\n" % (index + 1)
code += "<span class='line'>%s</span>\n" % line
table += "</pre></td><td class='code'><pre><code class='%s'>%s</code></pre></td></tr></table></div>" % (CODE_LANG[lang], code)
return table
@property
def db(self):
return self.application.db
@property
def jinja2(self):
return self.application.jinja2
| 40.787037 | 147 | 0.559932 | 7,436 | 0.844041 | 0 | 0 | 417 | 0.047333 | 0 | 0 | 1,475 | 0.167423 |
de9773cffe9839ef07dd2219fd1b0246be382284 | 1,839 | py | Python | src/blog/migrations/0001_initial.py | triump0870/rohan | 3bd56ccdc35cb67823117e78dc02becbfbd0b329 | ["MIT"] | null | null | null | src/blog/migrations/0001_initial.py | triump0870/rohan | 3bd56ccdc35cb67823117e78dc02becbfbd0b329 | ["MIT"] | null | null | null | src/blog/migrations/0001_initial.py | triump0870/rohan | 3bd56ccdc35cb67823117e78dc02becbfbd0b329 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markdownx.models
import myblog.filename
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255)),
('slug', models.SlugField(unique=True, max_length=255)),
('content', markdownx.models.MarkdownxField()),
('image', models.ImageField(upload_to=myblog.filename.generatefilename(b'posts/'), null=True, verbose_name=b'Cover Image', blank=True)),
('status', models.CharField(default=b'p', max_length=1, choices=[(b'd', b'Draft'), (b'p', b'Published'), (b'w', b'Withdrawn')])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(related_name='posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at', 'title'],
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.SlugField(unique=True, max_length=200)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='blog.Tag'),
),
]
| 39.12766 | 152 | 0.582926 | 1,650 | 0.897227 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.138662 |
de9904583298a90d85047bd7e803be42fe6b0d62 | 1,545 | py | Python | exams/61a-su20-practice-mt/q6/tests/q6.py | jjllzhang/CS61A | 57b68c7c06999210d96499f6d84e4ec99085d396 | ["MIT"] | 1 | 2022-01-22T11:45:01.000Z | 2022-01-22T11:45:01.000Z | exams/61a-su20-practice-mt/q6/tests/q6.py | jjllzhang/CS61A | 57b68c7c06999210d96499f6d84e4ec99085d396 | ["MIT"] | null | null | null | exams/61a-su20-practice-mt/q6/tests/q6.py | jjllzhang/CS61A | 57b68c7c06999210d96499f6d84e4ec99085d396 | ["MIT"] | null | null | null |
test = {'name': 'q6',
'points': 10,
'suites': [{'cases': [{'code': '>>> increment = lambda x: x + 1\n'
'\n'
'>>> square = lambda x: x * x\n'
'\n'
'>>> do_nothing = make_zipper(increment, '
'square, 0)\n'
'\n'
">>> do_nothing(2) # Don't call either f1 or "
'f2, just return your input untouched\n'
'2\n'
'\n'
'>>> incincsq = make_zipper(increment, square, '
'112)\n'
'\n'
'>>> incincsq(2) # '
'increment(increment(square(2))), so 2 -> 4 -> '
'5 -> 6\n'
'6\n'
'\n'
'>>> sqincsqinc = make_zipper(increment, '
'square, 2121)\n'
'\n'
'>>> sqincsqinc(2) # '
'square(increment(square(increment(2)))), so 2 '
'-> 3 -> 9 -> 10 -> 100\n'
'100\n'}],
'scored': True,
'setup': 'from q6 import *',
            'type': 'doctest'}]}
| 49.83871 | 80 | 0.253722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 630 | 0.407767 |
de9bc65cbfa30de1a8294fb16fd3712d1ce427db | 3,566 | py | Python | #17.py | Domino2357/daily-coding-problem | 95ddef9db53c8b895f2c085ba6399a3144a4f8e6 | ["MIT"] | null | null | null | #17.py | Domino2357/daily-coding-problem | 95ddef9db53c8b895f2c085ba6399a3144a4f8e6 | ["MIT"] | null | null | null | #17.py | Domino2357/daily-coding-problem | 95ddef9db53c8b895f2c085ba6399a3144a4f8e6 | ["MIT"] | null | null | null |
"""
This problem was asked by Google.
Suppose we represent our file system by a string in the following manner:
The string "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext" represents:
dir
subdir1
subdir2
file.ext
The directory dir contains an empty sub-directory subdir1 and a sub-directory subdir2 containing a file file.ext.
The string "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext" represents:
dir
subdir1
file1.ext
subsubdir1
subdir2
subsubdir2
file2.ext
The directory dir contains two sub-directories subdir1 and subdir2. subdir1 contains a file file1.ext and an empty
second-level sub-directory subsubdir1. subdir2 contains a second-level sub-directory subsubdir2 containing a file file2.ext.
We are interested in finding the longest (number of characters) absolute path to a file within our file system. For example,
in the second example above, the longest absolute path is "dir/subdir2/subsubdir2/file2.ext", and its length is 32
(not including the double quotes).
Given a string representing the file system in the above format, return the length of the longest absolute path to a
file in the abstracted file system. If there is no file in the system, return 0.
Note:
The name of a file contains at least a period and an extension.
The name of a directory or sub-directory will not contain a period.
"""
# I am assuming that the number of t's in /n/t/t/t.../t/ stands for the level in the tree
# Furthermore, I am assuming the format of the string to be consistent
# last but not least I'll make the assumption that this is actually a tree, i.e., it has no cycles
def trace_back(string_tree):
return longest_path_to_file(deserialize(string_tree))
class FileTree:
def __init__(self, val, children):
self.val = val
self.children = children
def longest_path_to_file(file_tree, max_path_length = 0):
    # NOTE: left unfinished by the author ("couldn't finish this in time");
    # a complete alternative sketch is added further down the file.
    deepest_layer = True
    for child in file_tree.children:
        if child.children:
            deepest_layer = False
    if deepest_layer:
        for child in file_tree.children:
            print("Couldn't finish this in time")
# top level idea: deserialize the tree and then perform the operation on it
def deserialize(string_file_tree):
    # NOTE: the `del` statements below require a mutable sequence; passing a
    # plain str raises TypeError, so the input would have to be list(serialized).
    # split off the root
    root = ''
children = []
i = 0
while i < len(string_file_tree):
if string_file_tree[i] == '\\':
break
else:
root = root + string_file_tree[i]
del string_file_tree[i]
i += 1
if not string_file_tree:
return FileTree(root, [])
else:
# cut off first \n\t\tsomefile
del string_file_tree[0:4]
for subtree in find_subtree(string_file_tree):
children.append(deserialize(subtree))
def find_subtree(string_file_tree):
subtree = ''
del string_file_tree[0:4]
j = 0
while j < len(string_file_tree):
# cut of the next subtree beginning with \n\tsomefilename, do recursion afterwards
if string_file_tree[j:j + 4] == "\\n\\t":
if not string_file_tree[j + 5] == "\\":
break
else:
# delete the \t\
del string_file_tree[j+3:j+4]
j += 1
else:
subtree += string_file_tree[j]
del string_file_tree[j]
j += 1
if not string_file_tree:
return [subtree]
else:
return [subtree] + find_subtree(string_file_tree)
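# Editor-added: a compact, working alternative for the stated problem (kept
# separate from the unfinished attempt above). It walks the serialized string
# line by line, tracking the running path length at each depth.
def length_longest_path(fs):
    depth_len = {0: 0}  # depth -> length of the path prefix ending at that depth
    best = 0
    for line in fs.split('\n'):
        name = line.lstrip('\t')
        depth = len(line) - len(name)  # number of leading tabs
        if '.' in name:  # per the problem statement, only file names contain a period
            best = max(best, depth_len[depth] + len(name))
        else:
            depth_len[depth + 1] = depth_len[depth] + len(name) + 1  # +1 for the '/'
    return best
# e.g. length_longest_path("dir\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext") -> 32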
if __name__ == '__main__':
print()
| 31.280702 | 124 | 0.666854 | 110 | 0.030847 | 0 | 0 | 0 | 0 | 0 | 0 | 1,961 | 0.549916 |
de9bd50729808fda9f77f7ae5831c5d7b432a027 | 1,315 | py | Python | turbot/db.py | emre/turbot | 7bc49a8b79bce7f2490036d9255e5b3df8fff4b1 | ["MIT"] | 3 | 2017-10-17T22:02:06.000Z | 2018-05-07T10:29:31.000Z | turbot/db.py | emre/turbot | 7bc49a8b79bce7f2490036d9255e5b3df8fff4b1 | ["MIT"] | null | null | null | turbot/db.py | emre/turbot | 7bc49a8b79bce7f2490036d9255e5b3df8fff4b1 | ["MIT"] | 3 | 2018-10-16T13:28:57.000Z | 2021-02-24T13:23:29.000Z |
from os.path import expanduser, exists
from os import makedirs
TURBOT_PATH = expanduser('~/.turbot')
UPVOTE_LOGS = expanduser("%s/upvote_logs" % TURBOT_PATH)
CHECKPOINT = expanduser("%s/checkpoint" % TURBOT_PATH)
REFUND_LOG = expanduser("%s/refunds" % TURBOT_PATH)
def load_checkpoint(fallback_block_num=None):
try:
return int(open(CHECKPOINT).read())
except FileNotFoundError as e:
if not exists(TURBOT_PATH):
makedirs(TURBOT_PATH)
dump_checkpoint(fallback_block_num)
return load_checkpoint()
def dump_checkpoint(block_num):
f = open(CHECKPOINT, 'w+')
f.write(str(block_num))
f.close()
def load_refunds():
try:
refunds = open(REFUND_LOG).readlines()
refunds = [r.replace("\n", "") for r in refunds]
except FileNotFoundError as e:
if not exists(TURBOT_PATH):
makedirs(TURBOT_PATH)
f = open(REFUND_LOG, 'w+')
f.close()
refunds = []
return refunds
def refund_key(to, memo, amount):
return "%s-%s-%s" % (to, memo, amount)
def add_refund(to, memo, amount):
    f = open(REFUND_LOG, 'a+')
    f.write(refund_key(to, memo, amount) + "\n")  # newline-terminate so load_refunds() sees one entry per line
    f.close()
def already_refunded(to, memo, amount):
refunds = load_refunds()
return refund_key(to, memo, amount) in refunds
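# Round-trip sketch (editor-added):
#   dump_checkpoint(1234); load_checkpoint()          -> 1234
#   add_refund('alice', 'tx1', '1.000 STEEM')
#   already_refunded('alice', 'tx1', '1.000 STEEM')   -> True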
| 24.351852 | 56 | 0.650951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.062357 |
de9c334f30690be489dc54509a0861d269ca08ea | 111 | py | Python | output/models/ms_data/additional/member_type021_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/additional/member_type021_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/additional/member_type021_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | null | null | null |
from output.models.ms_data.additional.member_type021_xsd.member_type021 import Root
__all__ = [
"Root",
]
| 18.5 | 83 | 0.774775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.054054 |
de9ca51de5e3ba22b379f50a5e136405d59c8422 | 4,361 | py | Python | estimator.py | 2besweet/Covid-19-project | 8cfa76662ed0b84999134a9faacbf390e8de31f3 | ["MIT"] | 1 | 2021-01-31T19:04:11.000Z | 2021-01-31T19:04:11.000Z | estimator.py | 2besweet/Covid-19-project | 8cfa76662ed0b84999134a9faacbf390e8de31f3 | ["MIT"] | 1 | 2021-05-11T10:34:00.000Z | 2021-05-11T10:34:00.000Z | estimator.py | 2besweet/Covid-19-project | 8cfa76662ed0b84999134a9faacbf390e8de31f3 | ["MIT"] | null | null | null |
import math  # math.trunc is used below but was never imported

def estimator(data):
    return data
def __init__(self,reportedCases,name,days,totalHospitalbeds,avgDailyIncomeInUsd,avgDailyIncomePopulation):
self.reportedCases=reportedCases
self.name=name
self.days=days
self.totalHospitalbeds=totalHospitalbeds
self.avgDailyIncomeInUsd=avgDailyIncomeInUsd
self.avgDailyIncomePopulation=avgDailyIncomePopulation
def covid19Estimator(self):
myinputs = {
"region": {
"name": self.name,
"avgAge": 19.7,
"avgDailyIncomeInUSD": self.avgDailyIncomeInUsd,
"avgDailyIncomePopulation": self.avgDailyIncomePopulation
},
"periodType": self.days,
"timeToElapse": 58,
"reportedCases": self.reportedCases,
"population": 66622705,
"totalHospitalBeds": self.totalHospitalbeds}
currentlyInfected = self.reportedCases * 10
currentlyInfectedSevere = self.reportedCases * 50
factor = self.days / 3
factorRounded = math.trunc(factor)
InfectionsByRequestedTime = currentlyInfected * (2 ** factorRounded)
InfectionsByRequestedTimeSevere = currentlyInfectedSevere * (2 ** factorRounded)
ImpactSevereCasesByRequestedTime = InfectionsByRequestedTime * 15 / 100
SevereCasesByRequestedTime = InfectionsByRequestedTimeSevere * 15 / 100
hospitalBedsByRequestedTime1 = self.totalHospitalbeds * 35 / 95
hospitalBedsByRequestedTimeAtFullCapacity1 = self.totalHospitalbeds * 35 / 100
hospitalBedsByRequestedTime = math.trunc(hospitalBedsByRequestedTime1)
hospitalBedsByRequestedTimeAtFullCapacity = math.trunc(hospitalBedsByRequestedTimeAtFullCapacity1)
casesForICUByRequestedTime = InfectionsByRequestedTime * 5 / 100
casesForICUByRequestedTimeSevere = InfectionsByRequestedTimeSevere * 5 / 100
casesForVentilatorsByRequestedTime = InfectionsByRequestedTime * 2 / 100
casesForVentilatorsByRequestedTimeSevere = InfectionsByRequestedTimeSevere * 2 / 100
dollarsInFlight = InfectionsByRequestedTime * 0.65 * 1.5 * 30
dollarsInFlightSevere = InfectionsByRequestedTimeSevere * self.avgDailyIncomePopulation * self.avgDailyIncomeInUsd * 30
myoutputs = {
'data': {'inputData': myinputs},
'impact': {
'currentlyInfected': currentlyInfected,
'InfectionsByRequestedTime': InfectionsByRequestedTime,
'SevereCasesByRequestedTime': ImpactSevereCasesByRequestedTime,
'HospitalBedsByRequestedTime': hospitalBedsByRequestedTime,
'hospitalBedsByRequestedTimeFullCapacity': hospitalBedsByRequestedTimeAtFullCapacity,
'casesForICUByRequestedTime': casesForICUByRequestedTime,
'casesForVentilatorsByRequestedTime': casesForVentilatorsByRequestedTime,
'dollarsInFlight': dollarsInFlight,
},
'severeImpact': {
"currentlyInfected": currentlyInfectedSevere,
"InfectionsByRequestedTime": InfectionsByRequestedTimeSevere,
"SevereCasesByRequestedTime": SevereCasesByRequestedTime,
'HospitalBedsByRequestedTime': hospitalBedsByRequestedTime,
'hospitalBedsByRequestedTimeFullCapacity': hospitalBedsByRequestedTimeAtFullCapacity,
'casesForICUByRequestedTime': casesForICUByRequestedTimeSevere,
"casesForVentilatorsByRequestedTime": casesForVentilatorsByRequestedTimeSevere,
'dollarsInFlight': dollarsInFlightSevere
}
}
print(myoutputs)
# Editor-added: the functions above read like methods; bundle them into a class
# so the original call pattern works (type() avoids re-indenting the defs).
Estimator = type('Estimator', (), {'__init__': __init__, 'covid19Estimator': covid19Estimator})
day = Estimator(674, "Africa", 28, 1380614, 1.5, 0.65)  # was estimator(...), which takes a single argument
day.covid19Estimator()
# Interactive inputs (int/float instead of the unsafe eval; note the
# hard-coded values below immediately overwrite these)
reportedCases = int(input('Enter the number of reported cases:-'))
name = input('Enter the name of the region:-')
days = int(input('Enter the number of days:-'))
totalHospitalbeds = int(input('Enter the total number of beds available in the region:'))
avgDailyIncomeInUsd = float(input('Enter the Average income:-'))
avgDailyIncomePopulation = float(input('Enter the average daily income of the population:-')) / 100
reportedCases=674
name="Africa"
days=28
totalHospitalbeds=1380614
avgDailyIncomeInUsd=1.5
avgDailyIncomePopulation=0.65
| 40.757009 | 127 | 0.698234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 881 | 0.202018 |
dea196647fceafaeec0ee9058ac3907d2c76082c | 3,752 | py | Python | pys3crypto.py | elitest/pys3crypto | 9dfef5935ff1c663b8641eaa052e778cdf34a565 | ["MIT"] | null | null | null | pys3crypto.py | elitest/pys3crypto | 9dfef5935ff1c663b8641eaa052e778cdf34a565 | ["MIT"] | null | null | null | pys3crypto.py | elitest/pys3crypto | 9dfef5935ff1c663b8641eaa052e778cdf34a565 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Original Author @elitest
# This script uses boto3 to perform client side decryption
# of data encryption keys and associated files
# and encryption in ways compatible with the AWS SDKs
# This support is not available in boto3 at this time
# Wishlist:
# Currently only tested with KMS managed symmetric keys.
# Error checking
import boto3, argparse, base64, json
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
# Build the parser
argparser = argparse.ArgumentParser(description='Prints info about deleted items in s3 buckets and helps you download them.')
argparser.add_argument('bucket', help='The bucket that contains the file.')
argparser.add_argument('region', help='The region the CMK is in.')
argparser.add_argument('key', help='The name of the file that you would like to download and decrypt.')
argparser.add_argument('--profile', default='default', help='The profile name in ~/.aws/credentials')
args = argparser.parse_args()
# Set variables from arguments
bucket = args.bucket
region = args.region
profile = args.profile
key = args.key
# Setup AWS clients
boto3.setup_default_session(profile_name=profile, region_name=region)
s3_client = boto3.client('s3')
response = s3_client.get_object(Bucket=bucket,Key=key)
kms_client = boto3.client('kms')
# This function decrypts the encrypted key associated with the file
# and decrypts it
def decrypt_dek(metadata):
# Encrypted key
keyV2 = base64.b64decode(metadata['Metadata']['x-amz-key-v2'])
# Key ARN
context = json.loads(metadata['Metadata']['x-amz-matdesc'])
# This decrypts the DEK using KMS
dek = kms_client.decrypt(CiphertextBlob=keyV2, EncryptionContext=context)
return dek['Plaintext']
def decrypt(key, algo, iv, ciphertext, tag):
if algo == 'AES/GCM/NoPadding':
# Construct a Cipher object, with the key, iv, and additionally the
# GCM tag used for authenticating the message.
decryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv, tag),
backend=default_backend()
).decryptor()
# Decryption gets us the authenticated plaintext.
# If the tag does not match an InvalidTag exception will be raised.
return decryptor.update(ciphertext) + decryptor.finalize()
elif algo == 'AES/CBC/PKCS5Padding':
# Construct a Cipher object, with the key, iv
decryptor = Cipher(
algorithms.AES(key),
modes.CBC(iv),
backend=default_backend()
).decryptor()
# Decryption gets us the plaintext.
data = decryptor.update(ciphertext) + decryptor.finalize()
# Apparently PKCS5 and 7 are basically the same for our purposes
unpadder = padding.PKCS7(128).unpadder()
return unpadder.update(data) + unpadder.finalize()
else:
print('Unknown algorithm or padding.')
exit()
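# Editor-added, commented-out local sanity check for decrypt() (no AWS needed):
#   import os
#   k, iv_ = os.urandom(32), os.urandom(12)
#   enc = Cipher(algorithms.AES(k), modes.GCM(iv_), backend=default_backend()).encryptor()
#   ct = enc.update(b'hello') + enc.finalize()
#   assert decrypt(k, 'AES/GCM/NoPadding', iv_, ct, enc.tag) == b'hello'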
# Decrypt the DEK
plaintextDek = decrypt_dek(response)
# Get the encrypted body
# Haven't tested with large files
body=response['Body'].read()
# We need the content length for GCM to build the tag
contentLen = response['Metadata']['x-amz-unencrypted-content-length']
# IV
iv = base64.b64decode(response['Metadata']['x-amz-iv'])
# Algorithm
alg = response['Metadata']['x-amz-cek-alg']
# This splits the tag and data from the body if GCM
if alg == 'AES/GCM/NoPadding':
data = body[0:int(contentLen)]
    tagLen = response['Metadata']['x-amz-tag-len']  # tag length in *bits* (typically 128)
    tag = body[int(contentLen):int(contentLen) + int(tagLen) // 8]  # the GCM tag is the trailing bytes; the original slice [contentLen:tagLen] was empty
else:
data = body[:]
tag = ''
# Decrypt the file
plaintext = decrypt(plaintextDek,alg,iv,data,tag)
print(plaintext)
| 36.427184 | 125 | 0.709488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,703 | 0.453891 |
dea3d4b6a9500edd440cd83df9ceb44f4b4e36eb | 1,777 | py | Python | openTEL_11_19/presentation_figures/tm112_utils.py | psychemedia/presentations | a4d7058b1f716c59a89d0bcd1390ead75d769d43 | ["Apache-2.0"] | null | null | null | openTEL_11_19/presentation_figures/tm112_utils.py | psychemedia/presentations | a4d7058b1f716c59a89d0bcd1390ead75d769d43 | ["Apache-2.0"] | null | null | null | openTEL_11_19/presentation_figures/tm112_utils.py | psychemedia/presentations | a4d7058b1f716c59a89d0bcd1390ead75d769d43 | ["Apache-2.0"] | 1 | 2019-11-05T10:35:40.000Z | 2019-11-05T10:35:40.000Z |
from IPython.display import display, HTML  # display() is called below but was not imported in the original
#TO DO - the nested table does not display?
#Also, the nested execution seems to take a long time to run?
#Profile it to see where I'm going wrong!
def obj_display(v, nest=False, style=True):
    """Generate a simple visualisation of an object's structure."""
    def nested(v):
        if nest:
            return obj_display(v, style=False)
        return v
html = '''<style type='text/css'>
.vartable {{
border-style: solid !important;
border-width: 2px !important;
}}
.vartable td {{
border-style: solid !important;
border-width: 2px !important;
text-align: left;
}}
</style>''' if style else ''
if isinstance(v, int) or isinstance(v, str):
html = html+'''<table class='vartable'><tr><td>ID:<br/>{v_id}</td>
<td>TYPE:<br/>{v_typ}</td></tr>
<tr><td colspan=2>VALUE:<br/>{v_val}</td></tr></table>'''
html = html.format(v_id = id(v), v_typ = type(v).__name__, v_val=v)
elif isinstance(v, list) or isinstance(v, dict):
html = html+'''<table class='vartable'><tr><td>ID:<br/>{v_id}</td>
<td>TYPE:<br/>{v_typ}</td></tr>
<tr><td colspan=2>VALUE:<br/>{v_val}</td></tr></table>'''
if isinstance(v, dict):
            v_items = ''.join(['<td>[{i}]: <strong>{v}</strong></td>'.format(i=i, v=nested(v_item)) for i, v_item in v.items()])
else:
v_items = ''.join(['<td>[{i}]: <strong>{v}</strong></td>'.format(i=i, v= nested(v_item) ) for i, v_item in enumerate(v)])
v_val='<table><tr>{v_items}</tr></table>'.format(v_items = v_items)
html = html.format(v_id = id(v), v_typ = type(v).__name__, v_val=v_val)
display(HTML(html))
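# Rough usage sketch (in a notebook cell; each call renders an HTML table):
#   obj_display(42)                    # single value boxed with its id and type
#   obj_display([1, 2, 3], nest=True)  # list with each element rendered as a nested box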
| 38.630435 | 133 | 0.563309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 936 | 0.52673 |
dea4ec2e4ccc51ad602efcb7e648252790b6ff2d | 984 | py | Python | src/pages/artists_main.py | haoweini/spotify_stream | 83fd13d4da9fb54a595611d4c0cd594eb5b8a9fd | [
"MIT"
]
| null | null | null | src/pages/artists_main.py | haoweini/spotify_stream | 83fd13d4da9fb54a595611d4c0cd594eb5b8a9fd | [
"MIT"
]
| null | null | null | src/pages/artists_main.py | haoweini/spotify_stream | 83fd13d4da9fb54a595611d4c0cd594eb5b8a9fd | [
"MIT"
]
| null | null | null | import streamlit as st
import numpy as np
import pandas as pd
from data.get_saved_library import get_saved_library, display_user_name, display_user_pic
from data.get_recently_played import get_recently_played
from data.get_top_artists import get_top_artists, get_related_artists, get_top_artists_tracks_features, NormalizeData, draw_feature_plot
from data.image_url import path_to_image_html
from PIL import Image
import requests
from io import BytesIO
from IPython.core.display import HTML
import streamlit.components.v1 as components
import plotly.express as px
from subpage import SubPage
from pages import welcome, artists_top_saved, artists_select_random
# @st.cache
def app():
app_sub = SubPage()
app_sub.add_page("Select An Artist", artists_select_random.app)
app_sub.add_page("Your Top Artists", artists_top_saved.app)
app_sub.run()
#track_urls = list(df_top_artists['url'])
| 31.741935 | 136 | 0.816057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.089431 |
dea61adbc856f28630be94c795fc850aa45a1770 | 595 | py | Python | Leetcode/res/Longest Common Prefix/2.py | AllanNozomu/CompetitiveProgramming | ac560ab5784d2e2861016434a97e6dcc44e26dc8 | [
"MIT"
]
| 1 | 2022-03-04T16:06:41.000Z | 2022-03-04T16:06:41.000Z | Leetcode/res/Longest Common Prefix/2.py | AllanNozomu/CompetitiveProgramming | ac560ab5784d2e2861016434a97e6dcc44e26dc8 | [
"MIT"
]
| null | null | null | Leetcode/res/Longest Common Prefix/2.py | AllanNozomu/CompetitiveProgramming | ac560ab5784d2e2861016434a97e6dcc44e26dc8 | [
"MIT"
]
| null | null | null | # Author: allannozomu
# Runtime: 56 ms
# Memory: 13 MB
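# Approach: column-by-column scan up to the length of the shortest string,
# stopping at the first index where two strings disagree. O(S) character
# comparisons in total, O(1) extra space beyond the returned prefix.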
from typing import List

class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
res = ""
max_length = -1
for s in strs:
if max_length < 0:
max_length = len(s)
else:
max_length = min(len(s), max_length)
for i in range(max_length):
c = ''
for s in strs:
if c == '':
c = s[i]
elif c != s[i]:
return res
res += c
return res
| 24.791667 | 58 | 0.403361 | 526 | 0.884034 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.097479 |
dea6d3637da9acba0c0473fcafaedf9d82d434e7 | 884 | py | Python | tests/factory_fixtures/contact_number.py | donovan-PNW/dwellinglybackend | 448df61f6ea81f00dde7dab751f8b2106f0eb7b1 | [
"MIT"
]
| null | null | null | tests/factory_fixtures/contact_number.py | donovan-PNW/dwellinglybackend | 448df61f6ea81f00dde7dab751f8b2106f0eb7b1 | [
"MIT"
]
| 56 | 2021-08-05T02:49:38.000Z | 2022-03-31T19:35:13.000Z | tests/factory_fixtures/contact_number.py | donovan-PNW/dwellinglybackend | 448df61f6ea81f00dde7dab751f8b2106f0eb7b1 | [
"MIT"
]
| null | null | null | import pytest
from models.contact_number import ContactNumberModel
@pytest.fixture
def contact_number_attributes(faker):
def _contact_number_attributes():
return {
"number": faker.phone_number(),
"numtype": faker.random_element(("home", "work", "mobile")),
"extension": faker.bothify(text="?###"),
}
yield _contact_number_attributes()
@pytest.fixture
def create_contact_number(contact_number_attributes, create_emergency_contact):
def _create_contact_number(emergency_contact=None):
if not emergency_contact:
emergency_contact = create_emergency_contact()
contact_number = ContactNumberModel(
**contact_number_attributes, emergency_contact_id=emergency_contact.id
)
contact_number.save_to_db()
return contact_number
yield _create_contact_number
| 29.466667 | 82 | 0.70362 | 0 | 0 | 779 | 0.881222 | 811 | 0.917421 | 0 | 0 | 54 | 0.061086 |
dea6d4847a9416f809c2342943ab00ca26b745bd | 835 | py | Python | tests/test_seq_comparision.py | krzjoa/sciquence | 6a5f758c757200fffeb0fdc9206462f1f89e2444 | [
"MIT"
]
| 8 | 2017-10-23T17:59:35.000Z | 2021-05-10T03:01:30.000Z | tests/test_seq_comparision.py | krzjoa/sciquence | 6a5f758c757200fffeb0fdc9206462f1f89e2444 | [
"MIT"
]
| 2 | 2019-08-25T19:24:12.000Z | 2019-09-05T12:16:10.000Z | tests/test_seq_comparision.py | krzjoa/sciquence | 6a5f758c757200fffeb0fdc9206462f1f89e2444 | [
"MIT"
]
| 2 | 2018-02-28T09:47:53.000Z | 2019-08-25T19:24:16.000Z | import unittest
import numpy as np
from sciquence.sequences import *
class TestSequences(unittest.TestCase):
def test_seq_equals(self):
x = [np.array([1, 2, 3]), np.array([4, 5, 6])]
y = [np.array([1, 2, 7]), np.array([4, 5, 9])]
assert lseq_equal(x, x)
assert not lseq_equal(x, y)
def test_seq(self):
x = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0])
expected = [np.array([1, 1, 1]), np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([1, 1, 1, 1, 1, 1, 1, 1, 1]),
np.array([0, 0, 0, 0]), np.array([1, 1, 1, 1]), np.array([0, 0, 0])]
assert lseq_equal(seq(x), expected)
if __name__ == '__main__':
unittest.main()
| 30.925926 | 91 | 0.475449 | 714 | 0.85509 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.011976 |
dea6f4a43ec33dab31441d90f5221fa29eeb9456 | 8,191 | py | Python | analysis_guis/code_test.py | Sepidak/spikeGUI | 25ae60160308c0a34e7180f3e39a1c4dc6aad708 | [
"MIT"
]
| null | null | null | analysis_guis/code_test.py | Sepidak/spikeGUI | 25ae60160308c0a34e7180f3e39a1c4dc6aad708 | [
"MIT"
]
| 3 | 2021-08-09T21:51:41.000Z | 2021-08-09T21:51:45.000Z | analysis_guis/code_test.py | Sepidak/spikeGUI | 25ae60160308c0a34e7180f3e39a1c4dc6aad708 | [
"MIT"
]
| 3 | 2021-10-16T14:07:59.000Z | 2021-10-16T17:09:03.000Z | # -*- coding: utf-8 -*-
"""
Test script: polar plots of trial-averaged spiking frequency against displacement and velocity
"""
# import initExample ## Add path to library (just for examples; you do not need this)
import numpy as np
import pickle as p
import pandas as pd
from analysis_guis.dialogs.rotation_filter import RotationFilter
from analysis_guis.dialogs import config_dialog
from analysis_guis.dialogs.info_dialog import InfoDialog
from rotation_analysis.analysis.probe.probe_io.probe_io import TriggerTraceIo, BonsaiIo, IgorIo
from PyQt5.QtWidgets import QApplication
from datetime import datetime
from dateutil import parser
import analysis_guis.calc_functions as cfcn
import analysis_guis.rotational_analysis as rot
import matplotlib.pyplot as plt
from pyphys.pyphys.pyphys import PxpParser
from collections import OrderedDict
import analysis_guis.common_func as cf
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
date2sec = lambda t: np.sum([3600 * t.hour, 60 * t.minute, t.second])
trig_count = lambda data, cond: len(np.where(np.diff(data[cond]['cpg_ttlStim']) > 1)[0]) + 1
get_bin_index = lambda x, y: next((i for i in range(len(y)) if x < y[i]), len(y)) - 1
def setup_polar_spike_freq(wvPara, tSpike, sFreq, b_sz, is_pos):
    '''
    :param wvPara: waveform parameter arrays for the current filter type
    :param tSpike: spike time arrays for the current filter type
    :param sFreq: sampling frequency (Hz)
    :param b_sz: kinematic bin size
    :param is_pos: True to bin by displacement, False to bin by velocity
    :return: the bin mid-points and the binned spiking frequencies (bin x trial)
    '''
    # memory allocation
    ind_inv, xi_bin_tot = np.empty(2, dtype=object), np.empty(2, dtype=object)
# calculates the bin times
xi_bin_tot[0], t_bin, t_phase = rot.calc_wave_kinematic_times(wvPara[0][0], b_sz, sFreq, is_pos, yDir=-1)
xi_bin_tot[1], dt_bin = -xi_bin_tot[0], np.diff(t_bin)
# determines the bin indices
for i in range(2):
xi_mid, ind_inv[i] = np.unique(0.5 * (xi_bin_tot[i][:-1] + xi_bin_tot[i][1:]), return_inverse=True)
# memory allocation
yDir = wvPara[0]['yDir']
n_trial, n_bin = len(yDir), len(xi_mid)
tSp_bin = np.zeros((n_bin, n_trial))
    # accumulates each trial's binned spike counts as frequencies (averaged over the 2 phases)
for i_trial in range(n_trial):
# combines the time spikes in the order that the CW/CCW phases occur
ii = int(yDir[i_trial] == 1)
tSp = np.hstack((tSpike[1 + ii][i_trial], tSpike[2 - ii][i_trial] + t_phase))
# appends the times
t_hist = np.histogram(tSp, bins=t_bin)
for j in range(len(t_hist[0])):
i_bin = ind_inv[ii][j]
tSp_bin[i_bin, i_trial] += t_hist[0][j] / (2.0 * dt_bin[j])
# returns the final bin
return xi_mid, tSp_bin
## Runs the polar spike-frequency test plot when executed as a script.
if __name__ == '__main__':
# loads the data for testing
with open('C:\\Work\\EPhys\\Code\\Sepi\\wvPara.p', 'rb') as fp:
wvPara = p.load(fp)
tSpike = p.load(fp)
    # analysis parameters
sFreq = 30000
kb_sz = 10
title_str = ['Displacement', 'Velocity']
lg_str = ['Type 1', 'Type 2', 'Type 3']
# memory allocation
n_filt = len(wvPara)
c = cf.get_plot_col(n_filt)
    # initialises the figure and the subplot handle array
fig = plt.figure()
ax = np.empty(2, dtype=object)
    # creates the polar plot for each kinematic type
for i_type in range(2):
# sets up the spiking frequency arrays
tSp_bin = np.empty(n_filt, dtype=object)
for i_filt in range(n_filt):
xi_mid, tSp_bin[i_filt] = setup_polar_spike_freq(wvPara[i_filt], tSpike[i_filt], sFreq, kb_sz, i_type==0)
        # calculates the polar angles and tick locations from the bin mid-points
xi_min = xi_mid[0] - np.diff(xi_mid[0:2])[0]/2
theta = np.pi * (1 - (xi_mid - xi_min) / np.abs(2 * xi_min))
x_tick = np.linspace(xi_min, -xi_min, 7 + 2 * i_type)
# creates the subplot
ax[i_type] = plt.subplot(1, 2, i_type + 1, projection='polar')
ax[i_type].set_thetamin(0)
ax[i_type].set_thetamax(180)
# creates the radial plots for each of the filter types
h_plt = []
for i_filt in range(n_filt):
# creates the plot and resets the labels
tSp_mn = np.mean(tSp_bin[i_filt], axis=1)
h_plt.append(ax[i_type].plot(theta, tSp_mn, 'o-', c=c[i_filt]))
# sets the axis properties (first filter only)
if i_filt == 0:
ax[i_type].set_title(title_str[i_type])
ax[i_type].set_xticks(np.pi * (x_tick - xi_min) / np.abs(2 * xi_min))
ax[i_type].set_xticklabels([str(int(np.round(-x))) for x in x_tick])
# sets the legend (first subplot only)
if i_type == 0:
ax[i_type].legend(lg_str, loc=1)
# determines the overall radial maximum (over all subplots) and resets the radial ticks
y_max = [max(x.get_ylim()) for x in ax]
i_max = np.argmax(y_max)
dy = np.diff(ax[i_max].get_yticks())[0]
y_max_tot = dy * (np.floor(y_max[i_max] / dy) + 1)
# resets the axis radial limits
for x in ax:
x.set_ylim(0, y_max_tot)
# shows the plot
plt.show()
a = 1
# app = QApplication([])
# h_obj = RotationFilter(data)
# h_obj = InfoDialog(data)
# a = 1
# #
# igor_waveforms_path = 'G:\\Seagate\\Work\\EPhys\\Data\\CA326_C_day3\\Igor\\CA326_C_day3'
# bonsai_metadata_path = 'G:\\Seagate\\Work\\EPhys\\Data\\CA326_C_day3\\Bonsai\\CA326_C_day3_all.csv'
#
# #
# file_time_key = 'FileTime'
# bonsai_io = BonsaiIo(bonsai_metadata_path)
#
#
# # determines the indices of the experiment condition triel group
# t_bonsai = [parser.parse(x) for x in bonsai_io.data['Timestamp']]
# t_bonsai_sec = np.array([date2sec(x) for x in t_bonsai])
# d2t_bonsai = np.diff(t_bonsai_sec, 2)
# grp_lim = grp_lim = [-1] + list(np.where(d2t_bonsai > 60)[0] + 1) + [len(d2t_bonsai) + 1]
# ind_grp = [np.arange(grp_lim[x] + 1, grp_lim[x + 1] + 1) for x in range(len(grp_lim) - 1)]
#
# # sets the time, name and trigger count from each of these groups
# t_bonsai_grp = [t_bonsai_sec[x[0]] for x in ind_grp]
# c_bonsai_grp = [bonsai_io.data['Condition'][x[0]] for x in ind_grp]
# n_trig_bonsai = [len(x) for x in ind_grp]
#
# # determines the feasible variables from the igor data file
# igor_data = PxpParser(igor_waveforms_path)
# var_keys = list(igor_data.data.keys())
# is_ok = ['command' in igor_data.data[x].keys() if isinstance(igor_data.data[x], OrderedDict) else False for x in var_keys]
#
# # sets the name, time and trigger count from each of the igor trial groups
# c_igor_grp = [y for x, y in zip(is_ok, var_keys) if x]
# t_igor_grp, t_igor_str, n_trig_igor = [], [], [trig_count(igor_data.data, x) for x in c_igor_grp]
# for ck in c_igor_grp:
# t_igor_str_nw = igor_data.data[ck]['vars'][file_time_key][0]
# t_igor_str.append(t_igor_str_nw)
# t_igor_grp.append(date2sec(datetime.strptime(t_igor_str_nw, '%H:%M:%S').time()))
#
# # calculates the point-wise differences between the trial timer and trigger count
# dt_grp = cfcn.calc_pointwise_diff(t_igor_grp, t_bonsai_grp)
# dn_grp = cfcn.calc_pointwise_diff(n_trig_igor, n_trig_bonsai)
#
# # ensures that only groups that have equal trigger counts are matched
# dt_max = np.max(dt_grp) + 1
# dt_grp[dn_grp > 0] = dt_max
#
# #
# iter = 0
# while 1:
# i2b = np.argmin(dt_grp, axis=1)
# i2b_uniq, ni2b = np.unique(i2b, return_counts=True)
#
# ind_multi = np.where(ni2b > 1)[0]
# if len(ind_multi):
# if iter == 0:
# for ii in ind_multi:
# jj = np.where(i2b == i2b_uniq[ii])[0]
#
# imn = np.argmin(dt_grp[jj, i2b[ii]])
# for kk in jj[jj != jj[imn]]:
# dt_grp[kk, i2b[ii]] = dt_max
# else:
# pass
# else:
# break
#
# # sets the igor-to-bonsai name groupings
# i2b_key, x = {}, np.array(c_igor_grp)[i2b]
# for cc in c_bonsai_grp:
# if cc not in i2b_key:
# jj = np.where([x == cc for x in c_bonsai_grp])[0]
# i2b_key[cc] = x[jj]
| 37.401826 | 129 | 0.605421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,975 | 0.485289 |
dea94f5c042a5187e7e181584aadcbc88251aee3 | 2,852 | py | Python | att/gm.py | thexdesk/foiamail | d135bbb5f52d5a31ca8ce3450bd0035f94a182f5 | [
"MIT"
]
| null | null | null | att/gm.py | thexdesk/foiamail | d135bbb5f52d5a31ca8ce3450bd0035f94a182f5 | [
"MIT"
]
| null | null | null | att/gm.py | thexdesk/foiamail | d135bbb5f52d5a31ca8ce3450bd0035f94a182f5 | [
"MIT"
]
| null | null | null | """
downloads gmail atts
"""
import base64, os
from auth.auth import get_service
from msg.label import agencies, get_atts
from report.response import get_threads, get_status
from att.drive import get_or_create_atts_folder,\
check_if_drive, make_drive_folder, upload_to_drive
### START CONFIG ###
buffer_path = '/tmp/'
### END CONFIG ###
gmail_service = get_service(type='gmail')
def roll_thru():
"""
controller function rolls through each agency:
    - checks if already filed in Drive
    - checks if labeled done
    ... if not yet filed and the agency is done:
    - makes Drive folder
    - downloads buffer file to this server
    - uploads file to Drive folder
    - deletes buffer file
TODO: optimize by check_if_drive first before getting threads
"""
atts_drive_folder = get_or_create_atts_folder()
for agency in agencies:
try:
threads = get_threads(agency)
if not check_if_drive(agency.replace("'","")) and check_if_done(threads,agency): # no apostrophes allowed
# only proceed if agency is done, has atts and not already in drive
atts = get_agency_atts(threads)
if atts:
print agency
drive_folder = make_drive_folder(agency.replace("'",""),atts_drive_folder) # no apostrophes allowed
for att in atts:
path = download_buffer_file(att)
upload_to_drive(att, drive_folder)
os.remove(path)
else:
print 'skipping', agency
except Exception, e:
print agency,'failed',e
def check_if_done(threads,agency):
"""
checks if this agency's threads
include any messages labeled 'done'
"""
return get_status(threads,agency) == 'done'
def get_agency_atts(threads):
"""
given a list of threads,
iterates through messages,
finds attachments
and appends att data to atts list
"""
atts = []
for thread in threads:
for msg in gmail_service.users().threads().get(\
id=thread['id'],userId='me').execute().get('messages'):
for att in get_atts(msg):
atts.append({'att_id':att['body']['attachmentId'],'msg_id':msg['id'],'file_name':att['filename']})
return atts
def download_buffer_file(att):
"""
downloads specified att to
buffer file
and returns path
"""
attachment = gmail_service.users().messages().attachments().get(\
id=att['att_id'],messageId=att['msg_id'],userId='me').execute()
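    # Gmail returns attachment bodies base64url-encoded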
file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))
buffer_file_path = buffer_path + att['file_name']
buffer_file = open(buffer_file_path,'w')
buffer_file.write(file_data)
buffer_file.close()
return buffer_file_path
| 32.781609 | 119 | 0.630785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,002 | 0.351332 |
dea9df41450058a28e28c535ce8960f8b770dc38 | 1,147 | py | Python | pex/pip/download_observer.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | [
"Apache-2.0"
]
| null | null | null | pex/pip/download_observer.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | [
"Apache-2.0"
]
| null | null | null | pex/pip/download_observer.py | sthagen/pantsbuild-pex | bffe6c3641b809cd3b20adbc7fdb2cf7e5f54309 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from pex.pip.log_analyzer import LogAnalyzer
from pex.typing import TYPE_CHECKING, Generic
if TYPE_CHECKING:
from typing import Iterable, Mapping, Optional, Text
import attr # vendor:skip
else:
from pex.third_party import attr
@attr.s(frozen=True)
class Patch(object):
code = attr.ib(default=None) # type: Optional[Text]
args = attr.ib(default=()) # type: Iterable[str]
env = attr.ib(factory=dict) # type: Mapping[str, str]
if TYPE_CHECKING:
from typing import TypeVar
_L = TypeVar("_L", bound=LogAnalyzer)
class DownloadObserver(Generic["_L"]):
def __init__(
self,
analyzer, # type: _L
patch=Patch(), # type: Patch
):
# type: (...) -> None
self._analyzer = analyzer
self._patch = patch
@property
def analyzer(self):
# type: () -> _L
return self._analyzer
@property
def patch(self):
# type: () -> Patch
return self._patch
| 23.408163 | 66 | 0.646905 | 612 | 0.533566 | 0 | 0 | 384 | 0.334786 | 0 | 0 | 297 | 0.258936 |
deaa58029f2b553b02dbaa81816ff5ce9e456f8a | 3,424 | py | Python | dispatchlib/types.py | ryxcommar/dispatchlib | bf3b6e5617af41579b240a7733cd9cc86a8a38ed | [
"MIT"
]
| null | null | null | dispatchlib/types.py | ryxcommar/dispatchlib | bf3b6e5617af41579b240a7733cd9cc86a8a38ed | [
"MIT"
]
| null | null | null | dispatchlib/types.py | ryxcommar/dispatchlib | bf3b6e5617af41579b240a7733cd9cc86a8a38ed | [
"MIT"
]
| null | null | null | from typing import Any
from typing import Callable
from typing import Iterable
def _create_getter_and_setter(name: str):
def getter(self):
return getattr(self.func, name)
getter.__name__ = name
prop = property(getter)
def setter(self, value):
setattr(self.func, name, value)
setter.__name__ = name
prop = prop.setter(setter)
return prop
class _PrioritySortable:
priority: int
def __init__(self, priority: int = 100):
self.priority = priority
def __lt__(self, other): return self.priority < other.priority
def __le__(self, other): return self.priority <= other.priority
def __gt__(self, other): return self.priority > other.priority
def __ge__(self, other): return self.priority >= other.priority
def __eq__(self, other): return self.priority == other.priority
class FunctionMixin(object):
"""Mixin for making classes look like functions.
    This class isn't too fancy: if you store random non-standard attributes
    inside your function then they are not directly accessible at the top-level
    of the subclass. The attributes this mixin provides are pre-defined.
"""
def __init__(self, func: callable):
self.func = func
for name in [
'__annotations__',
'__closure__',
'__code__',
'__defaults__',
'__kwdefaults__',
'__name__'
]:
locals()[name] = _create_getter_and_setter(name)
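    # NOTE: this loop runs in the class body, where locals() is the class
    # namespace dict (a CPython detail), so each dunder above becomes a
    # property proxying to the wrapped function.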
@property
def __funcdoc__(self):
return self.func.__doc__
@__funcdoc__.setter
def __funcdoc__(self, value):
self.func.__doc__ = value
@property
def __globals__(self):
return self.func.__globals__
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
class NextDispatch(Exception):
pass
class DispatcherType(type):
def __instancecheck__(cls, instance):
return (
callable(instance)
and hasattr(instance, 'register')
and hasattr(instance, 'registry')
and callable(instance.register)
and hasattr(instance.registry, '__iter__')
)
class Dispatcher(FunctionMixin, metaclass=DispatcherType):
dispatch: callable
register: callable
registry: Iterable
def __new__(cls, func: callable = None, metadispatcher: callable = None):
from .core import dispatch
return dispatch(func, metadispatcher=metadispatcher)
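    # NOTE: __new__ returns whatever dispatch() builds rather than a Dispatcher
    # instance, so the class acts as a factory; isinstance() checks still work
    # because DispatcherType defines a structural __instancecheck__.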
def __call__(self, *args, **kwargs):
raise NotImplementedError
class MetaDispatcher(FunctionMixin, metaclass=DispatcherType):
dispatch: callable
register: callable
registry: Iterable
def __new__(cls, func: callable = None):
from .core import metadispatch
return metadispatch(func)
def __call__(self, *args, **kwargs):
raise NotImplementedError
class DispatchedCallable(FunctionMixin, _PrioritySortable):
def __init__(
self,
func: callable,
validate: Callable[[Any], bool],
priority: int = 100
):
self.validate = validate
FunctionMixin.__init__(self, func)
_PrioritySortable.__init__(self, priority)
def __repr__(self):
content = ', '.join(
f'{attr}={getattr(self, attr)!r}'
for attr in ('func', 'validate', 'priority')
)
return f'{self.__class__.__name__}({content})'
| 26.96063 | 79 | 0.651285 | 3,018 | 0.881425 | 0 | 0 | 229 | 0.066881 | 0 | 0 | 501 | 0.14632 |
deabe0363fc1143c6a3fe5cc62b534d0a3e480ca | 2,096 | py | Python | pbpstats/data_loader/nba_possession_loader.py | pauldevos/pbpstats | 71c0b5e2bd45d0ca031646c70cd1c1f30c6a7152 | [
"MIT"
]
| null | null | null | pbpstats/data_loader/nba_possession_loader.py | pauldevos/pbpstats | 71c0b5e2bd45d0ca031646c70cd1c1f30c6a7152 | [
"MIT"
]
| null | null | null | pbpstats/data_loader/nba_possession_loader.py | pauldevos/pbpstats | 71c0b5e2bd45d0ca031646c70cd1c1f30c6a7152 | [
"MIT"
]
| null | null | null | from pbpstats.resources.enhanced_pbp import StartOfPeriod
class NbaPossessionLoader(object):
"""
Class for shared methods between :obj:`~pbpstats.data_loader.data_nba.possessions_loader.DataNbaPossessionLoader`
and :obj:`~pbpstats.data_loader.stats_nba.possessions_loader.StatsNbaPossessionLoader`
Both :obj:`~pbpstats.data_loader.data_nba.possessions_loader.DataNbaPossessionLoader`
and :obj:`~pbpstats.data_loader.stats_nba.possessions_loader.StatsNbaPossessionLoader` should inherit from this class
This class should not be instantiated directly
"""
def _split_events_by_possession(self):
"""
splits events by possession
:returns: list of lists with events for each possession
"""
events = []
possession_events = []
for event in self.events:
possession_events.append(event)
if event.is_possession_ending_event:
events.append(possession_events)
possession_events = []
return events
def _add_extra_attrs_to_all_possessions(self):
"""
adds possession number and next and previous possession
"""
number = 1
for i, possession in enumerate(self.items):
if i == 0 and i == len(self.items) - 1:
possession.previous_possession = None
possession.next_possession = None
elif isinstance(possession.events[0], StartOfPeriod) or i == 0:
possession.previous_possession = None
possession.next_possession = self.items[i + 1]
number = 1
elif (
i == len(self.items) - 1
or possession.period != self.items[i + 1].period
):
possession.previous_possession = self.items[i - 1]
possession.next_possession = None
else:
possession.previous_possession = self.items[i - 1]
possession.next_possession = self.items[i + 1]
possession.number = number
number += 1
| 39.54717 | 121 | 0.624046 | 2,035 | 0.970897 | 0 | 0 | 0 | 0 | 0 | 0 | 679 | 0.32395 |
dead01ec590550c2d98b328ed72222f137d3778b | 7,033 | py | Python | vmware_nsx_tempest/tests/nsxv/api/base_provider.py | gravity-tak/vmware-nsx-tempest | 3a1007d401c471d989345bb5a3f9769f84bd4ac6 | [
"Apache-2.0"
]
| null | null | null | vmware_nsx_tempest/tests/nsxv/api/base_provider.py | gravity-tak/vmware-nsx-tempest | 3a1007d401c471d989345bb5a3f9769f84bd4ac6 | [
"Apache-2.0"
]
| null | null | null | vmware_nsx_tempest/tests/nsxv/api/base_provider.py | gravity-tak/vmware-nsx-tempest | 3a1007d401c471d989345bb5a3f9769f84bd4ac6 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions
from tempest.api.network import base
from tempest import config
from tempest import test
CONF = config.CONF
class BaseAdminNetworkTest(base.BaseAdminNetworkTest):
# NOTE(akang): This class inherits from BaseAdminNetworkTest.
# By default client is cls.client, but for provider network,
# the client is admin_client. The test class should pass
# client=self.admin_client, if it wants to create provider
# network/subnet.
@classmethod
def skip_checks(cls):
super(BaseAdminNetworkTest, cls).skip_checks()
if not test.is_extension_enabled('provider', 'network'):
msg = "Network Provider Extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(BaseAdminNetworkTest, cls).resource_setup()
cls.admin_netwk_info = []
@classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
for netwk_info in cls.admin_netwk_info:
net_client, network = netwk_info
try:
cls._try_delete_resource(net_client.delete_network,
network['id'])
except Exception:
pass
super(BaseAdminNetworkTest, cls).resource_cleanup()
@classmethod
def create_network(cls, network_name=None, client=None,
**kwargs):
net_client = client if client else cls.admin_networks_client
network_name = network_name or data_utils.rand_name('ADM-network-')
post_body = {'name': network_name}
post_body.update(kwargs)
body = net_client.create_network(**post_body)
network = body['network']
cls.admin_netwk_info.append([net_client, network])
return body
@classmethod
def update_network(cls, network_id, client=None, **kwargs):
net_client = client if client else cls.admin_networks_client
return net_client.update_network(network_id, **kwargs)
@classmethod
def delete_network(cls, network_id, client=None):
net_client = client if client else cls.admin_networks_client
return net_client.delete_network(network_id)
@classmethod
def show_network(cls, network_id, client=None, **kwargs):
net_client = client if client else cls.admin_networks_client
return net_client.show_network(network_id, **kwargs)
@classmethod
def list_networks(cls, client=None, **kwargs):
net_client = client if client else cls.admin_networks_client
return net_client.list_networks(**kwargs)
@classmethod
def create_subnet(cls, network, client=None,
gateway='', cidr=None, mask_bits=None,
ip_version=None, cidr_offset=0, **kwargs):
ip_version = (ip_version if ip_version is not None
else cls._ip_version)
net_client = client if client else cls.admin_subnets_client
post_body = get_subnet_create_options(
network['id'], ip_version,
gateway=gateway, cidr=cidr, cidr_offset=cidr_offset,
mask_bits=mask_bits, **kwargs)
return net_client.create_subnet(**post_body)
@classmethod
def update_subnet(cls, subnet_id, client=None, **kwargs):
net_client = client if client else cls.admin_subnets_client
return net_client.update_subnet(subnet_id, **kwargs)
@classmethod
def delete_subnet(cls, subnet_id, client=None):
net_client = client if client else cls.admin_subnets_client
return net_client.delete_subnet(subnet_id)
@classmethod
def show_subnet(cls, subnet_id, client=None, **kwargs):
net_client = client if client else cls.admin_subnets_client
return net_client.show_subnet(subnet_id, **kwargs)
@classmethod
def list_subnets(cls, client=None, **kwargs):
net_client = client if client else cls.admin_subnets_client
return net_client.list_subnets(**kwargs)
# add other create methods, i.e. security-group, port, floatingip
# if needed.
def get_subnet_create_options(network_id, ip_version=4,
gateway='', cidr=None, mask_bits=None,
num_subnet=1, gateway_offset=1, cidr_offset=0,
**kwargs):
"""When cidr_offset>0 it request only one subnet-options:
subnet = get_subnet_create_options('abcdefg', 4, num_subnet=4)[3]
subnet = get_subnet_create_options('abcdefg', 4, cidr_offset=3)
"""
gateway_not_set = (gateway == '')
if ip_version == 4:
cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = (
cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
# Find a cidr that is not in use yet and create a subnet with it
subnet_list = []
if cidr_offset > 0:
num_subnet = cidr_offset + 1
for subnet_cidr in cidr.subnet(mask_bits):
if gateway_not_set:
gateway_ip = gateway or (
str(netaddr.IPAddress(subnet_cidr) + gateway_offset))
else:
gateway_ip = gateway
try:
subnet_body = dict(
network_id=network_id,
cidr=str(subnet_cidr),
ip_version=ip_version,
gateway_ip=gateway_ip,
**kwargs)
if num_subnet <= 1:
return subnet_body
subnet_list.append(subnet_body)
if len(subnet_list) >= num_subnet:
if cidr_offset > 0:
# user request the 'cidr_offset'th of cidr
return subnet_list[cidr_offset]
# user request list of cidr
return subnet_list
except exceptions.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
else:
message = 'Available CIDR for subnet creation could not be found'
raise exceptions.BuildErrorException(message)
return {}
| 39.072222 | 78 | 0.65278 | 3,993 | 0.567752 | 0 | 0 | 3,496 | 0.497085 | 0 | 0 | 1,483 | 0.210863 |
deadc8ea87d0e57d203447ee89704b440dd4a622 | 3,650 | py | Python | read_csv.py | BigTony666/football-manager | 12a4c3dc2bb60f9634b419b7c230d6f78df8d650 | [
"MIT"
]
| 11 | 2019-04-23T23:15:43.000Z | 2021-07-13T06:37:25.000Z | read_csv.py | BigTony666/football-manager | 12a4c3dc2bb60f9634b419b7c230d6f78df8d650 | [
"MIT"
]
| null | null | null | read_csv.py | BigTony666/football-manager | 12a4c3dc2bb60f9634b419b7c230d6f78df8d650 | [
"MIT"
]
| 2 | 2019-03-07T21:07:34.000Z | 2020-04-19T15:28:31.000Z | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from collections import defaultdict
import csv
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# ## read data to numpy (not in use)
# In[385]:
# def readCsvToNumpy(file_name, feat_num):
# util_mat = []
# with open(file_name, newline='', encoding='utf-8') as csvfile:
# next(csvfile, None)
# rd = csv.reader(csvfile, delimiter=' ', quotechar='|')
# for idx, row in enumerate(rd):
# row = (' '.join(row))
# row = row.split(',')
# if len(row) == feat_num:
# util_mat.append(row)
# # convert 2d list to 2d numpy array
# for idx, row in enumerate(util_mat):
# util_mat[idx] = np.asarray(row)
# util_mat = np.asarray(util_mat)
# return util_mat
# def getPlayerMatrix(util_mat, left_idx, right_idx):
# player_mat = util_mat[:, left_idx:right_idx]
# player_mat = player_mat.astype(int)
# return player_mat
# def getTeamMatrix(util_mat, player_mat, team_idx):
# hashmap = defaultdict(list)
# for idx, item in enumerate(util_mat):
# hashmap[util_mat[idx, team_idx]].append(player_mat[idx, :])
# team_mat = []
# # print('Team number', len(hashmap))
# for key, value in hashmap.items():
# team_avr = [sum(x)/len(value) for x in zip(*value)]
# team_mat.append(team_avr)
# # team_mat.append((key, temp))
# # for idx, item in enumerate(team_mat):
# # if item[0] == 'Arsenal':
# # print(idx, item)
# # convert team feature matrix to numpy matrix
# for idx, row in enumerate(team_mat):
# team_mat[idx] = np.asarray(row, dtype=int)
# team_mat = np.asarray(team_mat, dtype=int);
# return team_mat
# if __name__ == "__main__":
# util_mat = readCsvToNumpy('data_clean.csv', 74)
# # print(util_mat.shape, util_mat)
# player_mat = getPlayerMatrix(util_mat, 44, 73)
# # print(player_mat.shape, player_mat)
# team_mat = getTeamMatrix(util_mat, player_mat, 6)
# # print(team_mat[0, :])
# res = np.dot(player_mat, np.transpose(team_mat))
# # # print(hashmap['FC Barcelona'])
# # print(res[0,:])
# ## read data to pandas Data frame
# In[3]:
util_df = pd.read_csv('data_clean.csv', na_filter=False)
# print(util_df)
player_df = util_df.iloc[:, 44:73]
# print(player_df)
team_df = util_df.groupby('Club', sort=False).mean()
# print(team_df)
team_df = team_df.iloc[:, 37:66]
# print(team_df)
res = np.dot(player_df, np.transpose(team_df))
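# res[i, j] is the dot product of player i's skill vector and team j's average
# skill vector, i.e. a crude similarity score between a player and a team.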
# In[ ]:
util_df.iloc[:,1]
# In[54]:
# util_df.describe()
player_characteristics = ['Crossing','Finishing', 'HeadingAccuracy',
'ShortPassing', 'Volleys', 'Dribbling', 'Curve',
'FKAccuracy', 'LongPassing', 'BallControl',
'Acceleration', 'SprintSpeed', 'Agility', 'Reactions',
'Balance', 'ShotPower', 'Jumping', 'Stamina',
'Strength', 'LongShots', 'Aggression',
'Interceptions', 'Positioning', 'Vision',
'Penalties', 'Composure', 'Marking', 'StandingTackle',
'SlidingTackle']
plt.figure(figsize= (25, 16))
hm=sns.heatmap(util_df.loc[:, player_characteristics + ['Overall']].corr(), annot = True, linewidths=.5, cmap='Reds')
hm.set_title(label='Heatmap of dataset', fontsize=20)
hm;
# corr_matrix = util_df.corr()
# corr_matrix.loc[player_characteristics, 'LB'].sort_values(ascending=False).head()
| 27.037037 | 117 | 0.596164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,672 | 0.732055 |
deaf2eb47ddc3dd822c0c77690801b5b8b2a48b0 | 1,492 | py | Python | layers/ternary_ops.py | victorjoos/QuantizedNeuralNetworks-Keras-Tensorflow | 4080ddff9c9e9a6fd5c1dd90997c63968195bb7e | [
"BSD-3-Clause"
]
| 1 | 2018-08-22T12:13:25.000Z | 2018-08-22T12:13:25.000Z | layers/ternary_ops.py | victorjoos/QuantizedNeuralNetworks-Keras-Tensorflow | 4080ddff9c9e9a6fd5c1dd90997c63968195bb7e | [
"BSD-3-Clause"
]
| null | null | null | layers/ternary_ops.py | victorjoos/QuantizedNeuralNetworks-Keras-Tensorflow | 4080ddff9c9e9a6fd5c1dd90997c63968195bb7e | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import keras.backend as K
def switch(condition, t, e):
if K.backend() == 'tensorflow':
import tensorflow as tf
return tf.where(condition, t, e)
elif K.backend() == 'theano':
import theano.tensor as tt
return tt.switch(condition, t, e)
def _ternarize(W, H=1):
'''The weights' ternarization function,
# References:
- [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
- [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
'''
W = W / H
cutoff = 0.7*K.mean(K.abs(W)) # # TODO: is this ok??
ones = K.ones_like(W)
zeros = K.zeros_like(W)
Wt = switch(W > cutoff, ones, switch(W <= -cutoff, -ones, zeros))
Wt *= H
return Wt
def ternarize(W, H=1):
'''The weights' ternarization function,
# References:
- [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
- [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
'''
Wt = _ternarize(W, H)
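    # Straight-through estimator: the forward pass uses the ternarized weights
    # Wt, while stop_gradient removes (Wt - W) from the backward graph so the
    # gradient flows through W as if ternarization were the identity.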
return W + K.stop_gradient(Wt - W)
def ternarize_dot(x, W):
'''For RNN (maybe Dense or Conv too).
Refer to 'Recurrent Neural Networks with Limited Numerical Precision' Section 3.1
'''
Wt = _ternarize(W)
return K.dot(x, W) + K.stop_gradient(K.dot(x, Wt - W))
def ternary_tanh(x):
x = K.clip(x, -1, 1)
return ternarize(x)
| 27.127273 | 99 | 0.632038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 660 | 0.442359 |
deafcfc518bad5ab9572431f7de653f846580238 | 1,050 | py | Python | python/5.concurrent/ZCoroutine/z_new_ipc/8.condition.py | lotapp/BaseCode | 0255f498e1fe67ed2b3f66c84c96e44ef1f7d320 | [
"Apache-2.0"
]
| 25 | 2018-06-13T08:13:44.000Z | 2020-11-19T14:02:11.000Z | python/5.concurrent/ZCoroutine/z_new_ipc/8.condition.py | lotapp/BaseCode | 0255f498e1fe67ed2b3f66c84c96e44ef1f7d320 | [
"Apache-2.0"
]
| null | null | null | python/5.concurrent/ZCoroutine/z_new_ipc/8.condition.py | lotapp/BaseCode | 0255f498e1fe67ed2b3f66c84c96e44ef1f7d320 | [
"Apache-2.0"
]
| 13 | 2018-06-13T08:13:38.000Z | 2022-01-06T06:45:07.000Z | import asyncio
cond = None
p_list = []
# producer
async def producer(n):
for i in range(5):
async with cond:
p_list.append(f"{n}-{i}")
print(f"[生产者{n}]生产商品{n}-{i}")
# 通知任意一个消费者
cond.notify() # 通知全部消费者:cond.notify_all()
# 摸拟一个耗时操作
await asyncio.sleep(0.01)
# consumer
async def consumer(i):
while True:
async with cond:
if p_list:
print(f"列表商品:{p_list}")
name = p_list.pop() # 消费商品
print(f"[消费者{i}]消费商品{name}")
print(f"列表剩余:{p_list}")
# 摸拟一个耗时操作
await asyncio.sleep(0.01)
else:
await cond.wait()
async def main():
global cond
    cond = asyncio.Condition()  # initialise the condition
    p_tasks = [asyncio.create_task(producer(i)) for i in range(2)]  # two producers
    c_tasks = [asyncio.create_task(consumer(i)) for i in range(5)]  # five consumers
await asyncio.gather(*p_tasks, *c_tasks)
if __name__ == "__main__":
asyncio.run(main())
| 23.333333 | 75 | 0.526667 | 0 | 0 | 0 | 0 | 0 | 0 | 1,084 | 0.895868 | 357 | 0.295041 |
deb039b791ed71607787c0d4ffc9f5bb4edef521 | 930 | py | Python | Q846_Hand-of-Straights.py | xiaosean/leetcode_python | 844ece02d699bfc620519bd94828ed0e18597f3e | [
"MIT"
]
| null | null | null | Q846_Hand-of-Straights.py | xiaosean/leetcode_python | 844ece02d699bfc620519bd94828ed0e18597f3e | [
"MIT"
]
| null | null | null | Q846_Hand-of-Straights.py | xiaosean/leetcode_python | 844ece02d699bfc620519bd94828ed0e18597f3e | [
"MIT"
]
| null | null | null | from collections import Counter
from typing import List

class Solution:
def isNStraightHand(self, hand: List[int], W: int) -> bool:
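        # Greedy over the sorted distinct card values: each group takes one copy
        # of W consecutive values; `step` tracks the first value that still has
        # copies left, which is where the next group must start.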
n = len(hand)
groups = 0
if n == 0 or n % W != 0:
return False
groups_num = n // W
c = Counter(hand)
keys = list(c.keys())
keys.sort()
step = 0
for _ in range(groups_num):
groups = []
step_lock = None
for idx, k in enumerate(keys[step:step+W]):
if c[k] > 0:
c[k] -= 1
if groups and k != groups[-1]+1:
return False
groups += [k]
if step_lock is None and c[k] > 0:
step += idx
step_lock = True
if step_lock is None:
step += W
if len(groups) < W:
return False
return True
| 31 | 63 | 0.410753 | 889 | 0.955914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
deb1c543d933d4026fb5899f87ff8c3384301fea | 174 | py | Python | hover/utils/misc.py | haochuanwei/hover | 53eb38c718e44445b18a97e391b7f90270802b04 | [
"MIT"
]
| 1 | 2020-12-08T13:04:18.000Z | 2020-12-08T13:04:18.000Z | hover/utils/misc.py | MaxCodeXTC/hover | feeb0e0c59295a3c883823ccef918dfe388b603c | [
"MIT"
]
| null | null | null | hover/utils/misc.py | MaxCodeXTC/hover | feeb0e0c59295a3c883823ccef918dfe388b603c | [
"MIT"
]
| null | null | null | """Mini-functions that do not belong elsewhere."""
from datetime import datetime
def current_time(template="%Y%m%d %H:%M:%S"):
return datetime.now().strftime(template)
| 24.857143 | 50 | 0.718391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.385057 |
deb28e42e8f7639fbbb2df4266120ee03fd2a028 | 193 | py | Python | admin.py | sfchronicle/najee | 0c66b05ba10616243d9828465da97dee7bfedc0d | [
"MIT",
"Unlicense"
]
| null | null | null | admin.py | sfchronicle/najee | 0c66b05ba10616243d9828465da97dee7bfedc0d | [
"MIT",
"Unlicense"
]
| null | null | null | admin.py | sfchronicle/najee | 0c66b05ba10616243d9828465da97dee7bfedc0d | [
"MIT",
"Unlicense"
]
| null | null | null | import flask_admin as admin
# from flask_admin.contrib.sqla import ModelView
from app import app
# from app import db
from models import *
# Admin
admin = admin.Admin(app)
# Add Admin Views
| 16.083333 | 48 | 0.766839 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.476684 |
deb449183523148b00bdabf18e21714bbe3551c8 | 467 | py | Python | src/courses/migrations/0006_auto_20200521_2038.py | GiomarOsorio/another-e-learning-platform | 5cfc76420eb3466691f5187c915c179afb13199a | [
"MIT"
]
| null | null | null | src/courses/migrations/0006_auto_20200521_2038.py | GiomarOsorio/another-e-learning-platform | 5cfc76420eb3466691f5187c915c179afb13199a | [
"MIT"
]
| 8 | 2020-06-25T22:16:20.000Z | 2022-03-12T00:39:27.000Z | src/courses/migrations/0006_auto_20200521_2038.py | GiomarOsorio/another-e-learning-platform | 5cfc76420eb3466691f5187c915c179afb13199a | [
"MIT"
]
| null | null | null | # Generated by Django 3.0.6 on 2020-05-21 20:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0005_auto_20200521_2038'),
]
operations = [
migrations.AlterField(
model_name='module',
name='segments',
field=models.IntegerField(help_text='number of segments in a module', verbose_name='number of segments in a module'),
),
]
| 24.578947 | 129 | 0.631692 | 374 | 0.800857 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.349036 |
deb684b2e0198456aadb77cab383b2b6c0c2748f | 776 | py | Python | mypi/settings.py | sujaymansingh/mypi | 768bdda2ed43faba8a69c7985ee063e1016b9299 | [
"BSD-2-Clause-FreeBSD"
]
| 4 | 2016-08-22T17:13:43.000Z | 2020-10-21T16:50:07.000Z | mypi/settings.py | sujaymansingh/mypi | 768bdda2ed43faba8a69c7985ee063e1016b9299 | [
"BSD-2-Clause-FreeBSD"
]
| null | null | null | mypi/settings.py | sujaymansingh/mypi | 768bdda2ed43faba8a69c7985ee063e1016b9299 | [
"BSD-2-Clause-FreeBSD"
]
| 1 | 2016-08-22T17:13:47.000Z | 2016-08-22T17:13:47.000Z | import os
def import_module(name):
module = __import__(name)
components = name.split('.')
    for component in components[1:]:
        module = getattr(module, component)
return module
# We should try to import any custom settings.
SETTINGS_MODULE_NAME = os.getenv("MYPI_SETTINGS_MODULE")
if SETTINGS_MODULE_NAME:
SETTINGS_MODULE = import_module(SETTINGS_MODULE_NAME)
else:
SETTINGS_MODULE = object()
# Try to get everything from the custom settings, but provide a default.
PACKAGES_DIR = getattr(SETTINGS_MODULE, "PACKAGES_DIR", "./packages")
SITE_TITLE = getattr(SETTINGS_MODULE, "SITE_TITLE", "Python Packages")
SITE_URL_BASE = getattr(SETTINGS_MODULE, "SITE_URL_BASE", "")
if SITE_URL_BASE.endswith("/"):
SITE_URL_BASE = SITE_URL_BASE[:-1]
| 29.846154 | 72 | 0.744845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.280928 |
deb7178741e12b76740cccb10cf2e3f8e186116d | 1,866 | py | Python | alipay/aop/api/domain/KbAdvertPreserveCommissionClause.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
]
| null | null | null | alipay/aop/api/domain/KbAdvertPreserveCommissionClause.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
]
| null | null | null | alipay/aop/api/domain/KbAdvertPreserveCommissionClause.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class KbAdvertPreserveCommissionClause(object):
def __init__(self):
self._claimer_id_type = None
self._claimers = None
@property
def claimer_id_type(self):
return self._claimer_id_type
@claimer_id_type.setter
def claimer_id_type(self, value):
self._claimer_id_type = value
@property
def claimers(self):
return self._claimers
@claimers.setter
def claimers(self, value):
if isinstance(value, list):
self._claimers = list()
for i in value:
self._claimers.append(i)
def to_alipay_dict(self):
params = dict()
if self.claimer_id_type:
if hasattr(self.claimer_id_type, 'to_alipay_dict'):
params['claimer_id_type'] = self.claimer_id_type.to_alipay_dict()
else:
params['claimer_id_type'] = self.claimer_id_type
if self.claimers:
if isinstance(self.claimers, list):
for i in range(0, len(self.claimers)):
element = self.claimers[i]
if hasattr(element, 'to_alipay_dict'):
self.claimers[i] = element.to_alipay_dict()
if hasattr(self.claimers, 'to_alipay_dict'):
params['claimers'] = self.claimers.to_alipay_dict()
else:
params['claimers'] = self.claimers
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbAdvertPreserveCommissionClause()
if 'claimer_id_type' in d:
o.claimer_id_type = d['claimer_id_type']
if 'claimers' in d:
o.claimers = d['claimers']
return o
| 29.15625 | 81 | 0.587889 | 1,735 | 0.929796 | 0 | 0 | 730 | 0.391211 | 0 | 0 | 200 | 0.107181 |
deba0ac91a90f7d9408ab094dc6d137f7476170c | 4,495 | py | Python | smart_contract/__init__.py | publicqi/CTFd-Fox | b1d0169db884cdf3cb665faa8987443e7630d108 | [
"MIT"
]
| 1 | 2021-01-09T15:20:14.000Z | 2021-01-09T15:20:14.000Z | smart_contract/__init__.py | publicqi/CTFd-Fox | b1d0169db884cdf3cb665faa8987443e7630d108 | [
"MIT"
]
| null | null | null | smart_contract/__init__.py | publicqi/CTFd-Fox | b1d0169db884cdf3cb665faa8987443e7630d108 | [
"MIT"
]
| null | null | null | from __future__ import division # Use floating point for math calculations
from flask import Blueprint
from CTFd.models import (
ChallengeFiles,
Challenges,
Fails,
Flags,
Hints,
Solves,
Tags,
db,
)
from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge
from CTFd.plugins.flags import get_flag_class
from CTFd.utils.uploads import delete_file
from CTFd.utils.user import get_ip
class SmartContractChallenge(BaseChallenge):
id = "smart_contract"
name = "smart_contract"
templates = {
"create": "/plugins/smart_contract/assets/create.html",
"update": "/plugins/smart_contract/assets/update.html",
"view": "/plugins/smart_contract/assets/view.html",
}
scripts = {
"create": "/plugins/smart_contract/assets/create.js",
"update": "/plugins/smart_contract/assets/update.js",
"view": "/plugins/smart_contract/assets/view.js",
}
route = "/plugins/smart_contract/assets/"
blueprint = Blueprint(
"smart_contract", __name__, template_folder="templates", static_folder="assets"
)
@staticmethod
def create(request):
data = request.form or request.get_json()
challenge = Challenges(**data)
db.session.add(challenge)
db.session.commit()
return challenge
@staticmethod
def read(challenge):
data = {
"id": challenge.id,
"name": challenge.name,
"value": challenge.value,
"description": challenge.description,
"category": challenge.category,
"state": challenge.state,
"max_attempts": challenge.max_attempts,
"type": challenge.type,
"type_data": {
"id": SmartContractChallenge.id,
"name": SmartContractChallenge.name,
"templates": SmartContractChallenge.templates,
"scripts": SmartContractChallenge.scripts,
},
}
return data
@staticmethod
def update(challenge, request):
data = request.form or request.get_json()
for attr, value in data.items():
setattr(challenge, attr, value)
db.session.commit()
return challenge
@staticmethod
def delete(challenge):
Fails.query.filter_by(challenge_id=challenge.id).delete()
Solves.query.filter_by(challenge_id=challenge.id).delete()
Flags.query.filter_by(challenge_id=challenge.id).delete()
files = ChallengeFiles.query.filter_by(challenge_id=challenge.id).all()
for f in files:
delete_file(f.id)
ChallengeFiles.query.filter_by(challenge_id=challenge.id).delete()
Tags.query.filter_by(challenge_id=challenge.id).delete()
Hints.query.filter_by(challenge_id=challenge.id).delete()
Challenges.query.filter_by(id=challenge.id).delete()
db.session.commit()
@staticmethod
def attempt(challenge, request):
data = request.form or request.get_json()
submission = data["submission"].strip()
flags = Flags.query.filter_by(challenge_id=challenge.id).all()
for flag in flags:
if get_flag_class(flag.type).compare(flag, submission):
return True, "Correct"
return False, "Incorrect"
@staticmethod
def solve(user, team, challenge, request):
data = request.form or request.get_json()
submission = data["submission"].strip()
solve = Solves(
user_id=user.id,
team_id=team.id if team else None,
challenge_id=challenge.id,
ip=get_ip(req=request),
provided=submission,
)
db.session.add(solve)
db.session.commit()
db.session.close()
@staticmethod
def fail(user, team, challenge, request):
data = request.form or request.get_json()
submission = data["submission"].strip()
wrong = Fails(
user_id=user.id,
team_id=team.id if team else None,
challenge_id=challenge.id,
ip=get_ip(request),
provided=submission,
)
db.session.add(wrong)
db.session.commit()
db.session.close()
def load(app):
CHALLENGE_CLASSES["smart_contract"] = SmartContractChallenge
register_plugin_assets_directory(
app, base_path="/plugins/smart_contract/assets/"
)
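# CTFd's plugin loader calls load(app) at startup; registering the class in
# CHALLENGE_CLASSES is what makes "smart_contract" selectable as a challenge
# type in the admin panel.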
| 32.338129 | 87 | 0.629588 | 3,825 | 0.850945 | 0 | 0 | 3,102 | 0.6901 | 0 | 0 | 653 | 0.145273 |
deba5b19ee11e4b42757cf8302210ae77f1c6474 | 295 | py | Python | 01-data_types/d13.py | philiphinton/learn_python | 6ddfe3c7818d6c919bfa49bd6302c75ee761b6a4 | [
"MIT"
]
| null | null | null | 01-data_types/d13.py | philiphinton/learn_python | 6ddfe3c7818d6c919bfa49bd6302c75ee761b6a4 | [
"MIT"
]
| 3 | 2022-01-17T22:55:09.000Z | 2022-01-26T07:26:13.000Z | 01-data_types/d13.py | philiphinton/learn_python | 6ddfe3c7818d6c919bfa49bd6302c75ee761b6a4 | [
"MIT"
]
| 1 | 2021-12-14T01:33:21.000Z | 2021-12-14T01:33:21.000Z |
shopping_list = {
'Tomatoes': 6,
'Bananas': 5,
'Crackers': 2,
'Sugar': 1,
'Icecream': 1,
'Bread': 3,
'Chocolate': 2
}
# Just the keys
print(shopping_list.keys())
# Just the values
# print(shopping_list.values())
# Both keys and values
# print(shopping_list.items())
| 17.352941 | 31 | 0.610169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.60678 |
debc22c03ed999e303334d1da3320e421b5bfacc | 119 | py | Python | applications/jupyter-extension/nteract_on_jupyter/notebooks/utils/cb/python/__init__.py | jjhenkel/nteract | 088222484b59af14b1da22de4d0990d8925adf95 | [
"BSD-3-Clause"
]
| null | null | null | applications/jupyter-extension/nteract_on_jupyter/notebooks/utils/cb/python/__init__.py | jjhenkel/nteract | 088222484b59af14b1da22de4d0990d8925adf95 | [
"BSD-3-Clause"
]
| null | null | null | applications/jupyter-extension/nteract_on_jupyter/notebooks/utils/cb/python/__init__.py | jjhenkel/nteract | 088222484b59af14b1da22de4d0990d8925adf95 | [
"BSD-3-Clause"
]
| null | null | null | from .surface import *
from .modifiers import *
from .evaluator import Evaluator
from .lowlevel import display_results
| 23.8 | 37 | 0.815126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
debcd3fde3c56a4f5ccca0c23d8a57a7d2afd960 | 588 | py | Python | Numbers/PrimeFac.py | Arjuna197/the100 | 2963b4fe1b1b8e673a23b2cf97f4bcb263af9781 | [
"MIT"
]
| 1 | 2022-02-20T18:49:49.000Z | 2022-02-20T18:49:49.000Z | Numbers/PrimeFac.py | dan-garvey/the100 | 2963b4fe1b1b8e673a23b2cf97f4bcb263af9781 | [
"MIT"
]
| 13 | 2017-12-13T02:31:54.000Z | 2017-12-13T02:37:45.000Z | Numbers/PrimeFac.py | dan-garvey/the100 | 2963b4fe1b1b8e673a23b2cf97f4bcb263af9781 | [
"MIT"
]
| null | null | null | import math
def isPrime(num):
    if num < 2:
        return False
    if num == 2 or num == 3:
        return True
    if num%2==0 or num%3==0:
        return False
    # the range must reach floor(sqrt(num)), hence the +1
    for n in range(5, int(num**(1/2)) + 1):
        if num%n==0:
            return False
    return True
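# Quick spot-checks for the corrected primality test (illustrative values):
#   isPrime(2) -> True, isPrime(25) -> False, isPrime(29) -> True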
print('enter a positive integer')
FacMe=int(input())
primefacts=[1]
if not isPrime(FacMe):
if FacMe % 2==0:
primefacts.append(2)
if FacMe % 3==0:
primefacts.append(3)
for i in range(5,FacMe):
if FacMe%i==0:
if isPrime(i):
primefacts.append(i)
else:
primefacts.append(FacMe)
print(primefacts)
| 21.777778 | 40 | 0.547619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.044218 |
debd457fd6d2c1141a031eefaca5f163110cfa64 | 1,130 | py | Python | src/wtfjson/validators/url.py | binary-butterfly/wtfjson | 551ad07c895ce3c94ac3015b6b5ecc2102599b56 | [
"MIT"
]
| null | null | null | src/wtfjson/validators/url.py | binary-butterfly/wtfjson | 551ad07c895ce3c94ac3015b6b5ecc2102599b56 | [
"MIT"
]
| 1 | 2021-10-11T08:55:45.000Z | 2021-10-11T08:55:45.000Z | src/wtfjson/validators/url.py | binary-butterfly/wtfjson | 551ad07c895ce3c94ac3015b6b5ecc2102599b56 | [
"MIT"
]
| null | null | null | # encoding: utf-8
"""
binary butterfly validator
Copyright (c) 2021, binary butterfly GmbH
Use of this source code is governed by an MIT-style license that can be found in the LICENSE.txt.
"""
import re
from typing import Any, Optional
from ..abstract_input import AbstractInput
from ..fields import Field
from ..validators import Regexp
from ..exceptions import ValidationError
from ..external import HostnameValidation
class URL(Regexp):
default_message = 'invalid url'
def __init__(self, require_tld: bool = True, allow_ip: bool = True, message: Optional[str] = None):
super().__init__(
r"^[a-z]+://(?P<host>[^\/\?:]+)(?P<port>:[0-9]+)?(?P<path>\/.*?)?(?P<query>\?.*)?$",
re.IGNORECASE,
message
)
self.validate_hostname = HostnameValidation(
require_tld=require_tld,
allow_ip=allow_ip,
)
def __call__(self, value: Any, form: AbstractInput, field: Field):
match = super().__call__(value, form, field)
if not self.validate_hostname(match.group('host')):
raise ValidationError(self.message)
| 30.540541 | 103 | 0.650442 | 703 | 0.622124 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.259292 |
debe6ce18f853e6b1e54abf97ade00987edf8450 | 1,270 | py | Python | runner/run_descriptions/runs/curious_vs_vanilla.py | alex-petrenko/curious-rl | 6cd0eb78ab409c68f8dad1a8542d625f0dd39114 | [
"MIT"
]
| 18 | 2018-12-29T01:52:25.000Z | 2021-11-08T06:48:20.000Z | runner/run_descriptions/runs/curious_vs_vanilla.py | alex-petrenko/curious-rl | 6cd0eb78ab409c68f8dad1a8542d625f0dd39114 | [
"MIT"
]
| 2 | 2019-06-13T12:52:55.000Z | 2019-10-30T03:27:11.000Z | runner/run_descriptions/runs/curious_vs_vanilla.py | alex-petrenko/curious-rl | 6cd0eb78ab409c68f8dad1a8542d625f0dd39114 | [
"MIT"
]
| 3 | 2019-05-11T07:50:53.000Z | 2021-11-18T08:15:56.000Z | from runner.run_descriptions.run_description import RunDescription, Experiment, ParamGrid
_params = ParamGrid([
('prediction_bonus_coeff', [0.00, 0.05]),
])
_experiments = [
Experiment(
'doom_maze_very_sparse',
'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_maze_very_sparse --gpu_mem_fraction=0.1 --train_for_env_steps=2000000000',
_params.generate_params(randomize=False),
),
# Experiment(
# 'doom_maze_sparse',
# 'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_maze_sparse --gpu_mem_fraction=0.1 --train_for_env_steps=100000000',
# _params.generate_params(randomize=False),
# ),
# Experiment(
# 'doom_maze',
# 'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_maze --gpu_mem_fraction=0.1 --train_for_env_steps=50000000',
# _params.generate_params(randomize=False),
# ),
# Experiment(
# 'doom_basic',
# 'python -m algorithms.curious_a2c.train_curious_a2c --env=doom_basic --gpu_mem_fraction=0.1 --train_for_env_steps=10000000',
# _params.generate_params(randomize=False),
# ),
]
DOOM_CURIOUS_VS_VANILLA = RunDescription('doom_curious_vs_vanilla', experiments=_experiments, max_parallel=5)
| 40.967742 | 145 | 0.711024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 858 | 0.675591 |
debed6abf0cd6d720ad9aac4713a4ef0c18b842a | 383 | py | Python | xappt_qt/__init__.py | cmontesano/xappt_qt | 74f8c62e0104a67b4b4eb65382df851221bf0bab | [
"MIT"
]
| null | null | null | xappt_qt/__init__.py | cmontesano/xappt_qt | 74f8c62e0104a67b4b4eb65382df851221bf0bab | [
"MIT"
]
| 12 | 2020-10-11T22:42:12.000Z | 2021-10-04T19:38:51.000Z | xappt_qt/__init__.py | cmontesano/xappt_qt | 74f8c62e0104a67b4b4eb65382df851221bf0bab | [
"MIT"
]
| 1 | 2021-09-29T23:53:34.000Z | 2021-09-29T23:53:34.000Z | import os
from xappt_qt.__version__ import __version__, __build__
from xappt_qt.plugins.interfaces.qt import QtInterface
# suppress "qt.qpa.xcb: QXcbConnection: XCB error: 3 (BadWindow)"
os.environ['QT_LOGGING_RULES'] = '*.debug=false;qt.qpa.*=false'
version = tuple(map(int, __version__.split('.'))) + (__build__, )
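# e.g. __version__ == "1.2.3" with __build__ == 45 gives version == (1, 2, 3, 45)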
version_str = f"{__version__}-{__build__}"
executable = None
| 27.357143 | 65 | 0.749347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.375979 |
dec0b14005ec6feafc62d8f18253556640fa35db | 145,150 | py | Python | py/countdowntourney.py | elocemearg/atropine | 894010bcc89d4e6962cf3fc15ef526068c38898d | [
"CC-BY-4.0"
]
| null | null | null | py/countdowntourney.py | elocemearg/atropine | 894010bcc89d4e6962cf3fc15ef526068c38898d | [
"CC-BY-4.0"
]
| null | null | null | py/countdowntourney.py | elocemearg/atropine | 894010bcc89d4e6962cf3fc15ef526068c38898d | [
"CC-BY-4.0"
]
| null | null | null | #!/usr/bin/python3
import sys
import sqlite3;
import re;
import os;
import random
import qualification
from cttable import CandidateTable, TableVotingGroup, PhantomTableVotingGroup
import cttable
SW_VERSION_SPLIT = (1, 1, 4)
SW_VERSION = ".".join([str(x) for x in SW_VERSION_SPLIT])
EARLIEST_COMPATIBLE_DB_VERSION = (0, 7, 0)
RANK_WINS_POINTS = 0;
RANK_POINTS = 1;
RANK_WINS_SPREAD = 2;
RATINGS_MANUAL = 0
RATINGS_GRADUATED = 1
RATINGS_UNIFORM = 2
CONTROL_NUMBER = 1
CONTROL_CHECKBOX = 2
UPLOAD_FAIL_TYPE_HTTP = 1
UPLOAD_FAIL_TYPE_REJECTED = 2
LOG_TYPE_NEW_RESULT = 1
LOG_TYPE_CORRECTION = 2
LOG_TYPE_COMMENT = 96
LOG_TYPE_COMMENT_VIDEPRINTER_FLAG = 1
LOG_TYPE_COMMENT_WEB_FLAG = 4
teleost_modes = [
{
"id" : "TELEOST_MODE_AUTO",
"name" : "Auto",
"desc" : "Automatic control. This will show Fixtures at the start of a round, Standings/Videprinter during the round, and Standings/Table Results when all games in the round have been played.",
"menuorder" : 0,
"image" : "/images/screenthumbs/auto.png",
"fetch" : [ "all" ]
},
{
"id" : "TELEOST_MODE_STANDINGS",
"name" : "Standings",
"desc" : "The current standings table and nothing else.",
"image" : "/images/screenthumbs/standings_only.png",
"menuorder" : 5,
"fetch" : [ "standings" ]
},
{
"id" : "TELEOST_MODE_STANDINGS_VIDEPRINTER",
"name" : "Standings / Videprinter",
"desc" : "Standings table with latest results appearing in the lower third of the screen.",
"image" : "/images/screenthumbs/standings_videprinter.png",
"menuorder" : 1,
"fetch" : [ "standings", "logs" ]
},
{
"id" : "TELEOST_MODE_STANDINGS_RESULTS",
"name" : "Standings / Table Results",
"desc" : "Standings table with the current round's fixtures and results cycling on the lower third of the screen.",
"image" : "/images/screenthumbs/standings_results.png",
"menuorder" : 2,
"fetch" : [ "standings", "games" ]
},
{
"id" : "TELEOST_MODE_TECHNICAL_DIFFICULTIES",
"name" : "Technical Difficulties",
"desc" : "Ceci n'est pas un probleme technique.",
"image" : "/images/screenthumbs/technical_difficulties.png",
"menuorder" : 10,
"fetch" : []
},
{
"id" : "TELEOST_MODE_FIXTURES",
"name" : "Fixtures",
"desc" : "Table of all fixtures in the next or current round.",
"image" : "/images/screenthumbs/fixtures.png",
"menuorder" : 3,
"fetch" : [ "games" ]
},
{
"id" : "TELEOST_MODE_TABLE_NUMBER_INDEX",
"name" : "Table Number Index",
"desc" : "A list of all the player names and their table numbers, in alphabetical order of player name.",
"image" : "/images/screenthumbs/table_index.png",
"menuorder" : 4,
"fetch" : [ "games" ]
},
{
"id" : "TELEOST_MODE_OVERACHIEVERS",
"name" : "Overachievers",
"desc" : "Table of players ranked by how highly they finish above their seeding position. This is only relevant if the players have different ratings.",
"image" : "/images/screenthumbs/overachievers.png",
"menuorder" : 6,
"fetch" : [ "overachievers" ]
},
{
"id" : "TELEOST_MODE_TUFF_LUCK",
"name" : "Tuff Luck",
"desc" : "Players who have lost three or more games, ordered by the sum of their three lowest losing margins.",
"image" : "/images/screenthumbs/tuff_luck.png",
"menuorder" : 7,
"fetch" : [ "tuffluck" ]
},
{
"id" : "TELEOST_MODE_HIGH_SCORES",
"name" : "High scores",
"desc" : "Highest winning scores, losing scores and combined scores in all heat games.",
"image" : "/images/screenthumbs/high_scores.jpg",
"menuorder" : 8,
"fetch" : [ "highscores" ]
}
#{
# "id" : "TELEOST_MODE_FASTEST_FINISHERS",
# "name" : "Fastest Finishers",
# "desc" : "A cheeky way to highlight which tables are taking too long to finish their games.",
# "image" : "/images/screenthumbs/placeholder.png",
# "menuorder" : 9,
# "fetch" : []
#}
#,{
# "id" : "TELEOST_MODE_CLOCK",
# "name" : "Clock",
# "desc" : "For some reason.",
# "image" : "/images/screenthumbs/placeholder.png",
# "menuorder" : 10,
# "fetch" : []
#}
]
teleost_mode_id_to_num = dict()
for idx in range(len(teleost_modes)):
teleost_modes[idx]["num"] = idx
teleost_mode_id_to_num[teleost_modes[idx]["id"]] = idx
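# Per-view Teleost display options, as tuples of
# (mode number, option name, control type, description, default value).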
teleost_per_view_option_list = [
(teleost_mode_id_to_num["TELEOST_MODE_AUTO"], "autousetableindex", CONTROL_CHECKBOX, "$CONTROL Show name-to-table index at start of round", 0),
(teleost_mode_id_to_num["TELEOST_MODE_AUTO"], "autocurrentroundmusthavegamesinalldivisions", CONTROL_CHECKBOX, "$CONTROL Only switch to Fixtures display after fixtures are generated for all divisions", 1),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS"], "standings_only_lines", CONTROL_NUMBER, "Players per page", 12),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS"], "standings_only_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 12),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_standings_lines", CONTROL_NUMBER, "Players per page", 8),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_standings_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 10),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_spell_big_scores", CONTROL_CHECKBOX, "$CONTROL Videprinter: repeat unbelievably high scores in words", 0),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_VIDEPRINTER"], "standings_videprinter_big_score_min", CONTROL_NUMBER, "$INDENT An unbelievably high score is $CONTROL or more", 90),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_standings_lines", CONTROL_NUMBER, "Players per standings page", 8),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_standings_scroll", CONTROL_NUMBER, "Standings scroll interval $CONTROL seconds", 10),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_results_lines", CONTROL_NUMBER, "Number of results per page", 3),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_results_scroll", CONTROL_NUMBER, "Results scroll interval $CONTROL seconds", 5),
(teleost_mode_id_to_num["TELEOST_MODE_STANDINGS_RESULTS"], "standings_results_show_unstarted_round_if_single_game", CONTROL_CHECKBOX, "$CONTROL Show unstarted next round if it only has one game", 1),
(teleost_mode_id_to_num["TELEOST_MODE_FIXTURES"], "fixtures_lines", CONTROL_NUMBER, "Lines per page", 12),
(teleost_mode_id_to_num["TELEOST_MODE_FIXTURES"], "fixtures_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 10),
(teleost_mode_id_to_num["TELEOST_MODE_TABLE_NUMBER_INDEX"], "table_index_rows", CONTROL_NUMBER, "Rows per page $CONTROL", 12),
(teleost_mode_id_to_num["TELEOST_MODE_TABLE_NUMBER_INDEX"], "table_index_columns", CONTROL_NUMBER, "Columns per page", 2),
(teleost_mode_id_to_num["TELEOST_MODE_TABLE_NUMBER_INDEX"], "table_index_scroll", CONTROL_NUMBER, "Page scroll interval $CONTROL seconds", 12)
]
create_tables_sql = """
begin transaction;
-- PLAYER table
create table if not exists player (
id integer primary key autoincrement,
name text,
rating float,
team_id int,
short_name text,
withdrawn int not null default 0,
division int not null default 0,
division_fixed int not null default 0,
avoid_prune int not null default 0,
require_accessible_table int not null default 0,
preferred_table int not null default -1,
unique(name), unique(short_name)
);
-- TEAM table
create table if not exists team (
id integer primary key autoincrement,
name text,
colour int,
unique(name)
);
insert into team(name, colour) values('White', 255 * 256 * 256 + 255 * 256 + 255);
insert into team(name, colour) values('Blue', 128 * 256 + 255);
-- GAME table, containing scheduled games and played games
create table if not exists game (
round_no int,
seq int,
table_no int,
division int,
game_type text,
p1 integer,
p1_score integer,
p2 integer,
p2_score integer,
tiebreak int,
unique(round_no, seq)
);
-- game log, never deleted from
create table if not exists game_log (
seq integer primary key autoincrement,
ts text,
round_no int,
round_seq int,
table_no int,
division int,
game_type text,
p1 integer,
p1_score int,
p2 integer,
p2_score int,
tiebreak int,
log_type int,
comment text default null
);
-- Games where we don't yet know who the players are going to be, but we
-- do know it's going to be "winner of this match versus winner of that match".
create table if not exists game_pending (
round_no int,
seq int,
seat int,
winner int,
from_round_no int,
from_seq int,
unique(round_no, seq, seat)
);
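-- Example: if the final (round 4, game 1) is contested by the winners of
-- the two semi-finals (round 3, games 1 and 2), GAME_PENDING holds rows
-- (4, 1, 1, 1, 3, 1) and (4, 1, 2, 1, 3, 2); the p1/p2 columns in GAME are
-- filled in when those semi-final results arrive.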
-- options, such as what to sort players by, how to decide fixtures, etc
create table if not exists options (
name text primary key,
value text
);
-- metadata for per-view options in teleost (values stored in "options" above)
create table if not exists teleost_options (
mode int,
seq int,
name text primary key,
control_type int,
desc text,
default_value text,
unique(mode, seq)
);
-- Table in which we persist the HTML form settings given to a fixture
-- generator
create table if not exists fixgen_settings (
fixgen text,
name text,
value text
);
-- Round names. When a fixture generator generates some fixtures, it will
-- probably create a new round. This is always given a number, but it can
-- also be given a name, e.g. "Quarter-finals". The "round type" column is
-- no longer used.
create table if not exists rounds (
id integer primary key,
type text,
name text
);
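-- This view fills in a default name where none was set above: for example,
-- a round whose games are all of type 'SF' is shown as "Semi-finals", and
-- an unnamed round 2 of ordinary heat games as "Round 2".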
create view if not exists rounds_derived as
select r.id,
case when r.name is not null and r.name != '' then r.name
when gc.qf = gc.total then 'Quarter-finals'
when gc.sf = gc.total then 'Semi-finals'
when gc.f = gc.total then 'Final'
when gc.tp = gc.total then 'Third Place'
when gc.f + gc.tp = gc.total then 'Final & Third Place'
else 'Round ' || cast(r.id as text) end as name
from rounds r,
(select g.round_no,
sum(case when g.game_type = 'QF' then 1 else 0 end) qf,
sum(case when g.game_type = 'SF' then 1 else 0 end) sf,
sum(case when g.game_type = '3P' then 1 else 0 end) tp,
sum(case when g.game_type = 'F' then 1 else 0 end) f,
sum(case when g.game_type = 'N' then 1 else 0 end) n,
sum(case when g.game_type = 'P' then 1 else 0 end) p,
count(*) total
from game g
group by g.round_no) gc
where gc.round_no = r.id;
create view if not exists completed_game as
select * from game
where p1_score is not null and p2_score is not null;
create view if not exists completed_heat_game as
select * from game
where p1_score is not null and p2_score is not null and game_type = 'P';
create view if not exists game_divided as
select round_no, seq, table_no, game_type, p1 p_id, p1_score p_score,
p2 opp_id, p2_score opp_score, tiebreak
from game
union all
select round_no, seq, table_no, game_type, p2 p_id, p2_score p_score,
p1 opp_id, p1_score opp_score, tiebreak
from game;
create view if not exists heat_game_divided as
select * from game_divided where game_type = 'P';
create view if not exists player_wins as
select p.id, sum(case when g.p_id is null then 0
when g.p_score is null or g.opp_score is null then 0
when g.p_score == 0 and g.opp_score == 0 and g.tiebreak then 0
when g.p_score > g.opp_score then 1
else 0 end) wins
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_draws as
select p.id, sum(case when g.p_id is null then 0
when g.p_score is null or g.opp_score is null then 0
when g.p_score == 0 and g.opp_score == 0 and g.tiebreak then 0
when g.p_score == g.opp_score then 1
else 0 end) draws
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_points as
select p.id, sum(case when g.p_score is null then 0
when g.tiebreak and g.p_score > g.opp_score
then g.opp_score
else g.p_score end) points
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_points_against as
select p.id, sum(case when g.opp_score is null then 0
when g.tiebreak and g.opp_score > g.p_score
then g.p_score
else g.opp_score end) points_against
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_played as
select p.id, sum(case when g.p_score is not null and g.opp_score is not null then 1 else 0 end) played
from player p left outer join heat_game_divided g on p.id = g.p_id
group by p.id;
create view if not exists player_played_first as
select p.id, count(g.p1) played_first
from player p left outer join completed_heat_game g on p.id = g.p1
group by p.id;
create table final_game_types(game_type text, power int);
insert into final_game_types values ('QF', 2), ('SF', 1), ('F', 0);
create view if not exists player_finals_results as
select p.id, coalesce(gd.game_type, gt.game_type) game_type,
case when gd.p_score is null then '-'
when gd.p_score > gd.opp_score then 'W'
when gd.p_score = gd.opp_score then 'D'
else 'L'
end result
from player p, final_game_types gt
left outer join game_divided gd on p.id = gd.p_id
and (gd.game_type = gt.game_type or (gt.game_type = 'F' and gd.game_type = '3P'));
create view if not exists player_finals_form as
select p.id, coalesce(pfr_qf.result, '-') qf,
coalesce(pfr_sf.result, '-') sf,
case when pfr_f.result is null then '-'
when pfr_f.game_type = '3P' then lower(pfr_f.result)
else pfr_f.result end f
from player p
left outer join player_finals_results pfr_qf on p.id = pfr_qf.id and pfr_qf.game_type = 'QF'
left outer join player_finals_results pfr_sf on p.id = pfr_sf.id and pfr_sf.game_type = 'SF'
left outer join player_finals_results pfr_f on p.id = pfr_f.id and pfr_f.game_type in ('3P', 'F')
group by p.id;
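-- finals_points encodes a player's finals form as a single sortable number:
-- the QF result contributes 48/32/16 for a win/draw/loss, the SF result
-- 12/8/4 and the final 3/2/1, so for example form "WWW" scores
-- 48 + 12 + 3 = 63 and "WWL" (lost the final) scores 48 + 12 + 1 = 61.
-- A player who never reaches a final scores 0.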
create view if not exists player_standings as
select p.id, p.name, p.division, played.played, wins.wins, draws.draws,
points.points, points_against.points_against, ppf.played_first,
pff.qf || pff.sf || upper(pff.f) finals_form,
case when pff.f = '-' then 0
else
case when pff.qf = 'W' then 48
when pff.qf = 'D' then 32
when pff.qf = 'L' then 16
else case when pff.sf != '-' or pff.f != '-' then 48 else 0 end
end +
case when pff.sf = 'W' then 12
when pff.sf = 'D' then 8
when pff.sf = 'L' then 4
-- If you're playing in a third place match then you're considered
-- to have lost the nonexistent semi-final. If you're playing in a
-- final then you're considered to have won the semi-final.
else case when pff.f in ('w', 'd', 'l') then 4
when pff.f in ('W', 'D', 'L') then 12
else 0 end
end +
case when pff.f = 'W' then 3
when pff.f = 'D' then 2
when pff.f = 'L' then 1
else 0
end
end finals_points
from player p, player_wins wins, player_draws draws, player_played played,
player_points points, player_points_against points_against,
player_played_first ppf, player_finals_form pff
where p.id = wins.id
and p.id = played.id
and p.id = points.id
and p.id = draws.id
and p.id = points_against.id
and p.id = ppf.id
and p.id = pff.id;
-- Tables for controlling the display system Teleost
create table if not exists teleost(current_mode int);
delete from teleost;
insert into teleost values(0);
create table if not exists teleost_modes(num int, name text, desc text);
create table if not exists tr_opts (
bonus float,
rating_diff_cap float
);
delete from tr_opts;
insert into tr_opts (bonus, rating_diff_cap) values (50, 40);
-- View for working out tournament ratings
-- For each game, you get 50 + your opponent's rating if you win,
-- your opponent's rating if you draw, and your opponent's rating - 50 if
-- you lost. For the purpose of this calculation, your opponent's rating
-- is your opponent's rating at the start of the tourney, except where that
-- is more than 40 away from your own, in which case it's your rating +40 or
-- -40 as appropriate.
-- The 50 and 40 are configurable, in the tr_opts table.
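-- Worked example: if my starting rating is 1500 and my opponent's is 1560,
-- the capped opponent rating is 1540 (my rating plus 40); a win is then
-- worth 1590, a draw 1540 and a loss 1490, and my tournament rating is the
-- average of these per-game values.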
create view tournament_rating as
select p.id, p.name,
avg(case when hgd.p_score > hgd.opp_score then rel_ratings.opp_rating + tr_opts.bonus
when hgd.p_score = hgd.opp_score then rel_ratings.opp_rating
else rel_ratings.opp_rating - tr_opts.bonus end) tournament_rating
from player p, heat_game_divided hgd on p.id = hgd.p_id,
(select me.id p_id, you.id opp_id,
case when you.rating < me.rating - tr_opts.rating_diff_cap
then me.rating - tr_opts.rating_diff_cap
when you.rating > me.rating + tr_opts.rating_diff_cap
then me.rating + tr_opts.rating_diff_cap
else you.rating end opp_rating
from player me, player you, tr_opts) rel_ratings
on rel_ratings.p_id = p.id and hgd.opp_id = rel_ratings.opp_id,
tr_opts
where hgd.p_score is not null and hgd.opp_score is not null
group by p.id, p.name;
-- Table for information about tables (boards). The special table_no -1 means
-- the default settings for tables. So if table -1 is marked as accessible
-- that means every table not listed is considered to be accessible.
create table board (
table_no integer primary key,
accessible integer not null
);
-- By default, if a board isn't listed in this table then it isn't accessible.
insert into board (table_no, accessible) values (-1, 0);
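-- Example: "insert into board (table_no, accessible) values (5, 1)" marks
-- table 5 as accessible while leaving the default row for all other tables
-- untouched.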
-- Log any failures to upload updates
create table if not exists upload_error_log (
ts text,
failure_type int,
message text
);
-- Time of last successful upload
create table if not exists upload_success (
ts text
);
insert into upload_success values (null);
commit;
""";
class TourneyException(Exception):
def __init__(self, description=None):
if description:
self.description = description;
class TourneyInProgressException(TourneyException):
description = "Tournament is in progress."
pass;
class PlayerDoesNotExistException(TourneyException):
description = "Player does not exist."
pass;
class PlayerExistsException(TourneyException):
description = "Player already exists."
pass;
class DuplicatePlayerException(TourneyException):
description = "No two players are allowed to have the same name."
pass
class UnknownRankMethodException(TourneyException):
description = "Unknown ranking method."
pass;
class DBNameExistsException(TourneyException):
description = "Tourney name already exists."
pass;
class DBNameDoesNotExistException(TourneyException):
description = "No tourney by that name exists."
pass;
class InvalidDBNameException(TourneyException):
description = "Invalid tourney name."
pass;
class InvalidRatingException(TourneyException):
description = "Invalid rating. Rating must be an integer."
pass;
class TooManyPlayersException(TourneyException):
description = "You've got too many players. Turf some out onto the street."
pass
class IncompleteRatingsException(TourneyException):
description = "Incomplete ratings - specify ratings for nobody or everybody."
pass;
class InvalidDivisionNumberException(TourneyException):
description = "Invalid division number"
pass
class InvalidPlayerNameException(TourneyException):
description = "A player's name is not allowed to be blank or consist entirely of whitespace."
class InvalidTableSizeException(TourneyException):
description = "Invalid table size - number of players per table must be 2 or 3."
pass;
class FixtureGeneratorException(TourneyException):
description = "Failed to generate fixtures."
pass;
class PlayerNotInGameException(TourneyException):
description = "That player is not in that game."
pass;
class NotMostRecentRoundException(TourneyException):
description = "That is not the most recent round."
pass
class NoGamesException(TourneyException):
description = "No games have been played."
pass
class IllegalDivisionException(TourneyException):
description = "Cannot distribute players into the specified number of divisions in the way you have asked, either because there aren't enough players, or the number of players in a division cannot be set to the requested multiple."
pass
class DBVersionMismatchException(TourneyException):
description = "This tourney database file was created with a version of atropine which is not compatible with the one you're using."
pass
class InvalidEntryException(TourneyException):
description = "Result entry is not valid."
pass
class QualificationTimeoutException(TourneyException):
description = "In calculating the standings table, we took too long to work out which players, if any, have qualified for the final. This may be due to an unusually large number of players, or an unusual tournament setup. In this case it is strongly recommended go to General Setup and disable qualification analysis by setting the number of places in the qualification zone to zero."
pass
class InvalidDateException(TourneyException):
def __init__(self, reason):
self.description = reason
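# Raised by set_players() when a player is assigned to a nonexistent team.
class InvalidTeamException(TourneyException):
    description = "Invalid or nonexistent team."
    pass
# Raised by set_player_divisions() when the players can't fill the requested
# divisions.
class ImpossibleDivisionException(TourneyException):
    description = "Cannot distribute the players into divisions of the required size."
    pass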
def get_teleost_mode_services_to_fetch(mode):
if mode < 0 or mode >= len(teleost_modes):
return [ "all" ]
else:
return teleost_modes[mode]["fetch"]
class Player(object):
def __init__(self, name, rating=0, team=None, short_name=None, withdrawn=False, division=0, division_fixed=False, player_id=None, avoid_prune=False, require_accessible_table=False, preferred_table=None):
self.name = name;
self.rating = rating;
self.team = team;
self.withdrawn = bool(withdrawn)
if short_name:
self.short_name = short_name
else:
self.short_name = name
self.division = division
# If true, player has been manually put in this division rather than
# happened to fall into it because of their rating
self.division_fixed = division_fixed
self.player_id = player_id
self.avoid_prune = avoid_prune
self.require_accessible_table = require_accessible_table
self.preferred_table = preferred_table
def __eq__(self, other):
if other is None:
return False;
elif self.name == other.name:
return True;
else:
return False;
def __ne__(self, other):
return not(self.__eq__(other));
# Emulate a 3-tuple
def __len__(self):
return 3;
def __getitem__(self, key):
return [self.name, self.rating, self.division][key];
def __str__(self):
return self.name;
def is_player_known(self):
return True;
def is_pending(self):
return False;
def is_withdrawn(self):
return self.withdrawn
def make_dict(self):
return {
"name" : self.name,
"rating" : self.rating
};
def get_name(self):
return self.name;
def get_rating(self):
return self.rating
def get_id(self):
return self.player_id
def get_team_colour_tuple(self):
if self.team:
return self.team.get_colour_tuple()
else:
return None
def get_team(self):
return self.team
def get_team_id(self):
if self.team:
return self.team.get_id()
else:
return None
def get_short_name(self):
return self.short_name
def get_division(self):
return self.division
def is_division_fixed(self):
return self.division_fixed
def is_avoiding_prune(self):
return self.avoid_prune
def is_requiring_accessible_table(self):
return self.require_accessible_table
def get_preferred_table(self):
if self.preferred_table is None or self.preferred_table < 0:
return None
else:
return self.preferred_table
def get_first_name(name):
return name.split(" ", 1)[0]
def get_first_name_and_last_initial(name):
names = name.split(" ", 1)
if len(names) < 2 or len(names[1]) < 1:
return get_first_name(name)
else:
return names[0] + " " + names[1][0]
def get_short_name(name, player_names):
short_name = get_first_name(name)
for op in player_names:
if name != op and short_name == get_first_name(op):
break
else:
return short_name
short_name = get_first_name_and_last_initial(name)
for op in player_names:
if name != op and short_name == get_first_name_and_last_initial(op):
break
else:
return short_name
return name
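# For example, with players "John Smith", "John Jones" and "Jane Doe" the
# short names come out as "John S", "John J" and "Jane" respectively.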
# When we submit a player list to a new tournament, set_players() takes a list
# of these objects.
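# Example (hypothetical names), registering two manually-rated players:
#   players = [ EnteredPlayer("Alice", 1800), EnteredPlayer("Bob", 1700) ]
#   tourney.set_players(players, auto_rating_behaviour=RATINGS_MANUAL)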
class EnteredPlayer(object):
def __init__(self, name, rating, division=0, team_id=None,
avoid_prune=False, withdrawn=False,
requires_accessible_table=False, preferred_table=None):
self.name = name.strip()
self.short_name = self.name
self.rating = rating
self.division = division
self.team_id = team_id
self.avoid_prune = avoid_prune
self.withdrawn = withdrawn
self.requires_accessible_table = requires_accessible_table
self.preferred_table = preferred_table
def get_name(self):
return self.name
def get_rating(self):
return self.rating
def set_rating(self, rating):
self.rating = rating
def set_short_name(self, short_name):
self.short_name = short_name
def get_short_name(self):
return self.short_name
def get_division(self):
return self.division
def get_team_id(self):
return self.team_id
def get_avoid_prune(self):
return self.avoid_prune
def get_withdrawn(self):
return self.withdrawn
def get_requires_accessible_table(self):
return self.requires_accessible_table
def get_preferred_table(self):
return self.preferred_table
# This object can appear on either side of a Game, just like a Player.
# However, it does not represent a player. It represents the winner or loser
# of another specific game yet to be played.
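# For example, PlayerPending(3, 2, winner=True) stands for "the winner of
# round 3, game 2"; merge_games() substitutes the real player once that
# game has a result.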
class PlayerPending(object):
def __init__(self, round_no, round_seq, winner=True, round_short_name=None):
self.round_no = round_no;
self.round_seq = round_seq;
self.winner = winner;
self.round_short_name = round_short_name if round_short_name else ("R%d" % self.round_no)
def __eq__(self, other):
if other is None:
return False;
elif self.round_no == other.round_no and self.round_seq == other.round_seq and self.winner == other.winner:
return True;
else:
return False;
def __len__(self):
return 3;
def __getitem__(self, key):
return [None, 0, 0][key];
def is_player_known(self):
return False;
def is_pending(self):
return True;
def make_dict(self):
return {
"round" : self.round_no,
"round_seq" : self.round_seq,
"winner" : self.winner,
"round_short_name" : self.round_short_name
};
@staticmethod
def from_dict(d):
return PlayerPending(d["round"], d["round_seq"], d["winner"], d["round_short_name"]);
def get_name(self):
return None;
def __str__(self):
if self.round_short_name is None:
return "%s of R%d.%d" % ("Winner" if self.winner else "Loser", self.round_no, self.round_seq);
else:
return "%s of %s.%d" % ("Winner" if self.winner else "Loser", self.round_short_name, self.round_seq);
def get_pending_game_details(self):
return (self.round_no, self.round_seq, self.winner);
# COLIN Hangover 2015: each player is assigned a team
class Team(object):
def __init__(self, team_id, team_name, colour=0xffffff):
self.team_id = team_id;
self.name = team_name;
self.colour = colour;
def get_name(self):
return self.name
def get_id(self):
return self.team_id
def get_hex_colour(self):
return "%06x" % (self.colour)
def get_colour_tuple(self):
return ((self.colour >> 16) & 0xff, (self.colour >> 8) & 0xff, self.colour & 0xff)
class StandingsRow(object):
def __init__(self, position, name, played, wins, points, draws, spread, played_first, rating, tournament_rating, withdrawn, finals_form, finals_points):
self.position = position
self.name = name
self.played = played
self.wins = wins
self.points = points
self.draws = draws
self.spread = spread
self.played_first = played_first
self.rating = rating
self.tournament_rating = tournament_rating
self.withdrawn = withdrawn
self.qualified = False
self.finals_form = finals_form
self.finals_points = finals_points
def __str__(self):
return "%3d. %-25s %3dw %3dd %4dp%s" % (self.position, self.name, self.wins, self.draws, self.points, " (W)" if self.withdrawn else "")
# Emulate a list for bits of the code that require it
def __len__(self):
return 8
def __getitem__(self, index):
return [self.position, self.name, self.played, self.wins, self.points, self.draws, self.spread, self.played_first][index]
def is_qualified(self):
return self.qualified
class Game(object):
def __init__(self, round_no, seq, table_no, division, game_type, p1, p2, s1=None, s2=None, tb=False):
self.round_no = round_no;
self.seq = seq;
self.table_no = table_no;
self.division = division
self.game_type = game_type;
self.p1 = p1;
self.p2 = p2;
self.s1 = s1;
self.s2 = s2;
self.tb = tb;
def is_complete(self):
if self.s1 is not None and self.s2 is not None:
return True;
else:
return False;
def are_players_known(self):
if self.p1.is_player_known() and self.p2.is_player_known():
return True;
else:
return False;
def get_team_colours(self):
return [self.p1.get_team_colour_tuple(), self.p2.get_team_colour_tuple()]
def contains_player(self, player):
if self.p1 == player or self.p2 == player:
return True;
else:
return False;
def is_tiebreak(self):
return self.tb
def get_score(self):
return (self.s1, self.s2)
def __str__(self):
if self.is_complete():
return "Round %d, %s, Table %d, %s %s %s" % (self.round_no, get_general_division_name(self.division), self.table_no, str(self.p1), self.format_score(), str(self.p2));
else:
return "Round %d, %s, Table %d, %s v %s" % (self.round_no, get_general_division_name(self.division), self.table_no, str(self.p1), str(self.p2));
def get_short_string(self):
if self.is_complete():
return "%s %s %s" % (str(self.p1), self.format_score(), str(self.p2))
else:
return "%s v %s" % (str(self.p1), str(self.p2))
def make_dict(self):
names = self.get_player_names();
if self.p1.is_pending():
p1pending = self.p1.make_dict();
else:
p1pending = None;
if self.p2.is_pending():
p2pending = self.p2.make_dict();
else:
p2pending = None;
return {
"round_no" : self.round_no,
"round_seq" : self.seq,
"table_no" : self.table_no,
"division" : self.division,
"game_type" : self.game_type,
"p1" : names[0],
"p2" : names[1],
"p1pending" : p1pending,
"p2pending" : p2pending,
"s1" : self.s1,
"s2" : self.s2,
"tb" : self.tb
};
def is_between_names(self, name1, name2):
if not self.p1.is_player_known() or not self.p2.is_player_known():
return False;
(pname1, pname2) = self.get_player_names();
if (pname1 == name1 and pname2 == name2) or (pname1 == name2 and pname2 == name1):
return True;
else:
return False;
def get_players(self):
return [ self.p1, self.p2 ]
def get_player_names(self):
return [self.p1.get_name(), self.p2.get_name()];
def get_short_player_names(self):
return [self.p1.get_short_name(), self.p2.get_short_name()]
def get_player_score(self, player):
if self.p1.is_player_known() and self.p1 == player:
score = self.s1;
elif self.p2.is_player_known() and self.p2 == player:
score = self.s2;
else:
raise PlayerNotInGameException("player %s is not in the game between %s and %s." % (str(player), str(self.p1), str(self.p2)));
return score;
def get_player_name_score(self, player_name):
if self.p1.is_player_known() and (self.p1.get_name().lower() == player_name.lower() or self.p1.get_name() == player_name):
return self.s1
elif self.p2.is_player_known() and (self.p2.get_name().lower() == player_name.lower() or self.p2.get_name() == player_name):
return self.s2
else:
raise PlayerNotInGameException("Player %s not in the game between %s and %s." % (str(player_name), str(self.p1), str(self.p2)))
def get_opponent_score(self, player):
if self.p1 == player:
score = self.s2;
elif self.p2 == player:
score = self.s1;
else:
raise PlayerNotInGameException("player %s is not in the game between %s and %s." % (str(player), str(self.p1), str(self.p2)));
return score;
def set_player_score(self, player, score):
if self.p1 == player:
self.s1 = score;
elif self.p2 == player:
self.s2 = score;
else:
raise PlayerNotInGameException("player %s is not in the game between %s and %s." % (str(player), str(self.p1), str(self.p2)));
def set_tiebreak(self, tb):
self.tb = tb;
def set_score(self, s1, s2, tb):
self.s1 = s1;
self.s2 = s2;
self.tb = tb;
def get_round_no(self):
return self.round_no
def get_division(self):
return self.division
def get_table_no(self):
return self.table_no
def get_round_seq(self):
return self.seq
def get_game_type(self):
return self.game_type
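    # Render the score for display, e.g. "70 - 65". A tiebreak win is shown
    # with an asterisk ("60* - 60") and a tiebreak double-loss as "X - X".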
def format_score(self):
if self.s1 is None and self.s2 is None:
return "";
if self.s1 is None:
left = "";
else:
left = str(self.s1);
if self.s2 is None:
right = "";
else:
right = str(self.s2);
if self.tb:
if self.s1 == 0 and self.s2 == 0:
left = "X"
right = "X"
elif self.s1 > self.s2:
left += "*";
else:
right += "*";
return left + " - " + right;
def is_double_loss(self):
if self.s1 is not None and self.s2 is not None and self.s1 == 0 and self.s2 == 0 and self.tb:
return True
else:
return False
# Emulate a list of values
def __len__(self):
return 10;
def __getitem__(self, key):
return [self.round_no, self.seq, self.table_no, self.division, self.game_type, str(self.p1), self.s1, str(self.p2), self.s2, self.tb ][key];
def get_general_division_name(num):
if num < 0:
return "Invalid division number %d" % (num)
elif num > 25:
return "Division %d" % (num + 1)
else:
return "Division %s" % (chr(ord('A') + num))
def get_general_short_division_name(num):
if num < 0:
return ""
elif num > 25:
        return str(num + 1)
else:
return chr(ord('A') + num)
class TeleostOption(object):
def __init__(self, mode, seq, name, control_type, desc, value):
self.mode = mode
self.seq = seq
self.name = name
self.control_type = control_type
self.desc = desc
self.value = value
class Tourney(object):
def __init__(self, filename, tourney_name, versioncheck=True):
self.filename = filename;
self.name = tourney_name;
self.db = sqlite3.connect(filename);
if versioncheck:
cur = self.db.cursor()
cur.execute("select value from options where name = 'atropineversion'")
row = cur.fetchone()
if row is None:
raise DBVersionMismatchException("This tourney database file was created by an atropine version prior to 0.7.0. It's not compatible with this version of atropine.")
else:
version = row[0]
version_split = version.split(".")
if len(version_split) != 3:
raise DBVersionMismatchException("This tourney database has an invalid version number %s." % (version))
else:
try:
version_split = list(map(int, version_split))
except ValueError:
raise DBVersionMismatchException("This tourney database has an invalid version number %s." % (version))
if tuple(version_split) < EARLIEST_COMPATIBLE_DB_VERSION:
raise DBVersionMismatchException("This tourney database was created with atropine version %s, which is not compatible with this version of atropine (%s)" % (version, SW_VERSION))
self.db_version = tuple(version_split)
else:
self.db_version = (0, 0, 0)
if self.db_version > (0,8,0):
self.round_view_name = "rounds_derived"
else:
self.round_view_name = "rounds"
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def get_name(self):
return self.name
def get_full_name(self):
return self.get_attribute("fullname", self.name)
def set_full_name(self, name):
self.set_attribute("fullname", name)
def get_venue(self):
return self.get_attribute("venue", "")
def set_venue(self, venue):
self.set_attribute("venue", venue)
def get_event_date(self):
date_str = self.get_attribute("eventdate", None)
if not date_str:
return (None, None, None)
else:
fields = date_str.split("-")
if len(fields) != 3:
return (None, None, None)
try:
return tuple([int(x) for x in fields])
except ValueError:
return (None, None, None)
def get_event_date_string(self):
(year, month, day) = self.get_event_date()
if not day or not month or not year:
return None
else:
return "%04d-%02d-%02d" % (year, month, day)
def check_date(self, year, month, day):
if month < 1 or month > 12:
raise InvalidDateException("Invalid date: %d is not a valid month." % (month))
if year < 1 or year > 9999:
raise InvalidDateException("Invalid date: year %04d is out of range." % (year))
if day < 1:
raise InvalidDateException("Invalid date: day of month %d is out of range." % (day))
leap = (year % 4 == 0 and not (year % 100 == 0 and year % 400 != 0))
if month == 2:
day_max = 29 if leap else 28
elif month in (4, 6, 9, 11):
day_max = 30
else:
day_max = 31
if day > day_max:
raise InvalidDateException("Invalid date: day of month %d is out of range for month %d." % (day, month))
def set_event_date(self, year, month, day):
if not year or not month or not day:
self.set_attribute("eventdate", "")
else:
self.check_date(year, month, day)
self.set_attribute("eventdate", "%04d-%02d-%02d" % (year, month, day))
def get_db_version(self):
return ".".join([str(x) for x in self.db_version])
def get_software_version(self):
return get_software_version()
# Number of games in the GAME table - that is, number of games played
# or in progress.
def get_num_games(self):
cur = self.db.cursor();
cur.execute("select count(*) from game");
row = cur.fetchone();
count = row[0];
cur.close();
return count;
def get_next_free_table_number_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(table_no) from game g where g.round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
next_table_no = 1
else:
next_table_no = row[0] + 1
cur.close()
return next_table_no
def get_next_free_seq_number_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(seq) from game g where g.round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
next_seq_no = 1
else:
next_seq_no = row[0] + 1
cur.close()
return next_seq_no
def get_next_free_round_number_for_division(self, div):
cur = self.db.cursor()
cur.execute("select max(round_no) from game g where g.division = ?", (div,))
row = cur.fetchone()
if row is None or row[0] is None:
round_no = 1
else:
round_no = row[0] + 1
cur.close()
return round_no
def get_round_name(self, round_no):
cur = self.db.cursor();
cur.execute("select name from " + self.round_view_name + " where id = ?", (round_no,));
row = cur.fetchone();
if not row:
cur.close();
return None;
else:
cur.close();
return row[0];
def get_short_round_name(self, round_no):
cur = self.db.cursor();
cur.execute("select cast(id as text) short_name from rounds where id = ?", (round_no,));
row = cur.fetchone();
if not row:
cur.close();
return None;
else:
cur.close();
return row[0];
def get_rounds(self):
cur = self.db.cursor();
cur.execute("select g.round_no, r.name from game g left outer join " +
self.round_view_name + " r on g.round_no = r.id group by g.round_no");
rounds = [];
for row in cur:
rdict = dict();
if not row[1]:
rdict["name"] = "Round " + str(row[0]);
else:
rdict["name"] = row[1];
rdict["num"] = row[0];
rounds.append(rdict);
cur.close();
return rounds;
def get_round(self, round_no):
cur = self.db.cursor();
cur.execute("select r.id, r.name from " + self.round_view_name + " r where id = ?", (round_no,));
row = cur.fetchone()
d = None
if row is not None:
d = dict()
d["num"] = row[0]
d["name"] = row[1]
cur.close()
return d
def name_round(self, round_no, round_name):
# Does round_no already exist?
cur = self.db.cursor();
cur.execute("select id from rounds where id = ?", (round_no,));
rows = cur.fetchall();
if len(rows) > 0:
cur.close();
cur = self.db.cursor();
cur.execute("update rounds set name = ?, type = null where id = ?", (round_name, round_no));
else:
cur.close();
cur = self.db.cursor();
cur.execute("insert into rounds(id, name, type) values (?, ?, null)", (round_no, round_name));
self.db.commit();
cur.close()
def get_largest_table_game_count(self, round_no):
cur = self.db.cursor()
cur.execute("select max(num_games) from (select table_no, count(*) num_games from game where round_no = ? group by table_no) x", (round_no,))
result = cur.fetchone()
if result[0] is None:
count = 0
else:
count = int(result[0])
self.db.commit()
cur.close()
return count;
def player_name_exists(self, name):
cur = self.db.cursor()
cur.execute("select count(*) from player where lower(name) = ? or name = ?", (name.lower(), name))
row = cur.fetchone()
if row[0]:
cur.close()
return True
else:
cur.close()
return False
def set_player_avoid_prune(self, name, value):
if self.db_version < (0, 7, 7):
return
cur = self.db.cursor()
cur.execute("update player set avoid_prune = ? where lower(name) = ? or name = ?", (1 if value else 0, name.lower(), name))
cur.close()
self.db.commit()
def get_player_avoid_prune(self, name):
if self.db_version < (0, 7, 7):
return False
cur = self.db.cursor()
cur.execute("select avoid_prune from player where lower(name) = ? or name = ?", (name.lower(), name))
row = cur.fetchone()
if row:
retval = bool(row[0])
else:
raise PlayerDoesNotExistException("Can't get whether player \"%s\" is allowed to play prunes because there is no player with that name." % (name))
cur.close()
self.db.commit()
return retval
def add_player(self, name, rating, division=0):
if self.player_name_exists(name):
raise PlayerExistsException("Can't add player \"%s\" because there is already a player with that name." % (name))
cur = self.db.cursor()
cur.execute("insert into player(name, rating, team_id, short_name, withdrawn, division, division_fixed) values(?, ?, ?, ?, ?, ?, ?)",
(name, rating, None, "", 0, division, 0))
cur.close()
self.db.commit()
# Recalculate everyone's short names
cur = self.db.cursor()
players = self.get_players()
for p in players:
short_name = get_short_name(p.get_name(), [ x.get_name() for x in players ])
cur.execute("update player set short_name = ? where (lower(name) = ? or name = ?)", (short_name, p.get_name().lower(), p.get_name()))
self.db.commit()
# players must be a list of EnteredPlayer objects.
# This function removes any players currently registered.
def set_players(self, players, auto_rating_behaviour=RATINGS_UNIFORM):
        # If there are any games in this tournament, it's too late to
# replace the player list. You can, however, withdraw players or
# add individual players.
if self.get_num_games() > 0:
raise TourneyInProgressException("Replacing the player list is not permitted once the tournament has started.");
# Make sure no player names are blank
for p in players:
if not p.get_name():
raise InvalidPlayerNameException()
# Make sure all the player names are case-insensitively unique
for pi in range(len(players)):
for opi in range(pi + 1, len(players)):
if players[pi].get_name().lower() == players[opi].get_name().lower():
raise DuplicatePlayerException("No two players are allowed to have the same name, and you've got more than one %s." % (players[pi].get_name()))
teams = self.get_teams()
team_ids = [t.get_id() for t in teams]
# Make sure for each player that if they're on a team, that team
# exists
for p in players:
team = p.get_team_id()
if team is not None and team not in team_ids:
raise InvalidTeamException("Player \"%s\" is being assigned to a team with an invalid or nonexistent number.\n" % (p.get_name()))
# For each player, work out a "short name", which will be the first
# of their first name, first name and last initial, and full name,
# which is unique for that player.
for p in players:
p.set_short_name(get_short_name(p.get_name(), [ x.get_name() for x in players]))
# Check the ratings, if given, are sane
new_players = [];
for p in players:
if p.get_division() < 0:
raise InvalidDivisionNumberException("Player \"%s\" has been given a division number of %d. It's not allowed to be negative." % (p[0], p[3]))
if p.get_rating() is not None:
rating = p.get_rating()
if rating != 0 and auto_rating_behaviour != RATINGS_MANUAL:
# Can't specify any non-zero ratings if automatic
# rating is enabled.
raise InvalidRatingException("Player \"%s\" has been given a rating (%g) but you have not selected manual rating. If manual rating is not used, players may not be given manual ratings in the initial player list except a rating of 0 to indicate a prune or bye." % (p.get_name(), rating))
else:
if auto_rating_behaviour == RATINGS_MANUAL:
# Can't have unrated players if automatic rating
# has been disabled.
raise InvalidRatingException("Player \"%s\" does not have a rating. If manual rating is selected, all players must be given a rating." % (p.get_name()))
if auto_rating_behaviour != RATINGS_MANUAL:
if auto_rating_behaviour == RATINGS_GRADUATED:
max_rating = 2000
min_rating = 1000
else:
max_rating = 1000
min_rating = 1000
new_players = [];
rating = max_rating;
num_unrated_players = len([x for x in players if x.get_rating() is None])
num_players_given_auto_rating = 0
if max_rating != min_rating and num_unrated_players > max_rating - min_rating:
raise TooManyPlayersException("I don't know what kind of crazy-ass tournament you're running here, but it appears to have more than %d players in it. Automatic rating isn't going to work, and to be honest I'd be surprised if anything else did." % num_unrated_players)
for p in players:
if num_unrated_players == 1:
rating = max_rating
else:
rating = float(max_rating - num_players_given_auto_rating * (max_rating - min_rating) / (num_unrated_players - 1))
rating = round(rating, 2)
if p.get_rating() is None:
p.set_rating(rating)
num_players_given_auto_rating += 1
self.set_attribute("autoratingbehaviour", auto_rating_behaviour);
self.db.execute("delete from player");
self.db.executemany("insert into player(name, rating, team_id, short_name, withdrawn, division, division_fixed, avoid_prune, require_accessible_table, preferred_table) values (?, ?, ?, ?, ?, ?, 0, ?, ?, ?)",
[ (p.get_name(), p.get_rating(), p.get_team_id(),
p.get_short_name(), int(p.get_withdrawn()),
p.get_division(), int(p.get_avoid_prune()),
int(p.get_requires_accessible_table()),
int(p.get_preferred_table()) if p.get_preferred_table() is not None else -1) for p in players ]);
self.db.commit();
def get_auto_rating_behaviour(self):
return self.get_int_attribute("autoratingbehaviour", RATINGS_UNIFORM)
def get_active_players(self):
# Return the list of players in the tournament who are not marked
# as withdrawn.
return self.get_players(exclude_withdrawn=True)
def get_withdrawn_players(self):
return [x for x in self.get_players() if x.withdrawn]
def get_players(self, exclude_withdrawn=False):
cur = self.db.cursor();
if self.db_version < (0, 7, 7):
avoid_prune_value = "0"
else:
avoid_prune_value = "p.avoid_prune"
if self.db_version < (1, 0, 4):
accessible_value = "0"
else:
accessible_value = "p.require_accessible_table"
if self.db_version < (1, 0, 5):
preferred_table_value = "-1"
else:
preferred_table_value = "p.preferred_table"
if exclude_withdrawn:
condition = "where p.withdrawn = 0"
else:
condition = ""
cur.execute("select p.name, p.rating, t.id, t.name, t.colour, p.short_name, p.withdrawn, p.division, p.division_fixed, p.id, %s, %s, %s from player p left outer join team t on p.team_id = t.id %s order by p.rating desc, p.name" % (avoid_prune_value, accessible_value, preferred_table_value, condition))
players = [];
for row in cur:
if row[2] is not None:
team = Team(row[2], row[3], row[4])
else:
team = None
players.append(Player(row[0], row[1], team, row[5], bool(row[6]), row[7], row[8], row[9], row[10], row[11], row[12]));
cur.close();
return players;
def rerate_player(self, name, rating):
try:
rating = float(rating)
except ValueError:
raise InvalidRatingException("Cannot set %s's rating - invalid rating." % name);
cur = self.db.cursor();
cur.execute("update player set rating = ? where (lower(name) = ? or name = ?)", (rating, name.lower(), name));
if cur.rowcount < 1:
self.db.rollback();
raise PlayerDoesNotExistException("Cannot change the rating of player \"" + name + "\" because no player by that name exists.");
cur.close();
self.db.commit();
def rename_player(self, oldname, newname):
newname = newname.strip();
if newname == "":
raise InvalidPlayerNameException()
if self.player_name_exists(newname):
raise PlayerExistsException("Cannot rename player \"%s\" to \"%s\" because there's already another player with that name." % (oldname, newname));
cur = self.db.cursor();
cur.execute("update player set name = ? where (lower(name) = ? or name = ?)", (newname, oldname.lower(), oldname));
if cur.rowcount < 1:
self.db.rollback();
raise PlayerDoesNotExistException("Cannot rename player \"" + oldname + "\" because no player by that name exists.");
cur.close();
# Recalculate everyone's short names, because this name change might
# mean that short names are no longer unique
cur = self.db.cursor()
players = self.get_players()
for p in players:
short_name = get_short_name(p.get_name(), [ x.get_name() for x in players ])
cur.execute("update player set short_name = ? where (lower(name) = ? or name = ?)", (short_name, p.get_name().lower(), p.get_name()))
cur.close()
self.db.commit();
def set_player_division(self, player_name, new_division):
cur = self.db.cursor()
cur.execute("update player set division = ? where (lower(name) = ? or name = ?)", (new_division, player_name.lower(), player_name))
cur.close()
self.db.commit()
# Put each player in a division. The active players are split into
# num_divisions divisions, each of which must have a multiple of
# division_size_multiple players. Names listed as strings in
# automatic_top_div_players are put in the top division. Beyond that,
# players are distributed among the divisions so as to make their sizes
# as equal as possible, while still preserving that the size of every
# division must be a multiple of division_size_multiple.
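    # For example, with 21 active players, num_divisions=3 and
    # division_size_multiple=3, division 1 gets 9 players (21/3 = 7, rounded
    # up to the next multiple of 3) and the remaining 12 players are split
    # 6/6 between divisions 2 and 3.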
def set_player_divisions(self, num_divisions, division_size_multiple, by_rating=True, automatic_top_div_players=[]):
players = self.get_players(exclude_withdrawn=True)
# Make a player_ranks map. Players with lower numbers go in higher
# divisions. This may be derived from the player's rating (in which
# case we need to negate it so highly-rated players go in higher
# divisions) or from the player's position in the standings.
player_ranks = dict()
if by_rating:
for p in self.get_players(exclude_withdrawn=False):
player_ranks[p.get_name()] = -p.get_rating()
else:
for s in self.get_standings():
player_ranks[s.name] = s.position
if len(players) % division_size_multiple != 0:
raise IllegalDivisionException()
div_players = [ [] for i in range(num_divisions) ]
remaining_players = []
for p in players:
if p.get_name() in automatic_top_div_players:
div_players[0].append(p)
else:
remaining_players.append(p)
remaining_players = sorted(remaining_players, key=lambda x : player_ranks[x.get_name()]);
# Number of players in the top division is at least
# num_players / num_divisions rounded up to the nearest multiple of
# division_size_multiple.
players_in_div = len(players) // num_divisions
if players_in_div % division_size_multiple > 0:
players_in_div += division_size_multiple - (players_in_div % division_size_multiple)
max_tables_in_div = (len(players) // division_size_multiple) // num_divisions
if (len(players) // division_size_multiple) % num_divisions > 0:
max_tables_in_div += 1
while len(div_players[0]) < players_in_div:
div_players[0].append(remaining_players[0])
remaining_players = remaining_players[1:]
# If division 1 now has an illegal number of players, which is possible
# if, for example, there are 64 players in total but 21 players have
# opted in to division 1, add enough players to satisfy the multiple.
if len(div_players[0]) % division_size_multiple > 0:
num_to_add = division_size_multiple - (len(div_players[0]) % division_size_multiple)
div_players[0] += remaining_players[0:num_to_add]
remaining_players = remaining_players[num_to_add:]
# Sanity check that we've got the right number of players left
if len(remaining_players) % division_size_multiple != 0:
raise IllegalDivisionException()
# Number of tables in total
num_tables = len(players) // division_size_multiple
# If we need an unequal number of players in each division, make
# sure the top divisions get more players
if num_tables % num_divisions > 0 and len(div_players[0]) < max_tables_in_div * division_size_multiple:
# Add another table to division 1
div_players[0] += remaining_players[0:division_size_multiple]
remaining_players = remaining_players[division_size_multiple:]
if num_divisions > 1:
# Distribute the remaining players among the remaining divisions as
# evenly as possible while keeping the size of each division a
# multiple of division_size_multiple.
if len(remaining_players) < division_size_multiple * (num_divisions - 1):
raise ImpossibleDivisionException()
# Number of tables in the divisions after division 1
num_tables = len(remaining_players) // division_size_multiple
# Distribute players amongst divisions, and if we have to have some
# divisions larger than others, make it the higher divisions.
for division in range(1, num_divisions):
div_players[division] += remaining_players[0:((num_tables // (num_divisions - 1)) * division_size_multiple)]
remaining_players = remaining_players[((num_tables // (num_divisions - 1)) * division_size_multiple):]
if num_tables % (num_divisions - 1) >= division:
# This division needs an extra tablesworth
div_players[division] += remaining_players[0:division_size_multiple]
remaining_players = remaining_players[division_size_multiple:]
# Finally, take the withdrawn players, which we haven't put into any
# division, and put them into the division appropriate for their rank.
div_rank_ranges = []
for div_index in range(num_divisions):
div_rank_ranges.append(
(min(player_ranks[x.get_name()] for x in div_players[div_index]),
max(player_ranks[x.get_name()] for x in div_players[div_index])
))
withdrawn_players = [x for x in self.get_players(exclude_withdrawn=False) if x.is_withdrawn()]
for p in withdrawn_players:
for div in range(num_divisions):
if div == num_divisions - 1 or player_ranks[p.get_name()] <= div_rank_ranges[div][1]:
div_players[div].append(p)
break
sql_params = []
division = 0
for l in div_players:
for p in l:
sql_params.append((division, int(p.get_name() in automatic_top_div_players), p.get_name().lower(), p.get_name()))
division += 1
cur = self.db.cursor()
cur.executemany("update player set division = ?, division_fixed = ? where (lower(name) = ? or name = ?)", sql_params)
cur.close()
self.db.commit()
def set_player_withdrawn(self, name, withdrawn):
withdrawn = bool(withdrawn)
cur = self.db.cursor()
cur.execute("update player set withdrawn = ? where name = ?", (1 if withdrawn else 0, name))
if cur.rowcount < 1:
self.db.rollback()
raise PlayerDoesNotExistException("Cannot change withdrawn status for player \"%s\" because no player by that name exists." % (name))
cur.close()
self.db.commit()
def withdraw_player(self, name):
# Set a player as withdrawn, so that the player is not included in the
# player list supplied to the fixture generator for future rounds.
self.set_player_withdrawn(name, 1)
def unwithdraw_player(self, name):
        # Change a player's withdrawn status to 0
self.set_player_withdrawn(name, 0)
def set_player_requires_accessible_table(self, name, value):
if self.db_version < (1,0,4):
return
cur = self.db.cursor()
cur.execute("update player set require_accessible_table = ? where name = ?", (value, name))
cur.close()
self.db.commit()
def get_player_requires_accessible_table(self, name):
if self.db_version < (1,0,4):
return False
cur = self.db.cursor()
cur.execute("select require_accessible_table from player where name = ?", (name,))
row = cur.fetchone()
if row is None:
raise PlayerDoesNotExistException()
retval = (row[0] != 0)
cur.close()
return retval
def set_player_preferred_table(self, name, value):
if self.db_version < (1, 0, 5):
return
cur = self.db.cursor()
cur.execute("update player set preferred_table = ? where name = ?", (value if value is not None else -1, name))
cur.close()
self.db.commit()
def get_player_preferred_table(self, name):
if self.db_version < (1, 0, 5):
return None
cur = self.db.cursor()
cur.execute("select preferred_table from player where name = ?", (name,))
row = cur.fetchone()
if row is None:
raise PlayerDoesNotExistException()
retval = row[0]
cur.close()
if retval is not None and retval < 0:
retval = None
return retval
def get_player_name(self, player_id):
cur = self.db.cursor();
cur.execute("select name from player where id = ?", (player_id,));
rows = cur.fetchall();
if len(rows) < 1:
raise PlayerDoesNotExistException();
cur.close();
self.db.commit();
        return rows[0][0];
def get_player_tournament_rating(self, name):
cur = self.db.cursor()
cur.execute("select tournament_rating from tournament_rating where (lower(name) = ? or name = ?)", (name.lower(), name))
row = cur.fetchone()
if row is None:
raise PlayerDoesNotExistException()
tournament_rating = row[0]
cur.close()
return tournament_rating
def get_tournament_rating_bonus_value(self):
cur = self.db.cursor()
cur.execute("select bonus from tr_opts")
row = cur.fetchone()
if row is None:
bonus = 50
else:
bonus = row[0]
cur.close()
return bonus
def get_tournament_rating_diff_cap(self):
cur = self.db.cursor()
cur.execute("select rating_diff_cap from tr_opts")
row = cur.fetchone()
if row is None:
diff_cap = 40
else:
diff_cap = row[0]
cur.close()
return diff_cap
def set_tournament_rating_config(self, bonus=50, diff_cap=40):
cur = self.db.cursor()
cur.execute("update tr_opts set bonus = ?, rating_diff_cap = ?", (bonus, diff_cap))
cur.close()
self.db.commit()
def get_show_tournament_rating_column(self):
return bool(self.get_int_attribute("showtournamentratingcolumn", 0))
def set_show_tournament_rating_column(self, value):
self.set_attribute("showtournamentratingcolumn", str(int(value)))
    # games is a list of Game objects, each carrying
    # (round_no, seq, table_no, division, game_type, p1, s1, p2, s2, tb).
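    # Merging is idempotent: rows already present in GAME with the same
    # scores are skipped, while brand-new results get a "new result" entry
    # in GAME_LOG and amended results get a "correction" entry.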
def merge_games(self, games):
try:
known_games = [x for x in games if x.are_players_known()];
pending_games = [x for x in games if not x.are_players_known()];
# Records to insert into game_staging, where we use NULL if the
# player isn't known yet
game_records = [(x.round_no, x.seq, x.table_no,
x.division, x.game_type,
x.p1.name if x.p1.is_player_known() else None, x.s1,
x.p2.name if x.p2.is_player_known() else None, x.s2,
x.tb) for x in games];
cur = self.db.cursor();
cur.execute("""create temporary table if not exists game_staging(
round_no int, seq int, table_no int, division int,
game_type text, name1 text, score1 integer,
name2 text, score2 integer, tiebreak integer)""");
cur.execute("""create temporary table if not exists game_staging_ids(
round_no int, seq int, table_no int, division int,
game_type text, p1 integer, score1 integer,
p2 integer, score2 integer, tiebreak integer)""");
cur.execute("""create temporary table if not exists game_pending_staging(
round_no int, seq int, seat int, player_id int)""");
cur.execute("delete from temp.game_staging");
cur.execute("delete from temp.game_staging_ids");
cur.execute("delete from temp.game_pending_staging");
cur.executemany("insert into temp.game_staging values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", game_records);
cur.execute("""insert into temp.game_staging_ids
select g.round_no, g.seq, g.table_no, g.division, g.game_type,
p1.id, g.score1, p2.id, g.score2, g.tiebreak
from temp.game_staging g left outer join player p1
on g.name1 = p1.name left outer join player p2
on g.name2 = p2.name""");
cur.execute("select count(*) from temp.game_staging_ids")
results = cur.fetchone()
# Remove any rows that are already in GAME
cur.execute("""delete from temp.game_staging_ids
where exists(select * from game g where
g.round_no = game_staging_ids.round_no and
g.seq = game_staging_ids.seq and
g.table_no = game_staging_ids.table_no and
g.division = game_staging_ids.division and
g.game_type = game_staging_ids.game_type and
g.p1 = game_staging_ids.p1 and
g.p1_score is game_staging_ids.score1 and
g.p2 = game_staging_ids.p2 and
g.p2_score is game_staging_ids.score2 and
g.tiebreak is game_staging_ids.tiebreak)""");
# Write "new result" logs for rows that don't have a matching
# entry in GAME for (round_no, table_no, game_type, p1, p2)
# with a non-NULL score but the entry we're writing has a
# non-NULL score.
cur.execute("""insert into game_log(
ts, round_no, round_seq, table_no, division, game_type,
p1, p1_score, p2, p2_score, tiebreak, log_type)
select current_timestamp, round_no, seq, table_no, division,
game_type, p1, score1, p2, score2, tiebreak, 1
from temp.game_staging_ids gs
where score1 is not null and score2 is not null and
p1 is not null and p2 is not null and
not exists(select * from game g where
g.round_no = gs.round_no and
g.seq = gs.seq and
g.table_no = gs.table_no and
g.division = gs.division and
g.game_type = gs.game_type and
g.p1 = gs.p1 and
g.p2 = gs.p2 and
g.p1_score is not null and
g.p2_score is not null)""");
# And write "correction" logs for rows that do have a matching
# entry in game for (round_no, table_no, game_type, p1, p2)
# with a non-NULL score.
cur.execute("""insert into game_log(
ts, round_no, round_seq, table_no, division, game_type,
p1, p1_score, p2, p2_score, tiebreak, log_type)
select current_timestamp, round_no, seq, table_no, division,
game_type, p1, score1, p2, score2, tiebreak, 2
from temp.game_staging_ids gs
where p1 is not null and p2 is not null and
exists(select * from game g where
g.round_no = gs.round_no and
g.seq = gs.seq and
g.table_no = gs.table_no and
g.division = gs.division and
g.game_type = gs.game_type and
g.p1 = gs.p1 and
g.p2 = gs.p2 and
g.p1_score is not null and
g.p2_score is not null)""");
# Insert rows into game if they're not there already
cur.execute("""insert or replace into game(
round_no, seq, table_no, division, game_type,
p1, p1_score, p2, p2_score, tiebreak)
select * from temp.game_staging_ids""");
# Insert into GAME_PENDING any sides of a game where the player
# is not yet known
pending_games_records = [];
for g in pending_games:
if not g.p1.is_player_known():
pending_games_records.append((g.round_no, g.seq, 1, g.p1.winner, g.p1.round_no, g.p1.round_seq));
if not g.p2.is_player_known():
pending_games_records.append((g.round_no, g.seq, 2, g.p2.winner, g.p2.round_no, g.p2.round_seq));
cur.executemany("""insert or replace into
game_pending
values (?, ?, ?, ?, ?, ?)""",
pending_games_records);
# If we inserted any rows into GAME whose (round_no, round_seq)
# corresponds to (from_round_no, from_round_seq) in GAME_PENDING,
# it means that we can fill in one or more unknown players in
# GAME. For example, if we inserted the result for a semi-final,
# then we might now be able to fill in the player ID for one side
# of the final.
cur.execute("""insert into temp.game_pending_staging
select gp.round_no, gp.seq, gp.seat,
case when gp.winner = 1 and gsi.score1 > gsi.score2
then gsi.p1
when gp.winner = 1 and gsi.score2 > gsi.score1
then gsi.p2
when gp.winner = 0 and gsi.score1 > gsi.score2
then gsi.p2
when gp.winner = 0 and gsi.score2 > gsi.score1
then gsi.p1
else NULL
end player_id
from game_staging_ids gsi, game_pending gp
on gsi.round_no = gp.from_round_no and
gsi.seq = gp.from_seq""");
cur.execute("select * from temp.game_pending_staging");
updcur = self.db.cursor();
for row in cur:
(round_no, seq, seat, player_id) = row;
updcur.execute("update game set p%d = ? where round_no = ? and seq = ? and p1_score is NULL and p2_score is NULL" % (seat), (player_id, round_no, seq));
self.db.commit();
except:
self.db.rollback();
raise;
def post_news_item(self, round_no, text, post_to_videprinter, post_to_web):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
log_type = LOG_TYPE_COMMENT
if post_to_videprinter:
log_type |= LOG_TYPE_COMMENT_VIDEPRINTER_FLAG
if post_to_web:
log_type |= LOG_TYPE_COMMENT_WEB_FLAG
cur.execute("""insert into game_log (ts, round_no, round_seq,
table_no, division, game_type, p1, p1_score, p2, p2_score,
tiebreak, log_type, comment) values (
current_timestamp, ?, null,
null, null, null, null, null, null, null,
null, ?, ?)""", (round_no, log_type, text))
cur.close()
self.db.commit()
def edit_news_item(self, seq, new_text, post_to_videprinter, post_to_web):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
log_type = LOG_TYPE_COMMENT
if post_to_videprinter:
log_type |= LOG_TYPE_COMMENT_VIDEPRINTER_FLAG
if post_to_web:
log_type |= LOG_TYPE_COMMENT_WEB_FLAG
cur.execute("update game_log set comment = ?, log_type = ? where seq = ? and (log_type & ?) != 0", (new_text, log_type, seq, LOG_TYPE_COMMENT))
cur.close()
self.db.commit()
def delete_round_div(self, round_no, division):
try:
cur = self.db.cursor()
cur.execute("delete from game where round_no = ? and division = ?", (round_no, division))
num_deleted = cur.rowcount
cur.execute("select count(*) from game where round_no = ?", (round_no,))
row = cur.fetchone()
games_left_in_round = -1
if row is not None and row[0] is not None:
games_left_in_round = row[0]
if games_left_in_round == 0:
cur.execute("delete from rounds where id = ?", (round_no,))
cur.close()
self.db.commit()
return num_deleted
except:
self.db.rollback()
raise
def delete_round(self, round_no):
latest_round_no = self.get_latest_round_no();
if latest_round_no is None:
raise NoGamesException()
if latest_round_no != round_no:
raise NotMostRecentRoundException()
try:
cur = self.db.cursor()
cur.execute("delete from game where round_no = ?", (latest_round_no,))
cur.execute("delete from rounds where id = ?", (latest_round_no,))
self.db.commit()
except:
self.db.rollback()
raise
def alter_games(self, alterations):
        # alterations is (round_no, seq, p1, p2, game_type), but the
        # executemany() call below needs (p1name_lower, p1name,
        # p2name_lower, p2name, game_type, round_no, seq), so reorder
        # (and lower-case) accordingly.
alterations_reordered = [(x[2].get_name().lower(), x[2].get_name(), x[3].get_name().lower(), x[3].get_name(), x[4], x[0], x[1]) for x in alterations];
cur = self.db.cursor();
cur.executemany("""
update game
set p1 = (select id from player where (lower(name) = ? or name = ?)),
p2 = (select id from player where (lower(name) = ? or name = ?)),
game_type = ?
where round_no = ? and seq = ?""", alterations_reordered);
rows_updated = cur.rowcount;
cur.close();
self.db.commit();
return rows_updated;
def get_player_from_name(self, name):
sql = "select p.name, p.rating, t.id, t.name, t.colour, p.short_name, p.withdrawn, p.division, p.division_fixed, p.id, %s, %s, %s from player p left outer join team t on p.team_id = t.id where (lower(p.name) = ? or p.name = ?)" % (
"0" if self.db_version < (0, 7, 7) else "p.avoid_prune",
"0" if self.db_version < (1, 0, 4) else "p.require_accessible_table",
"-1" if self.db_version < (1, 0, 5) else "p.preferred_table"
);
cur = self.db.cursor();
cur.execute(sql, (name.lower(), name));
row = cur.fetchone();
cur.close();
if row is None:
raise PlayerDoesNotExistException("Player with name \"%s\" does not exist." % name);
else:
if row[2] is not None:
team = Team(row[2], row[3], row[4])
else:
team = None
return Player(row[0], row[1], team, row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12]);
def get_player_from_id(self, player_id):
sql = "select p.name, p.rating, t.id, t.name, t.colour, p.short_name, p.withdrawn, p.division, p.division_fixed, %s, %s, %s from player p left outer join team t on p.team_id = t.id where p.id = ?" % (
"0" if self.db_version < (0, 7, 7) else "p.avoid_prune",
"0" if self.db_version < (1, 0, 4) else "p.require_accessible_table",
"-1" if self.db_version < (1, 0, 5) else "p.preferred_table"
);
cur = self.db.cursor();
cur.execute(sql, (player_id,));
row = cur.fetchone();
cur.close();
if row is None:
raise PlayerDoesNotExistException("No player exists with ID %d" % player_id);
else:
if row[2] is None:
team = None
else:
team = Team(row[2], row[3], row[4])
return Player(row[0], row[1], team, row[5], row[6], row[7], row[8], player_id, row[9], row[10], row[11]);
def get_latest_started_round(self):
cur = self.db.cursor()
sql = "select max(r.id) from rounds r where (exists(select * from completed_game cg where cg.round_no = r.id) or r.id = (select min(id) from rounds where id >= 0))"
cur.execute(sql)
row = cur.fetchone()
round_no = None
if row is not None and row[0] is not None:
round_no = row[0]
cur.close()
if round_no is None:
return None
return self.get_round(round_no)
def is_round_finished(self, round_no):
cur = self.db.cursor()
cur.execute("select count(*) from game g where round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
num_games = 0
else:
num_games = row[0]
cur.execute("select count(*) from completed_game cg where round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
num_completed_games = 0
else:
num_completed_games = row[0]
cur.close()
return (num_games > 0 and num_games == num_completed_games)
def round_contains_games_in_all_divisions(self, round_no):
ret = True
cur = self.db.cursor()
cur.execute("select d.division, count(g.round_no) from (select distinct(division) from player p) d left outer join game g on g.division = d.division and g.round_no = ? group by d.division", (round_no,))
for row in cur:
if row[1] == 0:
# There's at least one division that doesn't have
# games generated for it in this round, so don't
# consider this round to exist yet.
ret = False
break
cur.close()
return ret
def get_current_round(self, round_exists_when_all_divisions_have_games=False):
# Return the latest started round, or if that round is finished and
# there's a next round, the next round.
r = self.get_latest_started_round()
if r is None:
return None
if self.is_round_finished(r["num"]):
cur = self.db.cursor()
cur.execute("select min(id) from rounds where id > ?", (r["num"],))
row = cur.fetchone()
if row is not None and row[0] is not None:
next_round_no = row[0]
else:
next_round_no = None
cur.close()
if next_round_no is not None:
# There is a next round
if round_exists_when_all_divisions_have_games:
# Check that this round has at least one game in every
# division, otherwise we won't count it as a valid round
# because it hasn't been fully generated yet
if not self.round_contains_games_in_all_divisions(next_round_no):
next_round_no = None
if next_round_no is not None:
# The next round has been generated, so use that one
r = self.get_round(next_round_no)
else:
if round_exists_when_all_divisions_have_games:
if not self.round_contains_games_in_all_divisions(r["num"]):
r = None
return r
def get_latest_round_no(self):
cur = self.db.cursor();
cur.execute("select max(id) from rounds");
row = cur.fetchone();
if row is None:
cur.close();
return None;
else:
cur.close();
return row[0];
# Get the latest round number for which there is at least one game in
# this division
def get_latest_round_in_division(self, division):
cur = self.db.cursor()
cur.execute("select max(round_no) from game where division = ?", (division,))
row = cur.fetchone()
latest_round = None
if row is not None and row[0] is not None:
latest_round = row[0]
cur.close()
return latest_round
def get_played_unplayed_counts(self, round_no=None):
cur = self.db.cursor();
params = [];
conditions = "";
if round_no is not None:
conditions += "where round_no = ? ";
params.append(round_no);
sql = "select case when p1_score is NULL or p2_score is NULL then 0 else 1 end complete, count(*) from game " + conditions + " group by 1 order by 1";
if params:
cur.execute(sql, params);
else:
cur.execute(sql);
num_played = 0;
num_unplayed = 0;
for r in cur:
if r[0] == 0:
num_unplayed = r[1];
elif r[0] == 1:
num_played = r[1];
cur.close();
return (num_played, num_unplayed);
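    # get_played_unplayed_counts() returns e.g. (12, 3): twelve games with
    # both scores filled in and three still unplayed.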
def count_games_between(self, p1, p2):
sql = """select count(*) from game g
where g.p1 is not null and g.p2 is not null
and (g.p1 = ? and g.p2 = ?) or (g.p1 = ? and g.p2 = ?)"""
cur = self.db.cursor()
cur.execute(sql, (p1.get_id(), p2.get_id(), p2.get_id(), p1.get_id()))
row = cur.fetchone()
cur.close()
if row and row[0]:
return row[0]
else:
return 0
def get_games_between(self, round_no, player_name_1, player_name_2):
conditions = []
params = []
if round_no is not None:
conditions.append("g.round_no = ?")
params.append(round_no)
conditions.append("(((lower(p1.name) = ? or p1.name = ?) and (lower(p2.name) = ? or p2.name = ?)) or ((lower(p2.name) = ? or p2.name = ?) and (lower(p1.name) = ? or p1.name = ?)))")
params.append(player_name_1.lower())
params.append(player_name_1)
params.append(player_name_2.lower())
params.append(player_name_2)
params.append(player_name_1.lower())
params.append(player_name_1)
params.append(player_name_2.lower())
params.append(player_name_2)
conditions.append("(g.p1 is not null and g.p2 is not null)")
cur = self.db.cursor()
sql = """select g.round_no, g.seq, g.table_no, g.division, g.game_type,
g.p1, g.p1_score, g.p2, g.p2_score, g.tiebreak
from game g, player p1 on g.p1 = p1.id,
player p2 on g.p2 = p2.id
where g.p1 is not null and g.p2 is not null """;
for c in conditions:
sql += " and " + c
sql += "\norder by g.round_no, g.division, g.seq";
if len(params) == 0:
cur.execute(sql)
else:
cur.execute(sql, params)
games = []
for row in cur:
(round_no, game_seq, table_no, division, game_type, p1, p1_score, p2, p2_score, tb) = row
if tb is not None:
if tb:
tb = True
else:
tb = False
p1 = self.get_player_from_id(p1)
p2 = self.get_player_from_id(p2)
game = Game(round_no, game_seq, table_no, division, game_type, p1, p2, p1_score, p2_score, tb)
games.append(game);
cur.close();
self.db.commit();
return games;
def get_games(self, round_no=None, table_no=None, game_type=None, only_players_known=True, division=None, only_unplayed=False):
conditions = [];
params = [];
if round_no is not None:
conditions.append("g.round_no = ?");
params.append(round_no);
if table_no is not None:
conditions.append("g.table_no = ?");
params.append(table_no);
if game_type is not None:
conditions.append("g.game_type = ?");
params.append(game_type);
if only_players_known:
conditions.append("(g.p1 is not null and g.p2 is not null)");
if division is not None:
conditions.append("g.division = ?")
params.append(division)
if only_unplayed:
conditions.append("(g.p1_score is null or g.p2_score is null)")
cur = self.db.cursor();
sql = """select g.round_no, g.seq, g.table_no, g.division, g.game_type,
g.p1, g.p1_score, g.p2, g.p2_score, g.tiebreak,
gp1.winner as seat1_which, gp1.from_round_no as seat1_round_no,
gp1.from_seq seat1_seq,
gp2.winner as seat2_which, gp2.from_round_no as seat2_round_no,
gp2.from_seq as seat2_seq
from game g left outer join game_pending gp1
on g.round_no = gp1.round_no and g.seq = gp1.seq and gp1.seat=1
left outer join game_pending gp2
on g.round_no = gp2.round_no and g.seq = gp2.seq and gp2.seat=2
where 1=1 """;
for c in conditions:
sql += " and " + c;
sql += "\norder by g.round_no, g.division, g.seq";
if len(params) == 0:
cur.execute(sql);
else:
cur.execute(sql, params);
rounds = self.get_rounds();
games = [];
for row in cur:
(round_no, game_seq, table_no, division, game_type, p1, p1_score, p2, p2_score, tb, seat1_which, seat1_round_no, seat1_seq, seat2_which, seat2_round_no, seat2_seq) = row
if tb is not None:
if tb:
tb = True
else:
tb = False
for p_index in (1,2):
if p_index == 1:
p_id = p1;
else:
p_id = p2;
if p_id is None:
if p_index == 1:
winner = bool(seat1_which);
of_round_no = int(seat1_round_no);
of_seq = int(seat1_seq);
else:
winner = bool(seat2_which);
of_round_no = int(seat2_round_no);
of_seq = int(seat2_seq);
short_name = "R" + str(of_round_no)
p = PlayerPending(of_round_no, of_seq, winner, short_name);
else:
p = self.get_player_from_id(p_id);
if p_index == 1:
p1 = p;
else:
p2 = p;
game = Game(round_no, game_seq, table_no, division, game_type, p1, p2, p1_score, p2_score, tb)
games.append(game);
cur.close();
self.db.commit();
return games;
def ranked_query(self, query, sort_cols=[]):
pos = 0;
joint = 0;
cur = self.db.cursor();
cur.execute(query);
prev_sort_vals = None;
results = [];
for row in cur:
if sort_cols:
sort_vals = [];
for c in sort_cols:
sort_vals.append(row[c - 1]);
sort_vals = tuple(sort_vals);
if prev_sort_vals and sort_vals == prev_sort_vals:
joint += 1;
else:
pos += joint + 1;
joint = 0;
prev_sort_vals = sort_vals;
else:
pos += 1;
result = [pos];
for val in row:
result.append(val);
result = tuple(result);
results.append(result);
cur.close();
return results;
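    # Illustrative sketch of ranked_query(): with sort_cols=[2] and query
    # rows ('Ann', 10), ('Bob', 10), ('Cal', 8), the result is
    # [(1, 'Ann', 10), (1, 'Bob', 10), (3, 'Cal', 8)] - tied rows share a
    # position and the next distinct row's position skips accordingly.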
def get_int_attribute(self, name, defval=None):
value = self.get_attribute(name, defval);
if value is not None:
value = int(value);
return value;
def get_attribute(self, name, defval=None):
cur = self.db.cursor();
cur.execute("select value from options where name = ?", (name,));
value = cur.fetchone();
if value is None or value[0] is None:
value = defval;
else:
value = str(value[0]);
cur.close();
return value;
def set_attribute(self, name, value):
cur = self.db.cursor();
if re.match("^ *-?[0-9]+ *$", str(value)):
value = int(value);
cur.execute("insert or replace into options values (?, ?)", (name, value));
cur.close();
self.db.commit();
def set_teleost_colour_palette(self, value):
self.set_attribute("teleostcolourpalette", value)
def get_teleost_colour_palette(self):
return self.get_attribute("teleostcolourpalette", "Standard")
def get_auto_use_vertical(self):
return self.get_int_attribute("autousevertical", 0) != 0
def set_auto_use_vertical(self, value):
self.set_attribute("autousevertical", str(int(value)))
def set_teleost_animate_scroll(self, value):
self.set_attribute("teleostanimatescroll", str(int(value)))
def get_teleost_animate_scroll(self):
return self.get_int_attribute("teleostanimatescroll", 1) != 0
def set_auto_use_table_index(self, value):
self.set_attribute("autousetableindex", str(int(value)))
def get_auto_use_table_index(self):
return self.get_int_attribute("autousetableindex", 0) != 0
def set_auto_current_round_must_have_games_in_all_divisions(self, value):
self.set_attribute("autocurrentroundmusthavegamesinalldivisions", str(int(value)))
def get_auto_current_round_must_have_games_in_all_divisions(self):
return self.get_int_attribute("autocurrentroundmusthavegamesinalldivisions", 1) != 0
def get_rank_method(self):
return self.get_int_attribute("rankmethod", RANK_WINS_POINTS);
def is_ranking_by_wins(self):
return self.get_rank_method() in [ RANK_WINS_POINTS, RANK_WINS_SPREAD ]
def is_ranking_by_points(self):
return self.get_rank_method() in [ RANK_WINS_POINTS, RANK_POINTS ]
def is_ranking_by_spread(self):
return self.get_rank_method() == RANK_WINS_SPREAD
def set_rank_method(self, method):
if method not in [RANK_WINS_POINTS, RANK_WINS_SPREAD, RANK_POINTS]:
raise UnknownRankMethodException("Can't rank tourney by method %d because I don't know what that is." % method);
self.set_attribute("rankmethod", method);
def set_table_size(self, table_size):
if table_size not in [2,3]:
raise InvalidTableSizeException("Number of players to a table must be 2 or 3.");
self.set_attribute("tablesize", int(table_size));
def get_table_size(self):
return self.get_int_attribute("tablesize", 3);
def set_show_draws_column(self, value):
self.set_attribute("showdrawscolumn", 1 if value else 0)
def get_show_draws_column(self):
return True if self.get_int_attribute("showdrawscolumn", 0) != 0 else False
def get_num_divisions(self):
cur = self.db.cursor()
cur.execute("select max(division) + 1 from player")
row = cur.fetchone()
value = row[0]
if value is None:
value = 1
cur.close()
return value
def get_num_active_players(self, div_index=None):
cur = self.db.cursor()
if div_index is not None:
cur.execute("select count(*) from player where division = %d and withdrawn = 0" % (div_index))
else:
cur.execute("select count(*) from player where withdrawn = 0")
row = cur.fetchone()
value = int(row[0])
cur.close()
return value
def get_num_active_players_requiring_accessible_table(self):
if self.db_version < (1, 0, 4):
return 0
cur = self.db.cursor()
cur.execute("select count(*) from player where require_accessible_table != 0 and withdrawn = 0")
row = cur.fetchone()
if row and row[0] is not None:
count = row[0]
else:
count = 0
cur.close()
return count
def get_division_name(self, num):
name = self.get_attribute("div%d_name" % (num))
if name:
return name
else:
return get_general_division_name(num)
def set_division_name(self, num, name):
self.set_attribute("div%d_name" % (num), name)
def get_short_division_name(self, num):
return get_general_short_division_name(num)
def get_standings(self, division=None, exclude_withdrawn_with_no_games=False, calculate_qualification=True):
method = self.get_rank_method();
if method == RANK_WINS_POINTS:
orderby = "s.wins * 2 + s.draws desc, s.points desc, p.name";
rankcols = [10, 4];
elif method == RANK_WINS_SPREAD:
orderby = "s.wins * 2 + s.draws desc, s.points - s.points_against desc, p.name"
rankcols = [10, 6]
elif method == RANK_POINTS:
orderby = "s.points desc, p.name";
rankcols = [4];
else:
raise UnknownRankMethodException("This tourney's standings are ranked by method %d, which I don't recognise." % method);
# If we're also taking account of any finals matches, then finals
# performance has a higher sorting priority than anything else.
rank_finals = self.get_rank_finals()
if rank_finals:
rankcols = [13] + rankcols
orderby = "13 desc, " + orderby
orderby = "order by " + orderby
conditions = []
if division is not None:
conditions.append("s.division = %d " % (division))
if exclude_withdrawn_with_no_games:
conditions.append("(p.withdrawn = 0 or s.played > 0)")
if conditions:
where_clause = "where " + " and ".join(conditions)
else:
where_clause = ""
results = self.ranked_query("select p.name, s.played, s.wins, s.points, s.draws, s.points - s.points_against spread, s.played_first, p.rating, tr.tournament_rating, s.wins * 2 + s.draws, p.withdrawn, %s, %s from player_standings s, player p on p.id = s.id left outer join tournament_rating tr on tr.id = p.id %s %s " % (
"s.finals_form" if self.db_version >= (1, 0, 7) else "''",
"s.finals_points" if self.db_version >= (1, 0, 7) else "0",
where_clause, orderby), rankcols);
standings = [ StandingsRow(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], bool(x[11]), x[12], x[13]) for x in results ]
# If anyone has played any finals matches, don't calculate
# qualification because we're already past that and it wouldn't make
# sense anyway.
for s in standings:
if "W" in s.finals_form or "D" in s.finals_form or "L" in s.finals_form:
calculate_qualification = False
break
if division is not None and calculate_qualification:
# If we can, mark already-qualified players as such
qual_places = self.get_int_attribute("div%d_qualplaces" % (division), 0)
last_round = self.get_int_attribute("div%d_lastround" % (division), 0)
all_games_generated = (last_round != 0 and last_round == self.get_latest_round_in_division(division))
num_games_per_player = self.get_int_attribute("div%d_numgamesperplayer" % (division), 0)
draws_expected = self.get_show_draws_column()
if qual_places > 0 and num_games_per_player > 0:
qualification_standings = [
{
"pos" : x.position,
"name" : x.name,
"played" : x.played,
"win_points" : x.wins * 2 + x.draws,
"non_player" : (x.withdrawn or x.rating == 0)
}
for x in standings
]
# Look through the list for any withdrawn players or prunes,
# which will have a non_player value of True. Non-players
# aren't eligible to win anything, so any player ranked
# below a non-player gets bumped up for the purpose of
# deciding qualification.
num_non_players = 0
last_non_player_pos = None
for row in qualification_standings:
if row["non_player"]:
num_non_players += 1
last_non_player_pos = row["pos"]
elif num_non_players > 0:
# Any player below a non-player in the standings
# table gets bumped up one place. If they're below two
# non-players then they get bumped up two places,
# and so on.
if row["pos"] > last_non_player_pos:
row["pos"] -= num_non_players
# Now remove the non-players from the list we'll pass
# to player_has_qualified().
new_qual_standings = []
for row in qualification_standings:
if not row["non_player"]:
new_qual_standings.append(row)
qualification_standings = new_qual_standings
unplayed_games = [ g.get_player_names()
for g in self.get_games(
game_type="P", division=division,
only_unplayed=True
)
]
for row in qualification_standings:
if row["pos"] <= qual_places and method == RANK_WINS_POINTS:
# This player is in the qualification zone - work out if
# they are guaranteed to stay there
try:
qualified = qualification.player_has_qualified(
qualification_standings, row["name"],
unplayed_games, qual_places,
all_games_generated, num_games_per_player,
draws_expected)
except qualification.QualificationTimeoutException:
raise QualificationTimeoutException()
if qualified:
for standings_row in standings:
if standings_row.name == row["name"]:
standings_row.qualified = True
break
return standings
def get_logs_since(self, seq=None, include_new_games=False, round_no=None, maxrows=None):
cur = self.db.cursor();
sql = """select seq, datetime(ts, 'localtime') ts, round_no,
round_seq, table_no, game_type, p1.name p1, p1_score,
p2.name p2, p2_score, tiebreak, log_type, gl.division,
case when exists(
select * from game_log gl2
where gl.round_no = gl2.round_no
and gl.round_seq = gl2.round_seq
and gl.log_type > 0 and gl2.log_type > 0
and gl2.seq > gl.seq
) then 1 else 0 end superseded, %s
from game_log gl left outer join player p1 on gl.p1 = p1.id
left outer join player p2 on gl.p2 = p2.id where 1=1 """ % (
"comment" if self.db_version >= (1, 0, 6) else "null"
);
if seq is not None:
sql += " and seq > ?"
if round_no is not None:
sql += " and round_no = %d" % (round_no)
if not(include_new_games):
sql += " and log_type > 0";
sql += " order by seq desc";
if maxrows:
sql += " limit %d" % (maxrows)
if seq is not None:
cur.execute(sql, (seq,));
else:
cur.execute(sql)
results = cur.fetchall();
cur.close();
return results[::-1]
def get_teleost_modes(self):
cur = self.db.cursor()
cur.execute("select current_mode from teleost")
row = cur.fetchone()
if row is not None:
current_mode = row[0]
else:
current_mode = None
cur.close()
modes = []
for mode in teleost_modes:
mode_copy = mode.copy()
mode_copy["selected"] = False
modes.append(mode_copy)
if current_mode is not None and current_mode >= 0 and current_mode < len(modes):
modes[current_mode]["selected"] = True
return modes
def get_teleost_mode_info(self, mode_index):
if mode_index < 0 or mode_index >= len(teleost_modes):
return None
else:
return teleost_modes[mode_index]
def set_teleost_mode(self, mode):
cur = self.db.cursor();
cur.execute("update teleost set current_mode = ?", (mode,));
cur.close();
self.db.commit();
def define_teleost_modes(self, modes):
# No longer done by Teleost
return
def get_current_teleost_mode(self):
cur = self.db.cursor();
cur.execute("select current_mode from teleost");
row = cur.fetchone();
if row is None:
return teleost_mode_id_to_num.get("TELEOST_MODE_AUTO", 0)
return row[0];
def get_auto_effective_teleost_mode(self):
current_round = self.get_current_round(self.get_auto_current_round_must_have_games_in_all_divisions())
mode_name = None
if not current_round:
# There are no rounds yet, so just default to the standings table
mode_name = "TELEOST_MODE_STANDINGS"
else:
round_no = current_round["num"]
(played, unplayed) = self.get_played_unplayed_counts(round_no=round_no)
if played == 0 and unplayed == 0:
# No games in this round at all, so default to the videprinter
mode_name = "TELEOST_MODE_STANDINGS_VIDEPRINTER"
elif played == 0 and unplayed > 0:
# Fixtures announced, but no games played yet.
# If there is only one game, then show the standings/table
# results screen for this unplayed round, because it's likely
# this is the final and people want to know where they finished
# in the standings, so we don't want to show just the final
# fixture and nothing else.
# If there's more than one game then show the fixture list
# for this round.
if played + unplayed == 1:
mode_name = "TELEOST_MODE_STANDINGS_RESULTS"
elif self.get_auto_use_table_index():
mode_name = "TELEOST_MODE_TABLE_NUMBER_INDEX"
else:
mode_name = "TELEOST_MODE_FIXTURES"
elif played > 0 and unplayed == 0:
# All the games in this round have been played. Switch to the
# standings-and-results screen.
mode_name = "TELEOST_MODE_STANDINGS_RESULTS"
else:
# Otherwise, the round is in progress. Use the standings and
# videprinter display.
mode_name = "TELEOST_MODE_STANDINGS_VIDEPRINTER"
if not mode_name:
# Eh?
mode_name = "TELEOST_MODE_STANDINGS_VIDEPRINTER"
return teleost_mode_id_to_num.get(mode_name, 1)
def get_effective_teleost_mode(self):
# Same as get_current_teleost_mode() except that if it's auto then
# we look at the game state and return which view the display should
# be showing.
mode = self.get_current_teleost_mode();
if mode < 0 or mode >= len(teleost_modes):
return 1
else:
if teleost_modes[mode]["id"] == "TELEOST_MODE_AUTO":
mode = self.get_auto_effective_teleost_mode()
return mode
def is_videprinter_showing(self):
mode = self.get_effective_teleost_mode()
return teleost_modes[mode]["id"] == "TELEOST_MODE_STANDINGS_VIDEPRINTER"
def set_teleost_options(self, options):
# Nope
return
#if self.db_version < (0, 7, 7):
# print self.db_version
# return
#cur = self.db.cursor()
#options_rows = []
#for o in options:
# options_rows.append((o.mode, o.seq, o.name, o.control_type, o.desc, o.value))
# Insert option metadata
#cur.execute("delete from teleost_options")
#cur.executemany("insert into teleost_options(mode, seq, name, control_type, desc, default_value) values (?, ?, ?, ?, ?, ?)", options_rows)
#cur.close()
#self.db.commit()
def get_teleost_options(self, mode=None):
if self.db_version < (0, 7, 7):
return []
options = []
seq = -1
for opt in teleost_per_view_option_list:
seq += 1
cur = self.db.cursor()
if mode is not None and mode != opt[0]:
continue
cur.execute("select value from options where name = ?", (opt[1],))
row = cur.fetchone()
if row is None or row[0] is None:
value = opt[4] # default value
else:
if opt[2] == CONTROL_NUMBER:
value = int(row[0])
else:
value = row[0]
cur.close()
options.append(TeleostOption(
opt[0], # teleost mode
seq,
opt[1], # option name
opt[2], # control type
opt[3], # description
value # effective value
))
#if mode is not None:
# mode_clause = "where telo.mode = %d" % (mode)
#else:
# mode_clause = ""
#cur.execute("select telo.mode, telo.seq, telo.name, telo.control_type, telo.desc, telo.default_value, att.value from teleost_options telo left outer join options att on telo.name = att.name " + mode_clause + " order by telo.mode, telo.seq")
#for row in cur:
# options.append(TeleostOption(int(row[0]), int(row[1]), row[2], row[3], row[4], row[6] if row[6] is not None else row[5]))
#cur.close()
return options
def get_teleost_option_value(self, name):
if self.db_version < (0, 7, 7):
return None
#cur.execute("select telo.default_value, att.value from teleost_options telo left outer join options att on telo.name = att.name where telo.name = ?", (name,))
#row = cur.fetchone()
#value = None
#if row is not None:
# if row[1] is not None:
# value = row[1]
# else:
# value = row[0]
value = self.get_attribute(name, None)
if value is None:
for opt in teleost_per_view_option_list:
if opt[1] == name:
value = opt[4]
break
return value
def set_teleost_option_value(self, name, value):
self.set_attribute(name, value)
def get_num_games_to_play_by_table(self, round_no=None):
sql = """select table_no,
sum(case when p1_score is null and p2_score is null
then 1 else 0 end) games_left
from game""";
if round_no is not None:
sql += " where round_no = %d" % round_no;
sql += " group by table_no";
cur = self.db.cursor();
cur.execute(sql);
d = dict();
for (table, count) in cur:
d[table] = count;
cur.close();
return d;
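    # get_num_games_to_play_by_table() maps table number to games still to
    # play, e.g. {1: 0, 2: 3} when table 1 has finished and table 2 has
    # three games left.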
def get_max_games_per_table(self, round_no=None):
sql = """select max(game_count) from (
select table_no, count(*) game_count
from game""";
if round_no is not None:
sql += " where round_no = %d" % (round_no)
sql += " group by table_no) x"
cur = self.db.cursor()
cur.execute(sql)
row = cur.fetchone()
value = None
if row is not None:
if row[0] is not None:
value = row[0]
cur.close()
return value
def get_latest_game_times_by_table(self, round_no=None):
sql = "select table_no, max(ts) from game_log";
sql += " where log_type = 1";
if round_no is not None:
sql += " and round_no = %d" % round_no;
sql += " group by 1 order by 2";
cur = self.db.cursor();
cur.execute(sql);
d = dict();
for (table, ts) in cur:
d[table] = str(ts);
cur.close();
return d;
def get_teams(self):
sql = "select id, name, colour from team order by id"
cur = self.db.cursor()
cur.execute(sql)
teams = []
for (team_id, team_name, colour) in cur:
teams.append(Team(team_id, team_name, colour))
cur.close()
return teams
def get_team_from_id(self, team_id):
sql = "select id, name, colour from team where id = ?"
cur = self.db.cursor()
cur.execute(sql, (team_id,))
(team_id, team_name, colour) = cur.fetchone();
cur.close()
return Team(team_id, team_name, colour)
def set_player_teams(self, player_teams):
# argument is list of 2-tuples, containing player name and team ID
sql = "update player set team_id = ? where name = ?"
params = []
for pt in player_teams:
params.append((None if pt[1] is None or pt[1] < 0 else pt[1], pt[0]))
self.db.executemany(sql, params)
self.db.commit()
def get_player_teams(self):
sql = "select p.id, t.id from player p left outer join team t on p.team_id = t.id order by p.name"
cur = self.db.cursor()
cur.execute(sql)
player_team_ids = []
for (player_id, team_id) in cur:
player_team_ids.append((player_id, team_id))
cur.close()
player_teams = []
for (p_id, t_id) in player_team_ids:
if t_id is None or t_id < 0:
team = None
else:
team = self.get_team_from_id(t_id)
player = self.get_player_from_id(p_id)
player_teams.append((player, team))
return player_teams
def are_players_assigned_teams(self):
sql = "select count(*) from player where team_id is not null"
cur = self.db.execute(sql)
(num,) = cur.fetchone()
cur.close()
return num > 0
def get_team_scores(self, round_no=None):
sql = """
select t.id, sum(case when p1.team_id != t.id and p2.team_id != t.id then 0
when p1.team_id == p2.team_id then 0
when p1.team_id is null or p2.team_id is null then 0
when p1.team_id = t.id and g.p1_score > g.p2_score then 1
when p2.team_id = t.id and g.p2_score > g.p1_score then 1
else 0 end) score
from team t, game g, player p1, player p2
where g.p1 = p1.id
and g.p2 = p2.id
and g.game_type = 'P'
"""
if round_no is not None:
sql += " and g.round_no = %d" % round_no
sql += " group by t.id order by t.id"
cur = self.db.cursor();
cur.execute(sql)
team_score = []
for (team_id, score) in cur:
team_score.append((self.get_team_from_id(team_id), score))
cur.close()
return team_score
def store_fixgen_settings(self, fixgen_name, settings):
cur = self.db.cursor()
cur.execute("delete from fixgen_settings where fixgen = ?", (fixgen_name,))
rows = []
for name in settings:
rows.append((fixgen_name, name, settings[name]))
cur.executemany("insert into fixgen_settings values (?, ?, ?)", rows)
self.db.commit()
def get_fixgen_settings(self, fixgen_name):
cur = self.db.cursor()
cur.execute("select name, value from fixgen_settings where fixgen = ?", (fixgen_name,))
settings = dict()
for row in cur:
settings[row[0]] = row[1]
self.db.commit()
return settings
def close(self):
self.db.commit();
self.db.close();
def list_occupied_tables_in_round(self, round_no):
table_list = []
cur = self.db.cursor()
cur.execute("select distinct(table_no) from game where round_no = ?", (round_no,))
for row in cur:
if row[0] is not None:
table_list.append(row[0])
cur.close()
return table_list
def get_max_table_number_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(table_no) from game where round_no = ?", (round_no,))
retval = cur.fetchone()[0]
cur.close()
return retval
def get_max_game_seq_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select max(seq) from game where round_no = ?", (round_no,))
retval = cur.fetchone()[0]
cur.close()
return retval
def list_divisions_playing_in_round(self, round_no):
cur = self.db.cursor()
cur.execute("select distinct(division) from game where round_no = ?", (round_no,))
divs = []
for row in cur:
divs.append(row[0])
cur.close()
return divs
def get_num_active_accessible_players_in_divisions(self, div_set):
if self.db_version < (1, 0, 4) or len(div_set) == 0:
return 0
cur = self.db.cursor()
cur.execute("select count(*) from player where require_accessible_table != 0 and withdrawn = 0 and division in (%s)" % (",".join([str(x) for x in div_set])))
row = cur.fetchone()
if row is None or row[0] is None:
count = 0
else:
count = row[0]
cur.close()
return count
def first_acc_player(self, group):
group_acc_players = [ p for p in group if p.is_requiring_accessible_table() ]
if not group_acc_players:
return ""
else:
return sorted(group_acc_players, key=lambda x : x.get_name())[0].get_name()
# generated_groups is fixgen.GeneratedGroups object
def make_fixtures_from_groups(self, generated_groups):
fixtures = []
num_divisions = self.get_num_divisions()
players = self.get_active_players()
(all_accessible_tables, acc_default) = self.get_accessible_tables()
for rd in generated_groups.get_rounds():
round_no = rd.get_round_no()
# Find out which tables (if any) already have players on, so we
# can avoid giving out those table numbers
occupied_tables = set(self.list_occupied_tables_in_round(round_no))
# Build a list of the remaining players - that is, those players
# who are not in generated_groups and who have not had any games
# generated for them so far this round.
# Also, while we're at it, populate natural_div_to_table numbers
# based on the set of occupied table numbers and the number of
# groups in each division.
remaining_players = players[:]
# remaining_players is all the active players who aren't being
# assigned a game in this round right now.
# Also remove from remaining_players all players who have
# previously been assigned a table in this round. We'll be left
# with the players whose games are yet to be decided, but who
# might want to reserve their favourite table.
games_this_round = self.get_games(round_no=round_no)
for g in games_this_round:
for p in g.get_players():
if p in remaining_players:
remaining_players.remove(p)
start_round_seq = self.get_max_game_seq_in_round(round_no)
if start_round_seq is None:
next_round_seq = 1
else:
next_round_seq = start_round_seq + 1
candidate_tables = cttable.get_candidate_tables(rd, remaining_players, occupied_tables, all_accessible_tables, acc_default)
for ct in candidate_tables:
group_fixtures = self.make_fixtures_from_group(ct.get_group(),
ct.get_round_no(), ct.get_division(),
ct.get_table_no(), next_round_seq, ct.get_game_type(),
ct.get_repeat_threes())
next_round_seq += len(group_fixtures)
fixtures += group_fixtures
return fixtures
def make_fixtures_from_group(self, group, round_no, division, table_no, next_round_seq, game_type, repeat_threes):
group_fixtures = []
round_seq = next_round_seq
if len(group) % 2 == 1:
# If there are an odd number of players on this table, then
# each player takes a turn at hosting, and the player X places
# clockwise from the host plays the player X places
# anticlockwise from the host,
# for X in 1 .. (len(group) - 1) / 2.
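            # Illustrative sketch for a hypothetical five-player group
            # [A, B, C, D, E]: when A hosts (host=0), x=1 pairs B v E and
            # x=2 pairs C v D. Each player hosts once, so every pair meets
            # exactly once (10 fixtures in total).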
for host in range(len(group)):
for x in range(1, (len(group) - 1) // 2 + 1):
left = (host + len(group) + x) % len(group)
right = (host + len(group) - x) % len(group)
p1 = group[left]
p2 = group[right]
fixture = Game(round_no, round_seq, table_no, division, game_type, p1, p2)
group_fixtures.append(fixture)
round_seq += 1
if repeat_threes and len(group) == 3:
fixture = Game(round_no, round_seq, table_no, division, game_type, p2, p1)
group_fixtures.append(fixture)
round_seq += 1
elif len(group) == 4:
# Four players on each table. Don't do the general catch-all
# thing in the next branch, instead show the matches in a
# specific order so that the first two can be played
# simultaneously, then the next two, then the last two.
indices = [ (0,1), (2,3), (0,2), (1,3), (1,2), (3,0) ]
for (x, y) in indices:
fixture = Game(round_no, round_seq, table_no, division, game_type, group[x], group[y])
group_fixtures.append(fixture)
round_seq += 1
else:
# There are an even number of players. Each player X from
# X = 0 .. len(group) - 1 plays each player Y for
# Y in X + 1 .. len(group) - 1
for x in range(len(group)):
for y in range(x + 1, len(group)):
p1 = group[x]
p2 = group[y]
if round_seq % 2 == 0 and len(group) > 2:
(p1, p2) = (p2, p1)
fixture = Game(round_no, round_seq, table_no, division, game_type, p1, p2)
group_fixtures.append(fixture)
round_seq += 1
return group_fixtures
def get_tim_down_award_standings(self, division, num_losing_games):
cur = self.db.cursor()
# Get the set of all players who have lost at least num_losing_games
# games of type P
rows = cur.execute("select p.id, sum(case when (p.id = g.p1 and g.p1_score < g.p2_score) or (p.id = g.p2 and g.p2_score < g.p1_score) then 1 else 0 end) losses from player p, game g where g.game_type = 'P' and p.division = ? and (g.p1 = p.id or g.p2 = p.id) group by p.id", (division,))
eligible_player_ids = set()
for row in rows:
if row[1] >= num_losing_games:
eligible_player_ids.add(row[0])
cur.close()
# Get the list of opponents of these players
p_id_to_opp_list = {}
cur = self.db.cursor()
rows = cur.execute("select p_id, opp_id from heat_game_divided where p_id in (%s) order by p_id, opp_id" % (", ".join([ str(x) for x in eligible_player_ids ])))
for row in rows:
p_id = row[0]
opp_id = row[1]
p_id_to_opp_list[p_id] = p_id_to_opp_list.get(p_id, []) + [opp_id]
cur.close()
# Get the standings table, and for each eligible player, work out the
# average current standings position of their opponents
standings = self.get_standings(division, False, False)
player_name_to_id = {}
for p in self.get_players():
player_name_to_id[p.get_name()] = p.get_id()
p_id_to_standings_pos = {}
for s in standings:
p_id = player_name_to_id.get(s.name)
if p_id is not None:
p_id_to_standings_pos[p_id] = s.position
# For each eligible player, return a tuple containing
# (player object, list of opponent ranks, average opponent ranks)
results = []
for p_id in p_id_to_opp_list:
total_opp_rank = 0
num_opps = 0
rank_list = []
for opp_id in p_id_to_opp_list[p_id]:
pos = p_id_to_standings_pos.get(opp_id)
if pos is not None:
# We only count opponents which are in the current
# division
num_opps += 1
total_opp_rank += pos
rank_list.append(pos)
results.append((self.get_player_from_id(p_id), sorted(rank_list), float(total_opp_rank) / num_opps))
return sorted(results, key=lambda x : x[2])
def get_players_tuff_luck(self, num_losing_games):
p_id_to_losing_margins = dict()
cur = self.db.cursor()
rows = cur.execute("select case when p1_score > p2_score " +
"then p2 else p1 end p_id, " +
"case when tiebreak then 0 else abs(p1_score - p2_score) end margin " +
"from game " +
"where p1_score is not null and p2_score is not null " +
"and p1 is not null and p2 is not null and " +
"p1_score <> p2_score and " +
"game_type = 'P' " +
"order by 1")
for row in rows:
p_id = row[0]
margin = row[1]
p_id_to_losing_margins[p_id] = p_id_to_losing_margins.get(p_id, []) + [margin]
cur.close()
new_margin_map = dict()
for p_id in p_id_to_losing_margins:
# Limit each player to a maximum of num_losing_games, and remove
# from the list any player who has fewer losses than that
margin_list = p_id_to_losing_margins[p_id]
if len(margin_list) >= num_losing_games:
new_margin_map[p_id] = sorted(margin_list)[0:num_losing_games]
p_id_to_losing_margins = new_margin_map
# Return a list of tuples of the form (player, tuffness, margin_list)
tuffness_list = []
for p_id in p_id_to_losing_margins:
margin_list = p_id_to_losing_margins[p_id]
p = self.get_player_from_id(p_id)
if p:
tuffness_list.append((p, sum(margin_list), margin_list))
return sorted(tuffness_list, key=lambda x : x[1])
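    # Illustrative sketch for get_players_tuff_luck(): with
    # num_losing_games=3, a player whose three narrowest losses were by
    # 2, 3 and 5 points appears as (player, 10, [2, 3, 5]); players with
    # fewer than three losses are omitted. Tiebreak losses count as
    # margin 0.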
def get_players_overachievements(self, div_index):
# Get every player's standing position in this division
standings = self.get_standings(div_index)
p_id_to_standings_pos = dict()
p_id_to_rating = dict()
for s in standings:
player = self.get_player_from_name(s.name)
if player:
p_id_to_standings_pos[player.get_id()] = s.position
p_id_to_rating[player.get_id()] = s.rating
p_ids_by_rating = sorted(p_id_to_rating, key=lambda x : p_id_to_rating[x], reverse=True)
# Work out each player's seed, remembering that two players might have
# the same rating
p_id_to_seed = dict()
seed = 0
joint = 1
prev_rating = None
for p_id in p_ids_by_rating:
rating = p_id_to_rating[p_id]
if prev_rating is None or prev_rating != rating:
seed += joint
joint = 1
else:
joint += 1
p_id_to_seed[p_id] = seed
prev_rating = rating
overachievements = []
for p_id in p_id_to_standings_pos:
position = p_id_to_standings_pos[p_id]
seed = p_id_to_seed[p_id]
# We want positive numbers to indicate overachievement
overachievement = seed - position;
player = self.get_player_from_id(p_id)
if player:
overachievements.append((player, seed, position, overachievement))
return sorted(overachievements, key=lambda x : (x[3], x[1]), reverse=True)
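    # Illustrative sketch for get_players_overachievements(): a player
    # seeded 2nd by rating who finishes 5th gets overachievement
    # 2 - 5 = -3; finishing 1st from seed 6 gives +5. Biggest
    # overachievers sort first.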
# Return true if all player ratings in a division are the same, with the
# exception of players with a zero rating.
def are_player_ratings_uniform(self, div_index):
cur = self.db.cursor()
cur.execute("select p.id, p.rating from player p where p.rating > 0 and p.division = ?", (div_index,))
rating = None
found_difference = False
for row in cur:
if rating is None:
rating = row[1]
else:
if row[1] != rating:
found_difference = True
break
cur.close()
return not found_difference
def get_banner_text(self):
return self.get_attribute("teleost_banner_text", "")
def set_banner_text(self, text):
self.set_attribute("teleost_banner_text", text)
def clear_banner_text(self):
self.set_attribute("teleost_banner_text", "")
def get_game_table_revision_no(self, round_no):
cur = self.db.cursor()
cur.execute("select max(seq) from game_log where round_no = ?", (round_no,))
row = cur.fetchone()
if row is None or row[0] is None:
revision_no = 0
else:
revision_no = row[0]
cur.close()
return revision_no
def get_game_table_revision_time(self, round_no, revision_no):
cur = self.db.cursor()
cur.execute("select datetime(ts, 'localtime') ts from game_log where round_no = ? and seq = ?", (round_no, revision_no))
row = cur.fetchone()
if row is None or row[0] is None:
timestamp = None
else:
timestamp = row[0]
cur.close()
return timestamp
def query_result_to_game_dict_list(self, query):
cur = self.db.cursor()
cur.execute(query)
retlist = []
for row in cur:
retlist.append({
"round_num" : row[0],
"division" : row[3],
"name1" : row[4],
"name2" : row[5],
"score1" : row[6],
"score2" : row[7],
"tb" : row[8]
})
cur.close()
return retlist
def get_highest_winning_scores(self, max_rows):
return self.query_result_to_game_dict_list(
"""
select g.round_no, g.seq, g.table_no, g.division, p1.name, p2.name,
g.p1_score, g.p2_score, g.tiebreak, case when g.p1_score > g.p2_score then g.p1_score else g.p2_score end winning_score
from game g,
player p1 on g.p1 = p1.id,
player p2 on g.p2 = p2.id
where g.game_type = 'P'
and g.p1_score is not null and g.p2_score is not null
and g.p1_score <> g.p2_score
order by 10 desc, 1, 2 limit %d
""" % (max_rows)
)
def get_highest_losing_scores(self, max_rows):
return self.query_result_to_game_dict_list(
"""
select g.round_no, g.seq, g.table_no, g.division, p1.name, p2.name,
g.p1_score, g.p2_score, g.tiebreak,
case when g.p1_score < g.p2_score then g.p1_score else g.p2_score end losing_score
from game g,
player p1 on g.p1 = p1.id,
player p2 on g.p2 = p2.id
where g.game_type = 'P'
and g.p1_score is not null and g.p2_score is not null
and g.p1_score <> g.p2_score
order by 10 desc, 1, 2 limit %d
""" % (max_rows)
)
def get_highest_combined_scores(self, max_rows):
return self.query_result_to_game_dict_list(
"""
select g.round_no, g.seq, g.table_no, g.division, p1.name, p2.name,
g.p1_score, g.p2_score, g.tiebreak,
g.p1_score + g.p2_score combined_score
from game g,
player p1 on g.p1 = p1.id,
player p2 on g.p2 = p2.id
where g.game_type = 'P'
and g.p1_score is not null and g.p2_score is not null
and g.p1_score <> g.p2_score
order by 10 desc, 1, 2 limit %d
""" % (max_rows)
)
def rerate_players_by_id(self):
cur = self.db.cursor()
cur.execute("select id, rating from player where rating != 0 order by id")
player_ids = []
for row in cur:
player_ids.append(row[0])
player_ids_new_ratings = []
max_rating = 2000
min_rating = 1000
for idx in range(len(player_ids)):
pid = player_ids[idx]
if len(player_ids) == 1:
new_rating = max_rating
else:
new_rating = max_rating - float(idx * (max_rating - min_rating)) / (len(player_ids) - 1)
new_rating = round(new_rating, 2)
player_ids_new_ratings.append((new_rating, pid))
cur.executemany("update player set rating = ? where id = ?", player_ids_new_ratings)
cur.close()
self.db.commit()
self.set_attribute("autoratingbehaviour", RATINGS_GRADUATED);
def is_table_accessible(self, table_no):
if self.db_version < (1, 0, 4):
return False
else:
cur = self.db.cursor()
cur.execute("select table_no, accessible from board where table_no in (-1, ?)", (table_no,))
default_value = False
value = None
for row in cur:
if row[0] == -1:
default_value = bool(row[1])
elif row[1] is not None:
value = bool(row[1])
if value is None:
value = default_value
cur.close()
return value
def get_num_accessible_tables(self):
if self.db_version < (1, 0, 4):
return 0
cur = self.db.cursor()
cur.execute("select accessible from board where table_no = -1")
row = cur.fetchone()
if row:
if row[0] is not None and row[0] != 0:
# All tables are accessible except those listed, but we don't
# know how many tables there are.
cur.close()
return None
cur.close()
cur = self.db.cursor()
cur.execute("select count(*) from board where table_no >= 0 and accessible != 0")
row = cur.fetchone()
if row and row[0] is not None:
count = row[0]
else:
count = 0;
cur.close()
return count
# Return value is a pair (int list, bool).
# The bool is the default value for any table number not in the list, and
# the list contains those table numbers which don't agree with that boolean.
# For example, ([1,2,5], True) means all tables are accessible except
# 1, 2 and 5. ([17,18], False) means only tables 17 and 18 are accessible.
def get_accessible_tables(self):
if self.db_version < (1, 0, 4):
return ([], False)
accessible_tables = []
non_accessible_tables = []
defaultly_accessible_tables = []
default_value = False
cur = self.db.cursor()
cur.execute("select table_no, accessible from board order by table_no")
for row in cur:
if row[0] == -1:
default_value = bool(row[1])
elif row[1] is None:
defaultly_accessible_tables.append(row[0])
elif row[1] != 0:
accessible_tables.append(row[0])
else:
non_accessible_tables.append(row[0])
cur.close()
if default_value:
return (non_accessible_tables, True)
else:
return (accessible_tables, False)
def set_accessible_tables(self, table_list, all_except=False):
if self.db_version < (1, 0, 4):
return
cur = self.db.cursor()
# If we add any more columns to BOARD, we'll need to change this so
# we set accessible to NULL in all existing rows, then do an
# insert-or-replace.
cur.execute("delete from board")
# Remove duplicate table numbers
table_set = set(table_list)
table_list = sorted(list(table_set))
params = [ ( x, 0 if all_except else 1 ) for x in table_list ] + [ (-1, 1 if all_except else 0) ]
cur.executemany("insert into board (table_no, accessible) values (?, ?)", params)
cur.close()
self.db.commit()
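    # Illustrative round-trip (hypothetical table numbers):
    #   set_accessible_tables([17, 18])
    #       -> get_accessible_tables() == ([17, 18], False)
    #   set_accessible_tables([1, 2, 5], all_except=True)
    #       -> get_accessible_tables() == ([1, 2, 5], True)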
def get_unique_id(self):
unique_id = self.get_attribute("uniqueid", None)
if unique_id is None:
return self.get_name()
else:
return unique_id
def log_successful_upload(self):
if self.db_version >= (1, 0, 6):
self.db.execute("update upload_success set ts = current_timestamp")
self.db.commit()
def log_failed_upload(self, failure_type, message):
if self.db_version >= (1, 0, 6):
self.db.execute("insert into upload_error_log(ts, failure_type, message) values (current_timestamp, ?, ?)", (failure_type, message))
self.db.commit()
def get_last_successful_upload_time(self):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
cur.execute("select strftime('%s', ts) from upload_success")
            row = cur.fetchone()
            ts = None
            if row and row[0]:
                ts = int(row[0])
            cur.close()
            return ts
else:
return None
def get_last_failed_upload(self):
if self.db_version >= (1, 0, 6):
cur = self.db.cursor()
cur.execute("select strftime('%s', ts), failure_type, message from upload_error_log order by ts desc limit 1")
row = cur.fetchone()
upload_desc = None
if row:
(ts, failure_type, message) = row
if ts is not None:
ts = int(ts)
upload_desc = {}
upload_desc["ts"] = ts
upload_desc["failure_type"] = int(failure_type)
upload_desc["message"] = message
cur.close()
return upload_desc
else:
return None
def set_broadcast_private(self, value):
self.set_attribute("broadcastprivate", 1 if value else 0)
def is_broadcast_private(self):
return self.get_int_attribute("broadcastprivate", 0) != 0
def is_post_to_videprinter_set(self):
return self.get_int_attribute("posttovideprinter", 1) != 0
def is_post_to_web_set(self):
return self.get_int_attribute("posttoweb", 1) != 0
def set_post_to_videprinter(self, value):
return self.set_attribute("posttovideprinter", 1 if value else 0)
def set_post_to_web(self, value):
return self.set_attribute("posttoweb", 1 if value else 0)
def get_rank_finals(self):
return self.get_int_attribute("rankfinals", 1) != 0
def set_rank_finals(self, rank_finals):
return self.set_attribute("rankfinals", 1 if rank_finals else 0)
def get_5_3_table_sizes(num_players):
if num_players < 8:
return []
table_sizes = []
players_left = num_players
while players_left % 5 != 0:
table_sizes.append(3)
players_left -= 3
while players_left > 0:
table_sizes = [5] + table_sizes
players_left -= 5
return table_sizes
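# Illustrative examples:
#   get_5_3_table_sizes(7)  == []  (fewer than 8 players can't be split)
#   get_5_3_table_sizes(8)  == [5, 3]
#   get_5_3_table_sizes(16) == [5, 5, 3, 3]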
def get_game_types():
return [
{ "code" : "P", "name" : "Standard heat game" },
{ "code" : "QF", "name" : "Quarter-final" },
{ "code" : "SF", "name" : "Semi-final" },
{ "code" : "3P", "name" : "Third-place play-off" },
{ "code" : "F", "name" : "Final" } ,
{ "code" : "N", "name" : "Other game not counted in standings" }
]
unique_id_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
def generate_unique_id():
return "".join([ random.choice(unique_id_chars) for x in range(10) ])
def tourney_open(dbname, directory="."):
if not re.match("^[A-Za-z0-9_-]+$", dbname):
raise InvalidDBNameException("The tourney database name can only contain letters, numbers, underscores and hyphens.");
if directory:
if directory[-1] != os.sep:
directory += os.sep;
dbpath = directory + dbname + ".db";
if not os.path.exists(dbpath):
raise DBNameDoesNotExistException("The tourney \"%s\" does not exist." % dbname);
else:
tourney = Tourney(dbpath, dbname, versioncheck=True);
return tourney;
def tourney_create(dbname, directory="."):
if not re.match("^[A-Za-z0-9_-]+$", dbname):
raise InvalidDBNameException("The tourney database name can only contain letters, numbers, underscores and hyphens.");
if len(dbname) > 60:
raise InvalidDBNameException("The tourney database name may not be more than 60 characters long.")
if directory:
        if directory[-1] != os.sep:
            directory += os.sep;
dbpath = directory + dbname + ".db";
if os.path.exists(dbpath):
raise DBNameExistsException("The tourney \"%s\" already exists. Pick another name." % dbname);
tourney = Tourney(dbpath, dbname, versioncheck=False);
tourney.db_version = SW_VERSION_SPLIT;
tourney.db.executescript(create_tables_sql);
tourney.db.execute("insert into options values ('atropineversion', ?)", (SW_VERSION,))
# We now generate a unique ID for each tourney db file. This helps with the
# web broadcast feature. It stops us from accidentally uploading an
# existing tourney such that it overwrites and destroys a different but
# identically-named one on the website.
unique_id = generate_unique_id()
tourney.db.execute("insert into options values ('uniqueid', ?)", (unique_id,))
tourney.db.commit();
return tourney;
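# Illustrative usage (hypothetical names/paths):
#   t = tourney_create("myevent", "/home/user/tourneys")
#   ... add players, generate fixtures, enter results ...
#   t.close()
#   t = tourney_open("myevent", "/home/user/tourneys")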
def get_software_version():
return SW_VERSION
| 39.691004 | 388 | 0.586035 | 121,193 | 0.83495 | 0 | 0 | 129 | 0.000889 | 0 | 0 | 52,514 | 0.361791 |
dec0da50ce4a56fc78832aa67c6d71d1a1a1c437 | 995 | py | Python | t/plugin/plugin_020deploy_test.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | ["BSD-3-Clause"] | 1 | 2019-10-15T08:37:56.000Z | 2019-10-15T08:37:56.000Z | t/plugin/plugin_020deploy_test.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | ["BSD-3-Clause"] | null | null | null | t/plugin/plugin_020deploy_test.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) Jeremías Casteglione <[email protected]>
# See LICENSE file.
from glob import glob
from os import path, makedirs
def test_deploy_testing(testing_plugin):
    makedirs(path.join('tdata', 'deploy', 'plugin'), exist_ok = True)
    p = testing_plugin('testing', ns = '_sadmtest', deploy = True)
    print('-- deploy plugin: testing')
    p.deploy()

def test_all_deploy(testing_plugin):
    makedirs(path.join('tdata', 'deploy', 'plugin'), exist_ok = True)
    t = testing_plugin(ns = '_sadmtest', deploy = True, buildDeploy = False)
    for opt in t._env.profile.config.options('deploy'):
        if opt.startswith('env.'):
            pname = '.'.join(opt.split('.')[1:])
            if pname == 'testing':
                continue
            cfgdir = path.join('tdata', 'plugin', pname.replace('.', path.sep), 'config')
            for fn in sorted(glob(path.join(cfgdir, '*.ini'))):
                cfgfn = path.basename(fn)
                print('-- deploy plugin:', pname, cfgfn)
                p = testing_plugin(pname, deploy = True, buildCfg = cfgfn)
                p.deploy(mockCfg = cfgfn)
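# Illustrative mapping (derived from the loop above; not in the original
# source): a 'deploy' option named 'env.os.pkg' yields plugin name 'os.pkg',
# whose configs are searched under tdata/plugin/os/pkg/config/*.ini.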
| 36.851852 | 80 | 0.676382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.262048 |
dec30d56b6d0887d305f33e490a67d25b3dd39cd | 4,189 | py | Python | jsonReadWrite.py | nsobczak/ActivityWatchToCSV | cefb67e9f1c834008f2b39c0baf6c7c506327a4d | [
"Apache-2.0"
]
| null | null | null | jsonReadWrite.py | nsobczak/ActivityWatchToCSV | cefb67e9f1c834008f2b39c0baf6c7c506327a4d | [
"Apache-2.0"
]
| null | null | null | jsonReadWrite.py | nsobczak/ActivityWatchToCSV | cefb67e9f1c834008f2b39c0baf6c7c506327a4d | [
"Apache-2.0"
]
| null | null | null | """
##############
# jsonReader #
##############
"""
# Import
import json
from platform import system
from enum import Enum
from datetime import timedelta
# %% ____________________________________________________________________________________________________
# ____________________________________________________________________________________________________
# Functions
class Watcher(Enum):
    AFK = 1
    WEB = 2
    WINDOW = 3
def jsonReadWrite(pathToJson, pathWhereToCreateFile, watcher, printJsonFile=False):
    """
    Write csv formatted data into file

    :param pathToJson: path of the json file exported from ActivityWatch
    :type pathToJson: str
    :param pathWhereToCreateFile: path where to create the csv file
    :type pathWhereToCreateFile: str
    :param watcher: watcher type of the file
    :type watcher: Watcher
    :param printJsonFile: if True, pretty-print the parsed json to stdout
    :type printJsonFile: bool
    :return: status message
    :rtype: str
    """
    res = "file generated"
    with open(pathToJson) as json_file:
        dataDict = json.load(json_file)

    if system() != 'Linux' and system() != 'Windows':
        print("{} operating system not supported".format(system()))
    else:
        print("{} operating system detected".format(system()))
        if printJsonFile:
            print(json.dumps(dataDict, indent=4))

        csvFile = open(pathWhereToCreateFile, "w")  # "w" to write strings to the file

        if watcher == Watcher.AFK:
            print("watcher == Watcher.AFK")
            # duration: 956.016
            # id: 316
            # timestamp: 2019-01-28T10:28:13.770000+00:00
            # data: {'status': 'not-afk'}
            res = "Watcher.AFK detected => does nothing"
        elif watcher == Watcher.WEB:
            print("watcher == Watcher.WEB")
            # duration: 1.518
            # id: 3210
            # timestamp: 2019-01-31T18:01:45.794000+00:00
            # data: {'title': 'New Tab', 'url': 'about:blank', 'audible': False, 'tabCount': 3, 'incognito': False}
            res = "Watcher.WEB detected => does nothing"
        elif watcher == Watcher.WINDOW:
            print("watcher == Watcher.WINDOW")
            # duration: 4.017  # <= in seconds
            # id: 17
            # timestamp: 2019-01-28T01:11:55.570000+00:00
            # data: {'title': 'Terminal - arch@ArchDesktop:~', 'app': 'Xfce4-terminal'}  # <= app is the interesting thing

            # if printJsonFile:
            #     # check
            #     for d in dataDict:
            #         print('duration: ' + str(d['duration']))
            #         print('id: ' + str(d['id']))
            #         print('timestamp: ' + str(d['timestamp']))
            #         print('data: ' + str(d['data']))
            #         print('    title: ' + str(d['data']['title']))
            #         print('    app: ' + str(d['data']['app']))
            #         print('')

            handleWindowWatcher(csvFile, dataDict)
        else:
            res = "failed to identify watcher type"

    print(res)
    return res
def handleWindowWatcher(csvFile, dataDict):
    columnTitleRow = "date; app; duration(s); duration(h:m:s)\n"
    csvFile.write(columnTitleRow)

    # Aggregate the total duration per (date, app) pair.
    sortedData = {}
    for d in dataDict:
        # timestamp only beginning: "2019-01-28T01:11:32.482000+00:00"
        date = str(d['timestamp'])[:10]
        if not (date in sortedData):
            sortedData[date] = {}
        app = str(d['data']['app'])
        if not (app in sortedData[date]):
            sortedData[date][app] = 0
        duration = float(d['duration'])
        sortedData[date][app] += duration

    rows = ""
    for keyDate, valueAppDict in sortedData.items():
        for keyApp, valueDuration in valueAppDict.items():
            # date
            rows += keyDate + "; "
            # app
            rows += keyApp + "; "
            # duration, written with a decimal comma and millisecond precision
            valueDurationStr = str(valueDuration)
            leftPart, rightPart = valueDurationStr.split('.')
            valueDurationStr = leftPart + "," + rightPart[:3]
            rows += valueDurationStr + "; "
            rows += str(timedelta(seconds=valueDuration)) + "\n"
        rows += "\n"
    csvFile.write(rows)
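# Minimal usage sketch (illustrative; the file names are assumptions):
#   jsonReadWrite("aw-watcher-window-export.json", "report.csv", Watcher.WINDOW)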
| 30.136691 | 121 | 0.545476 | 59 | 0.014085 | 0 | 0 | 0 | 0 | 0 | 0 | 1,951 | 0.465744 |
dec3721fd14d0e108bf21ac443dd1b7796946011 | 286 | py | Python | pythons/reTesting.py | whats2000/coding-stuff-I-make-from-learning | d82809ba12f9d74bdb41eca5ba8f12f4cd96929e | [
"MIT"
]
| null | null | null | pythons/reTesting.py | whats2000/coding-stuff-I-make-from-learning | d82809ba12f9d74bdb41eca5ba8f12f4cd96929e | [
"MIT"
]
| null | null | null | pythons/reTesting.py | whats2000/coding-stuff-I-make-from-learning | d82809ba12f9d74bdb41eca5ba8f12f4cd96929e | [
"MIT"
]
| null | null | null | import re

test = input("Enter a string: ")
# Interpret backslash escapes in the input; the result must be assigned back.
test = test.encode('unicode-escape').decode().replace('\\\\', '\\')
print("Input is: " + test)
if re.match(test, "a"):
    print(test + " Match 1")
if re.match(test, "aa"):
    print(test + " Match 2")
if re.match(test, "aaaa"):
    print(test + " Match 3")
| 16.823529 | 60 | 0.562937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.34106 |
dec3efd877d3ce87cbe9fc53530bf43be70d8149 | 306 | py | Python | 2021-12-23/1.py | xiaozhiyuqwq/seniorschool | 7375038b00a6d2deaec5d70bfac25ddbf4d2558e | [
"Apache-2.0"
]
| null | null | null | 2021-12-23/1.py | xiaozhiyuqwq/seniorschool | 7375038b00a6d2deaec5d70bfac25ddbf4d2558e | [
"Apache-2.0"
]
| null | null | null | 2021-12-23/1.py | xiaozhiyuqwq/seniorschool | 7375038b00a6d2deaec5d70bfac25ddbf4d2558e | [
"Apache-2.0"
]
| null | null | null | # Initialisation
t = 0
# Enumerate every combination, looking for solutions of 6x + 5y + 4z = 50
for x in range(1, 9):
    for y in range(1, 11):
        for z in range(1, 13):
            if 6*x + 5*y + 4*z == 50:
                print("Found x =", x, ", y =", y, ", z =", z, ".")
                t = t + 1
print("Found {} results in total.".format(t))
#by xiaozhiyuqwq
#https://www.rainyat.work
#2021-12-23
| 21.857143 | 60 | 0.46732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.441011 |
dec6337c650811d4d0bda0d9fb32eb5e333b7344 | 15,088 | py | Python | Project-2/Ishan Pandey/job_compare.py | Mercury1508/IEEE-LEAD-2.0 | 91d24ccf2f24c62f92f0d23bcfcb3988e6d5acd8 | [
"MIT"
]
| 1 | 2021-06-03T16:08:33.000Z | 2021-06-03T16:08:33.000Z | Project-2/Ishan Pandey/job_compare.py | Mercury1508/IEEE-LEAD-2.0 | 91d24ccf2f24c62f92f0d23bcfcb3988e6d5acd8 | [
"MIT"
]
| 16 | 2021-04-27T12:58:03.000Z | 2021-05-28T14:02:14.000Z | Project-2/Ishan Pandey/job_compare.py | Mercury1508/IEEE-LEAD-2.0 | 91d24ccf2f24c62f92f0d23bcfcb3988e6d5acd8 | [
"MIT"
]
| 70 | 2021-04-26T13:48:35.000Z | 2021-05-28T21:04:34.000Z | # from job_scrapper_gui import naukri_gui
from tkinter import *
from PIL import ImageTk
import PIL.Image
import naukri_scrapper
import linkedin_scrapper
import indeed
import simply_hired_scrapper
from selenium import webdriver
root = Tk()
root.title("Compare Jobs")
root.geometry("1000x670")
root.configure(background='white')
# ----------------Header GUI----------------
# -------Creating All labels--------
logo = ImageTk.PhotoImage(PIL.Image.open("./Images/compare.png"))
header_frame = LabelFrame(bg="#135EC2",borderwidth=0,highlightthickness=0)
# logo
logo_label = Label(header_frame,image=logo,bg='#135EC2')
# job title container
job_title_frame = LabelFrame(header_frame,bg='#135EC2',borderwidth=0,highlightthickness=0)
job_label = Label(job_title_frame,text="JOB TITLE",bg='#135EC2',fg="white",font=('Bahnschrift Light', 13, 'normal'))
job_profile_box = Entry(job_title_frame, width=30, font=('Bahnschrift Light', 13, 'normal'))
# location container
location_frame = LabelFrame(header_frame,bg='#135EC2',borderwidth=0,highlightthickness=0)
location_label = Label(location_frame,text="LOCATION",bg='#135EC2',fg="white",font=('Bahnschrift Light', 13, 'normal'))
location_box = Entry(location_frame, width=30, font=('Bahnschrift Light', 13, 'normal'))
# compare button container
compare_button_frame = LabelFrame(header_frame,padx=50,pady=10,bg='#135EC2',borderwidth=0,highlightthickness=0)
# ------labels created-------
# ------packing labels-------
header_frame.pack(fill=X)
logo_label.grid(row=0,column=0,pady=3,padx=10)
job_title_frame.grid(row=0,column=1,pady=10,padx=20)
job_profile_box.pack()
job_label.pack(side=LEFT)
location_frame.grid(row=0,column=2,pady=10,padx=20)
location_box.pack()
location_label.pack(side=LEFT)
compare_button_frame.grid(row=1,column=1,columnspan=2)
# ------------Header GUI ends--------------
# ------------Compare JOBS GUI--------------
card_container = LabelFrame(root,bg="white",pady=20,borderwidth=0,highlightthickness=0)
card_container.pack(fill=X)
def visit(website):
    # Open the selected site's saved job link in a new Chrome window.
    if website == "Naukri.com":
        url = str(naukri_scrapper.card_link)
        driver = webdriver.Chrome("./chromedriver.exe")
        driver.get(url)
    if website == "indeed.com":
        url = str(indeed.card_link)
        driver = webdriver.Chrome("./chromedriver.exe")
        driver.get(url)
    if website == "Linkedin":
        url = str(linkedin_scrapper.card_link)
        driver = webdriver.Chrome("./chromedriver.exe")
        driver.get(url)
    if website == "SimplyHired":
        url = str(simply_hired_scrapper.card_link)
        driver = webdriver.Chrome("./chromedriver.exe")
        driver.get(url)
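# Illustrative call (added; assumes a search has already set card_link):
#   visit("Naukri.com")  # opens naukri_scrapper.card_link in a new Chrome window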
# ----Naukri.com GUI----
def naukri_gui():
    if len(naukri_scrapper.cards) == 0:
        error_card_frame = LabelFrame(card_container,bg="white")
        site_name_label = Label(error_card_frame,text="Naukri.com",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        no_result_label = Label(error_card_frame,text="No result",bg="white",font=('Bahnschrift Light', 12, 'bold'),fg="#135EC2",highlightthickness=0)
        message_frame = LabelFrame(error_card_frame,bg="white",borderwidth=0,highlightthickness=0)
        message_lable = Label(message_frame,text="OOPS!!! check your keyword and try again",bg="white",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",highlightthickness=0)
        error_card_frame.pack()
        site_name_label.pack(fill=X,pady=1)
        no_result_label.pack()
        message_frame.pack()
        message_lable.pack(side=LEFT)
    else:
        card_frame = LabelFrame(card_container,bg="white")
        site_name_frame = LabelFrame(card_frame,bg="white",borderwidth=0,highlightthickness=0)
        site_name_lable = Label(site_name_frame,text="Naukri.com",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        visit_button = Button(site_name_frame,text="Visit",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",bg="white",command=lambda: visit("Naukri.com"))
        head_frame = LabelFrame(card_frame,borderwidth=0,highlightthickness=0,bg="#135EC2")
        title_lable = Label(head_frame,text=naukri_scrapper.title,wraplength=300,font=('Bahnschrift Light', 13, 'bold'),bg="#135EC2",fg="white")
        company_name_lable = Label(head_frame,text=naukri_scrapper.company_name,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        job_location_label = Label(head_frame,text=naukri_scrapper.job_location,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        rating_label = Label(head_frame,text=f"Rating: {naukri_scrapper.rating}",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        salary_label = Label(head_frame,text=f"Salary: {naukri_scrapper.salary}",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        skills_label = Label(head_frame,text=f"Skills:\n{naukri_scrapper.skills}",wraplength=300,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        card_frame.grid(row=0,column=0,padx=100)
        site_name_frame.pack(fill=X)
        visit_button.pack(side=RIGHT,pady=1,padx=1)
        site_name_lable.pack(pady=1)
        head_frame.pack(fill=X)
        title_lable.pack()
        company_name_lable.pack()
        job_location_label.pack()
        rating_label.pack()
        salary_label.pack()
        skills_label.pack()
# ----Naukri.com GUI completed----
# --------indeed GUI--------------
def indeed_gui():
    if len(indeed.cards) == 0:
        error_card_frame = LabelFrame(card_container,bg="white")
        site_name_label = Label(error_card_frame,text="indeed.com",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        no_result_label = Label(error_card_frame,text="No result",bg="white",font=('Bahnschrift Light', 12, 'bold'),fg="#135EC2",highlightthickness=0)
        message_frame = LabelFrame(error_card_frame,bg="white",borderwidth=0,highlightthickness=0)
        message_lable = Label(message_frame,text="OOPS!!! check your keyword and try again",bg="white",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",highlightthickness=0)
        error_card_frame.grid(row=0,column=1)
        site_name_label.pack(fill=X,pady=1)
        no_result_label.pack()
        message_frame.pack()
        message_lable.pack(side=LEFT)
    else:
        card_frame = LabelFrame(card_container,bg="white")
        site_name_frame = LabelFrame(card_frame,bg="white",borderwidth=0,highlightthickness=0)
        site_name_lable = Label(site_name_frame,text="indeed.com",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        visit_button = Button(site_name_frame,text="Visit",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",bg="white",command=lambda: visit("indeed.com"))
        head_frame = LabelFrame(card_frame,borderwidth=0,highlightthickness=0,bg="#135EC2")
        title_lable = Label(head_frame,text=indeed.title,wraplength=300,font=('Bahnschrift Light', 13, 'bold'),bg="#135EC2",fg="white")
        company_name_lable = Label(head_frame,text=indeed.company_name,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        job_location_label = Label(head_frame,text=indeed.job_location,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        rating_label = Label(head_frame,text=f"Rating: {indeed.rating}",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        salary_label = Label(head_frame,text=f"Salary: {indeed.salary}",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        card_frame.grid(row=0,column=1,padx=50)
        site_name_frame.pack(fill=X)
        visit_button.pack(side=RIGHT,pady=1,padx=1)
        site_name_lable.pack(pady=1)
        head_frame.pack(fill=X)
        title_lable.pack(padx=50)
        company_name_lable.pack()
        job_location_label.pack()
        rating_label.pack()
        salary_label.pack()
# ----indeed GUI completed----
# -------Linkedin GUI---------
def linkedin_gui():
    if len(linkedin_scrapper.cards) == 0:
        error_card_frame = LabelFrame(card_container,bg="white")
        site_name_label = Label(error_card_frame,text="Linkedin",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        no_result_label = Label(error_card_frame,text="No result",bg="white",font=('Bahnschrift Light', 12, 'bold'),fg="#135EC2",highlightthickness=0)
        message_frame = LabelFrame(error_card_frame,bg="white",borderwidth=0,highlightthickness=0)
        message_lable = Label(message_frame,text="OOPS!!! check your keyword and try again",bg="white",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",highlightthickness=0)
        error_card_frame.grid(row=1,column=0)
        site_name_label.pack(fill=X,pady=1)
        no_result_label.pack()
        message_frame.pack()
        message_lable.pack(side=LEFT)
    else:
        card_frame = LabelFrame(card_container,bg="white")
        site_name_frame = LabelFrame(card_frame,bg="white",borderwidth=0,highlightthickness=0)
        # Label text fixed: the original said "indeed.com" here (copy-paste slip).
        site_name_lable = Label(site_name_frame,text="Linkedin",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        visit_button = Button(site_name_frame,text="Visit",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",bg="white",command=lambda: visit("Linkedin"))
        head_frame = LabelFrame(card_frame,borderwidth=0,highlightthickness=0,bg="#135EC2")
        title_lable = Label(head_frame,text=linkedin_scrapper.title,wraplength=300,font=('Bahnschrift Light', 13, 'bold'),bg="#135EC2",fg="white")
        company_name_lable = Label(head_frame,text=linkedin_scrapper.company_name,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        job_location_label = Label(head_frame,text=linkedin_scrapper.job_location,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        rating_label = Label(head_frame,text="Rating: Not Available",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        salary_label = Label(head_frame,text="Salary: Not Available",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        card_frame.grid(row=1,column=0,padx=100,pady=5)
        site_name_frame.pack(fill=X)
        visit_button.pack(side=RIGHT,pady=1,padx=1)
        site_name_lable.pack(pady=1)
        head_frame.pack(fill=X)
        title_lable.pack(padx=50)
        company_name_lable.pack(pady=5)
        job_location_label.pack(pady=5)
        rating_label.pack()
        salary_label.pack()
# --------Linkedin GUI completed------------
# ---------SimplyHired GUI------------------
def simply_hired_gui():
    if len(simply_hired_scrapper.cards) == 0:
        error_card_frame = LabelFrame(card_container,bg="white",borderwidth=0,highlightthickness=0)
        site_name_label = Label(error_card_frame,text="SimplyHired",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        no_result_label = Label(error_card_frame,text="No result",bg="white",font=('Bahnschrift Light', 12, 'bold'),fg="#135EC2",highlightthickness=0)
        message_frame = LabelFrame(error_card_frame,bg="white")
        message_lable = Label(message_frame,text="OOPS!!! check your keyword and try again",bg="white",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",highlightthickness=0)
        error_card_frame.grid(row=1,column=1)
        site_name_label.pack(fill=X,pady=1)
        no_result_label.pack()
        message_frame.pack()
        message_lable.pack(side=LEFT)
    else:
        card_frame = LabelFrame(card_container,bg="white")
        site_name_frame = LabelFrame(card_frame,bg="white",borderwidth=0,highlightthickness=0)
        # Label text fixed: the original said "indeed.com" here (copy-paste slip).
        site_name_lable = Label(site_name_frame,text="SimplyHired",bg="white",font=('Bahnschrift Light', 11, 'bold'),fg="#135EC2",highlightthickness=0)
        visit_button = Button(site_name_frame,text="Visit",font=('Bahnschrift Light', 10, 'bold'),fg="#135EC2",bg="white",command=lambda: visit("SimplyHired"))
        head_frame = LabelFrame(card_frame,borderwidth=0,highlightthickness=0,bg="#135EC2")
        title_lable = Label(head_frame,text=simply_hired_scrapper.title,wraplength=300,font=('Bahnschrift Light', 13, 'bold'),bg="#135EC2",fg="white")
        company_name_lable = Label(head_frame,text=simply_hired_scrapper.company_name,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        job_location_label = Label(head_frame,text=simply_hired_scrapper.job_location,wraplength=250,font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        rating_label = Label(head_frame,text="Rating: Not Available",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        salary_label = Label(head_frame,text="Salary: Not Available",font=('Bahnschrift Light', 11, 'normal'),bg="#135EC2",fg="white")
        card_frame.grid(row=1,column=1,padx=50,pady=5)
        site_name_frame.pack(fill=X)
        visit_button.pack(side=RIGHT,pady=1,padx=1)
        site_name_lable.pack(pady=1)
        head_frame.pack(fill=X)
        title_lable.pack(pady=5,padx=50)
        company_name_lable.pack(pady=5)
        job_location_label.pack(pady=5)
        rating_label.pack()
        salary_label.pack()
# ----------SimplyHired GUI completed-------------
def close():
    global card_container
    card_container.pack_forget()
    card_container = LabelFrame(root,bg="white",pady=20,borderwidth=0,highlightthickness=0)
    card_container.pack(fill=X)
is_both_empty = False

def compare():
    global card_container
    card_container.pack_forget()
    card_container = LabelFrame(root,bg="white",pady=20,borderwidth=0,highlightthickness=0)
    card_container.pack(fill=X)
    if not str(job_profile_box.get()) and not str(location_box.get()):
        global is_both_empty
        is_both_empty = True
        message_frame = LabelFrame(card_container,bg="white",padx=20,pady=30)
        message_label = Label(message_frame,text="Please enter Job Title and Location to compare",font=('Bahnschrift Light', 14, 'bold'),bg="white",fg="#135EC2")
        close_button = Button(card_container,text="Close",font=('Bahnschrift Light', 14, 'bold'),bg="white",fg="#135EC2",command=close)
        message_frame.pack(pady=20)
        message_label.pack()
        close_button.pack()
    else:
        job_profile = str(job_profile_box.get())
        location = str(location_box.get())
        naukri_scrapper.naukri_search(job_profile,location,0)
        indeed.indeed_search(job_profile,location,0)
        linkedin_scrapper.linkedin_search(job_profile,location,0)
        simply_hired_scrapper.simplyhired_search(job_profile,location,0)
        naukri_gui()
        indeed_gui()
        linkedin_gui()
        simply_hired_gui()
compare_button = Button(compare_button_frame,text="Compare",padx=15,pady=7,font=('Bahnschrift Light', 12, 'bold'),bg="white",fg="#135EC2",command=compare)
compare_button.pack()
root.mainloop() | 57.808429 | 176 | 0.696911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,716 | 0.246288 |
dec771d07fef05c3b6f9bec75d34bca56cffa1b5 | 3,648 | py | Python | data_augmentor/multidimension.py | ZhiangChen/tornado_ML | d8bded61a6a234ca67e31776bc8576c6c18f5621 | [
"MIT"
]
| 2 | 2018-12-09T20:08:51.000Z | 2021-02-01T17:49:14.000Z | data_augmentor/multidimension.py | ZhiangChen/tornado_ML | d8bded61a6a234ca67e31776bc8576c6c18f5621 | [
"MIT"
]
| 1 | 2019-11-15T06:15:03.000Z | 2019-11-15T06:15:03.000Z | multidimension.py | DREAMS-lab/data_augmentor | f204ee3af805b17d9946d3d5c6e7ca62398f09e5 | [
"MIT"
]
| null | null | null | """
multispectrum
Zhiang Chen,
Feb, 2020
"""
import gdal
import cv2
import numpy as np
import math
import os
class MultDim(object):
    def __init__(self):
        pass

    def readTiff(self, tif_file, channel=3):
        self.ds = gdal.Open(tif_file)
        B = self.ds.GetRasterBand(1).ReadAsArray()
        G = self.ds.GetRasterBand(2).ReadAsArray()
        R = self.ds.GetRasterBand(3).ReadAsArray()
        if channel == 3:
            cv2.imwrite("./datasets/Rock/R.png", R)
            cv2.imwrite("./datasets/Rock/G.png", G)
            cv2.imwrite("./datasets/Rock/B.png", B)
        if channel == 5:
            RE = self.ds.GetRasterBand(4).ReadAsArray()
            NIR = self.ds.GetRasterBand(5).ReadAsArray()
            cv2.imwrite("./datasets/Rock/R.png", R)
            cv2.imwrite("./datasets/Rock/G.png", G)
            cv2.imwrite("./datasets/Rock/B.png", B)
            cv2.imwrite("./datasets/Rock/RE.png", RE)
            cv2.imwrite("./datasets/Rock/NIR.png", NIR)

    def readImage(self, image_file, channel=3):
        if channel == 1:
            img = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE).astype(np.uint8)
            img = np.expand_dims(img, axis=2)
        else:
            img = cv2.imread(image_file).astype(np.uint8)
        return img

    def cat(self, data1, data2):
        return np.append(data1, data2, axis=2)

    def split(self, data, step, path, overlap=0):
        dim = data.shape
        mult = np.zeros((dim[0]+step, dim[1]+step, dim[2]))
        mult[:dim[0], :dim[1], :] = data
        xn = int(math.ceil(float(dim[0])/(step-overlap)))
        yn = int(math.ceil(float(dim[1])/(step-overlap)))
        for i in range(xn):
            for j in range(yn):
                x = i*(step-overlap)
                y = j*(step-overlap)
                dt = mult[x:x+step, y:y+step, :]
                name = os.path.join(path, str(i)+"_"+str(j)+".npy")
                np.save(name, dt)
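    # Illustrative sizing (added; not in the original source): with
    # data.shape == (1000, 800, 6), step=400 and overlap=10, split() saves
    # ceil(1000/390) * ceil(800/390) = 3 * 3 = 9 zero-padded tiles, each of
    # shape (400, 400, 6), under path.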
    def addAnnotation(self, mult_path, annotation_path, save_path):
        ann_files = os.listdir(annotation_path)
        mult_files = os.listdir(mult_path)
        for f in ann_files:
            if f in mult_files:
                ann_name = os.path.join(annotation_path, f)
                mult_name = os.path.join(mult_path, f)
                ann = np.load(ann_name)
                mult = np.load(mult_name)
                data = np.append(mult, ann, axis=2)
                save_name = os.path.join(save_path, f)
                np.save(save_name, data)
if __name__ == '__main__':
    st = MultDim()

    # split tiles
    """
    st.readTiff("./datasets/C3/Orth5.tif", channel=5)
    R = st.readImage("./datasets/Rock/R.png", channel=1)
    G = st.readImage("./datasets/Rock/G.png", channel=1)
    B = st.readImage("./datasets/Rock/B.png", channel=1)
    RE = st.readImage("./datasets/Rock/RE.png", channel=1)
    NIR = st.readImage("./datasets/Rock/NIR.png", channel=1)
    DEM = st.readImage("./datasets/Rock/DEM3.png", channel=3)
    data = st.cat(R, G)
    data = st.cat(data, B)
    data = st.cat(data, RE)
    data = st.cat(data, NIR)
    data = st.cat(data, DEM)
    st.split(data, 400, "./datasets/Rock/mult_10", overlap=10)
    """

    # add annotations
    # st.addAnnotation("./datasets/Rock/mult/", "./datasets/Rock_test/npy/", "./datasets/Rock_test/mult")

    #"""
    RGB = st.readImage("./datasets/C3/C3.png", channel=3)
    DEM = st.readImage("./datasets/C3/C3_dem.png", channel=3)
    data = st.cat(RGB, DEM)
    st.split(data, 400, './datasets/C3/rgbd', overlap=10)
    #"""
#st.addAnnotation("./datasets/C3/rgbd/", "./datasets/C3_test/npy/", "./datasets/C3_test/rocks") | 35.076923 | 105 | 0.569353 | 2,394 | 0.65625 | 0 | 0 | 0 | 0 | 0 | 0 | 1,170 | 0.320724 |
dec7a039bcd25fbdc90d163100b8870f23f0424a | 399 | py | Python | tests/serializers/test_template_data_serializers.py | banillie/bcompiler-engine | 26b63b6e630e2925175ffff6b48b42d70f7ba544 | [
"MIT"
]
| 2 | 2019-09-23T08:51:48.000Z | 2019-10-14T08:44:28.000Z | tests/serializers/test_template_data_serializers.py | banillie/bcompiler-engine | 26b63b6e630e2925175ffff6b48b42d70f7ba544 | [
"MIT"
]
| 27 | 2019-07-08T11:15:03.000Z | 2020-06-22T15:47:25.000Z | tests/serializers/test_template_data_serializers.py | yulqen/bcompiler-engine | 40eff19e04eabacac991bb34d31a7e7a7d6b729a | [
"MIT"
]
| 1 | 2019-09-07T14:05:16.000Z | 2019-09-07T14:05:16.000Z | import json
from engine.serializers.template import TemplateCellSerializer
def test_template_cell_to_dict(template_cell_obj):
    assert template_cell_obj.to_dict()["sheet_name"] == "Test Sheet 1"

def test_template_cell_serializer(template_cell_obj):
    json_output = json.dumps(template_cell_obj, cls=TemplateCellSerializer)
    assert json.loads(json_output)["sheet_name"] == "Test Sheet 1"
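# Illustrative round trip (derived from the assertions above; any other
# serialized keys are unknown here):
#   json.dumps(cell, cls=TemplateCellSerializer) -> '{"sheet_name": "Test Sheet 1", ...}'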
| 30.692308 | 75 | 0.802005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.130326 |