Dataset schema (one record per source file; column name, dtype, value range; ⌀ marks nullable columns):

hexsha: stringlengths 40-40
size: int64 5-2.06M
ext: stringclasses 10 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3-248
max_stars_repo_name: stringlengths 5-125
max_stars_repo_head_hexsha: stringlengths 40-78
max_stars_repo_licenses: listlengths 1-10
max_stars_count: int64 1-191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24-24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24-24 ⌀
max_issues_repo_path: stringlengths 3-248
max_issues_repo_name: stringlengths 5-125
max_issues_repo_head_hexsha: stringlengths 40-78
max_issues_repo_licenses: listlengths 1-10
max_issues_count: int64 1-67k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24-24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24-24 ⌀
max_forks_repo_path: stringlengths 3-248
max_forks_repo_name: stringlengths 5-125
max_forks_repo_head_hexsha: stringlengths 40-78
max_forks_repo_licenses: listlengths 1-10
max_forks_count: int64 1-105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24-24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24-24 ⌀
content: stringlengths 5-2.06M
avg_line_length: float64 1-1.02M
max_line_length: int64 3-1.03M
alphanum_fraction: float64 0-1
count_classes: int64 0-1.6M
score_classes: float64 0-1
count_generators: int64 0-651k
score_generators: float64 0-1
count_decorators: int64 0-990k
score_decorators: float64 0-1
count_async_functions: int64 0-235k
score_async_functions: float64 0-1
count_documentation: int64 0-1.04M
score_documentation: float64 0-1
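Each record below is laid out as the metadata columns, then the file content, then a single trailing line carrying the thirteen numeric columns from avg_line_length through score_documentation, in order. As a minimal, hypothetical sketch of how rows with this schema might be filtered (assuming each row is already available as a Python dict keyed by the column names above; no particular loading library is implied):

# Hypothetical filter over rows of this schema; `rows` is assumed to be an
# iterable of dicts keyed by the column names listed above.
def keep_record(row, min_size=100, min_doc_score=0.1):
    # Keep non-trivial Python files that carry at least some documentation.
    return (
        row["lang"] == "Python"
        and row["size"] >= min_size
        and row["score_documentation"] >= min_doc_score
    )

# Example usage with a hypothetical iterable of rows:
# selected = [r for r in rows if keep_record(r)]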
hexsha: fd107c2da7b904339edb0406a2c83b2ca10efada | size: 8,850 | ext: py | lang: Python
max_stars_repo: coadd_mdetsims/tests/test_shear_bias_meas.py | beckermr/metadetect-coadding-sims | 15ccaec353aa61c69ac9d78d1dfca8ce25bca3cf | ["BSD-3-Clause"] | count: null | min: null | max: null
max_issues_repo: coadd_mdetsims/tests/test_shear_bias_meas.py | beckermr/metadetect-coadding-sims | 15ccaec353aa61c69ac9d78d1dfca8ce25bca3cf | ["BSD-3-Clause"] | count: null | min: null | max: null
max_forks_repo: coadd_mdetsims/tests/test_shear_bias_meas.py | beckermr/metadetect-coadding-sims | 15ccaec353aa61c69ac9d78d1dfca8ce25bca3cf | ["BSD-3-Clause"] | count: null | min: null | max: null
import numpy as np
import pytest
from ..shear_bias_meas import (
measure_shear_metadetect, estimate_m_and_c,
estimate_m_and_c_patch_avg)
def test_measure_shear_metadetect_smoke():
dtype = [('flags', 'i4'), ('wmom_g', 'f4', (2,)),
('wmom_s2n', 'f4'), ('wmom_T_ratio', 'f4'), ('ormask', 'i4')]
rng = np.random.RandomState(seed=10)
res = {}
for key in ['noshear', '1p', '1m', '2p', '2m']:
res[key] = np.zeros(10, dtype=dtype)
for col in ['wmom_s2n', 'wmom_T_ratio']:
res[key][col] = rng.uniform(size=10) * 10
res[key]['wmom_g'][:, 0] = rng.uniform(size=10) * 10
res[key]['wmom_g'][:, 1] = rng.uniform(size=10) * 10
res[key]['flags'] = (rng.uniform(size=10) * 1.5).astype(np.int32)
res[key]['ormask'] = (rng.uniform(size=10) * 1.5).astype(np.int32)
val = measure_shear_metadetect(
res, s2n_cut=2, t_ratio_cut=1, cut_interp=False)
assert val is not None
for key in ['1p', '1m', '1', '2p', '2m', '2']:
gkey = 'g' + key
nkey = 'n' + key
reskey = key if ('p' in key or 'm' in key) else 'noshear'
ind = int(key[0]) - 1
q = ((res[reskey]['wmom_s2n'] > 2) &
(res[reskey]['wmom_T_ratio'] > 1) &
(res[reskey]['flags'] == 0))
assert np.mean(res[reskey]['wmom_g'][q, ind]) == val[gkey][0]
assert len(res[reskey]['wmom_g'][q, ind]) == val[nkey][0]
val = measure_shear_metadetect(
res, s2n_cut=2, t_ratio_cut=1, cut_interp=True)
assert val is not None
for key in ['1p', '1m', '1', '2p', '2m', '2']:
gkey = 'g' + key
nkey = 'n' + key
reskey = key if ('p' in key or 'm' in key) else 'noshear'
ind = int(key[0]) - 1
q = ((res[reskey]['wmom_s2n'] > 2) &
(res[reskey]['wmom_T_ratio'] > 1) &
(res[reskey]['flags'] == 0) &
(res[reskey]['ormask'] == 0))
assert np.mean(res[reskey]['wmom_g'][q, ind]) == val[gkey][0]
assert len(res[reskey]['wmom_g'][q, ind]) == val[nkey][0]
@pytest.mark.parametrize('kind', ['noshear', '1p', '1m', '2p', '2m'])
def test_measure_shear_metadetect_none(kind):
dtype = [('flags', 'i4'), ('wmom_g', 'f4', (2,)),
('wmom_s2n', 'f4'), ('wmom_T_ratio', 'f4'), ('ormask', 'i4')]
rng = np.random.RandomState(seed=10)
res = {}
for key in ['noshear', '1p', '1m', '2p', '2m']:
res[key] = np.zeros(10, dtype=dtype)
for col in ['wmom_s2n', 'wmom_T_ratio']:
res[key][col] = rng.uniform(size=10) * 10
res[key]['wmom_g'][:, 0] = rng.uniform(size=10) * 10
res[key]['wmom_g'][:, 1] = rng.uniform(size=10) * 10
res[key]['flags'] = (rng.uniform(size=10) * 1.5).astype(np.int32)
res[key]['ormask'] = (rng.uniform(size=10) * 1.5).astype(np.int32)
for key in ['noshear', '1p', '1m', '2p', '2m']:
# we make sure it would return something if not for the bad flags etc
val = measure_shear_metadetect(
res, s2n_cut=2, t_ratio_cut=1, cut_interp=False)
assert val is not None
val = measure_shear_metadetect(
res, s2n_cut=2, t_ratio_cut=1, cut_interp=True)
assert val is not None
# now test some columns
for col, col_val in [
('flags', 1), ('wmom_s2n', 0), ('wmom_T_ratio', 0)]:
old_col_val = res[key][col].copy()
res[key][col] = col_val
val = measure_shear_metadetect(
res, s2n_cut=2, t_ratio_cut=1, cut_interp=False)
assert val is None
res[key][col] = old_col_val
old_flags = res[key]['ormask'].copy()
res[key]['ormask'] = 1
val = measure_shear_metadetect(
res, s2n_cut=2, t_ratio_cut=1, cut_interp=True)
assert val is None
res[key]['ormask'] = old_flags
@pytest.mark.parametrize('g_true,step,swap12', [
(0.01, 0.02, True),
(0.01, 0.02, False),
(0.005, 0.05, False)])
def test_estimate_m_and_c(g_true, step, swap12):
rng = np.random.RandomState(seed=10)
def _shear_meas(g_true, _step, e1, e2):
if _step == 0:
_gt = g_true * (1.0 + 0.01)
cadd = 0.05 * 10
else:
_gt = g_true
cadd = 0.0
if swap12:
return np.mean(e1) + cadd + _step*10, np.mean(10*(_gt+_step)+e2)
else:
return np.mean(10*(_gt+_step)+e1), np.mean(e2) + cadd + _step*10
sn = 0.01
n_gals = 100000
n_sim = 1000
pres = np.zeros(n_sim, dtype=[
('g1p', 'f4'), ('n1p', 'f4'),
('g1m', 'f4'), ('n1m', 'f4'),
('g1', 'f4'), ('n1', 'f4'),
('g2p', 'f4'), ('n2p', 'f4'),
('g2m', 'f4'), ('n2m', 'f4'),
('g2', 'f4'), ('n2', 'f4')])
mres = np.zeros(n_sim, dtype=[
('g1p', 'f4'), ('n1p', 'f4'),
('g1m', 'f4'), ('n1m', 'f4'),
('g1', 'f4'), ('n1', 'f4'),
('g2p', 'f4'), ('n2p', 'f4'),
('g2m', 'f4'), ('n2m', 'f4'),
('g2', 'f4'), ('n2', 'f4')])
for i in range(n_sim):
e1 = rng.normal(size=n_gals) * sn
e2 = rng.normal(size=n_gals) * sn
g1, g2 = _shear_meas(g_true, 0, e1, e2)
g1p, g2p = _shear_meas(g_true, step, e1, e2)
g1m, g2m = _shear_meas(g_true, -step, e1, e2)
pres['g1p'][i] = g1p
pres['g1m'][i] = g1m
pres['g1'][i] = g1
pres['g2p'][i] = g2p
pres['g2m'][i] = g2m
pres['g2'][i] = g2
pres['n1p'][i] = n_gals
pres['n1m'][i] = n_gals
pres['n1'][i] = n_gals
pres['n2p'][i] = n_gals
pres['n2m'][i] = n_gals
pres['n2'][i] = n_gals
g1, g2 = _shear_meas(-g_true, 0, e1, e2)
g1p, g2p = _shear_meas(-g_true, step, e1, e2)
g1m, g2m = _shear_meas(-g_true, -step, e1, e2)
mres['g1p'][i] = g1p
mres['g1m'][i] = g1m
mres['g1'][i] = g1
mres['g2p'][i] = g2p
mres['g2m'][i] = g2m
mres['g2'][i] = g2
mres['n1p'][i] = n_gals
mres['n1m'][i] = n_gals
mres['n1'][i] = n_gals
mres['n2p'][i] = n_gals
mres['n2m'][i] = n_gals
mres['n2'][i] = n_gals
m, _, c, _ = estimate_m_and_c(
pres, mres, g_true, swap12=swap12, step=step)
assert np.allclose(m, 0.01)
assert np.allclose(c, 0.05)
@pytest.mark.parametrize('g_true,step,swap12', [
(0.01, 0.02, True),
(0.01, 0.02, False),
(0.005, 0.05, False)])
def test_estimate_m_and_c_patch_avg(g_true, step, swap12):
rng = np.random.RandomState(seed=10)
def _shear_meas(g_true, _step, e1, e2):
if _step == 0:
_gt = g_true * (1.0 + 0.01)
cadd = 0.05 * 10
else:
_gt = g_true
cadd = 0.0
if swap12:
return np.mean(e1) + cadd + _step*10, np.mean(10*(_gt+_step)+e2)
else:
return np.mean(10*(_gt+_step)+e1), np.mean(e2) + cadd + _step*10
sn = 0.01
n_gals = 100000
n_sim = 1000
pres = np.zeros(n_sim, dtype=[
('g1p', 'f4'), ('n1p', 'f4'),
('g1m', 'f4'), ('n1m', 'f4'),
('g1', 'f4'), ('n1', 'f4'),
('g2p', 'f4'), ('n2p', 'f4'),
('g2m', 'f4'), ('n2m', 'f4'),
('g2', 'f4'), ('n2', 'f4')])
mres = np.zeros(n_sim, dtype=[
('g1p', 'f4'), ('n1p', 'f4'),
('g1m', 'f4'), ('n1m', 'f4'),
('g1', 'f4'), ('n1', 'f4'),
('g2p', 'f4'), ('n2p', 'f4'),
('g2m', 'f4'), ('n2m', 'f4'),
('g2', 'f4'), ('n2', 'f4')])
for i in range(n_sim):
e1 = rng.normal(size=n_gals) * sn
e2 = rng.normal(size=n_gals) * sn
g1, g2 = _shear_meas(g_true, 0, e1, e2)
g1p, g2p = _shear_meas(g_true, step, e1, e2)
g1m, g2m = _shear_meas(g_true, -step, e1, e2)
pres['g1p'][i] = g1p
pres['g1m'][i] = g1m
pres['g1'][i] = g1
pres['g2p'][i] = g2p
pres['g2m'][i] = g2m
pres['g2'][i] = g2
pres['n1p'][i] = n_gals
pres['n1m'][i] = n_gals
pres['n1'][i] = n_gals
pres['n2p'][i] = n_gals
pres['n2m'][i] = n_gals
pres['n2'][i] = n_gals
g1, g2 = _shear_meas(-g_true, 0, e1, e2)
g1p, g2p = _shear_meas(-g_true, step, e1, e2)
g1m, g2m = _shear_meas(-g_true, -step, e1, e2)
mres['g1p'][i] = g1p
mres['g1m'][i] = g1m
mres['g1'][i] = g1
mres['g2p'][i] = g2p
mres['g2m'][i] = g2m
mres['g2'][i] = g2
mres['n1p'][i] = n_gals
mres['n1m'][i] = n_gals
mres['n1'][i] = n_gals
mres['n2p'][i] = n_gals
mres['n2m'][i] = n_gals
mres['n2'][i] = n_gals
m, _, c, _ = estimate_m_and_c_patch_avg(
pres, mres, g_true, swap12=swap12, step=step)
assert np.allclose(m, 0.01)
assert np.allclose(c, 0.05)
| 34.570313 | 77 | 0.490508 | 0 | 0 | 0 | 0 | 6,773 | 0.765311 | 0 | 0 | 1,365 | 0.154237 |
hexsha: fd1270311d2747042f749172a656ddde2d001d75 | size: 1,221 | ext: py | lang: Python
max_stars_repo: src/topologies/simple.py | sevenEng/Resolving-Consensus | a508701e19bd4ec0df735f5b094487983272dbb6 | ["MIT"] | count: null | min: null | max: null
max_issues_repo: src/topologies/simple.py | sevenEng/Resolving-Consensus | a508701e19bd4ec0df735f5b094487983272dbb6 | ["MIT"] | count: null | min: null | max: null
max_forks_repo: src/topologies/simple.py | sevenEng/Resolving-Consensus | a508701e19bd4ec0df735f5b094487983272dbb6 | ["MIT"] | count: null | min: null | max: null
from mininet.net import Mininet
from mininet.node import Controller, UserSwitch, IVSSwitch, OVSSwitch
from mininet.log import info, setLogLevel
setLogLevel("info")
import importlib
switch_num = 1
def add_switch(net):
global switch_num
res = "s%s" % str(switch_num)
switch_num += 1
return net.addSwitch(res)
host_num = 1
def add_host(net):
global host_num
res = "h%s" % str(host_num)
host_num += 1
return net.addHost(res)
client_num = 1
def add_client(net):
global client_num
res = "mc%s" % str(client_num)
client_num += 1
return net.addHost(res)
def setup(n="3", nc="10"):
n = int(n)
nc = int(nc)
# - Core setup -------
net = Mininet(controller=Controller)
info("*** Adding controller\n")
net.addController("c0")
info("*** Adding switches\n")
sw = add_switch(net)
info("*** Adding hosts and links\n")
cluster = [add_host(net) for _ in range(n)]
clients = [add_client(net) for _ in range(nc)]
for host in cluster:
net.addLink(host, sw)
for client in clients:
net.addLink(client, sw)
net.start()
cluster_ips = [host.IP() for host in cluster]
return (net, cluster, clients)
| 18.784615 | 69 | 0.633907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.108927 |
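A hedged driver for the topology module above; the import name and the pingAll/stop calls are assumptions (standard Mininet API and a root shell with Mininet installed are required), not part of the original file:

# Hypothetical driver for the setup() function above, assuming the module is
# importable as `simple` and Mininet is available on the host.
from simple import setup

net, cluster, clients = setup(n="3", nc="2")
try:
    net.pingAll()                       # standard Mininet connectivity check
    print([h.IP() for h in cluster])    # IPs of the consensus cluster nodes
finally:
    net.stop()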
hexsha: fd1396e2ed5013e365c0832fe7ee283e5e1bda20 | size: 856 | ext: py | lang: Python
max_stars_repo: lunchapi/permissions.py | pesusieni999/lunchapplication | 2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef | ["MIT"] | count: null | min: null | max: null
max_issues_repo: lunchapi/permissions.py | pesusieni999/lunchapplication | 2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef | ["MIT"] | count: null | min: null | max: null
max_forks_repo: lunchapi/permissions.py | pesusieni999/lunchapplication | 2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef | ["MIT"] | count: null | min: null | max: null
from rest_framework import permissions
__author__ = "Ville Myllynen"
__copyright__ = "Copyright 2017, Ohsiha Project"
__credits__ = ["Ville Myllynen"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Ville Myllynen"
__email__ = "[email protected]"
__status__ = "Development"
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
Assumes the model instance has an `author` attribute.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request,
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
# Instance must have an attribute named `author`.
return obj.author == request.user
| 31.703704 | 73 | 0.712617 | 556 | 0.649533 | 0 | 0 | 0 | 0 | 0 | 0 | 426 | 0.497664 |
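A short, hedged sketch of wiring the permission class above into a Django REST Framework view; permission_classes is the standard DRF hook, while the model and serializer names here are placeholders that do not come from the original project:

# Hypothetical view using IsOwnerOrReadOnly; Lunch and LunchSerializer are
# assumed names, not taken from the original repository.
from rest_framework import generics, permissions
from lunchapi.permissions import IsOwnerOrReadOnly

class LunchDetail(generics.RetrieveUpdateDestroyAPIView):
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly)
    queryset = Lunch.objects.all()          # assumed model with an `author` field
    serializer_class = LunchSerializer      # assumed serializer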
hexsha: fd139df441393f7003990ec4900a0d7e845e7545 | size: 3,994 | ext: py | lang: Python
max_stars_repo: MsSampleFmpDevicePkg/Tools/ConvertCerToH.py | kuqin12/mu_plus | f78c66d0508a7b884b3f73d7c86648656bf07fbb | ["BSD-2-Clause"] | count: null | min: null | max: null
max_issues_repo: MsSampleFmpDevicePkg/Tools/ConvertCerToH.py | kuqin12/mu_plus | f78c66d0508a7b884b3f73d7c86648656bf07fbb | ["BSD-2-Clause"] | count: null | min: null | max: null
max_forks_repo: MsSampleFmpDevicePkg/Tools/ConvertCerToH.py | kuqin12/mu_plus | f78c66d0508a7b884b3f73d7c86648656bf07fbb | ["BSD-2-Clause"] | count: null | min: null | max: null
##
# Copyright (c) 2016, Microsoft Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##
# This is a sample tool and should not be used in production environments.
#
# This tool takes in sample certificates (.cer files) and outputs a .h file containing the
# certificates.
###
import re
import sys
print "This is a sample tool and should not be used in production environments\n"
raw_input('Press any key to continue . . .\n')
### Parse input parameters ###
if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "-H" or sys.argv[1] == "-?":
print "This tool creates Certs.h with one or more certificates\n"
print "usage: ConvertCerToH.py <CertFiles...>"
print "example: ConvertCerToH.py SAMPLE_DEVELOPMENT.cer SAMPLE_PRODUCTION.cer"
print "example: ConvertCerToH.py SAMPLE_DEVELOPMENT1.cer SAMPLE_DEVELOPMENT2.cer SAMPLE_PRODUCTION.cer"
print "example: ConvertCerToH.py SAMPLE_PRODUCTION.cer"
sys.exit(-1)
if len(sys.argv) > 11:
print "Error: Currently limiting number of certificates to 10"
print "usage: ConvertCerToH.py <CertFiles...>"
sys.exit(-1)
### Process Certificates ###
Certs = []
sys.argv.remove(sys.argv[0])
for fileName in sys.argv:
print "Processing", fileName
# Open cert file
file = open(fileName, "rb")
# Read binary file
Cert = file.read()
# Close cert file
file.close()
CertHex = map(hex,map(ord,Cert))
Cert = re.sub(r'\'|\[|\]', "", str(CertHex))
Certs.append(Cert)
### Write certificates to Certs.h ###
# Open header file
HeaderFile = open("Certs.h", "w")
HeaderFile.write("//\n")
HeaderFile.write("// Certs.h\n")
HeaderFile.write("//\n\n")
HeaderFile.write("//\n")
HeaderFile.write("// These are the binary DER encoded Product Key certificates \n")
HeaderFile.write("// used to sign the UEFI capsule payload.\n")
HeaderFile.write("//\n\n")
index = 1
for Cert in Certs:
HeaderFile.write("CONST UINT8 CapsulePublicKeyCert"+str(index)+"[] =\n")
HeaderFile.write("{\n")
HeaderFile.write(Cert)
HeaderFile.write("\n};\n\n")
index = index + 1
HeaderFile.write("CONST CAPSULE_VERIFICATION_CERTIFICATE CapsuleVerifyCertificates[] = {\n")
index = 1
for Cert in Certs:
HeaderFile.write(" {CapsulePublicKeyCert"+str(index)+", sizeof(CapsulePublicKeyCert"+str(index)+")},\n")
index = index + 1
HeaderFile.write("};\n\n")
HeaderFile.write("CONST CAPSULE_VERIFICATION_CERTIFICATE_LIST CapsuleVerifyCertificateList = {\n")
HeaderFile.write(" sizeof(CapsuleVerifyCertificates)/sizeof(CAPSULE_VERIFICATION_CERTIFICATE),\n")
HeaderFile.write(" CapsuleVerifyCertificates\n")
HeaderFile.write("};\n\n")
# Close header file
HeaderFile.close()
print "\nCopy the output file Certs.h to folder MsSampleFmpDevicePkg\Library\CapsuleKeyBaseLib"
| 33.847458 | 107 | 0.73986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,912 | 0.729094 |
hexsha: fd16505a157e42c0095152f4d85cf3c6e225daa0 | size: 2,899 | ext: py | lang: Python
max_stars_repo: webtraversallibrary/app_info.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | ["Apache-2.0"] | count: null | min: null | max: null
max_issues_repo: webtraversallibrary/app_info.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | ["Apache-2.0"] | count: null | min: null | max: null
max_forks_repo: webtraversallibrary/app_info.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | ["Apache-2.0"] | count: null | min: null | max: null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Extract version information from several possible sources in a pre-assigned priority:
* environment
* git context
* .gitinfo file
* default value
"""
import os
import subprocess
def get_commit_hash(short: bool = False) -> str:
"""
Extract commit hash from either the environment, or the ``.gitinfo`` file.
If nothing works, returns "unknown".
"""
git_command = ["git", "rev-parse", "HEAD"]
if short:
git_command.insert(2, "--short")
try:
return subprocess.check_output(git_command, universal_newlines=True, stderr=subprocess.DEVNULL).strip()
except subprocess.CalledProcessError:
pass
# This may be a source copy without .git information. Look for .gitinfo file
try:
with open(".gitinfo") as f:
return f.readline().strip()
except OSError:
pass
# As a last resort, return a special value
return "unknown"
def get_branch_name() -> str:
"""
Extract branch name from either the environment, or the ``.gitinfo`` file.
Returns "unknown" if it couldn't be found.
"""
# If explicitly set, take the environment variable
if "BRANCH_NAME" in os.environ:
return os.environ["BRANCH_NAME"]
# Otherwise, try to get the context from current directory git metadata
try:
branch_name = subprocess.check_output(
["bash", "-c", 'git branch --list | grep -e "*" | cut -c 3-'],
universal_newlines=True,
stderr=subprocess.DEVNULL,
).strip()
if branch_name:
# The piped command chain will "succeed" even if "git branch --list" returns nothing
# In that case, branch_name will be empty and should not be returned
return branch_name
except subprocess.CalledProcessError:
pass
# If that's not available either, look for the .gitinfo file that gets added to Docker containers
try:
with open(".gitinfo") as f:
return f.readlines()[-1].strip()
except OSError:
pass
# As a last resort, return a special value
return "unknown"
| 32.573034 | 111 | 0.67506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,872 | 0.64574 |
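A minimal usage sketch for the two helpers above; the import path is an assumption based on the file's location, and the printed values depend entirely on the environment the code runs in:

# Minimal usage of the helpers above; both fall back to "unknown" when no git
# metadata, .gitinfo file, or BRANCH_NAME environment variable is available.
from webtraversallibrary.app_info import get_commit_hash, get_branch_name

print(f"commit: {get_commit_hash(short=True)}")
print(f"branch: {get_branch_name()}")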
hexsha: fd166522613f70d68340ce87a9e8c0bff5f78c6b | size: 4,863 | ext: py | lang: Python
max_stars_repo: aws_reporting.py | jeberhar/DevOpsLearner | f9dce9cf2dc6e75494c1372a339e9f13e836102d | ["MIT"] | count: null | min: null | max: null
max_issues_repo: aws_reporting.py | jeberhar/DevOpsLearner | f9dce9cf2dc6e75494c1372a339e9f13e836102d | ["MIT"] | count: null | min: null | max: null
max_forks_repo: aws_reporting.py | jeberhar/DevOpsLearner | f9dce9cf2dc6e75494c1372a339e9f13e836102d | ["MIT"] | count: null | min: null | max: null
#!/usr/bin/env python
import boto
import boto.ec2
import sys
from boto.ec2.connection import EC2Connection
import pprint
account_string = "YOUR_ACCOUNT_STRING" #change this for each AWS account
class ansi_color: #unused class due to CSV file limitations
red = '\033[31m'
green = '\033[32m'
reset = '\033[0m'
grey = '\033[1;30m'
def instance_info(i):
groups = ""
volume_info = ""
count = 1
statusDummy = "Status Checks not available"
alarmDummy = "Alarm Status not available"
#logic for instance name
if 'Name' in i.tags:
name = str(i.tags['Name'])
if name == "":
name = '!!! No name specified !!!'
else:
name = '??? Name not in attributes for instance ???'
#n = n.ljust(16)[:16]
#if i.state == 'running':
# n = ansi_color.green + n + ansi_color.reset
#else:
# n = ansi_color.red + n + ansi_color.reset
#logic for public DNS
if i.state == 'running':
pub_name = i.public_dns_name
else:
pub_name = "Machine not running - no public DNS name"
#pub_name = ansi_color.red + pub_name + ansi_color.reset
#logic for private DNS
if i.state == 'running':
priv_name = i.private_dns_name
else:
priv_name = "Machine not running - no private DNS name"
#priv_name = ansi_color.red + priv_name + ansi_color.reset
#logic for instance groups
for group_name in i.groups:
groups = groups + str(group_name.name)
if len(i.groups) > 1:
if count < len(i.groups):
groups = groups + " AND "
count = count + 1
info = account_string
info = info + "," + name
info = info + "," + i.id
info = info + "," + i.instance_type
info = info + "," + i.placement
info = info + ',' + i.state
#info = info + ',' + statusDummy
#info = info + ',' + alarmDummy
info = info + ',' + pub_name
info = info + "," + str(i.ip_address)
info = info + ',' + priv_name
info = info + "," + str(i.key_name)
info = info + "," + str(i.monitored)
info = info + "," + str(i.launch_time)
info = info + ',' + groups
#EBS reporting works but painfully slow.....
for current_volumes in volumes:
#print "\t" + str(current_volumes) + "\n"
if current_volumes.attachment_state() == 'attached':
filter = {'block-device-mapping.volume-id':current_volumes.id}
#print "Starting connection for all instances....\n"
volumesinstance = conn.get_all_instances(filters=filter)
#print "Volumes by instance: " + str(len(volumesinstance))
#print "Ending connection for all instances....\n"
ids = [z for k in volumesinstance for z in k.instances]
for s in ids:
if (i.id == s.id):
#print "SUCCESS!!"
volume_info = volume_info + str(current_volumes.id) + ',' + str(s.id) + ',' + str(current_volumes.attach_data.device) + ',' + str(current_volumes.size) + ','
info = info + ',' + volume_info
volume_info = ""
return info
def print_instance(i):
print instance_info(i)
####main program execution####
regions = sys.argv[1:]
volume_info = ""
if len(regions) == 0:
regions=['us-east-1']
if len(regions) == 1 and regions[0] == "all":
working_regions = boto.ec2.regions()
#print working_regions #DEBUG: uncomment to view all the regions that will be searched for "all"
else:
working_regions = [ boto.ec2.get_region(x) for x in regions ]
for current_working_region in working_regions:
print "\n================"
print current_working_region.name
print "================"
print "Account Name,Instance Name,Instance ID,Instance Type,Availability Zone,Instance State,Public DNS,Public IP,Private DNS,Key Name,Monitoring,Launch Time,Security Groups,Attached Volume ID,Attached Volume Instance ID,Mounted Device Name,Attached Volume Size"
try:
conn = boto.connect_ec2(region = current_working_region)
#conn = EC2Connection() #same as boto.connect_ec2()
reservations = conn.get_all_instances()
volumes = conn.get_all_volumes()
#print "Volumes array has length of: " + str(len(volumes))
instances = [i for r in reservations for i in r.instances]
#pp = pprint.PrettyPrinter(indent=4)
for r in reservations:
for i in r.instances:
#pp.pprint(i.__dict__)
print_instance(i)
#print_ebs_info(i)
except boto.exception.EC2ResponseError:
print "ERROR -- Could not connect to " + current_working_region.name
pass
| 37.122137 | 267 | 0.584002 | 156 | 0.032079 | 0 | 0 | 0 | 0 | 0 | 0 | 1,872 | 0.384948 |
hexsha: fd1683f2abe7dd47f1cee08ed7abab26a488dbc0 | size: 1,832 | ext: py | lang: Python
max_stars_repo: code/functions/one_hop_majority_vote.py | YatongChen/decoupled_smoothing_on_graph | b5110db92841c00193577adb0f5d3daa70f46845 | ["MIT"] | count: 5 | min: 2019-02-25T20:05:47.000Z | max: 2021-06-23T21:38:52.000Z
max_issues_repo: code/functions/one_hop_majority_vote.py | YatongChen/decoupled_smoothing_on_graphs | b5110db92841c00193577adb0f5d3daa70f46845 | ["MIT"] | count: null | min: null | max: null
max_forks_repo: code/functions/one_hop_majority_vote.py | YatongChen/decoupled_smoothing_on_graphs | b5110db92841c00193577adb0f5d3daa70f46845 | ["MIT"] | count: null | min: null | max: null
# one hop helper function
# Imports added for completeness: the original snippet uses np, nx, metrics,
# label_binarize, and f1_score without importing them.
import numpy as np
import networkx as nx
from sklearn import metrics
from sklearn.metrics import f1_score
from sklearn.preprocessing import label_binarize

def one_hop_majority_vote(G, gender_y_update, train_index, test_index):
A = np.array(nx.adjacency_matrix(G).todense())
D = np.diag(np.sum(A, axis=1))
d = np.diag(D)
theta = [None]*len(gender_y_update)
accuracy_score_benchmark = np.sum(gender_y_update[train_index])/len(train_index)
accuracy = []
micro_auc = []
wt_auc = []
for j in train_index:
theta[j] = gender_y_update[j]
for j in test_index:
#print(j)
j_neighbor = list(G.neighbors(j))
j_neighbor_label = list(set(j_neighbor).intersection(train_index))
if len(j_neighbor_label) ==0:
theta[j] = accuracy_score_benchmark
else:
theta[j] = np.sum(theta[ii] for ii in j_neighbor_label)/len(j_neighbor_label)
preference_by_class_matrix =np.zeros((len(theta),2))
preference_by_class_matrix[:,1] = theta
preference_by_class_matrix[:,0] = np.subtract(1, preference_by_class_matrix[:,1])
test_index = np.array(test_index)
micro_auc.append(metrics.roc_auc_score(label_binarize(gender_y_update[test_index],np.unique(gender_y_update)),preference_by_class_matrix[test_index,:][:,1]-preference_by_class_matrix[test_index,:][:,0],average='micro'))
wt_auc.append(metrics.roc_auc_score(label_binarize(gender_y_update[test_index],np.unique(gender_y_update)),preference_by_class_matrix[test_index,:][:,1]-preference_by_class_matrix[test_index,:][:,0],average='weighted'))
## f1-score version
y_true = label_binarize(gender_y_update[test_index],np.unique(gender_y_update))
y_pred = ((preference_by_class_matrix[test_index,:][:,1]) >accuracy_score_benchmark)+0
accuracy.append(f1_score(y_true, y_pred, average='macro'))
return(theta, micro_auc, wt_auc, accuracy)
| 50.888889 | 231 | 0.69869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.042031 |
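A hedged, illustrative call of the function above on a toy graph; the graph, labels, and train/test split are placeholders and not from the original repository, and running it depends on the function's older library conventions (np.sum over a generator, and the class list passed positionally to label_binarize, which assumes an older scikit-learn):

# Illustrative call, assuming the function above and its imports are in scope.
import numpy as np
import networkx as nx

G = nx.karate_club_graph()
labels = np.array([1 if G.nodes[i]["club"] == "Mr. Hi" else 0 for i in G.nodes])
nodes = list(G.nodes)
train_index, test_index = nodes[:len(nodes) // 2], nodes[len(nodes) // 2:]

# theta: per-node estimates; the last list holds macro F1 scores on the test split.
theta, micro_auc, wt_auc, f1_scores = one_hop_majority_vote(G, labels, train_index, test_index)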
hexsha: fd16885dbeb4939e362807cdc853aa44683b010f | size: 18,284 | ext: py | lang: Python
max_stars_repo: alphago/alphago.py | noahwaterfieldprice/alphago | 4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8 | ["MIT"] | count: 4 | min: 2018-02-12T09:11:26.000Z | max: 2022-01-24T20:46:15.000Z
max_issues_repo: alphago/alphago.py | noahwaterfieldprice/alphago | 4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8 | ["MIT"] | count: null | min: null | max: null
max_forks_repo: alphago/alphago.py | noahwaterfieldprice/alphago | 4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8 | ["MIT"] | count: 3 | min: 2018-08-23T15:08:54.000Z | max: 2020-03-13T14:21:08.000Z
from collections import OrderedDict
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from .player import MCTSPlayer, RandomPlayer, OptimalPlayer
from .evaluator import evaluate
from .mcts_tree import MCTSNode, mcts
from .utilities import sample_distribution
__all__ = ["train_alphago", "self_play", "process_self_play_data",
"process_training_data"]
def compute_checkpoint_name(step, path):
return path + "{}.checkpoint".format(step)
def train_alphago(game, create_estimator, self_play_iters, training_iters,
checkpoint_path, summary_path, alphago_steps=100,
evaluate_every=1, batch_size=32, mcts_iters=100, c_puct=1.0,
replay_length=100000, num_evaluate_games=500,
win_rate=0.55, verbose=True, restore_step=None,
self_play_file_path=None):
"""Trains AlphaGo on the game.
Parameters
----------
game: object
An object that has the attributes a game needs.
create_estimator: func
Creates a trainable estimator for the game. The estimator should
have a train function.
self_play_iters: int
Number of self-play games to play each self-play step.
training_iters: int
Number of training iters to use for each training step.
checkpoint_path: str
Where to save the checkpoints to.
summary_path: str
Where to save the summaries (tensorboard) to.
alphago_steps: int
Number of steps to run the alphago loop for.
evaluate_every: int
Evaluate the network every evaluate_every steps.
batch_size: int
Batch size to train with.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS. See AlphaGo paper.
replay_length: int
The amount of training data to use. Only train on the most recent
training data.
num_evaluate_games: int
Number of games to evaluate the players for.
win_rate: float
Number between 0 and 1. Only update self-play player when training
player beats self-play player by at least this rate.
verbose: bool
Whether or not to output progress.
restore_step: int or None
If given, restore the network from the checkpoint at this step.
self_play_file_path: str or None
Where to load self play data from, if given.
"""
# TODO: Do self-play, training and evaluating in parallel.
# We use a fixed estimator (the best one that's been trained) to
# generate self-play training data. We then train the training estimator
# on that data. We produce a checkpoint every 1000 training steps. This
# checkpoint is then evaluated against the current best neural network.
# If it beats the current best network by at least 55% then it becomes
# the new best network.
# 1 is the fixed player, and 2 is the training player.
self_play_estimator = create_estimator()
training_estimator = create_estimator()
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
tf_success_rate = tf.placeholder(
tf.float32, name='success_rate_summary')
success_rate_summary = tf.summary.scalar(
'success_rate_summary', tf_success_rate)
tf_success_rate_random = tf.placeholder(
tf.float32, name='success_rate_random')
success_rate_random_summary = tf.summary.scalar(
'success_rate_random', tf_success_rate_random)
#tf_success_rate_optimal = tf.placeholder(
# tf.float32, name='success_rate_optimal')
#success_rate_optimal_summary = tf.summary.scalar(
# 'success_rate_optimal', tf_success_rate_optimal)
#merged_summary = tf.summary.merge([success_rate_summary,
# success_rate_random_summary,
# success_rate_optimal_summary])
merged_summary = tf.summary.merge([success_rate_summary,
success_rate_random_summary])
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(summary_path)
if restore_step:
restore_path = compute_checkpoint_name(restore_step, checkpoint_path)
self_play_estimator.restore(restore_path)
training_estimator.restore(restore_path)
all_losses = []
self_play_data = None
initial_step = restore_step + 1 if restore_step else 0
for alphago_step in range(initial_step, initial_step + alphago_steps):
self_play_data = generate_self_play_data(
game, self_play_estimator, mcts_iters, c_puct, self_play_iters,
verbose=verbose, data=self_play_data)
training_data = process_training_data(self_play_data, replay_length)
if len(training_data) < 100:
continue
optimise_estimator(training_estimator, training_data, batch_size,
training_iters, writer=writer, verbose=verbose)
# Evaluate the players and choose the best.
if alphago_step % evaluate_every == 0:
success_rate, success_rate_random = \
evaluate_model(game, self_play_estimator,
training_estimator, mcts_iters, c_puct,
num_evaluate_games, verbose=verbose)
summary = sess.run(merged_summary,
feed_dict=
{tf_success_rate: success_rate,
tf_success_rate_random: success_rate_random})
writer.add_summary(summary, training_estimator.global_step)
checkpoint_model(training_estimator, alphago_step, checkpoint_path)
# If training player beats self-play player by a large enough
# margin, then it becomes the new best estimator.
if success_rate > win_rate:
# Create a new self player, with the weights of the most
# recent training_estimator.
if verbose:
print("Updating self-play player.")
print("Restoring from step: {}".format(alphago_step))
self_play_estimator = create_estimator()
restore_path = compute_checkpoint_name(alphago_step,
checkpoint_path)
self_play_estimator.restore(restore_path)
return all_losses
def optimise_estimator(estimator, training_data, batch_size, training_iters,
mode='reinforcement', writer=None, verbose=True):
summary = estimator.train(training_data, batch_size, training_iters,
mode=mode, writer=writer, verbose=verbose)
return summary
def evaluate_model(game, player1, player2, mcts_iters, c_puct, num_games,
verbose=True):
# Checkpoint the model.
# TODO: Implement evaluation
# TODO: Choose tau more systematically.
if verbose:
print("Evaluating. Self-player vs training, then training vs "
"self-player")
wins1, wins2, draws = evaluate_estimators_in_both_positions(
game, player1.create_estimate_fn(), player2.create_estimate_fn(),
mcts_iters, c_puct, num_games, tau=0.01, verbose=verbose)
if verbose:
print("Self-play player wins: {}, Training player wins: {}, "
"Draws: {}".format(wins1, wins2, draws))
success_rate = (wins2 + draws) / (wins1 + wins2 + draws)
if verbose:
print("Win + draw rate for training player: {}".format(
success_rate))
# Also evaluate against a random player
wins1, wins2, draws = evaluate_mcts_against_random_player(
game, player2.create_estimate_fn(), mcts_iters, c_puct, num_games,
tau=0.01, verbose=verbose)
success_rate_random = (wins1 + draws) / (wins1 + wins2 + draws)
if verbose:
print("Training player vs random. Wins: {}, Losses: {}, "
"Draws: {}".format(wins1, wins2, draws))
## Also evaluate against an optimal player
#wins1, wins2, draws = evaluate_mcts_against_optimal_player(
# game, player2.create_estimate_fn(), mcts_iters, c_puct, num_games,
# tau=0.1, verbose=verbose)
#success_rate_optimal = (wins1 + draws) / (wins1 + wins2 + draws)
#if verbose:
# print("Training player vs optimal. Wins: {}, Losses: {}, "
# "Draws: {}".format(wins1, wins2, draws))
#return success_rate, success_rate_random, success_rate_optimal
return success_rate, success_rate_random
def checkpoint_model(player, step, path):
"""Checkpoint the training player.
"""
checkpoint_name = compute_checkpoint_name(step, path)
player.save(checkpoint_name)
def evaluate_mcts_against_optimal_player(game, estimator, mcts_iters,
c_puct, num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau),
2: OptimalPlayer(game)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: OptimalPlayer(game),
2: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def evaluate_mcts_against_random_player(game, estimator, mcts_iters,
c_puct, num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau),
2: RandomPlayer(game)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: RandomPlayer(game),
2: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def evaluate_estimators_in_both_positions(game, estimator1, estimator2,
mcts_iters, c_puct,
num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator1, mcts_iters, c_puct, tau=tau),
2: MCTSPlayer(game, estimator2, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: MCTSPlayer(game, estimator2, mcts_iters, c_puct, tau=tau),
2: MCTSPlayer(game, estimator1, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def generate_self_play_data(game, estimator, mcts_iters, c_puct, num_iters,
data=None, verbose=True):
"""Generates self play data for a number of iterations for a given
estimator. Saves to save_file_path, if given.
"""
# if save_file_path is not None:
# with open(save_file_path, 'r') as f:
# data = json.load(save_file_path)
# index = max(data.keys()) + 1
if data is not None:
index = max(data.keys()) + 1
else:
data = OrderedDict()
index = 0
# Collect self-play training data using the best estimator.
disable_tqdm = False if verbose else True
for _ in tqdm(range(num_iters), disable=disable_tqdm):
data[index] = self_play(
game, estimator.create_estimate_fn(), mcts_iters, c_puct)
index += 1
# if save_file_path is not None:
# with open(save_file_path, 'w') as f:
# json.dump(data, f)
return data
def self_play(game, estimator, mcts_iters, c_puct):
"""Plays a single game using MCTS to choose actions for both players.
Parameters
----------
game: Game
An object representing the game to be played.
estimator: func
An estimate function.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS.
Returns
-------
game_state_list: list
A list of game states encountered in the self-play game. Starts
with the initial state and ends with a terminal state.
action_probs_list: list
A list of action probability dictionaries, as returned by MCTS
each time the algorithm has to take an action. The ith action
probabilities dictionary corresponds to the ith game_state, and
action_probs_list has length one less than game_state_list,
since we don't have to move in a terminal state.
"""
node = MCTSNode(game.initial_state, game.current_player(game.initial_state))
game_state_list = [node.game_state]
action_probs_list = []
action_list = []
move_count = 0
while not node.is_terminal:
# TODO: Choose this better.
tau = 1
if move_count >= 10:
tau = 1 / (move_count - 10 + 1)
# First run MCTS to compute action probabilities.
action_probs = mcts(node, game, estimator, mcts_iters, c_puct, tau=tau)
# Choose the action according to the action probabilities.
action = sample_distribution(action_probs)
action_list.append(action)
# Play the action
node = node.children[action]
# Add the action probabilities and game state to the list.
action_probs_list.append(action_probs)
game_state_list.append(node.game_state)
move_count += 1
data = process_self_play_data(game_state_list, action_list,
action_probs_list, game, game.action_indices)
return data
def process_training_data(self_play_data, replay_length=None):
"""Takes self play data and returns a list of tuples (state,
action_probs, utility) suitable for training an estimator.
Parameters
----------
self_play_data: dict
Dictionary with keys given by an index (int) and values given by a
log of the game. This is a list of tuples as in generate self play
data.
replay_length: int or None
If given, only return the last replay_length (state, probs, utility)
tuples.
"""
training_data = []
for index, game_log in self_play_data.items():
for (state, action, probs_vector, z) in game_log:
training_data.append((state, probs_vector, z))
print("Training data length: {}".format(len(training_data)))
print("Self play data length: {}".format(len(self_play_data)))
if replay_length is not None:
training_data = training_data[-replay_length:]
return training_data
def process_self_play_data(states_, actions_, action_probs_, game,
action_indices):
"""Takes a list of states and action probabilities, as returned by
play, and creates training data from this. We build up a list
consisting of (state, probs, z) tuples, where player is the player
in state 'state', and 'z' is the utility to 'player' in 'last_state'.
We omit the terminal state from the list as there are no probabilities to
train. TODO: Potentially include the terminal state in order to train the
value. # TODO: why the underscores in the parameter names?
Parameters
----------
states_: list
A list of n states, with the last being terminal.
actions_: list
A list of n-1 actions, being the action taken in the corresponding
state.
action_probs_: list
A list of n-1 dictionaries containing action probabilities. The ith
dictionary applies to the ith state, representing the probabilities
returned by play of taking each available action in the state.
game: Game
An object representing the game to be played.
action_indices: dict
A dictionary mapping actions (in the form of the legal_actions
function) to action indices (to be used for training the neural
network).
Returns
-------
training_data: list
A list consisting of (state, action, probs, z) tuples, where player
is the player in state 'state', and 'z' is the utility to 'player' in
'last_state'.
"""
# Get the outcome for the game. This should be the last state in states_.
last_state = states_.pop()
outcome = game.utility(last_state)
# Now action_probs_ and states_ are the same length.
training_data = []
for state, action, probs in zip(states_, actions_, action_probs_):
# Get the player in the state, and the value to this player of the
# terminal state.
player = game.current_player(state)
z = outcome[player]
# Convert the probs dictionary to a numpy array using action_indices.
probs_vector = np.zeros(len(action_indices))
for a, prob in probs.items():
probs_vector[action_indices[a]] = prob
non_nan_state = np.nan_to_num(state)
training_data.append((non_nan_state, action, probs_vector, z))
return training_data
| 39.152034 | 80 | 0.644553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,753 | 0.424032 |
hexsha: fd16ab23714a63a2de7b1edeefc67fc983b65d54 | size: 835 | ext: py | lang: Python
max_stars_repo: SRCTF/SRCTF/django_reuse/reuse/migrations/0002_auto_20160829_1452.py | yinyueacm/work-uga | d8fd104b8c5600e1715491fc5eeffaf5c0b5896c | ["MIT"] | count: 1 | min: 2019-01-11T03:20:34.000Z | max: 2019-01-11T03:20:34.000Z
max_issues_repo: SRCTF/SRCTF/django_reuse/reuse/migrations/0002_auto_20160829_1452.py | yinyueacm/yinyue-thesis | d8fd104b8c5600e1715491fc5eeffaf5c0b5896c | ["MIT"] | count: null | min: null | max: null
max_forks_repo: SRCTF/SRCTF/django_reuse/reuse/migrations/0002_auto_20160829_1452.py | yinyueacm/yinyue-thesis | d8fd104b8c5600e1715491fc5eeffaf5c0b5896c | ["MIT"] | count: null | min: null | max: null
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-29 14:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reuse', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Pswd_table',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pswd', models.CharField(default='123456', max_length=20)),
],
),
migrations.AddField(
model_name='guest',
name='pswd',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='reuse.Pswd_table'),
),
]
| 28.793103 | 118 | 0.602395 | 645 | 0.772455 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.186826 |
hexsha: fd17609bb6160f46ba3a4f3809db927e0e55536c | size: 5,976 | ext: py | lang: Python
max_stars_repo: complexity_science/ca/ca/ca1d.py | KristerJazz/complexity-science | 2340b7467d50c45d06fa93db7603f7f2778d3c4c | ["BSD-3-Clause"] | count: null | min: null | max: null
max_issues_repo: complexity_science/ca/ca/ca1d.py | KristerJazz/complexity-science | 2340b7467d50c45d06fa93db7603f7f2778d3c4c | ["BSD-3-Clause"] | count: 2 | min: 2021-03-28T16:23:00.000Z | max: 2021-04-05T08:10:35.000Z
max_forks_repo: complexity_science/ca/ca/ca1d.py | KristerJazz/complexity-science | 2340b7467d50c45d06fa93db7603f7f2778d3c4c | ["BSD-3-Clause"] | count: null | min: null | max: null
import numpy as np
import matplotlib.pyplot as plt
from .rule_manager import RuleManager
class CA1D():
def __init__(self, N):
"""
Creates an uninitialized 1D cellular automata object with length N
-------------
Parameters:
N = number of cells
n1 : left neighbor
n2 : right neighbor
-------------
Returns:
None
"""
self.num_cells = N
self.cells = np.zeros([N])
self.rm = RuleManager()
self.update_neighbors()
def update_neighbors(self):
"""
Updates the neighbor dictionary according to the neighborhood
"""
raise NotImplementedError("This is the base 1D CA class; please use a CA with predetermined neighborhood; SimpleCA, CA_nt, CA_t.")
def set_rule(self, rule_object):
"""
Sets the CA rule to a wolfram rule number
-------------
Parameters:
rule_number = values from (0-255). Refer to wolfram's rule numbers
-------------
Returns:
None
"""
self.rm.set_rule(rule_object)
def evolve(self):
"""
Evolves the CA according to the rule applied.
Updates the state of the cell.
cell = new_state
Updates the neighbor according to the new_state
n1 = left neighbor of new_state
n2 = right neighbor of new_state
-------------
Returns:
new_state = new state after applying the rule
"""
new_state = self.rm.apply(self.cells, self.neighbors)
self.cells = new_state
#Dont forget to update neighbors after evolution
self.update_neighbors()
return new_state
def initialize_index(self, index_list):
"""
Initializes the ca from the list of index
cells[index] = 1
Automatically updates neighbors after initialization
-------------
Parameters:
index_list = list of index to be initialized with value 1
-------------
Returns:
None : Updates the cell with initialized values
"""
assert(type(index_list)==type([])), "index_list must be a list"
self.cells = np.zeros([self.num_cells], dtype=int)
for i in index_list:
self.cells[i] = 1
self.update_neighbors()
def initialize_binary(self, ratio):
"""
Initializes the CA with a ratio of 1 and 0 values
Automatically updates neighbors after initialization
-------------
Parameters:
ratio = ratio of "1" state over the "0" state
-------------
Returns:
None : Updates the cell with initialized values
"""
self.cells = (np.random.random(self.num_cells)>ratio).astype(int)
self.update_neighbors()
def reset(self):
"""
Initializes the cells with zero values
-------------
Returns:
None : Updates the cells to zero values
"""
self.cells = np.zeros(self.num_cells)
self.update_neighbors()
def initialize_random(self):
"""
Initializes the CA with a random value from 0 to 1
Automatically updates neighbors after initialization
-------------
Returns:
None : Updates the cell with initialized values
"""
self.cells = np.random.random(self.num_cells)
self.update_neighbors()
def run(self, iterations, show_figure=True):
"""
Run cellular automata according to the wolfram rule_number assigned.
-------------
Parameters:
iterations = number of times for the rule to be applied
show_figure = Default is True, outputs the figure at run time
-------------
Returns:
result = resulting N x iteration array of the CA evolution
"""
result = [self.cells]
for i in range(iterations):
result.append(self.evolve())
if show_figure:
plt.imshow(result, cmap='Greens')
plt.show()
plt.clf()
return result
def add_rule(self, rule_object):
"""
Add the rule object to the rule manager.
This rule will apply for every evolve() function call.
"""
self.rm.add_rule(rule_object)
def reset_rule(self):
"""
Resets the rule list from RuleManager
"""
self.rm.reset_rule()
class SimpleCA(CA1D):
def __init__(self, N):
CA1D.__init__(self, N)
self.neighborhood = "No neighbors are automatically considered"
def update_neighbors(self):
"""
No neighors
"""
pass
class CA_t(CA1D):
def __init__(self, N):
CA1D.__init__(self, N)
self.neighborhood = "Two neighbors (left and right) are automatically considered"
def update_neighbors(self):
"""
Update the neighbor values
n1 : left neighbor
n2 : right neighbor
-------------
Returns:
None
"""
self.neighbors = {}
self.neighbors['left'] = np.roll(self.cells, 1)
self.neighbors['right'] = np.roll(self.cells, -1)
class CA_nt(CA1D):
def __init__(self, N):
CA1D.__init__(self, N)
self.neighborhood = "Two neighbors (left and right) are automatically considered with non toroidal boundaries"
def update_neighbors(self):
"""
Update the neighbor values
n1 : left neighbor
n2 : right neighbor
-------------
Returns:
None
"""
self.neighbors = {}
left = np.roll(self.cells, 1)
left[0] = 0
right = np.roll(self.cells, -1)
right[-1] = 0
self.neighbors['left'] = left
self.neighbors['right'] = right
| 29.15122 | 138 | 0.54836 | 5,877 | 0.983434 | 0 | 0 | 0 | 0 | 0 | 0 | 3,440 | 0.575636 |
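A standalone illustration of the neighbor construction the CA_t and CA_nt classes above rely on; the array values are illustrative only, and the rule machinery (RuleManager and concrete rule objects) lives elsewhere in the package, so it is not exercised here:

# Toroidal (CA_t) vs non-toroidal (CA_nt) neighbor construction, as used above.
import numpy as np

cells = np.array([1, 0, 0, 1, 0])
left_toroidal = np.roll(cells, 1)     # [0, 1, 0, 0, 1], wraps around the edge
right_toroidal = np.roll(cells, -1)   # [0, 0, 1, 0, 1], wraps around the edge

left_open = np.roll(cells, 1)
left_open[0] = 0                      # CA_nt forces the boundary neighbor to 0
right_open = np.roll(cells, -1)
right_open[-1] = 0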
hexsha: fd1902e85156fc45744e6e48892733db33d5f755 | size: 4,373 | ext: py | lang: Python
max_stars_repo: extract_skip_thought.py | youngfly11/ReferCOCO-Pretraining-Detectron2 | 8c8536a4d822b3cf9140380442a440d42e948c38 | ["Apache-2.0"] | count: 2 | min: 2020-08-14T08:00:53.000Z | max: 2020-11-21T11:01:55.000Z
max_issues_repo: extract_skip_thought.py | youngfly11/ReferCOCO-Pretraining-Detectron2 | 8c8536a4d822b3cf9140380442a440d42e948c38 | ["Apache-2.0"] | count: null | min: null | max: null
max_forks_repo: extract_skip_thought.py | youngfly11/ReferCOCO-Pretraining-Detectron2 | 8c8536a4d822b3cf9140380442a440d42e948c38 | ["Apache-2.0"] | count: null | min: null | max: null
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/6/25 22:41
# @Author : Yongfei Liu
# @Email : [email protected]
import numpy as np
import os.path as osp
import os
import pickle
from collections import OrderedDict
import torch
import json
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
def check_objects_vocab():
with open('./flickr30k_datasets/objects_vocab.txt', 'r') as load_f:
object_vocab = load_f.readlines()
with open('./flickr30k_datasets/skip-thoughts/dictionary.txt', 'r') as load_f:
skip_dict = load_f.readlines()
object_v1 = []
for vocab in object_vocab:
object_v1.append(vocab.strip())
skip_v1 = []
for sk_dict in skip_dict:
skip_v1.append(sk_dict.strip())
for vocab in object_v1:
vocab = vocab.split(' ')
for vo in vocab:
if vo not in skip_v1:
print(vocab)
# Note: this appears to be a method lifted out of a class; it still expects `self`
# (with a `vocab` attribute) and is never called elsewhere in this script.
def _make_emb_state_dict(self, dictionary, parameters):
weight = torch.zeros(len(self.vocab)+1, 620) # first dim = zeros -> +1
unknown_params = parameters[dictionary['UNK']]
nb_unknown = 0
for id_weight, word in enumerate(self.vocab):
if word in dictionary:
id_params = dictionary[word]
params = parameters[id_params]
else:
print('Warning: word `{}` not in dictionary'.format(word))
params = unknown_params
nb_unknown += 1
weight[id_weight+1] = torch.from_numpy(params)
state_dict = OrderedDict({'weight':weight})
if nb_unknown > 0:
print('Warning: {}/{}({}) words are not in dictionary, thus set UNK embedding parameter to init'
.format(nb_unknown, len(self.vocab), len(dictionary)))
return state_dict
def extract_embedding():
# {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
path = './RefSegDatasets/refseg_anno'
dataset = 'refcoco+'
# object_vocab = []
# for cat in COCO_CATEGORIES:
# thing = cat['isthing']
# if thing==1:
# object_vocab.append(cat['name'])
# with open('./flickr30k_datasets/objects_vocab.txt', 'r') as load_f:
# object_vocab = load_f.readlines()
with open('./flickr30k_datasets/skip-thoughts/dictionary.txt', 'r') as load_f:
skip_dict = load_f.readlines()
skip_dict = {word.strip():idx for idx, word in enumerate(skip_dict)}
path_params = './flickr30k_datasets/skip-thoughts/utable.npy'
params = np.load(path_params, encoding='latin1', allow_pickle=True) # to load from python2
# object_embed = []
# for vocab in object_vocab:
# vocab = vocab.strip().split(' ')
# vocab_eb = []
# for vb in vocab:
# vb_idx = skip_dict.get(vb)
# vocab_eb.append(params[vb_idx].squeeze())
#
# vocab_eb = np.stack(vocab_eb, axis=0).mean(0)
# object_embed.append(vocab_eb)
#
# object_embed = np.array(object_embed) ## 1600*620
# print('object_dim', object_embed.shape)
#
# with open(osp.join(path, dataset, 'skip_label.pkl'), 'wb') as pickle_dump:
# pickle.dump(object_embed, pickle_dump)
vocab_file = open(osp.join(path, dataset, 'vocab.json'))
vocab = json.load(vocab_file)
vocab_file.close()
# add_vocab = ['relate', 'butted']
# vocab.extend(add_vocab)
skip_thoughts_dict = {}
for vb in vocab:
vb = vb.strip()
vb_idx = skip_dict.get(vb)
if vb_idx is not None:
skip_thoughts_dict[vb] = params[vb_idx].squeeze()
else:
vb_split = vb.split('-')
vb_split_embed = []
for vbs in vb_split:
vbs_idx = skip_dict.get(vbs)
if vbs_idx is not None:
vb_split_embed.append(params[vbs_idx].squeeze())
else:
print(vb, 'not in dictionary')
break
if len(vb_split_embed) == len(vb_split):
# print(vb, 'are in list')
vb_split_embed = np.stack(vb_split_embed, axis=0).mean(0)
skip_thoughts_dict[vb] = vb_split_embed
print(len(vocab))
with open(osp.join(path, dataset, 'skip_vocab.pkl'), 'wb') as pickle_dump:
pickle.dump(skip_thoughts_dict, pickle_dump)
if __name__ == '__main__':
extract_embedding()
| 30.58042 | 104 | 0.607363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,546 | 0.353533 |
hexsha: fd1912c311e861ca371e1043073ef9f199c996c4 | size: 4,909 | ext: py | lang: Python
max_stars_repo: pyutil_mongo/cfg.py | chhsiao1981/pyutil_mongo | facea2376b48dd7157d4633ab8128c8daf7e59ef | ["MIT"] | count: null | min: null | max: null
max_issues_repo: pyutil_mongo/cfg.py | chhsiao1981/pyutil_mongo | facea2376b48dd7157d4633ab8128c8daf7e59ef | ["MIT"] | count: null | min: null | max: null
max_forks_repo: pyutil_mongo/cfg.py | chhsiao1981/pyutil_mongo | facea2376b48dd7157d4633ab8128c8daf7e59ef | ["MIT"] | count: null | min: null | max: null
# -*- coding: utf-8 -*-
"""
Attributes:
config (dict): Description
logger (logging.Logger): Description
"""
import logging
import pymongo
logger = None
config = {}
class MongoMap(object):
"""Info about MongoDB
Attributes:
ca (None, optional): ssl-ca
cert (None, optional): ssl-cert
collection_map (dict): mapping for the collection-name in the code vs. real collection-name in the mongo.
db_name (str, optional): db-name used in the code.
ensure_index (None, optional): ensure-index
ensure_unique_index (None, optional): ensure-unique-index
hostname (str): hostname of the real mongo.
mongo_db_name (str, optional): real db-name in mongodb.
ssl (bool, optional): whether to use ssl
"""
def __init__(self, collection_map: dict, ensure_index=None, ensure_unique_index=None, db_name="mongo", hostname="localhost:27017", mongo_db_name="test", ssl=False, cert=None, ca=None):
self.db_name = db_name
self.hostname = hostname
self.mongo_db_name = mongo_db_name
self.collection_map = collection_map
self.ensure_index = ensure_index
self.ensure_unique_index = ensure_unique_index
self.ssl = ssl
self.cert = cert
self.ca = ca
def init(the_logger: logging.Logger, mongo_maps: list):
"""init
Args:
the_logger (logging.Logger): Description
mongo_maps (list): list of MongoDB info
Returns:
TYPE: Description
"""
global logger
logger = the_logger
return restart_mongo(mongo_maps=mongo_maps)
def restart_mongo(collection_name="", db_name="", mongo_maps=None):
"""restarting mongo
Args:
collection_name (str, optional): collection-name
db_name (str, optional): db-name
mongo_maps (None, optional): mongo-maps
Returns:
TYPE: Description
"""
'''
initialize mongo
'''
global config
if mongo_maps is None:
mongo_maps = [each['mongo_map'] for each in config.values()]
if len(mongo_maps) == 0:
return
errs = []
for idx, mongo_map in enumerate(mongo_maps):
each_err = _init_mongo_map_core(mongo_map, collection_name=collection_name, db_name=db_name)
if each_err:
errs.append(each_err)
logger.error('(%s/%s): e: %s', idx, len(mongo_maps), each_err)
if not errs:
return None
err_str = ','.join(['%s' % (each) for each in errs])
return Exception(err_str)
def _init_mongo_map_core(mongo_map: MongoMap, collection_name="", db_name=""):
"""Summary
Args:
mongo_map (MongoMap): Description
collection_name (str, optional): Description
db_name (str, optional): Description
Returns:
TYPE: Description
"""
global config
global logger
mongo_map_db_name, hostname, mongo_db_name, collection_map, ensure_index, ensure_unique_index = mongo_map.db_name, mongo_map.hostname, mongo_map.mongo_db_name, mongo_map.collection_map, mongo_map.ensure_index, mongo_map.ensure_unique_index
if db_name != '' and mongo_map_db_name != db_name:
return
if collection_name != '' and collection_name not in collection_map:
return
if collection_name == '' and mongo_map_db_name in config:
return Exception('db already in config: db_name: %s config: %s', mongo_map_db_name, config[mongo_map_db_name])
if ensure_index is None:
ensure_index = {}
if ensure_unique_index is None:
ensure_unique_index = {}
# mongo_server_url
mongo_server_url = 'mongodb://%s/%s' % (hostname, mongo_db_name)
# mongo-server-client
mongo_kwargs = {}
if mongo_map.ssl:
mongo_kwargs.update({
'ssl': True,
'authSource': '$external',
'authMechanism': 'MONGODB-X509',
'ssl_certfile': mongo_map.cert,
'ssl_ca_certs': mongo_map.ca,
})
mongo_server_client = pymongo.MongoClient(
mongo_server_url,
**mongo_kwargs,
)[mongo_db_name]
# config-by-db-name
config_by_db_name = {'mongo_map': mongo_map, 'db': {}, 'url': mongo_server_url}
# collection
for (key, val) in collection_map.items():
logger.info('mongo: %s => %s', key, val)
config_by_db_name['db'][key] = mongo_server_client[val]
    # ensure index
for key, val in ensure_index.items():
logger.info('to ensure_index: key: %s', key)
config_by_db_name['db'][key].create_index(val, background=True)
    # ensure unique index
for key, val in ensure_unique_index.items():
logger.info('to ensure_unique_index: key: %s', key)
config_by_db_name['db'][key].create_index(val, background=True, unique=True)
config[mongo_map_db_name] = config_by_db_name
def clean():
"""Reset config
"""
global config
config = {}
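# Illustrative usage sketch (not part of the original module). The host,
# database and collection names below are hypothetical placeholders.
if __name__ == "__main__":
    example_map = MongoMap(
        collection_map={"users": "users_collection"},
        db_name="example",
        hostname="localhost:27017",
        mongo_db_name="example_db",
    )
    err = init(logging.getLogger(__name__), [example_map])
    if err is not None:
        logging.getLogger(__name__).error("init failed: %s", err)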
| 28.375723 | 243 | 0.646771 | 1,113 | 0.226726 | 0 | 0 | 0 | 0 | 0 | 0 | 1,780 | 0.362599 |
fd196b9ee70ef729b16290f14477ca8c3d79df73
| 172 |
py
|
Python
|
exhaustive_search/05_multiple-arrays/05-01-01.py
|
fumiyanll23/algo-method
|
d86ea1d399cbc5a1db0ae49d0c82e41042f661ab
|
[
"MIT"
] | null | null | null |
exhaustive_search/05_multiple-arrays/05-01-01.py
|
fumiyanll23/algo-method
|
d86ea1d399cbc5a1db0ae49d0c82e41042f661ab
|
[
"MIT"
] | null | null | null |
exhaustive_search/05_multiple-arrays/05-01-01.py
|
fumiyanll23/algo-method
|
d86ea1d399cbc5a1db0ae49d0c82e41042f661ab
|
[
"MIT"
] | null | null | null |
# input
N, M = map(int, input().split())
As = [*map(int, input().split())]
Bs = [*map(int, input().split())]
# compute
# output
print(sum(A > B for B in Bs for A in As))
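# Illustrative example (not part of the original solution): with As = [3, 1]
# and Bs = [2], the only pair with A > B is (3, 2), so the program prints 1.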
| 17.2 | 41 | 0.569767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.139535 |
fd19ca0b5b114583ad7ed023250d660c51840010
| 141 |
py
|
Python
|
solutions/1639.py
|
nxexox/acm.timus
|
9548d2a0b54fdd99bd60071f3be2fb7f897a7303
|
[
"MIT"
] | null | null | null |
solutions/1639.py
|
nxexox/acm.timus
|
9548d2a0b54fdd99bd60071f3be2fb7f897a7303
|
[
"MIT"
] | null | null | null |
solutions/1639.py
|
nxexox/acm.timus
|
9548d2a0b54fdd99bd60071f3be2fb7f897a7303
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
a, b = [int(i) for i in input().split()]
c = a * b
if c % 2 == 0:
print('[:=[first]')
else:
print('[second]=:]')
| 14.1 | 40 | 0.475177 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.297872 |
fd19e6853abb785c9505d8a5f4aaf9326f1ad438
| 374 |
py
|
Python
|
lstmcpipe/config/__init__.py
|
cta-observatory/lst-i-rf
|
7a634e0b3b07dda2b20df47875d97616eab65821
|
[
"MIT"
] | 2 |
2021-02-01T17:30:46.000Z
|
2021-02-22T13:59:49.000Z
|
lstmcpipe/config/__init__.py
|
cta-observatory/lst-i-rf
|
7a634e0b3b07dda2b20df47875d97616eab65821
|
[
"MIT"
] | 106 |
2021-04-16T21:15:20.000Z
|
2022-03-31T23:02:50.000Z
|
lstmcpipe/config/__init__.py
|
cta-observatory/lstmcpipe
|
7a634e0b3b07dda2b20df47875d97616eab65821
|
[
"MIT"
] | 3 |
2022-03-02T09:23:09.000Z
|
2022-03-03T16:00:25.000Z
|
import os
from shutil import which
from .pipeline_config import load_config
__all__ = ["load_config"]
def export_env(outdir="."):
if which("conda") is not None:
os.system((f"conda env export > {os.path.join(outdir, 'conda_env.yml')}"))
elif which("pip") is not None:
os.system(f"python -m pip freeze > {os.path.join(outdir, 'requirements.txt')}")
| 28.769231 | 87 | 0.671123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.419786 |
fd1a5012b7966cdeb8f03d71591cc6d8a74c6420
| 2,337 |
py
|
Python
|
tests/stakkr_compose_test.py
|
dwade75/stakkr
|
ae77607e84b5b305ae8f5a14eb8f22237d943a29
|
[
"Apache-2.0"
] | null | null | null |
tests/stakkr_compose_test.py
|
dwade75/stakkr
|
ae77607e84b5b305ae8f5a14eb8f22237d943a29
|
[
"Apache-2.0"
] | null | null | null |
tests/stakkr_compose_test.py
|
dwade75/stakkr
|
ae77607e84b5b305ae8f5a14eb8f22237d943a29
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import stakkr.stakkr_compose as sc
import subprocess
import unittest
base_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, base_dir + '/../')
# https://docs.python.org/3/library/unittest.html#assert-methods
class StakkrComposeTest(unittest.TestCase):
services = {
'a': 'service_a.yml',
'b': 'service_b.yml',
}
def test_get_invalid_config_in_cli(self):
cmd = ['stakkr-compose', '-c', base_dir + '/static/config_invalid.ini']
result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in result.stdout:
self.assertRegex(line.decode(), 'Failed validating.*config_invalid.ini.*')
break
def test_get_uid(self):
uid = '1000' if os.name == 'nt' else str(os.getuid())
self.assertEqual(sc._get_uid(None), uid)
def test_get_gid(self):
gid = '1000' if os.name == 'nt' else str(os.getgid())
self.assertEqual(sc._get_gid(None), gid)
def test_get_uid_whenset(self):
self.assertEqual(sc._get_uid(1000), '1000')
def test_get_gid_whenset(self):
self.assertEqual(sc._get_gid(1000), '1000')
def test_get_wrong_enabled_service(self):
with self.assertRaises(SystemExit):
sc.get_enabled_services(['c'])
def test_get_right_enabled_service(self):
services_files = sc.get_enabled_services(['maildev'])
self.assertTrue(services_files[0].endswith('static/services/maildev.yml'))
def test_get_available_services(self):
services = sc.get_available_services()
self.assertTrue('apache' in services)
self.assertTrue('mongo' in services)
self.assertTrue('php' in services)
self.assertTrue('elasticsearch' in services)
self.assertEqual('static/services/apache.yml', services['apache'][-26:])
self.assertEqual('static/services/mongo.yml', services['mongo'][-25:])
def test_get_valid_configured_services(self):
services = sc.get_configured_services(base_dir + '/static/config_valid.ini')
self.assertTrue('maildev' in services)
self.assertTrue('php' in services)
self.assertFalse('mongo' in services)
self.assertFalse('elasticsearch' in services)
if __name__ == "__main__":
unittest.main()
| 31.16 | 86 | 0.670946 | 2,037 | 0.87163 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.190415 |
fd1dc3801dfc8c9bd5d0968ee738990d98e2881b
| 2,792 |
py
|
Python
|
DQIC/backtesting/run.py
|
bladezzw/DeepQuantInChina
|
ce74a9bf8db91e3545ccc3e7af81f80796a536fa
|
[
"MIT"
] | 8 |
2019-04-14T03:05:19.000Z
|
2020-02-13T18:35:41.000Z
|
DQIC/backtesting/run.py
|
bladezzw/DeepQuantInChina
|
ce74a9bf8db91e3545ccc3e7af81f80796a536fa
|
[
"MIT"
] | null | null | null |
DQIC/backtesting/run.py
|
bladezzw/DeepQuantInChina
|
ce74a9bf8db91e3545ccc3e7af81f80796a536fa
|
[
"MIT"
] | 2 |
2019-05-08T08:23:50.000Z
|
2020-01-23T03:54:41.000Z
|
import os,sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
import datetime
import time
from backtesting.backtest import Backtest
from backtesting.data import HistoricCSVDataHandler
from backtesting.execution import SimulatedExecutionHandler
from backtesting.portfolio import Portfolio,Portfolio_For_futures
from Strategy.strategy import MovingAverageCrossStrategy
def exc_time(func, *args, **kwargs):
"""
计算运行时间
:param func: a function
:param args: arguements of the function
:param kwargs: arguements of the function
:return: time costed on running the function
"""
def wrapper(func, *args, **kwargs):
start_time = time.time()
func(*args, **kwargs)
stop_time = time.time()
print("run time is %s" % (stop_time - start_time))
return wrapper(func, *args, **kwargs)
if __name__ == "__main__":
csv_dir = r"~/data"
symbol_list = ['000001.SH']
initial_capital = 5000.0
    start_date = datetime.datetime(2012, 1, 1, 0, 0, 0)  # this is not actually used.
heartbeat = 0.0
#default: MACS = MovingAverageCrossStrategy(short_window=10,long_window=30)
MACS = MovingAverageCrossStrategy
commission = 5
exchangeID = ''
    lever = 10  # leverage: in futures, the value of a one-tick move per lot (e.g. for rb, a 1-point move on one lot is worth 10 yuan)
backtest_type = 'futures'
if backtest_type == 'stock':
backtest = Backtest(csv_dir=csv_dir,
symbol_list=symbol_list,
initial_capital=initial_capital,
heartbeat=heartbeat,
start_date=start_date,
data_handler=HistoricCSVDataHandler,
execution_handler=SimulatedExecutionHandler,
portfolio=Portfolio,
strategy=MACS,
commission=commission,
exchangeID=None,
lever=1
)
elif backtest_type == 'futures':
backtest = Backtest(csv_dir=csv_dir,
symbol_list=symbol_list,
initial_capital=initial_capital,
heartbeat=heartbeat,
start_date=start_date,
data_handler=HistoricCSVDataHandler,
execution_handler=SimulatedExecutionHandler,
portfolio=Portfolio_For_futures,
strategy=MACS,
commission=commission,
exchangeID=exchangeID,
lever=lever
)
exc_time(backtest.simulate_trading) # run time is 1.2880792617797852
| 33.638554 | 78 | 0.567335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.173291 |
fd1e95f3c8250711415f3acb25bf5e3b26c63f39
| 3,306 |
py
|
Python
|
Emergency/DB/DRAW3D.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | 2 |
2020-03-22T14:35:00.000Z
|
2020-05-26T05:06:41.000Z
|
Emergency/DB/DRAW3D.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | null | null | null |
Emergency/DB/DRAW3D.py
|
LeeDaeil/CNS_Autonomous
|
2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
from COMMONTOOL import PTCureve
DB = pd.read_csv('0_228.txt')
# DB = pd.read_csv('../3차 검증/322.txt')
# target_time = 100
# for i in range(0, len(DB)):
# if DB['KCNTOMS'].loc[i] != target_time:
# DB.drop([i], inplace=True)
# else:
# target_time += 100
x, y, z, zero = [], [], [], []
PTY, PTX, BotZ, UpZ = [], [], [], []
RateX, RateY, RateZ = [], [], []
SaveTIMETEMP = {'Temp':0, 'Time':0}
for temp, t, pres, co1, co2 in zip(DB['UAVLEG2'].tolist(), DB['KCNTOMS'].tolist(),
DB['ZINST65'].tolist(), DB['KLAMPO6'].tolist(),
DB['KLAMPO9'].tolist()):
x.append(temp)
y.append(-t)
z.append(pres)
if co1 == 0 and co2 == 1 and t > 1500:
if SaveTIMETEMP['Time'] == 0:
SaveTIMETEMP['Time'] = t
SaveTIMETEMP['Temp'] = temp
rate = -55 / (60 * 60 * 5)
get_temp = rate * (t - SaveTIMETEMP['Time']) + SaveTIMETEMP['Temp']
RateX.append(get_temp)
RateY.append(-t)
RateZ.append(0)
zero.append(0)
Temp = []
UpPres = []
BotPres = []
for _ in range(0, 350):
uppres, botpres = PTCureve()._get_pres(_)
Temp.append([_])
UpPres.append([uppres])
BotPres.append([botpres])
PTX = np.array(Temp)
BotZ = np.array(BotPres)
UpZ = np.array(UpPres)
PTY = np.array([[0] for _ in range(0, 350)])
PTX = np.hstack([PTX[:, 0:1], Temp])
BotZ = np.hstack([BotZ[:, 0:1], BotPres])
UpZ = np.hstack([UpZ[:, 0:1], UpPres])
PTY = np.hstack([PTY[:, 0:1], np.array([[-t] for _ in range(0, 350)])])
print(np.shape(PTX))
fig = plt.figure()
ax1 = plt.axes(projection='3d')
ax1.plot3D(RateX, RateY, RateZ, color='orange', lw=1.5, ls='--')
ax1.plot3D([170, 0, 0, 170, 170],
[y[-1], y[-1], 0, 0, y[-1]],
[29.5, 29.5, 29.5, 29.5, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 0, 0, 170, 170],
[y[-1], y[-1], 0, 0, y[-1]],
[17, 17, 17, 17, 17], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 170], [y[-1], y[-1]],
[17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 170], [0, 0], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([0, 0], [y[-1], y[-1]], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([0, 0], [0, 0], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot_surface(PTX, PTY, UpZ, rstride=8, cstride=8, alpha=0.15, color='r')
ax1.plot_surface(PTX, PTY, BotZ, rstride=8, cstride=8, alpha=0.15, color='r')
# ax1.scatter(PTX, PTY, BotZ, marker='*')
ax1.plot3D(x, y, z, color='blue', lw=1.5)
# linewidth or lw: float
ax1.plot3D([x[-1], x[-1]], [y[-1], y[-1]], [0, z[-1]], color='blue', lw=0.5, ls='--')
ax1.plot3D([0, x[-1]], [y[-1], y[-1]], [z[-1], z[-1]], color='blue', lw=0.5, ls='--')
ax1.plot3D([x[-1], x[-1]], [0, y[-1]], [z[-1], z[-1]], color='blue', lw=0.5, ls='--')
# each
ax1.plot3D(x, y, zero, color='black', lw=1, ls='--') # temp
ax1.plot3D(zero, y, z, color='black', lw=1, ls='--') # pres
ax1.plot3D(x, zero, z, color='black', lw=1, ls='--') # PT
# show y-axis tick labels as absolute values (time was plotted as negative)
ax1.set_yticklabels([int(_) for _ in abs(ax1.get_yticks())])
ax1.set_xlabel('Temperature')
ax1.set_ylabel('Time [Tick]')
ax1.set_zlabel('Pressure')
ax1.set_xlim(0, 350)
ax1.set_zlim(0, 200)
plt.show()
| 30.897196 | 85 | 0.5366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 608 | 0.183022 |
fd1f6bede9086f03b21d05de1c334c5625e057f0
| 658 |
py
|
Python
|
aeroplast/images.py
|
kk6/aeroplast
|
8347bf071f43a560b865a7f37b76fb05f5cea57d
|
[
"MIT"
] | 1 |
2019-11-12T07:02:20.000Z
|
2019-11-12T07:02:20.000Z
|
aeroplast/images.py
|
kk6/aeroplast
|
8347bf071f43a560b865a7f37b76fb05f5cea57d
|
[
"MIT"
] | 14 |
2018-11-13T09:57:09.000Z
|
2019-04-05T20:02:46.000Z
|
aeroplast/images.py
|
kk6/aeroplast
|
8347bf071f43a560b865a7f37b76fb05f5cea57d
|
[
"MIT"
] | 1 |
2018-12-20T07:52:59.000Z
|
2018-12-20T07:52:59.000Z
|
# -*- coding: utf-8 -*-
"""
Transparent PNG conversion
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from PIL import Image
def get_new_size(original_size):
"""
Returns each width and height plus 2px.
:param original_size: Original image's size
:return: Width / height after calculation
:rtype: tuple
"""
return tuple(x + 2 for x in original_size)
def resize_image(image, size):
"""
Resize the image to fit in the specified size.
:param image: Original image.
:param size: Tuple of (width, height).
:return: Resized image.
:rtype: :py:class: `~PIL.Image.Image`
"""
image.thumbnail(size)
return image
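# Illustrative usage sketch (not part of the original module); "input.png" is a
# hypothetical file path.
if __name__ == "__main__":
    original = Image.open("input.png")
    padded_size = get_new_size(original.size)
    resized = resize_image(original, padded_size)
    print(padded_size, resized.size)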
| 18.8 | 50 | 0.617021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.708207 |
fd22786701bf4e42b8d3932674e46c80a650982c
| 678 |
py
|
Python
|
common/common/management/commands/makemessages.py
|
FSTUM/rallyetool-v2
|
2f3e2b5cb8655abe023ed1215b7182430b75bb23
|
[
"MIT"
] | 1 |
2021-10-30T09:31:02.000Z
|
2021-10-30T09:31:02.000Z
|
common/common/management/commands/makemessages.py
|
FSTUM/rallyetool-v2
|
2f3e2b5cb8655abe023ed1215b7182430b75bb23
|
[
"MIT"
] | 9 |
2021-11-23T10:13:43.000Z
|
2022-03-01T15:04:15.000Z
|
common/common/management/commands/makemessages.py
|
CommanderStorm/rallyetool-v2
|
721413d6df8afc9347dac7ee83deb3a0ad4c01bc
|
[
"MIT"
] | 1 |
2021-10-16T09:07:47.000Z
|
2021-10-16T09:07:47.000Z
|
from django.core.management.commands import makemessages
class Command(makemessages.Command):
def build_potfiles(self):
potfiles = super().build_potfiles()
for potfile in sorted(set(potfiles)):
self._remove_pot_creation_date(potfile)
return potfiles
@staticmethod
def _remove_pot_creation_date(path):
modified_lines = []
with open(path, "rb") as file:
for line in file:
if not line.startswith(b'"POT-Creation-Date: '):
modified_lines.append(line)
with open(path, "wb") as file:
for line in modified_lines:
file.write(line)
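# Illustrative note (not part of the original file): with this override placed
# in an app's management/commands package, running "python manage.py
# makemessages" produces .po/.pot files without the "POT-Creation-Date:" header
# line, so regenerating translations does not create timestamp-only diffs.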
| 27.12 | 64 | 0.610619 | 618 | 0.911504 | 0 | 0 | 378 | 0.557522 | 0 | 0 | 31 | 0.045723 |
fd2333a5ba2bad8fcd4a158981a7c15852072e07
| 6,529 |
py
|
Python
|
app/api/v2/users/views_update.py
|
Raywire/iReporter
|
ac58414b84b9c96f0be5e0d477355d0811d8b9c5
|
[
"MIT"
] | 3 |
2019-01-09T15:17:28.000Z
|
2019-12-01T18:40:50.000Z
|
app/api/v2/users/views_update.py
|
Raywire/iReporter
|
ac58414b84b9c96f0be5e0d477355d0811d8b9c5
|
[
"MIT"
] | 13 |
2018-11-30T05:33:13.000Z
|
2021-04-30T20:46:41.000Z
|
app/api/v2/users/views_update.py
|
Raywire/iReporter
|
ac58414b84b9c96f0be5e0d477355d0811d8b9c5
|
[
"MIT"
] | 3 |
2018-12-02T16:10:12.000Z
|
2019-01-04T14:51:04.000Z
|
"""Views for users"""
from flask_restful import Resource
from flask import jsonify, request
from app.api.v2.users.models import UserModel
from app.api.v2.decorator import token_required, get_token
from app.api.v2.send_email import send
class UserStatus(Resource):
"""Class with method for updating a specific user admin status"""
def __init__(self):
self.db = UserModel()
@token_required
def patch(current_user, self, username):
"""method to promote a user"""
user = self.db.get_user(username)
if user is None:
return jsonify({
"status": 404,
"message": "user does not exist"
})
        if current_user['isadmin'] is not True or user['id'] == 1:
return jsonify({
"status": 403,
"message": "You cannot change the status of this user"
})
if current_user['username'] == username:
return jsonify({
"status": 403,
"message": "You cannot change your own admin status"
})
user_status_updated = self.db.promote_user(username)
if user_status_updated is True:
success_message = {
"username": username,
"message": "User status has been updated"
}
return jsonify({
"status": 200,
"data": success_message
})
class UserActivity(Resource):
"""Class with method for disabling or enabling user activity"""
def __init__(self):
self.db = UserModel()
@token_required
def patch(current_user, self, username):
"""method to deactivate/activate a user"""
user = self.db.get_user(username)
if user is None:
return jsonify({
"status": 404,
"message": "user does not exist"
})
if current_user['isadmin'] is not True or user['id'] == 1:
return jsonify({
"status": 403,
"message": "You cannot change this user's active status"
})
if current_user['username'] == username:
return jsonify({
"status": 403,
"message": "You cannot change your own active status"
})
user_activity_updated = self.db.activate_user(username)
if user_activity_updated is True:
return jsonify({
"status": 200,
"data": {
"username": username,
"message": "User active status has been updated"
}
})
class UserProfilePic(Resource):
"""Class with method for uploading a user's profile picture"""
def __init__(self):
self.db = UserModel()
@token_required
def patch(current_user, self, username):
"""method to upload a user's profile picture"""
if current_user['username'] != username:
return jsonify({
"status": 403,
"message": "A user can only upload a picture to their own profile"
})
upload_status = self.db.upload_profile_pic(username)
        if upload_status in ('File type not supported', 'select an image', 'no file part'):
return jsonify({
"status": 400,
"message": upload_status
})
if upload_status is True:
return jsonify({
"status": 200,
"data": {
"username": username,
"message": "Your profile picture has been uploaded"
}
})
class VerifyAccount(Resource):
"""Class with method to verify a user's account"""
@token_required
def patch(current_user, self, username):
"""Method to verify a user's account"""
loggedin_user = UserModel().get_user(username)
if current_user['verification'] is False:
return {"message": "Verification failed please use the link in your email address"}, 403
if loggedin_user is None:
return jsonify({
"status": 404,
"message": "user does not exist"
})
if current_user['username'] != username:
return jsonify({
"status": 403,
"message": "A user can only verify their own account"
})
verification = UserModel().verify_user(username)
if verification == 'account verified':
return jsonify({
"status": 200,
"data": {
"username": username,
"message": "Your account has been verified"
}
})
class RequestVerification(Resource):
"""Class with method to request an account verification link"""
@token_required
def post(current_user, self, username):
"""Method to request account verification"""
unverified_user = UserModel().get_user(username)
if current_user['username'] != username:
return {
"status": 403,
"message": "A user can only request verification for their own account"
}, 403
email = unverified_user['email']
public_id = unverified_user['public_id']
json_verification_link = request.json.get('verificationlink', None)
verification_token = get_token(public_id, 30, True).decode('UTF-8')
if json_verification_link is None:
return {
"message": {
"verificationlink": "This key is required"
}
}, 400
verification_link = json_verification_link + '?username=' + username + '?token=' + verification_token
verification_message = "If you didn't ask to verify this address, you can ignore this email.\n\nThanks,\n\nYour iReporter team"
subject = "Account Verification"
body = "Follow this link within half an hour to verify your email address: {0}\n{1}".format(
verification_link, verification_message)
if send(email, subject, body) is True:
return jsonify({
"status": 200,
"message": "Verification link has been sent to your email"
})
return {
"status": 400,
"message": "Verification failed please try again"
}, 400
| 33.482051 | 135 | 0.551386 | 6,278 | 0.961556 | 0 | 0 | 5,602 | 0.858018 | 0 | 0 | 2,047 | 0.313524 |
fd247bba2b56b1306086374a12025c1833517c10
| 7,357 |
py
|
Python
|
LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py
|
ngrover2/Automatic_Lexicon_Induction
|
b58a1d55f294293161dc23ab2e6d669c1c5e90d8
|
[
"MIT"
] | null | null | null |
LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py
|
ngrover2/Automatic_Lexicon_Induction
|
b58a1d55f294293161dc23ab2e6d669c1c5e90d8
|
[
"MIT"
] | null | null | null |
LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py
|
ngrover2/Automatic_Lexicon_Induction
|
b58a1d55f294293161dc23ab2e6d669c1c5e90d8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import traceback
import ujson
from pprint import pprint
from textblob import TextBlob as tb
from textblob import Word as wd
import shutil
from collections import defaultdict
from gensim.corpora import Dictionary
from socialconfig import config
class get_reviews_iterable(object):
def __init__(self, rfname):
self.read_file = rfname
def __iter__(self):
try:
print("Opening file:{file}".format(file=self.read_file))
with open(self.read_file,'r') as file:
for line in file:
yield line
        except Exception:
            print("Error reading {f}:".format(f=self.read_file))
            # print(traceback.format_exc())
            raise
class make_dictionaries(object):
"""docstring for ClassName"""
def __init__(self):
pass
def create_dictionary(self):
YELP_DATASET_DIR = config.get("YELP_DATASET_DIR",None)
SAVE_REVIEWS_BY_CATEGORY_DIRECTORY = config.get("SAVE_REVIEWS_BY_CATEGORY_DIRECTORY",None)
SAVE_DICTIONARY_DIR = config.get("SAVE_DICTIONARY_DIR",None)
SAVE_BAG_OF_WORDS_DIR = config.get("SAVE_BAG_OF_WORDS_DIR",None)
SAVE_N_BAG_OF_WORDS_DOCS_PER_FILE = int(config.get("SAVE_N_BAG_OF_WORDS_DOCS_PER_FILE",25000))
if not (YELP_DATASET_DIR and SAVE_REVIEWS_BY_CATEGORY_DIRECTORY and SAVE_DICTIONARY_DIR and SAVE_BAG_OF_WORDS_DIR and SAVE_DICTIONARY_DIR):
print("config keys are not set correctly in the config file: socialconfig.py")
exit(0)
SAVE_UNFILTERED_DICTIONARY_DIR = os.path.join(SAVE_DICTIONARY_DIR,"Unfiltered")
if not os.path.exists(SAVE_REVIEWS_BY_CATEGORY_DIRECTORY) and not os.path.isdir(SAVE_REVIEWS_BY_CATEGORY_DIRECTORY):
raise("Directory {d} does not exist".format(d=SAVE_REVIEWS_BY_CATEGORY_DIRECTORY))
if not (os.path.exists(SAVE_BAG_OF_WORDS_DIR) and os.path.isdir(SAVE_BAG_OF_WORDS_DIR)):
os.makedirs(SAVE_BAG_OF_WORDS_DIR)
if not (os.path.exists(SAVE_UNFILTERED_DICTIONARY_DIR) and os.path.isdir(SAVE_UNFILTERED_DICTIONARY_DIR)):
os.makedirs(SAVE_UNFILTERED_DICTIONARY_DIR)
for pardir, sub_dirs, files in os.walk(SAVE_REVIEWS_BY_CATEGORY_DIRECTORY):
if len(files) > 0:
error_count = 0
review_docs = []
negative_docs = []
positive_docs = []
doc_count = 0
docs_per_file = SAVE_N_BAG_OF_WORDS_DOCS_PER_FILE
file_num = str((doc_count/docs_per_file) + 1)
for file in files:
if "yelp_reviews_" in file and "category" in pardir:
reviews = get_reviews_iterable(os.path.join(pardir,file))
yelp_category = pardir.split('/')[-1]
CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR = os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category)
if not (os.path.exists(CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR) and os.path.isdir(CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR)):
os.makedirs(CATEGORY_SPECIFIC_BAG_OF_WORDS_DIR)
fname = os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category,"{cat}_file_{file_num}.txt".format(cat=yelp_category,file_num=file_num))
bow_file = open(fname,'w')
print("Writing docs (in bag of words form) for {cat} to directory: {d}".format(cat=yelp_category,d=os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category)))
for review in reviews:
try:
review_dict = ujson.loads(review)
                            except Exception:
                                error_count += 1
                                continue
adjs = review_dict.get("adjectives",None)
rating = int(review_dict.get("rating",-1))
if adjs:
doc_count += 1
bow_file.write(ujson.dumps(adjs.encode("utf-8")) + "\n")
review_docs.append(adjs.strip().split())
if (doc_count%docs_per_file) == 0:
if bow_file:
bow_file.close()
file_num = str((doc_count/docs_per_file) + 1)
fname = os.path.join(SAVE_BAG_OF_WORDS_DIR,yelp_category,"{cat}_file_{file_num}.txt".format(cat=yelp_category,file_num=file_num))
bow_file = open(fname,'w')
if rating:
if rating > 3:
positive_docs.append(adjs.strip().split())
elif rating < 3:
negative_docs.append(adjs.strip().split())
else:
pass
print("Wrote {total} docs in {cat} category".format(total=str(doc_count),cat=yelp_category))
dictionary = Dictionary(review_docs)
CATEGORY_SPECIFIC_DICT_DIR = os.path.join(SAVE_UNFILTERED_DICTIONARY_DIR,yelp_category)
POSITIVE_SUB_DIR = os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"positive")
NEGATIVE_SUB_DIR = os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"negative")
if not (os.path.exists(CATEGORY_SPECIFIC_DICT_DIR) and os.path.isdir(CATEGORY_SPECIFIC_DICT_DIR)):
os.makedirs(CATEGORY_SPECIFIC_DICT_DIR)
os.makedirs(POSITIVE_SUB_DIR)
os.makedirs(NEGATIVE_SUB_DIR)
dictionary.save(os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_dict.dict".format(yelp_category=yelp_category)))
dictionary.save_as_text(os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_dict.txt".format(yelp_category=yelp_category)))
sorted_doc_freqs = sorted(dictionary.dfs.items(),key = lambda x : x[1],reverse=True)
# print("Will save file in:\n " + os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_dict.txt".format(yelp_category=yelp_category)))
with open(os.path.join(CATEGORY_SPECIFIC_DICT_DIR,"{yelp_category}_words_doc_frequencies.txt".format(yelp_category=yelp_category)),'w') as df_file:
for (token_id,doc_freq) in sorted_doc_freqs:
df_file.write(str(dictionary.get(token_id,"Unknown").encode('utf-8')) + " " + str(doc_freq) + "\n")
del dictionary
del review_docs
del sorted_doc_freqs
pos_dictionary = Dictionary(positive_docs)
del positive_docs
neg_dictionary = Dictionary(negative_docs)
del negative_docs
pos_dictionary.save(os.path.join(POSITIVE_SUB_DIR,"{yelp_category}_pos_dict.dict".format(yelp_category=yelp_category)))
pos_dictionary.save_as_text(os.path.join(POSITIVE_SUB_DIR,"{yelp_category}_pos_dict.txt".format(yelp_category=yelp_category)))
sorted_pos_doc_freqs = sorted(pos_dictionary.dfs.items(),key = lambda x : x[1],reverse=True)
with open(os.path.join(POSITIVE_SUB_DIR,"{yelp_category}_pos_words_doc_frequencies.txt".format(yelp_category=yelp_category)),'w') as df_file:
for (token_id,doc_freq) in sorted_pos_doc_freqs:
df_file.write(str(pos_dictionary.get(token_id,"Unknown").encode('utf-8')) + " " + str(doc_freq) + "\n")
del pos_dictionary
del sorted_pos_doc_freqs
neg_dictionary.save(os.path.join(NEGATIVE_SUB_DIR,"{yelp_category}_neg_dict.dict".format(yelp_category=yelp_category)))
neg_dictionary.save_as_text(os.path.join(NEGATIVE_SUB_DIR,"{yelp_category}_neg_dict.txt".format(yelp_category=yelp_category)))
sorted_neg_doc_freqs = sorted(neg_dictionary.dfs.items(),key = lambda x : x[1],reverse=True)
with open(os.path.join(NEGATIVE_SUB_DIR,"{yelp_category}_neg_words_doc_frequencies.txt".format(yelp_category=yelp_category)),'w') as df_file:
for (token_id,doc_freq) in sorted_neg_doc_freqs:
df_file.write(str(neg_dictionary.get(token_id,"Unknown").encode('utf-8')) + " " + str(doc_freq) + "\n")
del neg_dictionary
del sorted_neg_doc_freqs
print("{count} {cat} reviews were discarded because of parsing errors".format(count=error_count,cat=yelp_category))
print("Created dictionary for {cat} tokens".format(cat=yelp_category))
if __name__ == "__main__":
try:
mk = make_dictionaries()
mk.create_dictionary()
except:
raise
| 42.773256 | 156 | 0.740791 | 6,956 | 0.945494 | 276 | 0.037515 | 0 | 0 | 0 | 0 | 1,254 | 0.17045 |
fd25176d16569d58b76384f5892d5e731b3f5603
| 434 |
py
|
Python
|
tests/test_demo.py
|
PrabhuLoganathan/Pytest-Selenium-POM--Google
|
d0b50faf28a0c3797d1b0bc838d77f82dd0be81c
|
[
"MIT"
] | null | null | null |
tests/test_demo.py
|
PrabhuLoganathan/Pytest-Selenium-POM--Google
|
d0b50faf28a0c3797d1b0bc838d77f82dd0be81c
|
[
"MIT"
] | 1 |
2021-04-22T12:02:19.000Z
|
2021-04-22T12:02:19.000Z
|
tests/test_demo.py
|
wadle/test_demo_pytest_selenium
|
bc9f2b4a9da895568da2cc3f0bc904f78775884c
|
[
"MIT"
] | 1 |
2020-11-26T20:03:07.000Z
|
2020-11-26T20:03:07.000Z
|
from pages.demo_page import DemoPage
class TestDemo:
def test_open_page(self, browser):
"""
:param browser:
:return:
"""
obj = DemoPage(browser)
obj.open_page()
def test_verify_page_title(self, browser):
"""
:param browser:
:return:
"""
obj = DemoPage(browser)
obj.open_page()
assert obj.driver.title == "Google"
| 16.074074 | 46 | 0.532258 | 392 | 0.903226 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.281106 |
fd266f079f9daa527e01e31d5df3c4df79e8150b
| 1,126 |
py
|
Python
|
day 03/day03_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | 2 |
2020-12-17T18:49:20.000Z
|
2021-02-20T16:48:14.000Z
|
day 03/day03_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | null | null | null |
day 03/day03_part1.py
|
MischaDy/PyAdventOfCode2020
|
3e0a1a61ac930d7e30a0104ac617008297508fcb
|
[
"CC0-1.0"
] | 3 |
2020-12-20T19:08:32.000Z
|
2020-12-26T22:11:15.000Z
|
from helpers.cyclic_list import CyclicList
from helpers.coordinates2d import Coordinates2D
RUN_TEST = False
TEST_SOLUTION = 7
TEST_INPUT_FILE = 'test_input_day_03.txt'
INPUT_FILE = 'input_day_03.txt'
START = Coordinates2D((0, 0)) # top left corner
TRAJECTORY = Coordinates2D((3, 1)) # right 3, down 1
ARGS = [START, TRAJECTORY]
def main_part1(input_file, start, trajectory):
with open(input_file) as file:
tree_map = list(map(lambda line: CyclicList(line.rstrip()), file.readlines()))
solution = count_trees(tree_map, trajectory, start)
return solution
def count_trees(tree_map, trajectory, start):
bottom = len(tree_map)
cur_pos = start
num_trees = 0 # start is a tree-free square
while cur_pos.y < bottom:
num_trees += tree_map[cur_pos.y][cur_pos.x] == '#'
cur_pos = cur_pos + trajectory
return num_trees
if __name__ == '__main__':
if RUN_TEST:
solution = main_part1(TEST_INPUT_FILE, *ARGS)
print(solution)
assert (TEST_SOLUTION == solution)
else:
solution = main_part1(INPUT_FILE, *ARGS)
print(solution)
| 26.186047 | 86 | 0.688277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.103908 |
fd279c40ef3cc1786017d66b7c1e1b885d2e67e1
| 807 |
py
|
Python
|
binary_tree/e_path_sum.py
|
dhrubach/python-code-recipes
|
14356c6adb1946417482eaaf6f42dde4b8351d2f
|
[
"MIT"
] | null | null | null |
binary_tree/e_path_sum.py
|
dhrubach/python-code-recipes
|
14356c6adb1946417482eaaf6f42dde4b8351d2f
|
[
"MIT"
] | null | null | null |
binary_tree/e_path_sum.py
|
dhrubach/python-code-recipes
|
14356c6adb1946417482eaaf6f42dde4b8351d2f
|
[
"MIT"
] | null | null | null |
###############################################
# LeetCode Problem Number : 112
# Difficulty Level : Easy
# URL : https://leetcode.com/problems/path-sum/
###############################################
from binary_search_tree.tree_node import TreeNode
class BinaryTree:
def hasPathSum(self, root: TreeNode, sum: int) -> bool:
if not root:
return False
def dfs(node: TreeNode, total: int) -> bool:
nonlocal sum
res = False
total += node.val
if not node.left and not node.right:
return total == sum
if node.left:
res = dfs(node.left, total)
if node.right and not res:
res = dfs(node.right, total)
return res
return dfs(root, 0)
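# Illustrative usage sketch (not part of the original file); it assumes TreeNode
# can be constructed as TreeNode(val) with assignable .left/.right attributes.
if __name__ == "__main__":
    root = TreeNode(5)
    root.left = TreeNode(4)
    root.left.left = TreeNode(11)
    root.left.left.right = TreeNode(2)
    print(BinaryTree().hasPathSum(root, 22))  # expected: True (5 + 4 + 11 + 2)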
| 25.21875 | 59 | 0.484511 | 552 | 0.684015 | 0 | 0 | 0 | 0 | 0 | 0 | 197 | 0.244114 |
fd2a11e31304af5fc6efc0099f574785c36f1cf8
| 127 |
py
|
Python
|
High School/9th Grade APCSP (Python)/Unit 7/07.02.03.py
|
SomewhereOutInSpace/Computer-Science-Class
|
f5d21850236a7a18dc53b4a650ecbe9a11781f1d
|
[
"Unlicense"
] | null | null | null |
High School/9th Grade APCSP (Python)/Unit 7/07.02.03.py
|
SomewhereOutInSpace/Computer-Science-Class
|
f5d21850236a7a18dc53b4a650ecbe9a11781f1d
|
[
"Unlicense"
] | null | null | null |
High School/9th Grade APCSP (Python)/Unit 7/07.02.03.py
|
SomewhereOutInSpace/Computer-Science-Class
|
f5d21850236a7a18dc53b4a650ecbe9a11781f1d
|
[
"Unlicense"
] | null | null | null |
st1 = input()
st2 = input()
st3 = input()
listy = [st1, st2, st3]
listy.sort()
print(listy[0])
print(listy[1])
print(listy[2])
| 14.111111 | 23 | 0.629921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fd2a6655ca5c2bb5ada546fc62616fc063ba84a1
| 3,900 |
py
|
Python
|
tests/unit/guests/linux/storage/disk.py
|
tessia-project/tessia-baselib
|
07004b7f6462f081a6f7e810954fd7e0d2cdcf6b
|
[
"Apache-2.0"
] | 1 |
2022-01-27T01:32:14.000Z
|
2022-01-27T01:32:14.000Z
|
tests/unit/guests/linux/storage/disk.py
|
tessia-project/tessia-baselib
|
07004b7f6462f081a6f7e810954fd7e0d2cdcf6b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/guests/linux/storage/disk.py
|
tessia-project/tessia-baselib
|
07004b7f6462f081a6f7e810954fd7e0d2cdcf6b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test module for disk module
"""
#
# IMPORTS
#
from tessia.baselib.common.ssh.client import SshClient
from tessia.baselib.common.ssh.shell import SshShell
from tessia.baselib.guests.linux.storage import disk as disk_module
from unittest import mock
from unittest import TestCase
#
# CONSTANTS AND DEFINITIONS
#
PARAMS_WITH_SYS_ATTRS = {
"system_attributes": {
"libvirt": "somexml"
},
"volume_id": "some_disk_id"
}
PARAMS_WITHOUT_SYS_ATTRS = {
"volume_id": "some_disk_id"
}
#
# CODE
#
class TestDisk(TestCase):
"""
Class that provides unit tests for the DiskBase class.
"""
def setUp(self):
"""
Create mocks that are used in all test cases.
"""
# since the class is abstract we need to define a concrete child class
# to be able to instantiate it
class DiskConcrete(disk_module.DiskBase):
"""
Concrete class of DiskBase
"""
def activate(self, *args, **kwargs):
super().activate(*args, **kwargs)
self._disk_cls = DiskConcrete
patcher = mock.patch.object(disk_module, 'sleep', autospec=True)
patcher.start()
self.addCleanup(patcher.stop)
self._mock_host_conn = mock.Mock(spec_set=SshClient)
self._mock_shell = mock.Mock(spec_set=SshShell)
self._mock_host_conn.open_shell.return_value = self._mock_shell
# setUp()
def _create_disk(self, parameters):
"""
Auxiliary method to create a disk.
"""
return self._disk_cls(parameters, self._mock_host_conn)
def test_abstract_methods(self):
"""
Confirm that abstract methods raise NotImplementedError if called.
"""
disk = self._create_disk(PARAMS_WITH_SYS_ATTRS)
self.assertRaises(NotImplementedError, disk.activate)
# test_abstract_methods()
def test_init(self):
"""
Test proper initialization
"""
disk = self._create_disk(PARAMS_WITH_SYS_ATTRS)
self.assertEqual(disk.volume_id, 'some_disk_id')
# test_init()
def test_enable_device(self):
"""
Test the protected method that enable the device.
"""
disk = self._create_disk({'volume_id': 'some_id'})
devicenr = "some device number"
self._mock_shell.run.side_effect = [(0, ""), (0, "")]
disk._enable_device(devicenr)
cmd1 = "echo free {} > /proc/cio_ignore".format(devicenr)
cmd2 = 'chccwdev -e {}'.format(devicenr)
calls = [mock.call(cmd1), mock.call(cmd2)]
self.assertEqual(self._mock_shell.run.mock_calls, calls)
# test_enable_device()
def test_enable_device_fails(self):
"""
Test the protected method that enable the device in the case
it fails to be enabled.
"""
disk = self._create_disk({'volume_id': 'some_id'})
devicenr = "some device number"
ret_output = [(0, "")]
# _enable_device perform many attempts
for _ in range(0, 6):
ret_output.append((1, ""))
self._mock_shell.run.side_effect = ret_output
self.assertRaisesRegex(RuntimeError, "Failed to activate",
disk._enable_device, devicenr)
# test_enable_device_fails()
# TestBaseDisk
| 30 | 78 | 0.646923 | 2,792 | 0.715897 | 0 | 0 | 0 | 0 | 0 | 0 | 1,740 | 0.446154 |
fd2a6afac5326ae943f8dfecdd01a780b28ea0e3
| 5,888 |
py
|
Python
|
scripts/mv_drawables.py
|
euitam/MoveDrawable
|
33923276d856de8effff9e958ba7a33767739137
|
[
"Apache-2.0"
] | null | null | null |
scripts/mv_drawables.py
|
euitam/MoveDrawable
|
33923276d856de8effff9e958ba7a33767739137
|
[
"Apache-2.0"
] | null | null | null |
scripts/mv_drawables.py
|
euitam/MoveDrawable
|
33923276d856de8effff9e958ba7a33767739137
|
[
"Apache-2.0"
] | null | null | null |
import sys, os, errno, shutil, signal, subprocess
from glob import glob
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def printTitle():
print bcolors.WARNING
print " _ _ _ _ "
print " /_\ _ __ __| |_ __ ___ (_) __| |"
print " //_\\\\| '_ \ / _` | '__/ _ \| |/ _` |"
print "/ _ \ | | | (_| | | | (_) | | (_| |"
print "\_/ \_/_| |_|\__,_|_| \___/|_|\__,_|"
print ""
print " ___ _ _ "
print " / \_ __ __ ___ ____ _| |__ | | ___ "
print " / /\ / '__/ _` \ \ /\ / / _` | '_ \| |/ _ \\"
print " / /_//| | | (_| |\ V V / (_| | |_) | | __/"
print "/___,' |_| \__,_| \_/\_/ \__,_|_.__/|_|\___|"
print bcolors.ENDC
def moveFileToTmp(file):
destFile = ''
if '1.5x' in file:
filename = os.path.basename(file).replace('@1.5x', '').replace('1.5x', '').strip()
destFile = HDPI_DIR + filename
elif '2x' in file:
filename = os.path.basename(file).replace('@2x', '').replace('2x', '').strip()
destFile = XHDPI_DIR + filename
elif '3x' in file:
filename = os.path.basename(file).replace('@3x', '').replace('3x', '').strip()
destFile = XXHDPI_DIR + filename
elif '4x' in file:
filename = os.path.basename(file).replace('@4x', '').replace('4x', '').strip()
destFile = XXXHDPI_DIR + filename
else:
filename = os.path.basename(file).replace('@1x', '').replace('1x', '').strip()
destFile = MDPI_DIR + filename
if len(destFile) > 0:
shutil.copyfile(file, destFile)
def createDirIfNotExists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def renameFile(srcFilename, destFilename):
images = [y for x in os.walk(TMP_DIR) for y in glob(os.path.join(x[0], srcFilename))]
for file in images:
dirName = os.path.dirname(file)
os.rename(file, dirName + '/' + destFilename)
def moveToDest(cwebp):
for folder in RESULT_DIRS:
currentFolder = TMP_DIR + folder
destFolder = destDir + folder
createDirIfNotExists(destFolder)
srcFiles = os.listdir(currentFolder)
for file in srcFiles:
fullFileName = os.path.join(currentFolder, file)
if len(cwebp) > 0:
destFile = destFolder + file.replace('.png', '.webp').strip()
FNULL = open(os.devnull, 'w')
subprocess.call([cwebp, '-q', '80', fullFileName, '-o', destFile], stdout=FNULL, stderr=subprocess.STDOUT)
else:
destFile = destFolder + file
shutil.move(fullFileName, destFile)
print "move {} to {}".format(fullFileName, destFile)
def cleanup():
if os.path.isdir(TMP_DIR):
shutil.rmtree(TMP_DIR)
def getWebpConverter():
base_dir = os.path.dirname(os.path.realpath(__file__))
cwebp = "{}/cwebp".format(base_dir)
if os.path.isfile(cwebp):
return cwebp
else:
return ""
def sigint_handler(signum, frame):
print ""
print ""
print bcolors.WARNING + "Exit script requested" + bcolors.ENDC
sys.exit()
signal.signal(signal.SIGINT, sigint_handler)
# BEGIN SCRIPT
printTitle()
print bcolors.FAIL
print "Exit script anytime with CTRL+C"
print bcolors.ENDC
# Check argument number
if len(sys.argv) < 3:
print "Usage: {} SRC_DIRECTORY ANDROID_RESOURCES_DIRECTORY".format(sys.argv[0])
sys.exit()
srcDir = sys.argv[1]
destDir = sys.argv[2]
TMP_DIR = srcDir + '/tmp'
MDPI = '/drawable-mdpi/'
HDPI = '/drawable-hdpi/'
XHDPI = '/drawable-xhdpi/'
XXHDPI = '/drawable-xxhdpi/'
XXXHDPI = '/drawable-xxxhdpi/'
MDPI_DIR = TMP_DIR + MDPI
HDPI_DIR = TMP_DIR + HDPI
XHDPI_DIR = TMP_DIR + XHDPI
XXHDPI_DIR = TMP_DIR + XXHDPI
XXXHDPI_DIR = TMP_DIR + XXXHDPI
RESULT_DIRS = [MDPI, HDPI, XHDPI, XXHDPI, XXXHDPI]
cleanup()
# Check source directory
if not os.path.isdir(srcDir):
print "{} should be a directory".format(srcDir)
exit()
# Check destination directory
if not os.path.isdir(destDir):
print "{} should be a directory".format(destDir)
exit()
if not os.path.isdir(destDir+'/values'):
print "{} is not an Android resources directory".format(destDir)
exit()
if not os.path.isdir(destDir+'/layout'):
print "{} is not an Android resources directory".format(destDir)
exit()
images = [y for x in os.walk(srcDir) for y in glob(os.path.join(x[0], '*.png'))]
if len(images) == 0:
print bcolors.FAIL+"No files to process"+bcolors.ENDC
exit()
# CONSTANTS
# Create density directories
createDirIfNotExists(MDPI_DIR)
createDirIfNotExists(HDPI_DIR)
createDirIfNotExists(XHDPI_DIR)
createDirIfNotExists(XXHDPI_DIR)
createDirIfNotExists(XXXHDPI_DIR)
for file in images:
# print "- {}".format(file)
moveFileToTmp(file)
# build distinct names
newImages = [y for x in os.walk(TMP_DIR) for y in glob(os.path.join(x[0], '*.png'))]
distinctNames = []
print bcolors.BOLD + 'Drawable files found:' + bcolors.ENDC
for file in newImages:
name = os.path.basename(file)
if not name in distinctNames:
print '- {}'.format(name)
distinctNames.append(name)
# Ask for renaming
print ""
if len(distinctNames):
print bcolors.HEADER + "Any existing file will be overwritten by the renaming" + bcolors.ENDC
for name in distinctNames:
newName = raw_input('Rename '+bcolors.OKBLUE+name.replace('.png', '')+bcolors.ENDC+' to ('+bcolors.UNDERLINE+'leave blank to skip renaming'+bcolors.ENDC+'): ')
newName = "{}.png".format(newName).strip()
if len(newName) > 0:
renameFile(name, newName)
# Ask for WebP compression
cwebp = getWebpConverter()
compress = False
if len(cwebp) > 0:
compressResponse = raw_input('Compress files to WebP format? [y] or [n] ')
compressResponse = compressResponse.strip()
if len(compressResponse) > 0 and (compressResponse == 'y' or compressResponse == 'Y'):
compress = True
# Move to destination folder
if compress:
moveToDest(cwebp)
else:
moveToDest("")
print ""
print bcolors.OKGREEN + '{} resource files moved to workspace'.format(len(newImages)) + bcolors.ENDC
cleanup()
| 28.038095 | 160 | 0.667969 | 178 | 0.030231 | 0 | 0 | 0 | 0 | 0 | 0 | 1,553 | 0.263757 |
fd2ae8dc293d1b7377165b6678e015927d2d75d1
| 5,479 |
py
|
Python
|
kornia/x/trainer.py
|
AK391/kornia
|
a2535eb7593ee2fed94d23cc720804a16f9f0e7e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/x/trainer.py
|
AK391/kornia
|
a2535eb7593ee2fed94d23cc720804a16f9f0e7e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/x/trainer.py
|
AK391/kornia
|
a2535eb7593ee2fed94d23cc720804a16f9f0e7e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import logging
from typing import Callable, Dict
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# the accelerator library is a requirement for the Trainer
# but it is optional for regular users of kornia.
try:
from accelerate import Accelerator
except ImportError:
Accelerator = None
from .metrics import AverageMeter
from .utils import Configuration, TrainerState
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
callbacks_whitelist = [
"preprocess", "augmentations", "evaluate", "fit", "checkpoint", "terminate"
]
class Trainer:
"""Base class to train the different models in kornia.
.. warning::
The API is experimental and subject to be modified based on the needs of kornia models.
Args:
model: the nn.Module to be optimized.
train_dataloader: the data loader used in the training loop.
valid_dataloader: the data loader used in the validation loop.
criterion: the nn.Module with the function that computes the loss.
optimizer: the torch optimizer object to be used during the optimization.
        scheduler: the torch scheduler object defining the scheduling strategy.
accelerator: the Accelerator object to distribute the training.
config: a TrainerConfiguration structure containing the experiment hyper parameters.
callbacks: a dictionary containing the pointers to the functions to overrides. The
main supported hooks are ``evaluate``, ``preprocess``, ``augmentations`` and ``fit``.
.. important::
The API heavily relies on `accelerate <https://github.com/huggingface/accelerate/>`_.
In order to use it, you must: ``pip install kornia[x]``
.. seealso::
Learn how to use the API in our documentation
`here <https://kornia.readthedocs.io/en/latest/get-started/training.html>`_.
"""
def __init__(
self,
model: nn.Module,
train_dataloader: DataLoader,
valid_dataloader: DataLoader,
criterion: nn.Module,
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler.CosineAnnealingLR,
config: Configuration,
callbacks: Dict[str, Callable] = {},
) -> None:
# setup the accelerator
if Accelerator is None:
raise ModuleNotFoundError(
"accelerate library is not installed: pip install kornia[x]")
self.accelerator = Accelerator()
# setup the data related objects
self.model = self.accelerator.prepare(model)
self.train_dataloader = self.accelerator.prepare(train_dataloader)
self.valid_dataloader = self.accelerator.prepare(valid_dataloader)
self.criterion = criterion.to(self.device)
self.optimizer = self.accelerator.prepare(optimizer)
self.scheduler = scheduler
self.config = config
# configure callbacks
for fn_name, fn in callbacks.items():
if fn_name not in callbacks_whitelist:
raise ValueError(f"Not supported: {fn_name}.")
setattr(self, fn_name, fn)
# hyper-params
self.num_epochs = config.num_epochs
self._logger = logging.getLogger('train')
@property
def device(self) -> torch.device:
return self.accelerator.device
def backward(self, loss: torch.Tensor) -> None:
self.accelerator.backward(loss)
def fit_epoch(self, epoch: int) -> None:
# train loop
self.model.train()
losses = AverageMeter()
for sample_id, sample in enumerate(self.train_dataloader):
source, target = sample # this might change with new pytorch dataset structure
self.optimizer.zero_grad()
# perform the preprocess and augmentations in batch
img = self.preprocess(source)
img = self.augmentations(img)
# make the actual inference
output = self.model(img)
loss = self.criterion(output, target)
self.backward(loss)
self.optimizer.step()
losses.update(loss.item(), img.shape[0])
if sample_id % 50 == 0:
self._logger.info(
f"Train: {epoch + 1}/{self.num_epochs} "
f"Sample: {sample_id + 1}/{len(self.train_dataloader)} "
f"Loss: {losses.val:.3f} {losses.avg:.3f}"
)
def fit(self,) -> None:
# execute the main loop
# NOTE: Do not change and keep this structure clear for readability.
for epoch in range(self.num_epochs):
# call internally the training loop
# NOTE: override to customize your evaluation routine
self.fit_epoch(epoch)
# call internally the evaluation loop
# NOTE: override to customize your evaluation routine
valid_stats = self.evaluate()
self.checkpoint(self.model, epoch, valid_stats)
state = self.terminate(self.model, epoch, valid_stats)
if state == TrainerState.TERMINATE:
break
# END OF THE EPOCH
self.scheduler.step()
...
def evaluate(self):
...
def preprocess(self, x):
return x
def augmentations(self, x):
return x
def checkpoint(self, *args, **kwargs):
...
def terminate(self, *args, **kwargs):
...
| 34.459119 | 95 | 0.632962 | 4,882 | 0.891039 | 0 | 0 | 86 | 0.015696 | 0 | 0 | 2,259 | 0.412302 |
fd2c1f1342cad7325a43f5762d5c2d1d94cfe573
| 2,795 |
py
|
Python
|
datasets/transformations/jpeg_compress.py
|
bytedance/Hammer
|
388ed20b3d9b34f33f5357d75f8fe5d726782ec8
|
[
"MIT"
] | 97 |
2022-02-08T09:00:57.000Z
|
2022-03-23T05:33:35.000Z
|
datasets/transformations/jpeg_compress.py
|
bytedance/Hammer
|
388ed20b3d9b34f33f5357d75f8fe5d726782ec8
|
[
"MIT"
] | null | null | null |
datasets/transformations/jpeg_compress.py
|
bytedance/Hammer
|
388ed20b3d9b34f33f5357d75f8fe5d726782ec8
|
[
"MIT"
] | 7 |
2022-02-08T15:13:02.000Z
|
2022-03-19T19:11:13.000Z
|
# python3.7
"""Implements JPEG compression on images."""
import cv2
import numpy as np
try:
import nvidia.dali.fn as fn
import nvidia.dali.types as types
except ImportError:
fn = None
from utils.formatting_utils import format_range
from .base_transformation import BaseTransformation
__all__ = ['JpegCompress']
class JpegCompress(BaseTransformation):
"""Applies random JPEG compression to images.
This transformation can be used as an augmentation by distorting images.
In other words, the input image(s) will be first compressed (i.e., encoded)
with a random quality ratio, and then decoded back to the image space.
The distortion is introduced in the encoding process.
Args:
quality_range: The range within which to uniformly sample a quality
value after compression. 100 means highest and 0 means lowest.
(default: (40, 60))
prob: Probability of applying JPEG compression. (default: 0.5)
"""
def __init__(self, prob=0.5, quality_range=(40, 60)):
super().__init__(support_dali=(fn is not None))
self.prob = np.clip(prob, 0, 1)
self.quality_range = format_range(quality_range, min_val=0, max_val=100)
def _CPU_forward(self, data):
# Early return if no compression is needed.
if np.random.uniform() >= self.prob:
return data
# Set compression quality.
quality = np.random.randint(*self.quality_range)
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
# Compress images.
outputs = []
for image in data:
_, encoded_image = cv2.imencode('.jpg', image, encode_param)
decoded_image = cv2.imdecode(encoded_image, cv2.IMREAD_UNCHANGED)
if decoded_image.ndim == 2:
decoded_image = decoded_image[:, :, np.newaxis]
outputs.append(decoded_image)
return outputs
def _DALI_forward(self, data):
# Set compression quality.
if self.quality_range[0] == self.quality_range[1]:
quality = self.quality_range[0]
else:
quality = fn.random.uniform(range=self.quality_range)
quality = fn.cast(quality, dtype=types.INT32)
# Compress images.
compressed_data = fn.jpeg_compression_distortion(
data, quality=quality)
if not isinstance(compressed_data, (list, tuple)):
compressed_data = [compressed_data]
# Determine whether the transformation should be applied.
cond = fn.random.coin_flip(dtype=types.BOOL, probability=self.prob)
outputs = []
for image, compressed_image in zip(data, compressed_data):
outputs.append(compressed_image * cond + image * (cond ^ True))
return outputs
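# Illustrative usage sketch (not part of the original module); it calls the
# protected CPU path directly on a random uint8 image purely for demonstration.
if __name__ == "__main__":
    aug = JpegCompress(prob=1.0, quality_range=(40, 60))
    dummy = [np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)]
    outputs = aug._CPU_forward(dummy)
    print(outputs[0].shape)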
| 35.379747 | 80 | 0.65975 | 2,465 | 0.881932 | 0 | 0 | 0 | 0 | 0 | 0 | 872 | 0.311986 |
fd2ca1a6e56d2464e000ae2d9a68e5afd6f6c238
| 2,046 |
py
|
Python
|
venv/VFR/flask_app_3d.py
|
flhataf/Virtual-Fitting-Room
|
e5b41849df963cebd3b7deb7e87d643ece5b6d18
|
[
"MIT"
] | null | null | null |
venv/VFR/flask_app_3d.py
|
flhataf/Virtual-Fitting-Room
|
e5b41849df963cebd3b7deb7e87d643ece5b6d18
|
[
"MIT"
] | null | null | null |
venv/VFR/flask_app_3d.py
|
flhataf/Virtual-Fitting-Room
|
e5b41849df963cebd3b7deb7e87d643ece5b6d18
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 00:31:35 2021
@author: RayaBit
"""
from flask import Flask, render_template, Response
from imutils.video import VideoStream
from skeletonDetector import skeleton
import cv2
from skeleton3DDetector import Skeleton3dDetector
from visualization import Visualizer
import time
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index2.html')
def gen():
vs = VideoStream(src=0).start()
#vs = cv2.VideoCapture("kid3.mp4")
# sk3d = Skeleton3dDetector(width = vs.get(3), height = vs.get(4))#width and height?
# 320, 240
sk3d = Skeleton3dDetector(width = 320, height = 240)#width and height?
init_buff=[]
for _ in range(2):
frame = vs.read()
while frame is None:
frame = vs.read()
processed_fram, x = skeleton(frame)
init_buff.append(x)
sk3d.fill_buff(init_buff)
cv2.waitKey(0)
#prev_frame = processed_fram
visualizer = Visualizer(frame.shape[0], frame.shape[1])
    while cv2.waitKey(1) < 0:  # breaks when any key is pressed
t = time.time()
frame = vs.read()
if frame is None:
continue
prev_frame = processed_fram #curr actually
processed_fram, x = skeleton(frame)
kps3d = sk3d.detect(x) # buff is initialized fr3d are kps3d for prev_frame
img = visualizer.draw(prev_frame,kps3d)
cv2.putText(img, "time taken = {:.2f} sec".format(time.time() - t), (50, 50), cv2.FONT_HERSHEY_COMPLEX, .5,
(255, 50, 0), 2, lineType=cv2.LINE_AA)
(flag, encodedImage) = cv2.imencode(".jpg", img)
if not flag:
continue
yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
bytearray(encodedImage) + b'\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host="127.0.0.1", port=8087)
| 26.571429 | 115 | 0.616813 | 0 | 0 | 1,422 | 0.695015 | 195 | 0.095308 | 0 | 0 | 525 | 0.256598 |
fd2dbbfa0aac3167c6b35b08529f51283edf8826
| 8,144 |
py
|
Python
|
schedsi/threads/thread.py
|
z33ky/schedsi
|
3affe28a3e1d2001c639d7c0423cb105d1991590
|
[
"CC0-1.0"
] | 1 |
2017-08-03T12:58:53.000Z
|
2017-08-03T12:58:53.000Z
|
schedsi/threads/thread.py
|
z33ky/schedsi
|
3affe28a3e1d2001c639d7c0423cb105d1991590
|
[
"CC0-1.0"
] | null | null | null |
schedsi/threads/thread.py
|
z33ky/schedsi
|
3affe28a3e1d2001c639d7c0423cb105d1991590
|
[
"CC0-1.0"
] | null | null | null |
"""Define the :class:`Thread`."""
import threading
from schedsi.cpu import request as cpurequest
from schedsi.cpu.time import Time
#: Whether to log individual times, or only the sum
LOG_INDIVIDUAL = True
class _ThreadStats: # pylint: disable=too-few-public-methods
"""Thread statistics."""
def __init__(self):
"""Create a :class:`_ThreadStats`."""
self.finished_time = None
self.response_time = None
self.ctxsw = []
self.run = []
self.total_run = Time(0)
self.wait = [[]]
class Thread:
"""The basic thread class.
A thread has
* an associated module
* a locally unique thread id
* ready time (`None` if finished)
* response units - after how many units to set
:attr:`stats.response_time` (`None` if irrelevant)
* remaining workload (`None` if infinite)
* a lock indicating whether this thread is currently active
* :class:`_ThreadStats`
"""
def __init__(self, module, tid=None, *, ready_time=0, units=None, response_units=None):
"""Create a :class:`Thread`."""
assert ready_time >= 0
assert units is None or units >= 0
assert response_units is None or units is None or response_units <= units
self.module = module
if tid is None:
tid = str(module.num_work_threads())
self.tid = tid
self.ready_time = ready_time
self.response_units = response_units
self.remaining = units
self.is_running = threading.Lock()
self.stats = _ThreadStats()
def execute(self):
"""Simulate execution.
The thread will run for as long as it can.
Yields a :class:`~schedsi.cpurequest.Request`.
Consumes the current time.
"""
locked = self.is_running.acquire(False)
assert locked
current_time = yield cpurequest.Request.current_time()
while True:
current_time = yield from self._execute(current_time, None)
def _update_ready_time(self, current_time):
"""Update ready_time while executing.
Includes some checks to make sure the state is sane.
"""
assert self.ready_time is not None and 0 <= self.ready_time <= current_time
assert self.is_running.locked()
self.ready_time = current_time
def _execute(self, current_time, run_time):
"""Simulate execution.
Update some state.
Yields an execute :class:`~schedsi.cpurequest.Request`
respecting :attr:`remaining`, so it won't yield more than that.
Returns the next current time or None if :attr:`remaining` is 0.
"""
assert not self.is_finished()
self._update_ready_time(current_time)
if run_time is None:
run_time = self.remaining
else:
assert run_time > 0
assert self.remaining is None or run_time <= self.remaining
current_time = yield cpurequest.Request.execute(run_time)
if self.is_finished():
yield cpurequest.Request.idle()
return
return current_time
def is_finished(self):
"""Check if the :class:`Thread` is finished.
Returns True if the :class:`Thread` still has something to do,
False otherwise.
"""
return self.remaining == 0
def run_ctxsw(self, _current_time, run_time):
"""Update runtime state.
This should be called just after a context switch to another thread
when this was just the active thread, or when returning to this thread,
whether the context switch was successful or not.
`current_time` refers to the time just after the context switch.
"""
if not self.is_running.locked():
# this can happen if the thread was just switched to when the timer elapsed
# and we now switch away from this thread
locked = self.is_running.acquire(False)
assert locked
if LOG_INDIVIDUAL:
self.stats.ctxsw.append(run_time)
def run_background(self, _current_time, _run_time):
"""Update runtime state.
This should be called while the thread is in the context stack, but not
active (not the top).
`_current_time` refers to the time just after the active thread has run.
"""
assert self.is_running.locked()
# never called for work threads
assert False
def run_crunch(self, current_time, run_time):
"""Update runtime state.
This should be called while the thread is active.
`current_time` refers to the time just after this thread has run.
"""
assert self.is_running.locked()
self.stats.total_run += run_time
if LOG_INDIVIDUAL:
self.stats.run[-1].append(run_time)
assert self.stats.total_run == sum(map(sum, self.stats.run))
self.ready_time += run_time
assert self.ready_time == current_time
if self.response_units is not None:
self.response_units -= run_time
if self.response_units <= 0:
self.stats.response_time = current_time + self.response_units
self.response_units = None
if self.remaining is not None:
assert self.remaining >= run_time
self.remaining -= run_time
if self.is_finished():
# the job was completed within the slice
self.end()
return
def end(self):
"""End execution."""
assert self.is_finished()
assert self.response_units is None
self.stats.finished_time = self.ready_time
# never start again
self.ready_time = None
def suspend(self, current_time):
"""Become suspended.
This should be called when the thread becomes inactive,
but will be resumed later.
`current_time` refers to the time before the context switch
away from this thread.
"""
if self.is_running.locked():
if LOG_INDIVIDUAL:
# only record waiting time if the thread has executed
self.stats.wait.append([])
if self.ready_time is not None:
self.ready_time = max(self.ready_time, current_time)
else:
assert self.stats.finished_time > 0
def resume(self, current_time, returning):
"""Resume execution.
This should be called after :meth:`suspend` to become active again.
`current_time` refers to the time just after the context switch.
"""
if self.is_finished():
return
assert self.ready_time is not None
if returning:
self._update_ready_time(current_time)
else:
if current_time >= self.ready_time:
if LOG_INDIVIDUAL:
# we only want to record waiting time if the thread is ready to execute
self.stats.wait[-1].append(current_time - self.ready_time)
self.stats.run.append([])
# we can't use _update_ready_time() here because we might not yet be executing
self.ready_time = current_time
def finish(self, _current_time):
"""Become inactive.
This should be called when the thread becomes inactive.
"""
assert self.is_running.locked()
self.is_running.release()
def get_statistics(self, current_time):
"""Obtain statistics.
Not thread-safe.
"""
# the CPU should be locked during this
# this means we can read data without locking self.is_running
stats = self.stats.__dict__.copy()
if not self.is_finished() and current_time >= self.ready_time:
assert self.ready_time is not None
stats['waiting'] = current_time - self.ready_time
if stats['wait'] and stats['wait'][-1] == []:
stats['wait'].pop()
stats['remaining'] = self.remaining
return stats
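# Usage sketch (illustrative, not part of the scheduler itself): constructing a worker
# thread with a finite workload. `my_module` is assumed to be a schedsi module object;
# passing an explicit `tid` avoids the num_work_threads() lookup in the constructor.
#   thread = Thread(my_module, tid="worker-0", ready_time=0, units=50, response_units=10)
#   thread.is_finished()  # False until run_crunch() has consumed all 50 units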
| 32.706827 | 94 | 0.608915 | 7,930 | 0.973723 | 1,215 | 0.14919 | 0 | 0 | 0 | 0 | 3,229 | 0.396488 |
fd2e8e4f0baea2b1df6722ef44a729d6280f10cc
| 5,552 |
py
|
Python
|
application/src/initializer/auxiliary_methods.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | null | null | null |
application/src/initializer/auxiliary_methods.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | 3 |
2021-06-08T21:39:12.000Z
|
2022-01-13T02:46:20.000Z
|
application/src/initializer/auxiliary_methods.py
|
eyardley/CSC648-SoftwareEngineering-Snapster
|
6dbe1cf9b34de6d6dbc6be75db3a34583f67c01a
|
[
"MIT"
] | 1 |
2021-05-09T21:01:28.000Z
|
2021-05-09T21:01:28.000Z
|
def fill_team_about(db):
###############################
# FILL TEAM ABOUT TABLE #
###############################
print('Inserting data into team member profiles table')
add_about_team_query = ("INSERT INTO team_about "
"(name, link, position, image, description, facebook, twitter, instagram, linkedin) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
team_about_entries = [
('Avery Chen', '/avery', 'Back-end Member', 'static/team_images/avery.jpg', 'Undergraduate student at SFSU',
'', '', '', ''),
('Akhil Gandu', '/akhil', 'GitHub Master', 'static/team_images/akhil.jpg', 'Graduate Student at SFSU',
'https://www.facebook.com/rockstar290', 'https://twitter.com/Rockstar5645',
'https://www.instagram.com/akhil_gandu/', 'https://www.linkedin.com/in/akhilgandu/'),
('Chris Eckhardt', '/chris', 'Back-end lead', 'static/team_images/chris.jpg', 'Undergrad at SFState', '', '',
'https://www.instagram.com/chris_evan_eckhardt/', 'https://www.linkedin.com/in/christopher-eckhardt-04abb1119/'),
('Elliot Yardley', '/elliot', 'Front-end lead', 'static/team_images/elliot.jpg', 'Graduate Student, SFSU',
'', '', '', ''),
('Thomas Yu', '/thomas', 'Front End Member', 'static/team_images/Thomas.jpg', 'Undergraduate Student, SFSU',
'', '', '', ''),
('Bakulia Kurmant', '/bakulia', 'Team Lead', 'static/team_images/bakulia.jpg', 'In my spare time I like going outdoors, techno music, reading, traveling', '', '', 'https://www.instagram.com/bakuliak/?hl=en', 'https://www.linkedin.com/in/bakulia-kurmant/')
]
for team_about_entry in team_about_entries:
db.query(add_about_team_query, team_about_entry)
db.commit()
print('Inserted data into team member profiles table')
def fill_media_types(db):
############################
# FILL MEDIA TYPES #
############################
print('Initializing media types table with an enumeration of available media types')
add_media_type_query = ("INSERT INTO media_types "
"(media_type)"
"VALUES (%s)")
media_types_entries = [
('all',),
('image',),
('video',),
('audio',),
('document',)
]
for media_type_entry in media_types_entries:
db.query(add_media_type_query, media_type_entry)
db.commit()
print('Media types table initialized')
def fill_digital_media(db):
############################
# FILL DIGITAL MEDIA #
############################
print('Inserting data test entries into digital media table')
add_digital_media_query = ("INSERT INTO digital_media "
"(owner_id, name, description, file_path, thumbnail_path, category, media_type, price, approval) "
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
digital_media_entries = [
('1', 'sponge bob 1', 'this is the first test photo', 'M2_test_images/sb1.jpg', 'thumbnails/sb1_t.jpg',
'all', 'image', 00.00, 1),
('2', 'sponge bob 2', 'this is the second test photo', 'M2_test_images/sb2.jpg','thumbnails/sb2_t.jpg' ,
'all', 'image' , 89.99, 1),
('3', 'sponge bob 3', 'this is the third test photo', 'M2_test_images/sb3.jpg','thumbnails/sb3_t.jpg' ,
'all', 'image' , 00.00, 1),
('4', 'sponge bob 4', 'this is the fourth test photo', 'M2_test_images/sb4.jpg','thumbnails/sb4_t.jpg' ,
'all', 'image' , 69.99, 1),
('5', 'sponge bob 5', 'this is the fifth test photo', 'M2_test_images/sb5.jpg','thumbnails/sb5_t.jpg' ,
'all', 'image' , 00.00, 1),
('1', 'sponge bob 6', 'this is the sixth test photo', 'M2_test_images/sb1.jpg', 'thumbnails/sb1_t.jpg',
'all', 'image', 99.99, 1),
('2', 'sponge bob 7', 'this is the seventh test photo', 'M2_test_images/sb2.jpg','thumbnails/sb2_t.jpg' ,
'all', 'image' , 89.99, 1),
('3', 'sponge bob 8', 'this is the eight test photo', 'M2_test_images/sb3.jpg','thumbnails/sb3_t.jpg' ,
'all', 'image' , 79.99, 1),
('4', 'sponge bob 9', 'this is the ninth test photo', 'M2_test_images/sb4.jpg','thumbnails/sb4_t.jpg' ,
'all', 'image' , 69.99, 1),
('5', 'sponge bob 10', 'this is the tenth test photo', 'M2_test_images/sb5.jpg','thumbnails/sb5_t.jpg' ,
'all', 'image' , 59.99, 1),
('1', 'sponge bob 11', 'this is the eleventh test photo', 'M2_test_images/sb1.jpg', 'thumbnails/sb1_t.jpg',
'all', 'image', 99.99, 1),
('2', 'sponge bob 12', 'this is the twelfth test photo', 'M2_test_images/sb2.jpg','thumbnails/sb2_t.jpg' ,
'all', 'image' , 89.99, 1),
('3', 'sponge bob 13', 'this is the thirteenth test photo', 'M2_test_images/sb3.jpg','thumbnails/sb3_t.jpg' ,
'all', 'image' , 79.99, 1),
('4', 'sponge bob 14', 'this is the fourteenth test photo', 'M2_test_images/sb4.jpg','thumbnails/sb4_t.jpg' ,
'all', 'image' , 69.99, 1),
('5', 'sponge bob 15', 'this is the fifteenth test photo', 'M2_test_images/sb5.jpg','thumbnails/sb5_t.jpg' ,
'all', 'image' , 59.99, 0)
]
for digital_media_entry in digital_media_entries:
db.query(add_digital_media_query, digital_media_entry)
db.commit()
print('Initialization of digital media entries table complete')
| 52.377358 | 263 | 0.568444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,596 | 0.647695 |
fd3187f8e540b93ec7789114fa6cc6e3969608ec
| 1,335 |
py
|
Python
|
pyBRML/pyBRML/core.py
|
anich003/brml_toolkit
|
de8218bdf333902431d4c0055fcf5cb3dc47d0c1
|
[
"MIT"
] | null | null | null |
pyBRML/pyBRML/core.py
|
anich003/brml_toolkit
|
de8218bdf333902431d4c0055fcf5cb3dc47d0c1
|
[
"MIT"
] | null | null | null |
pyBRML/pyBRML/core.py
|
anich003/brml_toolkit
|
de8218bdf333902431d4c0055fcf5cb3dc47d0c1
|
[
"MIT"
] | null | null | null |
import copy
from pyBRML import utils
from pyBRML import Array
def multiply_potentials(list_of_potentials):
"""
Returns the product of each potential in list_of_potentials, useful for
calculating joint probabilities.
For example, if the joint probability of a system is defined as
p(A,B,C) = p(C|A,B) p(A) p(B)
    then list_of_potentials should contain 3 potentials, one for each factor.
    Since potentials can be defined in an arbitrary order, each potential will be
reshaped and cast using numpy ndarray functions before being multiplied, taking
advantage of numpy's broadcasting functionality.
"""
# Collect the set of variables from each pot. Used to reshape each potential.table
variable_set = set(var for potential in list_of_potentials for var in potential.variables)
variable_set = list(variable_set)
# Copy potentials to avoid mutating original objects
potentials = copy.deepcopy(list_of_potentials)
# Reshape each potential prior to taking their product
for pot in potentials:
pot = utils.format_table(pot.variables, pot.table, variable_set)
# Multiply potentials and return
new_potential = potentials[0]
for pot in potentials[1:]:
new_potential.table = new_potential.table * pot.table
return new_potential
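# Usage sketch (hypothetical potentials): forming the joint p(A,B,C) = p(C|A,B) p(A) p(B).
# Each list element is assumed to expose .variables and .table, as used above.
#   joint = multiply_potentials([p_c_given_ab, p_a, p_b])
#   joint.table  # the broadcasted product over the union of the three variable sets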
| 35.131579 | 94 | 0.743071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.570037 |
fd3394f2b7968055dc2a5d2b8bdde46ae4644c49
| 2,098 |
py
|
Python
|
lib/akf_known_uncategories.py
|
UB-Mannheim/docxstruct
|
dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85
|
[
"Apache-2.0"
] | 1 |
2019-03-06T14:59:44.000Z
|
2019-03-06T14:59:44.000Z
|
lib/akf_known_uncategories.py
|
UB-Mannheim/docxstruct
|
dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85
|
[
"Apache-2.0"
] | null | null | null |
lib/akf_known_uncategories.py
|
UB-Mannheim/docxstruct
|
dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85
|
[
"Apache-2.0"
] | null | null | null |
import re
class KnownUncategories(object):
"""
    List of known entries in test_data which are not categories,
but are recognized as such
"""
def __init__(self):
# un-category regex strings (care for commas)
self.uc = [
"Beteiligung", # 1956: is part of Beteiligungen
"Ferngespräche", # 1956: is part of Fernruf/Telefon
"Kapital", # 1956: is part of multiple top-level items
"Umstellung \d\d?", # 1956: is part of Grundkapital or other
"Dividenden ab \d{4}.*", # 1956: is part of Dividenden or other (with year or yearspan)s
"^Kurs.*", # 1956: second level tag
"ab \d{4}(\/\d{2})?" # 1956: i.e "ab 1949/50"-part of other categories
]
        # non-specific keys (which do not get removed from original-rest in analysis)
self.nkeys = [
"street",
"street_number",
"additional_info",
"city",
"name",
"title",
"rest",
"location",
"number_Sa.-Nr.",
"rest_info",
"bank",
"title",
"amount",
"ord_number",
"organization",
]
# create corresponding regexes
self.uc_regex = []
for item in self.uc:
regex_compiled = re.compile(item)
self.uc_regex.append(regex_compiled)
@property
def uncategories(self):
return self.uc
@property
def unkeys(self):
return self.nkeys
def check_uncategories(self, text_to_check):
"""
Allows to compare a tag against the existing uncategories
:param text_to_check: tag text
:return: True if un-category, False if not
"""
for regex_to_check in self.uc_regex:
match_result = regex_to_check.search(text_to_check)
if match_result is not None:
return True
return False
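# Usage sketch: screening a recognized tag before treating it as a real category.
#   uncats = KnownUncategories()
#   uncats.check_uncategories("Ferngespräche")   # True  - matches an un-category pattern
#   uncats.check_uncategories("Vorstand")        # False - no pattern matches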
| 31.313433 | 108 | 0.515253 | 2,088 | 0.994759 | 0 | 0 | 117 | 0.055741 | 0 | 0 | 968 | 0.461172 |
fd33bdf592a5bbf5b20d72627b7e89fa294ef5bf
| 1,640 |
py
|
Python
|
maps/templatetags/mapas_tags.py
|
lsalta/mapground
|
d927d283dab6f756574bd88b3251b9e68f000ca7
|
[
"MIT"
] | null | null | null |
maps/templatetags/mapas_tags.py
|
lsalta/mapground
|
d927d283dab6f756574bd88b3251b9e68f000ca7
|
[
"MIT"
] | 3 |
2020-02-11T23:04:56.000Z
|
2021-06-10T18:07:53.000Z
|
maps/templatetags/mapas_tags.py
|
lsalta/mapground
|
d927d283dab6f756574bd88b3251b9e68f000ca7
|
[
"MIT"
] | 1 |
2021-08-20T14:49:09.000Z
|
2021-08-20T14:49:09.000Z
|
from django import template
register = template.Library()
def mostrar_resumen_mapa(context, m, order_by):
# print context
to_return = {
'mapa': m,
'order_by': order_by,
}
return to_return
@register.inclusion_tag('mapas/lista_mapas.html')
def mostrar_mapas(lista_mapas, order_by):
to_return = {
'lista_mapas': lista_mapas,
'order_by': order_by
}
return to_return
register.inclusion_tag('mapas/mapa.html', takes_context=True)(mostrar_resumen_mapa)
def quitar_char(value, arg):
return value.replace(arg, ' ')
register.filter('quitar_char',quitar_char)
def replace_text(value, replacement):
rep = replacement.split(',')
try:
return value.replace(rep[0], rep[1])
except:
return value
register.filter('replace_text',replace_text)
def truncar_string(value, max_length=0):
if max_length==0:
return value
return value[:max_length]+('...' if len(value)>max_length else '')
register.filter('truncar_string',truncar_string)
def get_range(value):
"""
Filter - returns a list containing range made from given value
Usage (in template):
<ul>{% for i in 3|get_range %}
<li>{{ i }}. Do something</li>
{% endfor %}</ul>
Results with the HTML:
<ul>
<li>0. Do something</li>
<li>1. Do something</li>
<li>2. Do something</li>
</ul>
Instead of 3 one may use the variable set in the views
"""
try:
return range( value )
except:
return None
register.filter('get_range',get_range)
def sort_by(queryset, order):
return queryset.order_by(*order.split(','))
register.filter('sort_by',sort_by)
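# Template usage sketch (variable and field names below are illustrative):
#   {{ mapas|sort_by:"-nombre,id" }}      -> queryset.order_by('-nombre', 'id')
#   {{ titulo|replace_text:"old,new" }}   -> titulo with 'old' replaced by 'new'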
| 22.777778 | 83 | 0.668902 | 0 | 0 | 0 | 0 | 201 | 0.122561 | 0 | 0 | 570 | 0.347561 |
fd349e3352814cffd9d5b6c0c4f84624bb4c6bc6
| 1,868 |
py
|
Python
|
app/services/aggregator/aggr.py
|
maestro-server/report-app
|
0bf9014400f2979c51c1c544347d5134c73facdf
|
[
"Apache-2.0"
] | 1 |
2020-05-19T20:18:05.000Z
|
2020-05-19T20:18:05.000Z
|
app/services/aggregator/aggr.py
|
maestro-server/report-app
|
0bf9014400f2979c51c1c544347d5134c73facdf
|
[
"Apache-2.0"
] | 2 |
2019-10-21T14:56:04.000Z
|
2020-03-27T12:48:26.000Z
|
app/services/aggregator/aggr.py
|
maestro-server/report-app
|
0bf9014400f2979c51c1c544347d5134c73facdf
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from pydash.objects import get
class Aggregator(object):
def __init__(self, field, lens=None, sublens='_id', include=[], opts={}):
self._field = field
self._lens = lens
self._sublens = sublens
self._result = []
self._transf = []
self._include = include
self._opts = opts
self._df = None
def execute(self, df, entity='all'):
if (not self._include) or (entity in self._include):
self.aggregate(df)
def aggregate(self, df):
self._tmp_dataframe = df \
.dropna() \
.apply(self.transformData)
if "stack" in self._transf:
self._tmp_dataframe = self._tmp_dataframe.stack() \
.reset_index(level=1, drop=True)
self.groupCount()
def groupAggrCount(self):
self._result = self._tmp_dataframe \
.groupby(self._sub) \
.agg({self._aggrk: self._aggr}) \
.get(self._aggrk)
def groupCount(self):
self._result = self._tmp_dataframe \
.groupby(self._tmp_dataframe) \
.count()
def transformData(self, data):
if isinstance(data, dict):
data = get(data, self._lens)
if isinstance(data, list):
self._transf.append("stack")
data = map(self.reducev, data)
return pd.Series(data)
return data
def reducev(self, data):
if isinstance(data, dict):
return get(data, self._sublens)
return data
def getField(self):
return self._field
def uniqueField(self):
arr = [self._field]
if self._lens:
arr += self._lens.split(".")
return "_".join(arr)
def getResult(self):
return self._result
def getOpts(self):
return self._opts
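# Usage sketch (hypothetical data): counting nested values selected by a pydash lens.
#   statuses = pd.Series([{"meta": {"state": "ok"}}, {"meta": {"state": "ok"}}, {"meta": {"state": "down"}}])
#   aggr = Aggregator("state", lens="meta.state")
#   aggr.execute(statuses)   # extracts each value via the lens, then groups and counts
#   aggr.getResult()         # e.g. down -> 1, ok -> 2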
| 23.64557 | 77 | 0.556745 | 1,814 | 0.971092 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.01606 |
fd375dd0458ad06acf734748313eaba69b115090
| 310 |
py
|
Python
|
email.py
|
zhaoxiaoyunok/python-library-fuzzers
|
83db496f75280795415821097802a96fbf72f50f
|
[
"MIT"
] | 4 |
2019-07-03T06:01:08.000Z
|
2019-07-29T08:07:54.000Z
|
email.py
|
zhaoxiaoyunok/python-library-fuzzers
|
83db496f75280795415821097802a96fbf72f50f
|
[
"MIT"
] | null | null | null |
email.py
|
zhaoxiaoyunok/python-library-fuzzers
|
83db496f75280795415821097802a96fbf72f50f
|
[
"MIT"
] | 4 |
2019-07-03T03:24:56.000Z
|
2021-12-11T12:30:31.000Z
|
from email.parser import BytesParser, Parser
from email.policy import default, HTTP
def FuzzerRunOne(FuzzerInput):
try:
Parser(policy=HTTP).parsestr(FuzzerInput.decode("utf-8", "replace"))
# #Parser(policy=default).parsestr(FuzzerInput.decode("utf-8", "replace"))
except:
pass
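# Invocation sketch (harness-dependent): the fuzzer is expected to pass raw bytes, e.g.
#   FuzzerRunOne(b"From: [email protected]\r\nSubject: hi\r\n\r\nbody")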
| 28.181818 | 81 | 0.696774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.3 |
fd37bfae595f1bdb3cabeb62c9beddb408f90236
| 315 |
py
|
Python
|
tests/conftest.py
|
calpt/flask-filealchemy
|
b3575299f0230d5a64865af8066122c2e0c485ec
|
[
"MIT"
] | 16 |
2018-10-16T03:32:39.000Z
|
2020-09-04T02:05:37.000Z
|
tests/conftest.py
|
calpt/flask-filealchemy
|
b3575299f0230d5a64865af8066122c2e0c485ec
|
[
"MIT"
] | 8 |
2019-02-25T10:59:15.000Z
|
2019-03-11T08:36:57.000Z
|
tests/conftest.py
|
calpt/flask-filealchemy
|
b3575299f0230d5a64865af8066122c2e0c485ec
|
[
"MIT"
] | 3 |
2019-11-22T23:46:16.000Z
|
2020-06-05T19:17:23.000Z
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import pytest
@pytest.fixture
def app():
return Flask(__name__)
@pytest.fixture
def db(app):
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
return SQLAlchemy(app)
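# Example test consuming these fixtures (sketch; the assertion is purely illustrative):
#   def test_session_available(db):
#       assert db.session is not None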
| 18.529412 | 64 | 0.742857 | 0 | 0 | 0 | 0 | 231 | 0.733333 | 0 | 0 | 77 | 0.244444 |
fd3bd480b9c0a1b8e0dc9e02d722d288943bec44
| 357 |
py
|
Python
|
DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py
|
armaan2k/Training-Exercises
|
6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d
|
[
"MIT"
] | null | null | null |
DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py
|
armaan2k/Training-Exercises
|
6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d
|
[
"MIT"
] | null | null | null |
DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py
|
armaan2k/Training-Exercises
|
6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d
|
[
"MIT"
] | null | null | null |
def selection_sort(A):
n = len(A)
for i in range(n - 1):
position = i
for j in range(i + 1, n):
if A[j] < A[position]:
position = j
temp = A[i]
A[i] = A[position]
A[position] = temp
A = [3, 5, 8, 9, 6, 2]
print('Original Array: ', A)
selection_sort(A)
print('Sorted Array: ', A)
| 21 | 34 | 0.473389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.095238 |
fd3cb5b3e208b581d3cf014077d7e88f0727e79e
| 3,465 |
py
|
Python
|
books/models.py
|
MattRijk/finance-ebook-site
|
c564d4bc9578f0a6f47efa53f0c81893fbee08f7
|
[
"MIT"
] | 1 |
2020-05-16T12:48:02.000Z
|
2020-05-16T12:48:02.000Z
|
books/models.py
|
MattRijk/finance-ebook-site
|
c564d4bc9578f0a6f47efa53f0c81893fbee08f7
|
[
"MIT"
] | null | null | null |
books/models.py
|
MattRijk/finance-ebook-site
|
c564d4bc9578f0a6f47efa53f0c81893fbee08f7
|
[
"MIT"
] | 3 |
2017-12-06T11:18:10.000Z
|
2020-05-16T12:49:32.000Z
|
from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.text import slugify
class Author(models.Model):
name = models.CharField(max_length=50, unique=False)
slug = models.SlugField(max_length=200, blank=True, unique=False)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
super(Author, self).save(*args, **kwargs)
class Book(models.Model):
title = models.CharField(max_length=255, unique=True)
slug = models.SlugField(max_length=200, blank=True, unique=False)
year = models.CharField(max_length=10)
pages = models.CharField(max_length=10)
filesize = models.CharField(max_length=20)
file_format = models.CharField(max_length=20)
pdf = models.FileField(upload_to='pdfs/')
# image = models.FileField(upload_to='images/')
image = models.FileField(upload_to='images/')
authors = models.ManyToManyField(Author, through='BookHasAuthor')
published = models.DateTimeField(default=timezone.now)
class Meta:
ordering = ('-published',)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Book, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('books:detail_view', kwargs={'slug': self.slug})
class Category(models.Model):
title = models.CharField(max_length=50)
slug = models.SlugField(max_length=200, blank=True, unique=False)
books = models.ManyToManyField(Book, through='BookHasCategory')
class Meta:
verbose_name_plural = 'Categories'
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super(Category, self).save(*args, **kwargs)
class BookHasAuthor(models.Model):
name = models.CharField(max_length=45, blank=True, unique=False)
slug = models.SlugField(max_length=200, blank=True, unique=False)
author = models.ForeignKey(Author)
book = models.ForeignKey(Book)
class Meta:
unique_together = ['author', 'book']
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug or self.name:
self.name = self.author.name
self.slug = slugify(self.name)
super(BookHasAuthor, self).save(*args, **kwargs)
class BookHasCategory(models.Model):
title = models.CharField(max_length=45, blank=True, unique=False)
slug = models.SlugField(max_length=200, blank=True, unique=False)
book = models.ForeignKey(Book)
category = models.ForeignKey(Category)
class Meta:
unique_together = ['book', 'category']
verbose_name_plural = 'Book has categories'
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.slug:
self.title = self.category.title
self.slug = slugify(self.title)
super(BookHasCategory, self).save(*args, **kwargs)
| 33 | 72 | 0.610967 | 3,226 | 0.931025 | 0 | 0 | 0 | 0 | 0 | 0 | 200 | 0.05772 |
fd3e69529a275a604f79403141c9d3a32f7b625b
| 341 |
py
|
Python
|
bucketlist_django/bucketlist_django/settings/development.py
|
andela-tadesanya/django-bucketlist-application
|
315ceb77e635fe051b5600ada460af938c140af1
|
[
"MIT"
] | null | null | null |
bucketlist_django/bucketlist_django/settings/development.py
|
andela-tadesanya/django-bucketlist-application
|
315ceb77e635fe051b5600ada460af938c140af1
|
[
"MIT"
] | null | null | null |
bucketlist_django/bucketlist_django/settings/development.py
|
andela-tadesanya/django-bucketlist-application
|
315ceb77e635fe051b5600ada460af938c140af1
|
[
"MIT"
] | null | null | null |
# load defaults and override with development settings
from defaults import *
DEBUG = True
WSGI_APPLICATION = 'bucketlist_django.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bucketlist',
'USER': 'bucketlist',
'PASSWORD': 'bucketlist'
}
}
| 21.3125 | 59 | 0.653959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.59824 |
fd45eaa50a88cd4355e2753ab9dc9b6e727d52ec
| 1,198 |
py
|
Python
|
storyboard/tests/plugin/scheduler/test_base.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
storyboard/tests/plugin/scheduler/test_base.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
storyboard/tests/plugin/scheduler/test_base.py
|
Sitcode-Zoograf/storyboard
|
5833f87e20722c524a1e4a0b8e1fb82206fb4e5c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
from storyboard.plugin.scheduler.base import SchedulerPluginBase
import storyboard.tests.base as base
class TestSchedulerBasePlugin(base.TestCase):
"""Test the scheduler app."""
def test_plugin_name(self):
plugin = TestPlugin(dict())
self.assertEqual(
"storyboard.tests.plugin.scheduler.test_base:TestPlugin",
plugin.get_name())
class TestPlugin(SchedulerPluginBase):
"""Test plugin to test the non-abstract methods."""
def enabled(self):
return True
def run(self):
pass
def trigger(self):
pass
| 29.219512 | 78 | 0.717863 | 484 | 0.404007 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.607679 |
fd464df7fbbebbe26bc4d827bb8cf980aecbe03a
| 13,019 |
py
|
Python
|
src/model/build_models.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
src/model/build_models.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
src/model/build_models.py
|
VinGPan/classification_model_search
|
fab7ce6fc131b858f1b79633e0f7b86d1446c93d
|
[
"MIT"
] | null | null | null |
import os
import os.path
import pickle
from shutil import copyfile
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals import joblib
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.manifold import Isomap
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.metrics import accuracy_score, balanced_accuracy_score, r2_score, mean_absolute_error, mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC
from sklearn.utils.testing import ignore_warnings
from src.model.utils import makedir
from src.utils.logging import logger
#####################################################################
# HERE IS A LIST OF THE VARIOUS LIBRARIES WE STUDIED DURING THE SCS_3253_024 Machine Learning COURSE that are
# relevant to the classification problem. We will try to use as many ideas as possible for this project.
#####################################################################
def add_preproc_step(preproc_str, steps):
if preproc_str == 'min_max':
steps.append(('preprocs', MinMaxScaler()))
elif preproc_str == 'standard_scalar':
steps.append(('preprocs', StandardScaler()))
elif preproc_str == 'none':
pass
else:
logger.error("unsupported preprocs option " + preproc_str)
raise Exception("unsupported preprocs option " + preproc_str)
def add_transform_step(transforms_str, steps, transforms_info, param_grid):
if transforms_str == 'pca':
steps.append(('transforms', PCA()))
elif transforms_str == 'kpca':
steps.append(('transforms', KernelPCA(kernel='rbf')))
elif transforms_str == 'lle':
steps.append(('transforms', LocallyLinearEmbedding()))
# elif transforms_str == 'mds':
# steps.append(('transforms', MDS())) # DOES NOT HAVE transform() function
# param_grid[0]["transforms__n_components"] = [3, 4, 5]
elif transforms_str == 'isomap':
steps.append(('transforms', Isomap()))
# elif transforms_str == 'tsne': # DOES NOT HAVE transform() function
# steps.append(('transforms', TSNE()))
# param_grid[0]["transforms__n_components"] = [3, 4, 5]
elif transforms_str == 'none':
pass
else:
logger.error("unsupported transforms option " + transforms_str)
raise Exception("unsupported transforms option " + transforms_str)
if 'params' in transforms_info:
for trans_param in transforms_info['params']:
vals = trans_param['vals']
name = trans_param['name']
for vidx in range(len(vals)):
if vals[vidx] == 'None':
vals[vidx] = None
param_grid[0]["transforms__" + name] = vals
def add_classifier_step(clf_str, steps, clf_info, param_grid, tot_classes):
########### classifiers ################
if clf_str == 'logistic':
steps.append(('clf', LogisticRegression(multi_class='auto', random_state=0, solver='liblinear')))
elif clf_str == 'naive_bayes':
steps.append(('clf', GaussianNB()))
elif clf_str == 'knn':
steps.append(('clf', KNeighborsClassifier()))
elif clf_str == 'random_forest':
steps.append(('clf', RandomForestClassifier()))
elif clf_str == 'svc':
steps.append(('clf', SVC(class_weight='balanced', random_state=42)))
elif clf_str == 'xgboost':
steps.append(('clf', xgb.XGBClassifier(random_state=42, objective="multi:softmax", num_class=tot_classes)))
elif clf_str == 'adaboost':
steps.append(('clf', AdaBoostClassifier(random_state=42)))
elif clf_str == 'gradboost':
steps.append(('clf', GradientBoostingClassifier(random_state=42)))
#########################################
elif clf_str == 'linear':
steps.append(('clf', LinearRegression()))
else:
logger.error("Classifier option " + clf_str + " not supported")
raise Exception("Classifier option " + clf_str + " not supported")
if 'params' in clf_info:
for clf_param in clf_info['params']:
vals = clf_param['vals']
name = clf_param['name']
for vidx in range(len(vals)):
if vals[vidx] == 'None':
vals[vidx] = None
param_grid[0]["clf__" + name] = vals
def get_crashed_list(configs):
crashed_list_fname = "output/" + configs["experiment_name"] + "/" + "crashed.txt"
if os.path.exists(crashed_list_fname):
fid = open(crashed_list_fname, "r")
crashed_list = []
for line in fid:
crashed_list.append(line.strip())
fid.close()
else:
crashed_list = []
fid = open(crashed_list_fname, "w")
fid.close()
return crashed_list, crashed_list_fname
@ignore_warnings(category=ConvergenceWarning)
def build_models(configs):
mtype = configs['mtype']
data_path = "output/" + configs['experiment_name'] + "/features.csv"
train_path = "output/" + configs['experiment_name'] + "/train.csv"
val_path = train_path.replace("train.csv", "val.csv")
makedir("output/" + configs['experiment_name'] + "/interim")
logger.info('Building Models for ' + str(train_path))
# Read train and val sets
X = pd.read_csv(data_path)
y = X[configs['target']].values
X = X.drop([configs['target']], axis=1)
X = X.values
ids = list((pd.read_csv(train_path, header=None).values)[:, 0])
ids.extend(list((pd.read_csv(val_path, header=None).values)[:, 0]))
X = X[ids, :]
y = y[ids]
all_scores_path = "output/" + configs["experiment_name"] + "/all_scores.pkl"
all_scores_done_flg = "output/" + configs["experiment_name"] + "/all_scores_flg"
crashed_list, crashed_list_fname = get_crashed_list(configs)
if os.path.exists(all_scores_path) and os.path.exists(all_scores_done_flg):
        # If all the model building is already done, just return the results.
        # This is helpful for displaying results in a Jupyter notebook.
logger.info('All the models have been built already.')
else:
all_scores = []
if mtype == 'classification':
tot_classes = np.unique(y).shape[0]
else:
tot_classes = None
# For Each classifier - For Each Data transform - For each Dimensionality reduction BUILD THE MODEL!
for clf_info in configs["models"]["classifiers"]:
for preproc_str in configs["models"]["preprocs"]:
for transforms_info in configs["models"]["transforms"]:
transforms_str = transforms_info['name']
clf_str = clf_info['name']
res_path = "output/" + configs["experiment_name"] + "/interim/" + clf_str + "_" + preproc_str + \
"_" + transforms_str + ".pkl"
model_str = 'classifier "' + clf_str + '" with preprocessing "' + preproc_str + \
'" and with transform "' + transforms_str + '"'
logger.info('Building ' + model_str)
################# ADD IMPUTERS #################################
steps = [('imputer', SimpleImputer(strategy='mean'))]
param_grid = [{}]
###################################################################
################# PICK A DATA TRANSFORM ###########################
add_preproc_step(preproc_str, steps)
###################################################################
################### PICK A Dimensionality reduction method ################
add_transform_step(transforms_str, steps, transforms_info, param_grid)
###################################################################
##################### PICK A classifier #######################
add_classifier_step(clf_str, steps, clf_info, param_grid, tot_classes)
###################################################################
##################### Perform grid search #####################
pipeline = Pipeline(steps=steps)
if mtype == 'classification':
scoring = {'balanced_accuracy': make_scorer(balanced_accuracy_score),
'accuracy': make_scorer(accuracy_score),
'f1': 'f1_micro'}
refit = 'balanced_accuracy'
else:
scoring = {'r2': make_scorer(r2_score), 'mae': make_scorer(mean_absolute_error),
'mse': make_scorer(mean_squared_error)}
refit = 'r2'
clf = GridSearchCV(estimator=pipeline, cv=5, param_grid=param_grid,
verbose=1, scoring=scoring, refit=refit)
if os.path.exists(res_path):
logger.info('Model has been built already. Loading the model ' + str(res_path))
clf = joblib.load(res_path)
elif model_str in crashed_list:
logger.info('Model fails to build. Ignoring. Please consider modifying the params.')
continue
else:
try:
clf.fit(X, y)
except Exception as e:
logger.info("Model building crashed for " + model_str)
logger.error(e, exc_info=True)
fid = open(crashed_list_fname, "a")
fid.write(model_str + "\n")
fid.close()
continue
joblib.dump(clf, res_path)
###################################################################
################### Record scores ################
cv_results = clf.cv_results_
score_info = {'classifier': clf_str, 'preprocess': preproc_str, 'transform': transforms_str,
'res_path': res_path}
for scorer in scoring:
best_index = np.nonzero(cv_results['rank_test_%s' % scorer] == 1)[0][0]
best_score = cv_results['mean_test_%s' % scorer][best_index]
if scorer == 'balanced_accuracy':
score_info['balanced_accuracy'] = np.round(best_score * 100, 1)
elif scorer == 'accuracy':
score_info['accuracy'] = np.round(best_score * 100, 1)
elif scorer == 'f1':
score_info['f1_score'] = np.round(best_score, 2)
elif scorer == 'r2':
score_info['r2'] = np.round(best_score, 2)
elif scorer == 'mae':
score_info['mae'] = np.round(best_score, 2)
elif scorer == 'mse':
score_info['mse'] = np.round(best_score, 2)
if mtype == 'classification':
res_str = ' => accuracy = ' + str(score_info['accuracy']) + '%, F1 = ' + \
str(score_info['f1_score'])
else:
res_str = ' => r2 = ' + str(score_info['r2'])
logger.info(model_str + res_str)
all_scores.append(score_info)
pickle.dump(all_scores, open(all_scores_path, "wb"))
###################################################################
# end each transforms
# end each preprocs
# end each classifiers
if mtype == 'classification':
all_scores = sorted(all_scores, key=lambda x: x['balanced_accuracy'], reverse=True)
else:
all_scores = sorted(all_scores, key=lambda x: x['r2'], reverse=True)
best_model_path = "output/" + configs["experiment_name"] + "/best_model.pkl"
copyfile(all_scores[0]['res_path'], best_model_path)
fid = open(all_scores_done_flg, "wb")
fid.close()
logger.info("All the models are built.")
| 47.688645 | 118 | 0.541132 | 0 | 0 | 0 | 0 | 7,659 | 0.588294 | 0 | 0 | 3,653 | 0.28059 |
fd4aa50f6950d8285cebb403be0898f64adbb857
| 2,495 |
py
|
Python
|
gleague/gleague/frontend/seasons.py
|
Nuqlear/genkstaleague
|
664ed1d3ebea9c43053546fc2d658083cc16526b
|
[
"MIT"
] | 7 |
2015-08-18T01:21:48.000Z
|
2021-04-30T03:10:38.000Z
|
gleague/gleague/frontend/seasons.py
|
Nuqlear/genkstaleague
|
664ed1d3ebea9c43053546fc2d658083cc16526b
|
[
"MIT"
] | 1 |
2019-04-28T10:02:39.000Z
|
2019-05-06T08:11:56.000Z
|
gleague/gleague/frontend/seasons.py
|
Nuqlear/genkstaleague
|
664ed1d3ebea9c43053546fc2d658083cc16526b
|
[
"MIT"
] | 3 |
2015-08-14T09:42:25.000Z
|
2018-11-08T07:07:58.000Z
|
from flask import Blueprint
from flask import abort
from flask import render_template
from flask import request
from flask import current_app
from sqlalchemy import desc
from gleague.core import db
from gleague.models import Match
from gleague.models import Season
from gleague.models import SeasonStats
from gleague.models.queries import season_analytic
seasons_bp = Blueprint("seasons", __name__)
def get_season_number_and_id(season_number):
if season_number == -1:
season = Season.current()
else:
season = Season.query.filter(Season.number == season_number).first()
if not season:
abort(404)
return season.number, season.id
@seasons_bp.route("/current/players", methods=["GET"])
@seasons_bp.route("/<int:season_number>/players", methods=["GET"])
def players(season_number=-1):
season_number, s_id = get_season_number_and_id(season_number)
q = request.args.get("q")
sort = request.args.get("sort", "pts")
page = int(request.args.get("page", 1))
stats = SeasonStats.get_stats(season_number, q, sort)
stats = stats.paginate(page, current_app.config["TOP_PLAYERS_PER_PAGE"], True)
seasons = [season[0] for season in db.session.query(Season.number).all()]
return render_template(
"/season/players.html",
stats=stats,
sort=sort,
seasons=seasons,
season_number=season_number,
)
@seasons_bp.route("/current/records", methods=["GET"])
@seasons_bp.route("/<int:season_number>/records", methods=["GET"])
def records(season_number=-1):
season_number, s_id = get_season_number_and_id(season_number)
template_context = {
"season_number": season_number,
"seasons": [season[0] for season in db.session.query(Season.number).all()],
}
template_context.update(season_analytic.get_all_season_records(season_number))
return render_template("/season/records.html", **template_context)
@seasons_bp.route("/current/heroes", methods=["GET"])
@seasons_bp.route("/<int:season_number>/heroes", methods=["GET"])
def heroes(season_number=-1):
season_number, s_id = get_season_number_and_id(season_number)
sort = request.args.get("sort", "played")
return render_template(
"/season/heroes.html",
season_number=season_number,
seasons=[season[0] for season in db.session.query(Season.number).all()],
sort=sort,
is_desc=desc,
in_season_heroes=season_analytic.get_player_heroes(s_id, sort),
)
| 34.652778 | 83 | 0.710621 | 0 | 0 | 0 | 0 | 1,806 | 0.723848 | 0 | 0 | 326 | 0.130661 |
fd4d6ed01b3decd5927f1d836a338350d16f500c
| 941 |
py
|
Python
|
LC_problems/822.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/822.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
LC_problems/822.py
|
Howardhuang98/Blog
|
cf58638d6d0bbf55b95fe08e43798e7dd14219ac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 822.py
@Contact : [email protected]
@Modify Time : 2022/3/29 13:19
------------
"""
from typing import List
class Solution:
def flipgame(self, fronts: List[int], backs: List[int]) -> int:
ans = float('+inf')
for i in range(len(fronts)):
if fronts[i] == backs[i]:
continue
for x in [fronts[i], backs[i]]:
success = True
for j in range(len(fronts)):
if j == i:
continue
if x != fronts[j] or x != backs[j]:
continue
else:
success = False
break
if success:
ans = min(ans, x)
        return ans if ans != float('+inf') else 0  # per LeetCode 822, return 0 when no good number exists
if __name__ == '__main__':
s = Solution()
print(s.flipgame([1],[1]))
| 27.676471 | 67 | 0.420829 | 665 | 0.706695 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.196599 |
fd4d784f79a128a2168a7d3f9c317a2fb64d12f1
| 22,795 |
py
|
Python
|
result/analyze.py
|
kuriatsu/PIE_RAS
|
8dd33b4d4f7b082337a2645c0a72082374768b52
|
[
"Apache-2.0"
] | null | null | null |
result/analyze.py
|
kuriatsu/PIE_RAS
|
8dd33b4d4f7b082337a2645c0a72082374768b52
|
[
"Apache-2.0"
] | null | null | null |
result/analyze.py
|
kuriatsu/PIE_RAS
|
8dd33b4d4f7b082337a2645c0a72082374768b52
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import pickle
import pandas as pd
import xml.etree.ElementTree as ET
import math
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import csv
import glob
import scikit_posthocs as sp
from scipy import stats
import os
from scipy import stats
import scikit_posthocs as sp
sns.set(context='paper', style='whitegrid')
hue_order = ["traffic light", "crossing intention", "trajectory"]
eps=0.01
tl_black_list = [
"3_3_96tl",
"3_3_102tl",
"3_4_107tl",
"3_4_108tl",
"3_5_112tl",
"3_5_113tl",
"3_5_116tl",
"3_5_117tl",
"3_5_118tl",
"3_5_119tl",
"3_5_122tl",
"3_5_123tl",
"3_5_126tl",
"3_5_127tl",
"3_6_128tl",
"3_6_137tl",
"3_7_142tl",
"3_8_153tl",
"3_8_160tl",
"3_9_173tl",
"3_9_174tl",
"3_9_179tl",
"3_10_185tl",
"3_10_188tl",
"3_11_205tl",
"3_12_218tl",
"3_12_221tl",
"3_15_241tl",
"3_16_256tl",
"3_16_257tl",
]
opposite_anno_list = ["3_16_259tl", "3_16_258tl", "3_16_249tl"]
log_data = None
data_path = "/home/kuriatsu/Dropbox/data/pie202203"
for file in glob.glob(os.path.join(data_path, "log*.csv")):
buf = pd.read_csv(file)
filename =file.split("/")[-1]
count = int(filename.replace("log_data_", "").split("_")[-1].replace(".csv", ""))
print("{}".format(filename))
if count in [0, 1, 2]:
print("skipped")
continue
trial = filename.split("_")[-1].replace(".csv", "")
buf["subject"] = filename.replace("log_data_", "").split("_")[0]
buf["task"] = filename.replace("log_data_", "").split("_")[1]
correct_list = []
response_list = []
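    # response coding (matches the plot legend below): 0 = hit, 1 = miss,
    # 2 = false alarm, 3 = correct rejection, 4 = ignored, -1 = no intervention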
for idx, row in buf.iterrows():
if row.id in tl_black_list:
row.last_state = -2
if row.last_state == -1: # no intervention
correct_list.append(-1)
response_list.append(-1)
elif int(row.last_state) == int(row.state):
if row.id in opposite_anno_list:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
if row.id in opposite_anno_list:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
buf["correct"] = correct_list
buf["response"] = response_list
len(correct_list)
if log_data is None:
log_data = buf
else:
log_data = log_data.append(buf, ignore_index=True)
task_list = {"int": "crossing intention", "tl": "traffic light", "traj":"trajectory"}
subject_data = pd.DataFrame(columns=["subject", "task", "acc", "int_length", "missing"])
for subject in log_data.subject.drop_duplicates():
for task in log_data.task.drop_duplicates():
for length in log_data.int_length.drop_duplicates():
target = log_data[(log_data.subject == subject) & (log_data.task == task) & (log_data.int_length == length)]
# acc = len(target[target.correct == 1])/(len(target))
acc = len(target[target.correct == 1])/(len(target[target.correct == 0]) + len(target[target.correct == 1])+eps)
missing = len(target[target.correct == -1])/(len(target[target.correct != -2])+eps)
buf = pd.DataFrame([(subject, task_list.get(task), acc, length, missing)], columns=subject_data.columns)
subject_data = pd.concat([subject_data, buf])
subject_data.acc = subject_data.acc * 100
subject_data.missing = subject_data.missing * 100
# sns.barplot(x="task", y="acc", hue="int_length", data=subject_data, ci="sd")
# sns.barplot(x="task", y="acc", data=subject_data, ci="sd")
################################################
print("check intervene acc")
################################################
for length in subject_data.int_length.drop_duplicates():
print(f"acc : length={length}")
target_df = subject_data[subject_data.int_length == length]
_, norm_p = stats.shapiro(target_df.acc.dropna())
_, var_p = stats.levene(
target_df[target_df.task == 'trajectory'].acc.dropna(),
target_df[target_df.task == 'crossing intention'].acc.dropna(),
target_df[target_df.task == 'traffic light'].acc.dropna(),
center='median'
)
# if norm_p < 0.05 or var_p < 0.05:
# print(f"norm:{norm_p}, var:{var_p}")
# print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='acc', group_col='task'))
# else:
# multicomp_result = multicomp.MultiComparison(np.array(target_df.dropna(how='any').acc, dtype="float64"), target_df.dropna(how='any').type)
# print(f"norm:{norm_p}, var:{var_p}")
# print('levene', multicomp_result.tukeyhsd().summary())
if norm_p < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
target_df[target_df.task == "trajectory"].acc,
target_df[target_df.task == "crossing intention"].acc,
target_df[target_df.task == "traffic light"].acc,
)
print(f"norm:{norm_p}, var:{var_p}")
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print('conover\n', sp.posthoc_conover(target_df, val_col="acc", group_col="task"))
print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='acc', group_col='task'))
else:
# melted_df = pd.melt(target_df, id_vars=["subject", "acc", "int_length"], var_name="task", value_name="rate")
aov = stats_anova.AnovaRM(melted_df, "missing", "subject", ["task"])
print(f"norm:{norm_p}, var:{var_p}")
print("reperted anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(melted_df[length], nasa_df.task)
print(melted_df.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.pointplot(x="int_length", y="acc", data=subject_data, hue="task", hue_order=hue_order, ax=ax, capsize=0.1, ci="sd")
ax.set_ylim(0.0, 100.0)
ax.set_xlabel("intervention time [s]", fontsize=18)
ax.set_ylabel("intervention accuracy [%]", fontsize=18)
ax.tick_params(labelsize=14)
ax.legend(fontsize=14)
plt.show()
################################################
print("check miss rate")
################################################
for length in subject_data.int_length.drop_duplicates():
print(f"miss : length={length}")
target_df = subject_data[subject_data.int_length == length]
_, norm_p = stats.shapiro(target_df.missing.dropna())
_, var_p = stats.levene(
target_df[target_df.task == 'trajectory'].missing.dropna(),
target_df[target_df.task == 'crossing intention'].missing.dropna(),
target_df[target_df.task == 'traffic light'].missing.dropna(),
center='median'
)
# if norm_p < 0.05 or var_p < 0.05:
# print(f"norm:{norm_p}, var:{var_p}")
# print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='missing', group_col='task'))
# else:
# multicomp_result = multicomp.MultiComparison(np.array(target_df.dropna(how='any').missing, dtype="float64"), target_df.dropna(how='any').type)
# print(f"norm:{norm_p}, var:{var_p}")
# print('levene', multicomp_result.tukeyhsd().summary())
if norm_p < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
target_df[target_df.task == "trajectory"].missing,
target_df[target_df.task == "crossing intention"].missing,
target_df[target_df.task == "traffic light"].missing,
)
print(f"norm:{norm_p}, var:{var_p}")
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='missing', group_col='task'))
print('conover\n', sp.posthoc_conover(target_df, val_col="missing", group_col="task"))
else:
# melted_df = pd.melt(target_df, id_vars=["subject", "acc", "int_length"], var_name="task", value_name="rate")
aov = stats_anova.AnovaRM(melted_df, "missing", "subject", ["task"])
print(f"norm:{norm_p}, var:{var_p}")
print("reperted anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(melted_df[length], nasa_df.task)
print(melted_df.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.pointplot(x="int_length", y="missing", data=subject_data, hue="task", hue_order=hue_order, ax = ax, capsize=0.1, ci=95)
ax.set_ylim(0.0, 100.0)
ax.set_xlabel("intervention time [s]", fontsize=18)
ax.set_ylabel("intervention missing rate [%]", fontsize=18)
ax.tick_params(labelsize=14)
ax.legend(fontsize=14)
plt.show()
#####################################
# mean val show
#####################################
target = subject_data[subject_data.task == "crossing intention"]
print("int acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "trajectory"]
print("traj acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "traffic light"]
print("tl acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "crossing intention"]
print("int missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
target = subject_data[subject_data.task == "trajectory"]
print("traj missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
target = subject_data[subject_data.task == "traffic light"]
print("tl missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
###########################################
# collect wrong intervention ids
###########################################
task_list = {"int": "crossing intention", "tl": "traffic light", "traj":"trajectory"}
id_data = pd.DataFrame(columns=["id", "task", "false_rate", "missing", "total"])
for id in log_data.id.drop_duplicates():
for task in log_data.task.drop_duplicates():
for length in log_data.int_length.drop_duplicates():
target = log_data[(log_data.id == id) & (log_data.task == task) & (log_data.int_length == length)]
# acc = len(target[target.correct == 1])/(len(target))
total = len(target)
name = id.replace("tl","")+task+"_"+str(length)
if len(target) > 0:
false_rate = len(target[target.correct == 0])/len(target)
else:
false_rate = 0.0
missing = len(target[target.correct == -1])
buf = pd.DataFrame([(name, task, false_rate, missing, total)], columns=id_data.columns)
id_data = pd.concat([id_data, buf])
pd.set_option("max_rows", None)
sort_val = id_data.sort_values(["false_rate","total"], ascending=False)
false_playlist = sort_val[(sort_val.false_rate>0.0)&(sort_val.total>1)]
print(false_playlist)
false_playlist.to_csv("/home/kuriatsu/Dropbox/data/pie202203/false_playlist.csv")
# sns.barplot(x="id", y="acc", hue="int_length", data=id_data)
###############################################################################################
print("response rate stacked bar plot")
###############################################################################################
response_summary_pred = pd.DataFrame(columns=["int_length", "task", "response", "count"])
for int_length in log_data.int_length.drop_duplicates():
for task in log_data.task.drop_duplicates():
for response in [0, 1, 2, 3, -1]:
buf = pd.Series([int_length, task, response,
len(log_data[(log_data.int_length==int_length) & (log_data.task==task) & (log_data.response <= response)])/len(log_data[(log_data.int_length==int_length) & (log_data.task==task) & (log_data.response!=4)])],
index=response_summary_pred.columns)
response_summary_pred = response_summary_pred.append(buf, ignore_index=True)
fig, axes = plt.subplots()
cr = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==3], ax=axes, palette=sns.color_palette(["turquoise"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
fa = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==2], ax=axes, palette=sns.color_palette(["orangered"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
miss = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==1], ax=axes, palette=sns.color_palette(["lightsalmon"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
hit = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==0], ax=axes, palette=sns.color_palette(["teal"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
no_int = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==-1], ax=axes, palette=sns.color_palette(["gray"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
axes.set_xticks([-0.3, -0.1, 0.1, 0.3, 0.7, 0.9, 1.1, 1.3, 1.7, 1.9, 2.1, 2.3])
axes.set_xticklabels(["1.0", "3.0", "5.0", "8.0", "1.0", "3.0", "5.0", "8.0", "1.0", "3.0", "5.0", "8.0"], fontsize=14)
# axes.set_yticklabels(fontsize=14)
ax_pos = axes.get_position()
fig.text(ax_pos.x1-0.75, ax_pos.y1-0.84, "traffic light", fontsize=14)
fig.text(ax_pos.x1-0.55, ax_pos.y1-0.84, "crossing intention", fontsize=14)
fig.text(ax_pos.x1-0.25, ax_pos.y1-0.84, "trajectory", fontsize=14)
axes.tick_params(labelsize=14)
axes.set_ylabel("Response Rate", fontsize=18)
axes.set_xlabel("")
handles, labels = axes.get_legend_handles_labels()
axes.legend(handles[::4], ["CR", "FA", "miss", "hit", "no_int"], bbox_to_anchor=(1.0, 1.0), loc='upper left', fontsize=14)
plt.show()
###############################################
# Workload
###############################################
workload = pd.read_csv("{}/workload.csv".format(data_path))
workload.satisfy = 10-workload.satisfy
workload_melted = pd.melt(workload, id_vars=["subject", "type"], var_name="scale", value_name="score")
#### nasa-tlx ####
for item in workload_melted.scale.drop_duplicates():
print(item)
_, norm_p1 = stats.shapiro(workload[workload.type == "trajectory"][item])
_, norm_p2 = stats.shapiro(workload[workload.type == "crossing intention"][item])
_, norm_p3 = stats.shapiro(workload[workload.type == "traffic light"][item])
_, var_p = stats.levene(
workload[workload.type == "trajectory"][item],
workload[workload.type == "crossing intention"][item],
workload[workload.type == "traffic light"][item],
center='median'
)
    if norm_p1 < 0.05 or norm_p2 < 0.05 or norm_p3 < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
workload[workload.type == "trajectory"][item],
workload[workload.type == "crossing intention"][item],
workload[workload.type == "traffic light"][item],
)
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print(sp.posthoc_conover(workload, val_col=item, group_col="type"))
else:
        aov = stats_anova.AnovaRM(workload_melted[workload_melted.scale == item], "score", "subject", ["type"])
        print("repeated anova: ", aov.fit())
        multicomp_result = multicomp.MultiComparison(workload_melted[workload_melted.scale == item].score, workload_melted[workload_melted.scale == item].type)
        print(multicomp_result.tukeyhsd().summary())
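# (comment added for clarity) Test selection above: if any group fails Shapiro normality
# or Levene's test flags unequal variances, the non-parametric Friedman test with a
# Conover post-hoc is used; otherwise a repeated-measures ANOVA with Tukey HSD.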
fig, ax = plt.subplots()
sns.barplot(x="scale", y="score", data=workload_melted, hue="type", hue_order=hue_order, ax=ax)
ax.set_ylim(0, 10)
ax.legend(bbox_to_anchor=(0.0, 1.0), loc='lower left', fontsize=14)
ax.set_xlabel("scale", fontsize=18)
ax.set_ylabel("score (lower is better)", fontsize=18)
ax.tick_params(labelsize=14)
plt.show()
###############################################
# necessary time
###############################################
time = pd.read_csv("/home/kuriatsu/Dropbox/documents/subjective_time.csv")
fig, ax = plt.subplots()
# mean_list = [
# time[time.type=="crossing intention"].ideal_time.mean(),
# time[time.type=="trajectory"].ideal_time.mean(),
# time[time.type=="traffic light"].ideal_time.mean(),
# ]
# sem_list = [
# time[time.type=="crossing intention"].ideal_time.sem(),
# time[time.type=="trajectory"].ideal_time.sem(),
# time[time.type=="traffic light"].ideal_time.sem(),
# ]
_, norm_p = stats.shapiro(time.ideal_time.dropna())
_, var_p = stats.levene(
time[time.type == 'crossing intention'].ideal_time.dropna(),
time[time.type == 'trajectory'].ideal_time.dropna(),
time[time.type == 'traffic light'].ideal_time.dropna(),
center='median'
)
if norm_p < 0.05 or var_p < 0.05:
print('steel-dwass\n', sp.posthoc_dscf(time, val_col='ideal_time', group_col='type'))
else:
multicomp_result = multicomp.MultiComparison(np.array(time.dropna(how='any').ideal_time, dtype="float64"), time.dropna(how='any').type)
print('levene', multicomp_result.tukeyhsd().summary())
sns.pointplot(x="type", y="ideal_time", hue="type", hue_order=hue_order, data=time, join=False, ax=ax, capsize=0.1, ci=95)
ax.set_ylim(0.5,3.5)
plt.yticks([1, 2, 3, 4], ["<3", "3-5", "5-8", "8<"])
plt.show()
###############################################
# compare prediction and intervention
###############################################
with open("/home/kuriatsu/Dropbox/data/pie202203/database.pkl", "rb") as f:
database = pickle.load(f)
tl_result = pd.read_csv("/home/kuriatsu/Dropbox/data/pie202203/tlr_result.csv")
overall_result = pd.DataFrame(columns=["id", "task", "subject", "gt", "int", "prediction"])
log_data = None
data_path = "/home/kuriatsu/Dropbox/data/pie202203"
for file in glob.glob(os.path.join(data_path, "log*.csv")):
buf = pd.read_csv(file)
filename =file.split("/")[-1]
count = float(filename.replace("log_data_", "").split("_")[-1].replace(".csv", ""))
print("{}".format(filename))
if count in [0, 1, 2]:
print("skipped")
continue
subject = filename.replace("log_data_", "").split("_")[0]
task = filename.replace("log_data_", "").split("_")[1]
for idx, row in buf.iterrows():
if task != "tl":
database_id = row.id+task+"_"+str(float(row.int_length))
prediction = (database[database_id].get("likelihood") <= 0.5)
gt = False if row.state else True
else:
database_id = row.id+"_"+str(float(row.int_length))
prediction = 1 if float(tl_result[tl_result.id == row.id].result) == 2 else 0
gt = False if row.state else True
if row.id in tl_black_list:
intervention = -2
if row.last_state == -1: # no intervention
intervention = -1
else:
if row.id in opposite_anno_list:
intervention = False if row.last_state else True
else:
intervention = row.last_state
buf = pd.DataFrame([(row.id, task, subject, int(gt), int(intervention), int(prediction))], columns = overall_result.columns)
overall_result = pd.concat([overall_result, buf])
overall_result.to_csv("/home/kuriatsu/Dropbox/data/pie202203/acc.csv")
| 44.696078 | 222 | 0.612547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,655 | 0.29195 |
fd4df1f5acd3eb66e203334228aa56f68ab7a4a9
| 302 |
py
|
Python
|
tests/huge.py
|
nbdy/podb
|
684ed6b8330c0d18a2b89d6521cb15586d1f95a4
|
[
"MIT"
] | null | null | null |
tests/huge.py
|
nbdy/podb
|
684ed6b8330c0d18a2b89d6521cb15586d1f95a4
|
[
"MIT"
] | null | null | null |
tests/huge.py
|
nbdy/podb
|
684ed6b8330c0d18a2b89d6521cb15586d1f95a4
|
[
"MIT"
] | null | null | null |
import unittest
from podb import DB
from tqdm import tqdm
from . import HugeDBItem
db = DB("huge")
class HugeDBTest(unittest.TestCase):
def test_huge_insert(self):
for _ in tqdm(range(1000000)):
db.insert(HugeDBItem.random())
if __name__ == '__main__':
unittest.main()
| 17.764706 | 42 | 0.682119 | 150 | 0.496689 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.05298 |
fd5095688e3adf6f9ca25f40240ff9d7e4246e41
| 153 |
py
|
Python
|
moto/sts/__init__.py
|
pll/moto
|
e49e67aba5d108b03865bdb42124206ea7e572ea
|
[
"Apache-2.0"
] | null | null | null |
moto/sts/__init__.py
|
pll/moto
|
e49e67aba5d108b03865bdb42124206ea7e572ea
|
[
"Apache-2.0"
] | null | null | null |
moto/sts/__init__.py
|
pll/moto
|
e49e67aba5d108b03865bdb42124206ea7e572ea
|
[
"Apache-2.0"
] | null | null | null |
from .models import sts_backend
from ..core.models import base_decorator
sts_backends = {"global": sts_backend}
mock_sts = base_decorator(sts_backends)
| 25.5 | 40 | 0.810458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.052288 |
fd518544ef8c44c965453eb8925336fcec4f3ee3
| 3,005 |
py
|
Python
|
convert_nbrId_to_orgnr.py
|
obtitus/barnehagefakta_osm
|
4539525f6defcc67a087cc57baad996f8d76b8bd
|
[
"Apache-2.0"
] | 1 |
2018-10-05T17:00:23.000Z
|
2018-10-05T17:00:23.000Z
|
convert_nbrId_to_orgnr.py
|
obtitus/barnehagefakta_osm
|
4539525f6defcc67a087cc57baad996f8d76b8bd
|
[
"Apache-2.0"
] | 6 |
2016-05-29T09:33:06.000Z
|
2019-12-18T20:24:50.000Z
|
convert_nbrId_to_orgnr.py
|
obtitus/barnehagefakta_osm
|
4539525f6defcc67a087cc57baad996f8d76b8bd
|
[
"Apache-2.0"
] | null | null | null |
# Database switched from having nsrId to using orgnr, this script helps with this conversion.
import os
import re
import json
import subprocess
from glob import glob
from utility_to_osm import file_util
if __name__ == '__main__':
data_dir = 'data' #'barnehagefakta_osm_data/data'
nsrId_to_orgnr_filename = 'nsrId_to_orgnr.json'
if False:
        # Done once, on an old dump of the database, to get the mapping from nsrId to orgnr
nsrId_to_orgnr = dict()
for kommune_nr in os.listdir(data_dir):
folder = os.path.join(data_dir, kommune_nr)
if os.path.isdir(folder):
print(folder)
count = 0
for filename in glob(os.path.join(folder, 'barnehagefakta_no_nbrId*.json')):
content = file_util.read_file(filename)
if content == '404':
# cleanup
os.remove(filename)
continue
dct = json.loads(content)
nsrId = dct['nsrId']
orgnr = dct['orgnr']
if nsrId in nsrId_to_orgnr and nsrId_to_orgnr[nsrId] != orgnr:
raise ValueError('Duplicate key %s, %s != %s' % (nsrId, nsrId_to_orgnr[nsrId], orgnr))
nsrId_to_orgnr[nsrId] = orgnr
count += 1
print('Found', count)
with open(nsrId_to_orgnr_filename, 'w') as f:
json.dump(nsrId_to_orgnr, f)
content = file_util.read_file(nsrId_to_orgnr_filename)
nsrId_to_orgnr = json.loads(content)
if True:
# Rename files
for kommune_nr in os.listdir(data_dir):
folder = os.path.join(data_dir, kommune_nr)
if os.path.isdir(folder):
print(folder)
count = 0
for filename in glob(os.path.join(folder, 'barnehagefakta_no_nbrId*.json')):
                    reg = re.search(r'barnehagefakta_no_nbrId(\d+)', filename)
if reg:
nbrId = reg.group(1)
try:
orgnr = nsrId_to_orgnr[nbrId]
except KeyError as e:
content = file_util.read_file(filename)
print('ERROR', repr(e), filename, content)
if content == '404':
os.remove(filename)
continue
new_filename = filename.replace('barnehagefakta_no_nbrId%s' % nbrId,
'barnehagefakta_no_orgnr%s' % orgnr)
subprocess.run(['git', 'mv', filename, new_filename])
# if the file is still there, probably not version controlled
if os.path.exists(filename):
os.rename(filename, new_filename)
| 40.608108 | 110 | 0.509151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 0.18203 |
fd51dbb2cb05ef487bcf83c509336b681bc19872
| 769 |
py
|
Python
|
regcore/tests/layer_tests.py
|
navigo/regulations-core
|
0b2a2034baacfa1cc5ff87f14db7d1aaa8d260c3
|
[
"CC0-1.0"
] | 17 |
2016-06-14T19:06:02.000Z
|
2021-10-03T23:46:00.000Z
|
regcore/tests/layer_tests.py
|
navigo/regulations-core
|
0b2a2034baacfa1cc5ff87f14db7d1aaa8d260c3
|
[
"CC0-1.0"
] | 42 |
2016-04-06T22:34:26.000Z
|
2020-04-14T22:00:24.000Z
|
regcore/tests/layer_tests.py
|
navigo/regulations-core
|
0b2a2034baacfa1cc5ff87f14db7d1aaa8d260c3
|
[
"CC0-1.0"
] | 20 |
2016-05-04T06:04:34.000Z
|
2020-10-07T16:16:03.000Z
|
from django.test import TestCase
from regcore.layer import standardize_params
class LayerParamsTests(TestCase):
def test_old_format(self):
lp = standardize_params('label', 'version')
self.assertEqual(lp.doc_type, 'cfr')
self.assertEqual(lp.doc_id, 'version/label')
self.assertEqual(lp.tree_id, 'label')
def test_new_format(self):
lp = standardize_params('cfr', 'version/label')
self.assertEqual(lp.doc_type, 'cfr')
self.assertEqual(lp.doc_id, 'version/label')
self.assertEqual(lp.tree_id, 'label')
lp = standardize_params('preamble', 'docid')
self.assertEqual(lp.doc_type, 'preamble')
self.assertEqual(lp.doc_id, 'docid')
self.assertEqual(lp.tree_id, 'docid')
| 33.434783 | 55 | 0.669701 | 687 | 0.893368 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.170351 |
fd5213aa3c0233313738c5ac6fd68800d2601767
| 459 |
py
|
Python
|
asal.py
|
kedigucuk01/asal-sayi-bulucu
|
dffc81cec5c4bbd6b4423d8991a5559a79f26f92
|
[
"MIT"
] | 2 |
2021-06-10T16:27:42.000Z
|
2021-06-11T10:54:24.000Z
|
asal.py
|
kedigucuk01/asal-sayi-bulucu
|
dffc81cec5c4bbd6b4423d8991a5559a79f26f92
|
[
"MIT"
] | 1 |
2021-06-15T11:08:58.000Z
|
2021-08-10T20:23:11.000Z
|
asal.py
|
kedigucuk01/asal-sayi-bulucu
|
dffc81cec5c4bbd6b4423d8991a5559a79f26f92
|
[
"MIT"
] | null | null | null |
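# (English note added for clarity) Prime-number checker with Turkish prompts: reads an
# integer and reports whether it is prime ("asal"); non-integer input prints an error.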
while True:
try:
i = int(input("Sayı giriniz: ")) # 2
except ValueError:
print("Hata Kodu: 5786, \nAçıklama: Lütfen bir \"tam sayı\" giriniz.")
else:
for s in range(2, i, 1):
if i%s == 0:
print(f"{i} sayısı, asal değildir.")
break
else:
if s == i - 1:
print(f"{i} sayısı, asal bir sayıdır.")
if i < 2:
print(f"{i} sayısı, asal değildir.")
elif i == 2:
print(f"{i} sayısı, asal bir sayıdır.")
| 20.863636 | 73 | 0.542484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.468619 |
fd543b58f8ff3f846e998d58939fe4d5bc4acf05
| 5,859 |
py
|
Python
|
main.py
|
MO-RISE/crowsnest-connector-cluon-n2k
|
11eaefd8ebe76829ec8fe91f99da9acbc84e5187
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
MO-RISE/crowsnest-connector-cluon-n2k
|
11eaefd8ebe76829ec8fe91f99da9acbc84e5187
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
MO-RISE/crowsnest-connector-cluon-n2k
|
11eaefd8ebe76829ec8fe91f99da9acbc84e5187
|
[
"Apache-2.0"
] | null | null | null |
"""Main entrypoint for this application"""
from pathlib import Path
from math import degrees
from datetime import datetime
import logging
import warnings
from environs import Env
from streamz import Stream
from paho.mqtt.client import Client as MQTT
from pycluon import OD4Session, Envelope as cEnvelope
from pycluon.importer import import_odvd
from marulc import NMEA2000Parser
from marulc.utils import filter_on_pgn, deep_get
from marulc.exceptions import MultiPacketInProcessError
from brefv.envelope import Envelope
from brefv.messages.observations.rudder import Rudder
from brefv.messages.observations.propeller import Propeller
# Reading config from environment variables
env = Env()
CLUON_CID = env.int("CLUON_CID", 111)
MQTT_BROKER_HOST = env("MQTT_BROKER_HOST")
MQTT_BROKER_PORT = env.int("MQTT_BROKER_PORT", 1883)
MQTT_CLIENT_ID = env("MQTT_CLIENT_ID", None)
MQTT_TRANSPORT = env("MQTT_TRANSPORT", "tcp")
MQTT_TLS = env.bool("MQTT_TLS", False)
MQTT_USER = env("MQTT_USER", None)
MQTT_PASSWORD = env("MQTT_PASSWORD", None)
MQTT_BASE_TOPIC = env("MQTT_BASE_TOPIC", "/test/test")
RUDDER_CONFIG = env.dict("RUDDER_CONFIG", default={})
PROPELLER_CONFIG = env.dict("PROPELLER_CONFIG", default={})
LOG_LEVEL = env.log_level("LOG_LEVEL", logging.WARNING)
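# Example configuration (illustrative only, not from the original repository): environs'
# env.dict parses comma-separated key=value pairs, so a deployment could set e.g.
#   RUDDER_CONFIG="0=rudder/main"  PROPELLER_CONFIG="0=propeller/port,1=propeller/starboard"
# to map NMEA 2000 instance ids (keys) to crowsnest sensor ids (values); the sensor id
# naming scheme shown here is an assumption.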
## Import and generate code for message specifications
THIS_DIR = Path(__file__).parent
memo = import_odvd(THIS_DIR / "memo" / "memo.odvd")
# Setup logger
logging.basicConfig(level=LOG_LEVEL)
logging.captureWarnings(True)
warnings.filterwarnings("once")
LOGGER = logging.getLogger("crowsnest-connector-cluon-n2k")
mq = MQTT(client_id=MQTT_CLIENT_ID, transport=MQTT_TRANSPORT)
# Not empty filter
not_empty = lambda x: x is not None
## Main entrypoint for N2k frames
entrypoint = Stream()
parser = NMEA2000Parser()
def unpack_n2k_frame(envelope: cEnvelope):
"""Extract an n2k frame from an envelope and unpack it using marulc"""
LOGGER.info("Got envelope from pycluon")
try:
frame = memo.memo_raw_NMEA2000()
frame.ParseFromString(envelope.serialized_data)
LOGGER.debug("Frame: %s", frame.data)
msg = parser.unpack(frame.data)
LOGGER.debug("Unpacked: %s", msg)
msg["timestamp"] = envelope.sampled
return msg
except MultiPacketInProcessError:
LOGGER.debug("Multi-packet currently in process")
return None
except Exception: # pylint: disable=broad-except
LOGGER.exception("Exception when unpacking a frame")
return None
unpacked = entrypoint.map(unpack_n2k_frame).filter(not_empty)
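# Pipeline note (comment added for clarity): pycluon pushes every received envelope into
# `entrypoint` via session.add_data_trigger(10002, entrypoint.emit) at the bottom of this
# file; each frame is unpacked once here and then fanned out to the per-PGN branches below
# (127245 -> rudder, 127488 -> propeller) before being rate-limited and published to MQTT.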
## Rudder
def pgn127245_to_brefv(msg):
"""Converting a marulc dict to a brefv messages and packaging it into a a brefv construct"""
n2k_id = str(deep_get(msg, "Fields", "instance"))
if sensor_id := RUDDER_CONFIG.get(n2k_id):
crowsnest_id = list(RUDDER_CONFIG.keys()).index(n2k_id)
rud = Rudder(
sensor_id=sensor_id, angle=degrees(-1 * msg["Fields"]["angleOrder"])
) # Negating to adhere to brefv conventions
envelope = Envelope(
sent_at=datetime.utcfromtimestamp(msg["timestamp"]).isoformat(),
message_type="https://mo-rise.github.io/brefv/0.1.0/messages/observations/rudder.json",
message=rud.dict(
exclude_none=True, exclude_unset=True, exclude_defaults=True
),
)
LOGGER.info("Brefv envelope with Rudder message assembled")
LOGGER.debug("Envelope:\n%s", envelope)
return f"/observations/rudder/{crowsnest_id}", envelope
warnings.warn(f"No Rudder config found for N2k instance id: {n2k_id}")
return None
brefv_rudder = (
unpacked.filter(filter_on_pgn(127245)).map(pgn127245_to_brefv).filter(not_empty)
)
## Propeller (Using engine data for now...)
def pgn127488_to_brefv(msg):
"""Converting a marulc dict to a brefv messages and packaging it into a a brefv construct"""
n2k_id = str(deep_get(msg, "Fields", "instance"))
if sensor_id := PROPELLER_CONFIG.get(n2k_id):
crowsnest_id = list(PROPELLER_CONFIG.keys()).index(n2k_id)
prop = Propeller(sensor_id=sensor_id, rpm=msg["Fields"]["speed"])
envelope = Envelope(
sent_at=datetime.utcfromtimestamp(msg["timestamp"]).isoformat(),
message_type="https://mo-rise.github.io/brefv/0.1.0/messages/observations/propeller.json", # pylint: disable=line-too-long
message=prop.dict(
exclude_none=True, exclude_unset=True, exclude_defaults=True
),
)
LOGGER.info("Brefv envelope with Propeller message assembled")
LOGGER.debug("Envelope:\n%s", envelope)
return f"/observations/propeller/{crowsnest_id}", envelope
warnings.warn(f"No Propeller config found for {n2k_id}")
return None
brefv_propeller = (
unpacked.filter(filter_on_pgn(127488)).map(pgn127488_to_brefv).filter(not_empty)
)
# Finally, publish to mqtt
def to_mqtt(data):
"""Push data to a mqtt topic"""
subtopic, envelope = data
topic = f"{MQTT_BASE_TOPIC}{subtopic}"
LOGGER.debug("Publishing on %s", topic)
try:
mq.publish(
topic,
envelope.json(),
)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Failed publishing to broker!")
if __name__ == "__main__":
print("All setup done, lets start processing messages!")
# Connect remaining pieces
brefv_rudder.latest().rate_limit(0.1).sink(to_mqtt)
brefv_propeller.latest().rate_limit(0.1).sink(to_mqtt)
# Connect to broker
mq.username_pw_set(MQTT_USER, MQTT_PASSWORD)
if MQTT_TLS:
mq.tls_set()
mq.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT)
# Register triggers
session = OD4Session(CLUON_CID)
session.add_data_trigger(10002, entrypoint.emit)
mq.loop_forever()
| 30.046154 | 135 | 0.70524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,821 | 0.310804 |
fd54b2677eda2400e60664de51925feee4550c09
| 7,569 |
py
|
Python
|
cocapi/client/api.py
|
bim-ba/coc-api
|
69ff957803cb991dfad8df3af752d193171f2ef0
|
[
"Unlicense"
] | 1 |
2022-03-29T12:39:36.000Z
|
2022-03-29T12:39:36.000Z
|
cocapi/client/api.py
|
bim-ba/coc-api
|
69ff957803cb991dfad8df3af752d193171f2ef0
|
[
"Unlicense"
] | null | null | null |
cocapi/client/api.py
|
bim-ba/coc-api
|
69ff957803cb991dfad8df3af752d193171f2ef0
|
[
"Unlicense"
] | null | null | null |
from typing import Any, Optional
from dataclasses import dataclass, field
import aiohttp
from ..types import aliases
from ..types import exceptions
@dataclass
class BaseMethod:
# config
base_url: aliases.Url | None = field(init=False, default=None)
default_http_method: aliases.RequestMethod | None = field(init=False, default=None)
# actual dataclass members
path: aliases.RelativeUrl
method: Optional[aliases.RequestMethod] = None
url: aliases.Url = field(init=False)
def __post_init__(self):
if not self.base_url:
raise NotImplementedError(
f"You must define static field 'base_url' for {self.__class__}"
) from None
if not self.method and not self.default_http_method:
raise NotImplementedError(
f"You must define either static field 'default_http_method' or pass it directly in {self.__class__}"
) from None
self.method = self.default_http_method if self.method is None else self.method
self.url = self.base_url + self.path
def __call__(self, **kwargs: Any):
try:
self.url = self.url.format(**kwargs)
return self
except KeyError as error:
(missing_field,) = error.args
raise KeyError(
f"Missing field: '{missing_field}' when formatting {self.url}"
) from error
class Methods:
class Method(BaseMethod):
base_url = "https://api.clashofclans.com/v1"
default_http_method = "GET"
# clans
CLANS = Method("/clans")
"""`GET`: `/clans`"""
CLAN = Method("/clans/{clantag}")
"""`GET`: `/clans/{clantag:str}`"""
CLAN_WARLOG = Method("/clans/{clantag}/warlog")
"""`GET`: `/clans/{clantag:str}/warlog`"""
CLAN_MEMBERS = Method("/clans/{clantag}/members")
"""`GET`: `/clans/{clantag:str}/members`"""
CLAN_CURRENT_WAR = Method("/clans/{clantag}/currentwar")
"""`GET`: `/clans/{clantag:str}/currentwar`"""
CLAN_CURRENT_WAR_LEAGUEGROUP = Method("/clans/{clantag}/currentwar/leaguegroup")
"""`GET`: `/clans/{clantag:str}/currentwar/leaguegroup`"""
CLAN_CURRENT_LEAGUE_WAR = Method("/clanwarleagues/wars/{wartag}")
"""`GET`: `/clanwarleagues/wars/{wartag:str}`"""
# players
PLAYER = Method("/players/{playertag}")
"""`GET`: `/players/{playertag:str}`"""
PLAYER_VERIFY_API_TOKEN = Method("/players/{playertag}/verifytoken", "POST")
"""`POST`: `/players/{playertag:str}/verifytoken`"""
# leagues
LEAGUES = Method("/leagues")
"""`GET`: `/leagues`"""
LEAGUE_INFO = Method("/leagues/{league_id}")
"""`GET`: `/leagues/{league_id:int}`"""
LEAGUE_SEASONS = Method("/leagues/{league_id}/seasons")
"""`GET`: `/leagues/{league_id:int}/seasons`"""
LEAGUE_SEASONS_RANKINGS = Method("/leagues/{league_id}/seasons/{season_id}")
"""`GET`: `/leagues/{league_id:int}/seasons/{season_id:int}`"""
WARLEAGUES = Method("/warleagues")
"""`GET`: `/warleagues`"""
WARLEAGUE_INFORMATION = Method("/warleagues/{league_id}")
"""`GET`: `/warleagues/{league_id:int}`"""
# locations
LOCATIONS = Method("/locations")
"""`GET`: `/locations`"""
LOCATION = Method("/locations/{location_id}")
"""`GET`: `/locations/{location_id:int}`"""
# rankings
CLAN_RANKINGS = Method("/locations/{location_id}/rankings/clans")
"""`GET`: `/locations/{location_id:int}/rankings/clans`"""
PLAYER_RANKINGS = Method("/locations/{location_id}/rankings/players")
"""`GET`: `/locations/{location_id:int}/rankings/players`"""
CLAN_VERSUS_RANKINGS = Method("/locations/{location_id}/rankings/clans-versus")
"""`GET`: `/locations/{location_id:int}/rankings/clans-versus`"""
PLAYER_VERSUS_RANKINGS = Method("/locations/{location_id}/rankings/players-versus")
"""`GET`: `/locations/{location_id:int}/rankings/players-versus`"""
# goldpass
GOLDPASS = Method("/goldpass/seasons/current")
"""`GET`: `/goldpass/seasons/current`"""
# labels
CLAN_LABELS = Method("/labels/clans")
"""`GET`: `/labels/clans`"""
PLAYER_LABELS = Method("/labels/players")
"""`GET`: `/labels/players`"""
# not used, but can be
class ServiceMethods:
class Method(BaseMethod):
base_url = "https://developer.clashofclans.com/api"
default_http_method = "POST"
# developer-api
LOGIN = Method("/login")
"""`POST`: `/login`"""
LIST_KEY = Method("/apikey/list")
"""`POST`: `/apikey/list`"""
CREATE_KEY = Method("/apikey/create")
"""`POST`: `/apikey/create`"""
REVOKE_KEY = Method("/apikey/revoke")
"""`POST`: `/apikey/revoke`"""
async def check_result(response: aiohttp.ClientResponse):
"""
Validate request for success.
Parameters
----------
response : aiohttp.ClientResponse
Actual response
Raises
------
``IncorrectParameters``, ``AccessDenied``, ``ResourceNotFound``,
``TooManyRequests``, ``UnknownError``, ``ServiceUnavailable``
According to the official Clash of Clans API
--------------------------------------------
- ``200`` Succesfull response.
- ``400`` Client provided incorrect parameters for the request.
- ``403`` Access denied, either because of missing/incorrect credentials or used API token does not grant access to the requested resource.
- ``404`` Resource was not found.
- ``429`` Request was throttled, because amount of requests was above the threshold defined for the used API token.
- ``500`` Unknown error happened when handling the request.
- ``503`` Service is temprorarily unavailable because of maintenance.
"""
# because json decoding must be executed in request context manager
json = await response.json()
if not response.ok:
match response.status:
case 400:
raise exceptions.IncorrectParameters(response)
case 403:
raise exceptions.AccessDenied(response)
case 404:
raise exceptions.ResourceNotFound(response)
case 429:
raise exceptions.TooManyRequests(response)
case 503:
raise exceptions.ServiceUnavailable(response)
case _: # 500 also
response_data = {
"error": json.get("error"),
"description": json.get("description"),
"reason": json.get("reason"),
"message": json.get("message"),
}
raise exceptions.UnknownError(response, data=response_data)
return response
async def make_request(
session: aiohttp.ClientSession,
api_method: BaseMethod,
**kwargs: Any,
) -> aiohttp.ClientResponse:
"""
Parameters
----------
session : ``aiohttp.ClientSession``
Client session to be used for requests
api_method : ``BaseMethod``
Request API method
**kwargs:
This keyword arguments are compatible with :meth:``aiohttp.ClientSession.request``
Returns
-------
aiohttp.ClientResponse
Response object.
"""
params = kwargs.pop("params", None)
if params is not None:
filtered_params = {
key: value for key, value in params.items() if value is not None
}
kwargs["params"] = filtered_params
async with session.request(
method=api_method.method, # type: ignore
url=api_method.url,
**kwargs,
) as response:
return await check_result(response)
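# --- Usage sketch (illustrative; not part of the original module) --------------------
# Calling a method object fills the placeholders in its path; the resulting object is
# then handed to `make_request` together with an aiohttp session. The clan tag below is
# made up. Note that the Method instances are shared class attributes, so formatting
# mutates them in place.
if __name__ == "__main__":
	clan_method = Methods.CLAN(clantag="%23ABC123")  # URL-encoded form of "#ABC123"
	assert clan_method.url == "https://api.clashofclans.com/v1/clans/%23ABC123"
	assert clan_method.method == "GET"
	# In real use, inside an async function and with a valid bearer token:
	#     async with aiohttp.ClientSession(headers={"Authorization": "Bearer <token>"}) as session:
	#         response = await make_request(session, clan_method)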
| 34.880184 | 143 | 0.62069 | 4,480 | 0.591888 | 0 | 0 | 1,262 | 0.166733 | 2,890 | 0.381821 | 3,797 | 0.501651 |
fd56674cc383ba9fa6321e89c2463e251d94abf2
| 28,594 |
py
|
Python
|
ratings.py
|
struct-rgb/ratings
|
40d56455406cfee9731c564e54ed7610b5a9641c
|
[
"MIT"
] | null | null | null |
ratings.py
|
struct-rgb/ratings
|
40d56455406cfee9731c564e54ed7610b5a9641c
|
[
"MIT"
] | null | null | null |
ratings.py
|
struct-rgb/ratings
|
40d56455406cfee9731c564e54ed7610b5a9641c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import re
import json
import random
from pathlib import Path
from datetime import date
from typing import Any, Callable, Set, Tuple
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from tags import Filter, Box, CompilationError, escape, enum_subject_parser_factory, tagset, PredicateDefinitions, DEFAULT_BOOL_PARSER, highlighting
from model import Search, Sort, Score, Status, Page, Tag, Rating, Model
#
# filter setup
#
parser_score = enum_subject_parser_factory(Score)
parser_status = enum_subject_parser_factory(Status)
####################
# RANDOM PREDICATE #
####################
def parser_random(string: str) -> float:
if string == 'nan':
raise ValueError("percentage cannot be nan")
percent = float(string)
if percent < 0.0 or 100.0 < percent:
raise ValueError(f"percentage {percent} is not in range 0 to 100 inclusive")
return percent
def action_random(percent: float, ignore: Any) -> bool:
return random.random() * 100 <= percent
PREDICATES = PredicateDefinitions(
action=lambda tag, rating: tag in rating.tags
)
###################
# COUNT PREDICATE #
###################
count_pattern = re.compile(r"^(\d+)\s+of\s+(.*)$")
def parser_count(string: str) -> Tuple[Box[int], Callable[[Any], bool]]:
items = count_pattern.fullmatch(string)
if not items:
raise ValueError(f"subject {string} is not of the form: <number> of <expression>")
integer = int(items[1])
if integer < 0:
raise ValueError(f"counting must begin from 0 or greater, not {integer}")
try:
subroutine = Filter(items[2], PREDICATES)
except CompilationError as e:
# intercept the error and change the source
# to the source that we're trying to compile
e.reason = "in quotation: " + e.reason
		e.source = string
raise e
return (Box(integer), subroutine)
def action_count(subject: Tuple[Box[int], Callable[[Any], bool]], rating: Rating) -> bool:
integer, subroutine = subject
if integer.value != 0 and subroutine(rating):
integer.value -= 1
return True
return False
##################
# EVAL PREDICATE #
##################
def parser_eval(source):
try:
return Filter(source, PREDICATES)
except CompilationError as e:
# intercept the error and change the source
# to the source that we're trying to compile
e.reason = "in quotation: " + e.reason
e.source = source
raise e
def parser_lower_str(source):
return source.lower()
###################
# DATE PREDICATES #
###################
def parser_date(isoformat: str):
try:
return date.fromisoformat(isoformat)
except ValueError as e:
raise ValueError("date must be in the format YYYY-MM-DD")
(PREDICATES
.define("tag",
readme="filter for ratings with the specified tag",
action=lambda tag, rating: tag in rating.tags,
parser=lambda x: x,
)
.define("score",
readme="filter for ratings with the specified score",
action=lambda score, rating: score == rating.score,
parser=parser_score,
)
.define("minimum score",
readme="filter for ratings with at least a certain score",
action=lambda score, rating: rating.score >= score,
parser=parser_score,
)
.define("maximum score",
readme="filter for ratings with at most a certain score",
action=lambda score, rating: rating.score <= score,
parser=parser_score,
)
.define("status",
readme="filter for ratings with the specified status",
action=lambda status, rating: status == rating.status,
parser=parser_status,
)
.define("minimum status",
readme="filter for ratings with at least a certain status",
action=lambda status, rating: rating.status >= status,
parser=parser_status,
)
.define("maximum status",
readme="filter for ratings with at most a certain status",
action=lambda status, rating: rating.status <= status,
parser=parser_status,
)
.define("tags",
readme="filter for ratings with a specific number of tags",
action=lambda number, rating: len(rating.tags) == number,
parser=int,
)
.define("minimum tags",
readme="filter for ratings with a specific number of tags",
action=lambda number, rating: len(rating.tags) >= number,
parser=int,
)
.define("maximum tags",
readme="filter for ratings with a specific number of tags",
action=lambda number, rating: len(rating.tags) <= number,
parser=int,
)
.define("random",
readme="filter ratings with a percent chance to include each",
action=action_random,
parser=parser_random,
pure=False
)
.define("count", # TODO possibly remove
readme="filter for a certain number of results at most",
action=action_count,
parser=parser_count,
pure=False
)
.define("eval", # TODO possibly remove
readme="evaluate a string as an expression",
action=lambda function, rating: function(rating),
parser=parser_eval,
pure=False
)
.define("title",
readme="filter for ratings with certain text in the title (case insensitive)",
action=lambda string, rating: rating.title.lower().find(string) != -1,
parser=parser_lower_str,
)
.define("comment",
readme="filter for ratings with certain text in the comments (case insensitive)",
action=lambda string, rating: rating.comments.lower().find(string) != -1,
parser=parser_lower_str,
)
.define("text",
readme="filter for ratings with certain text in the title or the comments (case insensitive)",
action=lambda string, rating: (
rating.title.lower().find(string) != -1 or rating.comments.lower().find(string) != -1
),
parser=parser_lower_str,
)
.define("commented",
readme="filter for ratings that either have or lack a comment",
action=lambda boolean, rating: bool(rating.comments) == boolean,
parser=DEFAULT_BOOL_PARSER,
)
.define("value",
readme="a literal boolean value; true or false",
action=lambda boolean, rating: boolean,
parser=DEFAULT_BOOL_PARSER,
)
.define("modified",
readme="ratings modified on YYYY-MM-DD",
action=lambda day, rating: rating.modified == day,
parser=parser_date,
)
.define("modified after",
readme="ratings modified after YYYY-MM-DD",
action=lambda day, rating: rating.modified > day,
parser=parser_date,
)
.define("modified before",
readme="ratings modified before YYYY-MM-DD",
action=lambda day, rating: rating.modified < day,
parser=parser_date,
)
.define("created",
readme="ratings created on YYYY-MM-DD",
action=lambda day, rating: rating.created == day,
parser=parser_date,
)
.define("created after",
readme="ratings created after YYYY-MM-DD",
action=lambda day, rating: rating.created > day,
parser=parser_date,
)
.define("created before",
readme="ratings created before YYYY-MM-DD",
action=lambda day, rating: rating.created < day,
parser=parser_date,
)
# alias definitions
.alias("minimum score", "min score")
.alias("maximum score", "max score")
.alias("minimum status", "min status")
.alias("maximum status", "max status")
.alias("minimum tags", "min tags")
.alias("maximum tags", "max tags")
.alias("commented", "has comment")
)
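# Usage sketch (illustrative, not part of the original file): a query string is compiled
# once against PREDICATES and then used as a plain callable over Rating objects, exactly
# as create_rating_filter below does. The query text itself is an assumption -- its exact
# grammar is implemented by tags.Filter, which is not defined in this file.
#
#     flt = Filter("status: completed and min score: 3", PREDICATES)
#     matching = [rating for rating in some_ratings if flt(rating)]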
def create_rating_filter(filter_tab):
search = filter_tab.search
if search == Search.COMMENTS:
criterion = filter_tab.query.lower()
elif search == Search.ADVANCED:
adv = filter_tab.advanced
criterion = Filter(adv if adv else filter_tab.query, PREDICATES)
elif search == Search.TITLE:
criterion = filter_tab.query.lower()
else:
pass
def function(rating):
if search == Search.COMMENTS:
if rating.comments.lower().find(criterion) == -1:
return False
elif search == Search.ADVANCED:
if not criterion(rating):
return False
elif search == Search.TITLE:
if rating.title.lower().find(criterion) == -1:
return False
else:
pass
return True
return function
def create_tagging_filter(filter_tab):
search = filter_tab.tags_search
criterion = filter_tab.tags_query.lower()
def function(tag):
if search == Search.COMMENTS:
if tag.description.lower().find(criterion) == -1:
return False
elif search == Search.TITLE:
if tag.name.lower().find(criterion) == -1:
return False
else:
pass
return True
return (lambda item: True) if criterion == "" else function
class FilterTab(object):
def __init__(self, builder, model):
self._search = builder.get_object("filter_search_combobox")
self._tags_search = builder.get_object("filter_tags_search_combobox")
self._tags_ascending = builder.get_object("filter_tags_ascending")
self._tags_descending = builder.get_object("filter_tags_descending")
self._sort = builder.get_object("filter_sort_combobox")
self._query = builder.get_object("search_entry")
self._tags_query = builder.get_object("tagging_search_entry")
self._ascending = builder.get_object("filter_ascending")
self._descending = builder.get_object("filter_descending")
self.model = model
self._advanced = builder.get_object("highlight_textview").get_buffer()
self._advanced.create_tag("operator", foreground="red", background=None)
self._advanced.create_tag("predicate", foreground="magenta")
self._advanced.create_tag("grouping", foreground="red")
self._advanced.create_tag("error", foreground="white", background="red")
self.reset()
self.reset_tags()
def reset(self):
self.advanced = ""
self.search = Search.TITLE
self.sort = Sort.TITLE
def reset_tags(self):
self.tags_search = Search.TITLE
# BUG setting sorting does not actually work
# only sorts by title regardless of function
def configure_sorting(self, tree_sortable):
# a descending order is a good default
if self.descending:
order = Gtk.SortType.DESCENDING
else:
order = Gtk.SortType.ASCENDING
tree_sortable.set_sort_column_id(0, order)
sort = self.sort
if sort == Sort.SCORE:
def sort_func(model, a, b, userdata):
x = userdata[model.get(a, 1)].score
y = userdata[model.get(b, 1)].score
return 1 if x > y else -1 if x < y else 0
elif sort == Sort.STATUS:
def sort_func(model, a, b, userdata):
x = userdata[model.get(a, 1)].status
y = userdata[model.get(b, 1)].status
return 1 if x > y else -1 if x < y else 0
elif sort == Sort.TITLE:
def sort_func(model, a, b, userdata):
x = userdata[model.get(a, 1)].title
y = userdata[model.get(b, 1)].title
return 1 if x > y else -1 if x < y else 0
else:
raise ValueError('Enum value "%s" unknown for Sort' % sort)
tree_sortable.set_sort_func(1, sort_func, self.model.ratings)
def configure_sorting_tags(self, tree_sortable):
# a descending order is a good default
if self.tags_descending:
order = Gtk.SortType.DESCENDING
else:
order = Gtk.SortType.ASCENDING
tree_sortable.set_sort_column_id(0, order)
def _check_tag(self, criterion, tag):
return True
def create(self, kind):
if kind is Rating:
return create_rating_filter(self)
elif kind is Tag:
return create_tagging_filter(self)
else:
# TODO
raise TypeError("kind must be one of Rating or Tag")
def highlight(self):
highlights = highlighting(self.advanced, PREDICATES.keys())
self._advanced.remove_all_tags(*self._advanced.get_bounds())
for position, token, tag in highlights:
start = self._advanced.get_iter_at_offset(position)
end = self._advanced.get_iter_at_offset(position + len(token))
self._advanced.apply_tag_by_name(tag, start, end)
@property
def query(self):
return self._query.get_text()
@query.setter
def query(self, value):
self._query.set_text(value)
@property
def advanced(self):
return self._advanced.get_text(*self._advanced.get_bounds(), True)
@advanced.setter
def advanced(self, value):
self._advanced.set_text(value)
self.highlight()
@property
def tags_query(self):
return self._tags_query.get_text()
@tags_query.setter
def tags_query(self, value):
self._tags_query.set_text(value)
@property
def ascending(self):
return self._ascending.get_active()
@ascending.setter
def ascending(self, value):
self._ascending.set_active(value)
@property
def descending(self):
return self._descending.get_active()
@descending.setter
def descending(self, value):
self._descending.set_active(value)
@property
def tags_ascending(self):
return self._tags_ascending.get_active()
	@tags_ascending.setter
def tags_ascending(self, value):
self._tags_ascending.set_active(value)
@property
def tags_descending(self):
return self._tags_descending.get_active()
	@tags_descending.setter
def tags_descending(self, value):
self._tags_descending.set_active(value)
@property
def search(self):
return Search(int(self._search.get_active_id()))
@search.setter
def search(self, value):
self._search.set_active_id(str(value.value))
@property
def tags_search(self):
return Search(int(self._tags_search.get_active_id()))
@tags_search.setter
def tags_search(self, value):
self._tags_search.set_active_id(str(value.value))
@property
def sort(self):
return Sort(int(self._sort.get_active_id()))
@sort.setter
def sort(self, value):
self._sort.set_active_id(str(value.value))
class EditorTab(object):
def __init__(self, builder, model):
self._idnum = builder.get_object("id_label")
self._title = builder.get_object("title_entry")
self._score = builder.get_object("score_combobox")
self._recommendation = builder.get_object("recommend_combobox")
self._status = builder.get_object("status_combobox")
self._comments = builder.get_object("comments_textview").get_buffer()
self._tags = builder.get_object("tags_textview").get_buffer()
self._original_tags = None
self._created_label = builder.get_object("created_label")
self._modified_label = builder.get_object("modified_label")
self._created = date.today()
self._modified = date.today()
self.model = model
def copy_to(self, rating):
modified = (
rating.score != self.score
or rating.status != self.status
or rating.title != self.title
or rating.comments != self.comments
)
rating.title = self.title
rating.score = self.score
rating.status = self.status
rating.comments = self.comments
if self._original_tags != self._tags.get_text(*self._tags.get_bounds(), True):
self.model.change_tags(rating, self.tags)
modified = True
if modified:
rating.modified = date.today()
self.modified = rating.modified
def copy_from(self, value):
self.idnum = value.idnum
self.title = value.title
self.score = value.score
self.status = value.status
self.comments = value.comments
self.tags = value.tags
		# set date attributes/labels; these are not editable
self.created = value.created
self.modified = value.modified
@property
def idnum(self):
return self._idnum.get_text()
@idnum.setter
def idnum(self, number):
self._idnum.set_text(str(number))
@property
def title(self):
return self._title.get_text()
@title.setter
def title(self, text):
self._title.set_text(text)
@property
def score(self):
return Score(int(self._score.get_active_id()))
@score.setter
def score(self, value):
self._score.set_active_id(str(value.value))
@property
def status(self):
return Status(int(self._status.get_active_id()))
@status.setter
def status(self, value):
self._status.set_active_id(str(value.value))
@property
def comments(self):
start, end = self._comments.get_bounds()
return self._comments.get_text(start, end, True)
@comments.setter
def comments(self, text):
self._comments.set_text(text)
@property
def tags(self):
start, end = self._tags.get_bounds()
text = self._tags.get_text(start, end, True)
return tagset(text)
@tags.setter
def tags(self, tag_set):
self._original_tags = ", ".join([escape(tag) for tag in tag_set])
self._tags.set_text(self._original_tags)
@property
def created(self):
return self._created
@created.setter
def created(self, day):
self._created = day
self._created_label.set_text(day.isoformat())
@property
def modified(self):
return self._modified
@modified.setter
def modified(self, day):
self._modified = day
self._modified_label.set_text(day.isoformat())
def clear(self):
self.title = ""
self.score = Score.UNSPECIFIED
self.status = Status.UNSPECIFIED
self.comments = ""
self.tags = ""
self._created = date.today()
self._modified = date.today()
class FilesTab(object):
def __init__(self, builder):
self._path = builder.get_object("filepath_entry")
self._chooser = builder.get_object("file_chooser_button")
self._chooser.set_current_folder(str(Path.home()))
# self.update_path()
@property
def path(self):
return self._path.get_text()
@path.setter
def path(self, text):
self._path.set_text(text)
def update_path(self):
self.path = self._chooser.get_filename()
class TaggingTab(object):
def __init__(self, builder, model):
self._idnum = builder.get_object("tagging_id_label")
self._name = builder.get_object("tagging_name_entry")
self._description = builder.get_object("tagging_description_textview").get_buffer()
self._original_name = None
self.selected = None
self.selected_row = None
self.model = model
def sort_func(model, a, b, userdata):
x = model.get(a, 0)
y = model.get(b, 0)
return 1 if x > y else -1 if x < y else 0
self.liststore = Gtk.ListStore(str)
self.liststore.set_sort_column_id(0, Gtk.SortType.DESCENDING)
self.liststore.set_sort_func(0, sort_func, self.model.tags)
self.treeview = builder.get_object("tagging_treeview")
self.treeview.set_model(self.liststore)
self.title_column = Gtk.TreeViewColumn("Name", Gtk.CellRendererText(), text=0)
self.treeview.append_column(self.title_column)
self.results_label = builder.get_object("tagging_results_label")
self.instances_label = builder.get_object("tagging_instances_label")
self.filter = lambda item: True
def copy_to(self, tag):
name = self.name
if self._original_name is not None and name != self._original_name:
self.model.rename(tag, name)
self.selected = None
self.refresh()
tag.description = self.description
def copy_from(self, tag):
self.idnum = tag.idnum
self.name = tag.name
self._original_name = self.name
self.description = tag.description
if tag.instances == 1:
self.instances_label.set_text("1 instance")
else:
self.instances_label.set_text("%i instances" % tag.instances)
def change_selection(self):
if self.selected is not None:
self.copy_to(self.selected)
model, tag = self.treeview.get_selection().get_selected()
if tag is None:
return
name = model.get_value(tag, 0)
self.selected = self.model.tags[name]
self.selected_row = self.liststore[tag]
self.copy_from(self.model.tags[name])
def remove(self):
self.model.remove(self.selected)
		tag = self.liststore.get_iter_first()
		has_next_row = False
while tag is not None:
if self.liststore.get_value(tag, 0) == self.selected.name:
has_next_row = self.liststore.remove(tag)
break
tag = self.liststore.iter_next(tag)
if has_next_row or (tag := self.liststore.get_iter_first()) is not None:
name = self.liststore.get_value(tag, 0)
self.selected = self.model.tags[name]
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.copy_from(self.selected)
else:
self.selected = None
self.name = ""
self.description = ""
self.idnum = -1
self.instances_label.set_text("0 instances")
def vacuum(self):
self.model.vacuum()
self.refresh()
def refresh(self):
results = self.model.fill_in(Tag, self.liststore, self.filter)
self.results_label.set_text("%i results" % results)
if self.selected is not None:
for row in self.liststore:
if row[0] == self.selected.name:
self.selected_row = row
break
else:
self.selected = None
self.selected_row = None
@property
def idnum(self):
return self._idnum.get_text()
@idnum.setter
def idnum(self, number):
self._idnum.set_text(str(number))
@property
def name(self):
return self._name.get_text()
@name.setter
def name(self, value):
self._name.set_text(value)
@property
def description(self):
start, end = self._description.get_bounds()
return self._description.get_text(start, end, True)
@description.setter
def description(self, text):
self._description.set_text(text)
class Rater:
#
# Location of configuration file
#
CONFIG = Path.home() / ".ratings.json"
def __init__(self):
builder = Gtk.Builder.new_from_file("ratings.glade")
self.selected = None
self.selected_row = None
self.model = Model()
self.editor = EditorTab(builder, self.model)
self.filter = FilterTab(builder, self.model)
self.files = FilesTab(builder)
self.tagging = TaggingTab(builder, self.model)
self.liststore = Gtk.ListStore(str, int)
self.treeview = builder.get_object("search_treeview")
self.treeview.set_model(self.liststore)
self.title_column = Gtk.TreeViewColumn("Title", Gtk.CellRendererText(), text=0)
self.treeview.append_column(self.title_column)
self.filter.configure_sorting(self.liststore)
self.rater_window = builder.get_object("rater_window")
self.rater_window.connect("destroy", self.exit_rater)
self.rater_window.show_all()
self.results_label = builder.get_object("results_label")
self.message_window = builder.get_object("message_window")
self.query_label = builder.get_object("message_window_query_label")
self.error_label = builder.get_object("message_window_error_label")
# TODO fix it so that closing the message window via border icon doesn't destroy it
self.message_window.connect("destroy",
self.on_close_message_button_clicked
)
self.message_window.connect("delete-event",
lambda widget, event: self.close_message_window()
)
self.message_window.connect("response",
lambda widget, event: self.close_message_window()
)
builder.connect_signals(self)
if Rater.CONFIG.is_file():
config = json.loads(Rater.CONFIG.read_text())
file = config["last_open"]
self.files.path = file if file is not None else ""
if not self.open_file():
self.new_file()
else:
self.file = None
self.new_file()
def exit_rater(self, widget):
Rater.CONFIG.write_text(
json.dumps({"last_open": self.file}, indent=4)
)
Gtk.main_quit()
def close_message_window(self):
self.message_window.hide()
def open_file(self):
if not self.model.load(self.files.path):
return False
results = self.model.fill_in(Tag, self.tagging.liststore)
self.tagging.results_label.set_text("%i results" % results)
results = self.model.fill_in(Rating, self.liststore)
self.results_label.set_text("%s results" % results)
tag = self.liststore.get_iter_first()
index = self.liststore.get_value(tag, 1)
self.selected = self.model.ratings[index]
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.editor.copy_from(self.selected)
self.file = self.files.path
return True
def save_file(self):
if self.selected is not None:
self.editor.copy_to(self.selected)
self.model.save(self.files.path)
def new_file(self):
self.liststore.clear()
self.model.clear()
self.new_rating()
def new_rating(self):
if self.selected is not None:
self.editor.copy_to(self.selected)
self.selected = self.model.create_rating()
tag = self.liststore.prepend(
[self.selected.title, self.selected.idnum]
)
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.editor.copy_from(self.selected)
def delete_rating(self):
self.model.remove(self.selected)
		tag = self.liststore.get_iter_first()
		has_next_row = False
while tag is not None:
if self.liststore.get_value(tag, 1) == self.selected.idnum:
has_next_row = self.liststore.remove(tag)
break
tag = self.liststore.iter_next(tag)
if has_next_row or (tag := self.liststore.get_iter_first()) is not None:
idnum = self.liststore.get_value(tag, 1)
self.selected = self.model.ratings[idnum]
path = self.liststore.get_path(tag)
self.selected_row = self.liststore[path]
self.treeview.set_cursor(path, self.title_column, False)
self.editor.copy_from(self.selected)
else:
self.new_rating()
def change_selection(self):
if self.selected is not None:
self.editor.copy_to(self.selected)
model, tag = self.treeview.get_selection().get_selected()
if tag is None:
return
idnum = model.get_value(tag, 1)
self.selected = self.model.ratings[idnum]
self.selected_row = self.liststore[tag]
self.editor.copy_from(self.selected)
def on_new_rating_button_clicked(self, widget):
self.new_rating()
def on_delete_rating_button_clicked(self, widget):
self.delete_rating()
def on_search_treeview_row_activated(self, widget, column, index):
self.change_selection()
def on_tagging_treeview_row_activated(self, widget, column, index):
self.tagging.change_selection()
def on_title_entry_changed(self, widget):
if self.selected_row is not None:
self.selected_row[0] = self.editor.title
def on_filter_apply_clicked(self, widget, option_a=None, option_b=None):
try:
function = self.filter.create(Rating)
except CompilationError as error:
self.query_label.set_text(
"%s%s" % (
error.source if error.source else self.filter.query,
"\n" + error.underline_string() if error.position else "",
)
)
self.error_label.set_text(error.reason)
self.message_window.show_all()
return
self.liststore.clear()
results = self.model.fill_in(Rating, self.liststore, function)
self.results_label.set_text("%i results" % results)
if self.selected is not None:
for row in self.liststore:
if row[1] == self.selected.idnum:
self.selected_row = row
break
else:
self.selected = None
self.selected_row = None
self.filter.configure_sorting(self.liststore)
def on_filter_tags_apply_clicked(self, widget, option_a=None, option_b=None):
self.tagging.filter = self.filter.create(Tag)
self.filter.configure_sorting_tags(self.tagging.liststore)
self.tagging.refresh()
def on_filter_reset_clicked(self, widget):
self.filter.reset()
def on_filter_tags_reset_clicked(self, widget):
self.filter.reset_tags()
def on_switch_page(self, widget, page, index):
if self.selected is not None:
self.editor.copy_to(self.selected)
if index == Page.TAGGING.value:
self.tagging.refresh()
elif self.tagging.selected is not None:
self.tagging.copy_to(self.tagging.selected)
def on_file_chooser_button_file_set(self, other):
self.files.update_path()
def on_open_file_button_clicked(self, widget):
self.open_file()
def on_save_file_button_clicked(self, widget):
self.save_file()
def on_new_file_button_clicked(self, widget):
self.new_file()
def on_attach_tag_button_clicked(self, widget):
if self.selected is not None and self.tagging.selected is not None:
self.model.attach_tag(self.selected, self.tagging.selected)
self.editor.tags = self.selected.tags
def on_detach_tag_button_clicked(self, widget):
if self.selected is not None and self.tagging.selected is not None:
self.model.detach_tag(self.selected, self.tagging.selected)
self.editor.tags = self.selected.tags
def on_delete_tag_button_clicked(self, widget):
if self.selected is not None and self.tagging.selected is not None:
self.editor.tags = self.selected.tags
self.tagging.remove()
def on_vacuum_tag_button_clicked(self, widget):
self.tagging.vacuum()
def on_search_tag_button_clicked(self, widget):
self.filter.search = Search.ADVANCED
self.filter.advanced = ""
self.filter.query = self.tagging.name
self.on_filter_apply_clicked(widget)
def on_close_message_button_clicked(self, widget):
self.close_message_window()
def on_highlight_buffer_changed(self, widget):
self.filter.highlight()
def main():
rater = Rater()
Gtk.main()
if __name__ == '__main__':
main()
| 26.305428 | 148 | 0.708435 | 20,378 | 0.712667 | 0 | 0 | 3,710 | 0.129747 | 0 | 0 | 3,782 | 0.132266 |
fd567ff8b78d041903de62043964d3c66a7450a4
| 10,218 |
py
|
Python
|
K64F Python Interfacing Testing/Loop_Read.py
|
Marnold212/CamLab-K64F
|
20689b4be38aa329990dbfe13eec43d74b3ae27a
|
[
"Apache-2.0"
] | null | null | null |
K64F Python Interfacing Testing/Loop_Read.py
|
Marnold212/CamLab-K64F
|
20689b4be38aa329990dbfe13eec43d74b3ae27a
|
[
"Apache-2.0"
] | null | null | null |
K64F Python Interfacing Testing/Loop_Read.py
|
Marnold212/CamLab-K64F
|
20689b4be38aa329990dbfe13eec43d74b3ae27a
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from serial.serialutil import SerialException
import serial.tools.list_ports as port_list
import serial
import time
def Hex_To_Dec(input): # input of form "40048024"
return int(input, 16)
def Hex_To_Bin(input): # input of form "40048024"
return bin(int(input, 16))
def Hex_To_Bytes(input): # input of form "40048024"
return bytes.fromhex(input)
# def List_All_Mbed_USB_Devices(self):
def List_All_Mbed_USB_Devices():
ports = list(port_list.comports())
Num_Serial_Devices = len(ports)
Num_Mbed_Devices = 0
COM_PORTS = []
connectionType = [] # Create a unique value for USB K64F devices which trigger new functions
# Say 11 = mbed USB, 10 = mbed ANY, 12 = mbed TCP, 14 = mbed WIFI
    VID_PID = [] # USB VID:PID are the Vendor/Product ID respectively - same for each K64F Board? - You can determine HIC ID from last 8 digits of Serial Number?
# Note that the 0240 at the start of Serial Number refers to the K64F Family
ID_USB = [] # ID_USB will be the USB serial number - should be unique
Baud_Rate = [] # For now assume all operating at 9600 - may change later so might need to add later on
# IP = [] # Don't think we need this for USB Serial(Mbed) devices
if Num_Serial_Devices > 0:
for i in range(Num_Serial_Devices):
COM_Port = ports[i].usb_description() # ports[i].device outputs COM_PORT (Note port[i][0][0:16] is a particular device - port[i][0] is the COM Port of the device)
if(ports[i][1].startswith("mbed Serial Port")): # port[i] is a particular device - port[i][1] is the description of the device - port[i][1][0:16] are the characters containing the mbed Serial Port description
default_baudrate = 115200 # Assume all boards use default baudrate of 9600
try:
Serial_device = serial.Serial(port=COM_Port, baudrate=default_baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
except:
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
# How can we/Do we need to check we have actually connected to device - and that it is meant to be used for what we are using it for
if(not Serial_device.readable()):
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
Num_Mbed_Devices += 1
COM_PORTS.append(COM_Port)
USB_INFO = ports[i].usb_info().split('=') # USB-PID should be Unique
USB_VIDPID = USB_INFO[1].split(' ')[0]
VID_PID.append(USB_VIDPID)
USB_Serial_Number = USB_INFO[2].split(' ')[0]
ID_USB.append(USB_Serial_Number)
connectionType.append(11) # Added 10 onto definitions used by LJM library to avoid mixing up - however can change if confusing
Serial_device.close() # Close COM Port communication once info obtained
if(ports[i][1].startswith("USB Serial Device")):
default_baudrate = 115200 # Assume all boards use default baudrate of 9600
try:
Serial_device = serial.Serial(port=COM_Port, baudrate=default_baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
except:
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
# How can we/Do we need to check we have actually connected to device - and that it is meant to be used for what we are using it for
if(not Serial_device.readable()):
raise Exception ("Issues connecting with mbed Device on %s", COM_Port) # Need to implement proper error handling
Num_Mbed_Devices += 1
COM_PORTS.append(COM_Port)
USB_INFO = ports[i].usb_info().split('=') # USB-PID should be Unique
USB_VIDPID = USB_INFO[1].split(' ')[0]
VID_PID.append(USB_VIDPID)
USB_Serial_Number = USB_INFO[2].split(' ')[0]
ID_USB.append(USB_Serial_Number)
connectionType.append(11) # Added 10 onto definitions used by LJM library to avoid mixing up - however can change if confusing
Serial_device.close() # Close COM Port communication once info obtained
return(Num_Mbed_Devices, COM_PORTS, connectionType, ID_USB, VID_PID)
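# (comment added for clarity) The function returns a 5-tuple:
# (Num_Mbed_Devices, COM_PORTS, connectionType, ID_USB, VID_PID) -- this is what the
# __main__ block at the bottom of the file indexes as mbed_USB_info[0]..mbed_USB_info[4].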
def _Serial_Read_Raw(Serial_Device, Expected_Bytes):
serialString = "" # Used to hold data coming over UART
# NEED TO ADD A TIMEOUT TO FOLLOWING LINE
# Removing while(1) should allow the read(size=Expected_Bytes) to naturally timeout after configured time
    ''' Don't need this chunk since we know how many bytes we expect to get back
Therefore we can use the builtin timeout from the pyserial library
while(1):
print(self.Serial_Device.in_waiting)
if self.Serial_Device.in_waiting > 0:
# Read data out of the buffer until a carriage return / new line is found
            # Note there is an inherent issue with this where, if a transmitted value has the equivalent hex value of 0a, it triggers the end of line
# If we are sending unsigned bytes across channel, inevitably some will have the value of '\n' character
# serialString = serialPort.readline()
'''
serialString = Serial_Device.read(size=Expected_Bytes)
serialString = serialString.hex() # Decode bytes into raw hex values
# if(serialString[Expected_Bytes-1] != 10): # 10 = 0x0a = '\n' LF character in Ascii
if(serialString[-2: ] != '0a'): # 10 = 0x0a = '\n' LF character in Ascii
# raise Exception ("Issue with Received Data") # Need to implement proper error handling
print("Error", serialString)
else:
return serialString[:-2]
# def Reverse_4byte_hex(input):
# reverse = ""
# if(len(input) == 4*2):
# reverse += input[6:8] + input[4:6] + input[2:4] + input[0:2]
# return reverse
def Reverse_Hex_Byte_Order(input):
reverse = ""
if(len(input) %2 == 0): # Should be twice as many hex characters as bytes therefore even number of hex
Num_Bytes = len(input) / 2
x = (int)(Num_Bytes)
while(x > 0):
y = 2 * (x - 1)
reverse += input[y : y+2]
x -= 1
return reverse
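# Worked example (added for clarity, not in the original): multi-byte fields arrive
# least-significant byte first, so the hex pairs are swapped end to end, e.g.
#     Reverse_Hex_Byte_Order("efbeadde") -> "deadbeef"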
def ADC_8x_16_Raw_Read(Serial_Device):
    Expected_Bytes = 8*2 + 4 + 1 # Include EOL Char
Raw = _Serial_Read_Raw(Serial_Device, Expected_Bytes)
data = []
seconds = Hex_To_Dec(Reverse_Hex_Byte_Order(Raw[0:8]))
for x in range(2, 8+2, 2): # Bytes
y = x+1 # Due to byte order of device -
data.append(Hex_To_Dec(Raw[(4*y) + 0 : (4*y) + 4]))
data.append(Hex_To_Dec(Raw[(4*x) + 0 : (4*x) + 4]))
return seconds, data
# Assumes data received is a signed 16-bit (two's-complement) raw ADC reading
def Convert_ADC_Raw(Raw_Reading, ADC_Resolution, Max_Min_Voltage):
Signed_Value = np.int16(Raw_Reading)
quant_step = (2 * Max_Min_Voltage) / (2**ADC_Resolution)
return Signed_Value * quant_step
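# Worked example (added for clarity, not in the original): with a 16-bit ADC spanning
# +/-5 V, quant_step = 10 / 65536 ~ 152.6 uV per count, so
#     Convert_ADC_Raw(0x7FFF, 16, 5) ~ +5.0 V    and    Convert_ADC_Raw(0x8000, 16, 5) == -5.0 V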
def Decode_Time_In_Secs(HEX_four_bytes):
return Hex_To_Dec(Reverse_Hex_Byte_Order(HEX_four_bytes))
def Decode_8x16_Raw(HEX_sixteen_bytes):
if(len(HEX_sixteen_bytes) != 16*2):
raise ValueError
Raw_Readings = []
for x in range(8):
Raw_Readings.append(Hex_To_Dec(Reverse_Hex_Byte_Order(HEX_sixteen_bytes[4*x : 4*(x+1)])))
return Raw_Readings
def Decode_6_Compressed_PWM_Duty(HEX_six_bytes):
if(len(HEX_six_bytes) != 6*2):
raise ValueError
Duty_Cycles = []
for x in range(6):
Duty_Cycles.append(Hex_To_Dec(HEX_six_bytes[2*x : 2*x + 2]) / 100.)
return Duty_Cycles
def Decode_Raw_Data(Raw_Data):
Results = []
for sample in Raw_Data:
entry = []
time = Decode_Time_In_Secs(sample[0:(4*2)])
entry.append(time)
entry.append(Decode_8x16_Raw(sample[4 * 2 : (4 + (2*8)) * 2]))
entry.append(Decode_6_Compressed_PWM_Duty(sample[(4 + (2*8)) * 2 : 40 + 6*2]))
Results.append(entry)
return Results
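# Layout implied by the slicing above (an inference from this decoder, not a statement
# of the device protocol): each hex sample string is expected to hold a 4-byte
# little-endian seconds counter in chars [0:8], eight 16-bit ADC words in chars [8:40]
# and six compressed PWM duty bytes in chars [40:52]. Sketch, where `sample` is one
# hypothetical entry of Raw_Data:
#   Decode_Time_In_Secs(sample[0:8])              # seconds counter
#   Decode_8x16_Raw(sample[8:40])                 # eight raw ADC readings
#   Decode_6_Compressed_PWM_Duty(sample[40:52])   # six duty-cycle fractions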
# for x in range(2, 8+2, 2): # Bytes
# y = x+1 # Due to byte order of device -
# entry.append(Hex_To_Dec(sample[(4*y) + 0 : (4*y) + 4]))
# entry.append(Hex_To_Dec(sample[(4*x) + 0 : (4*x) + 4]))
# # entry.append(Convert_ADC_Raw(Hex_To_Dec(sample[(4*y) + 0 : (4*y) + 4]), 16, 5))
# # entry.append(Convert_ADC_Raw(Hex_To_Dec(sample[(4*x) + 0 : (4*x) + 4]), 16, 5))
# Results.append(entry)
# Testing
if __name__ == "__main__":
mbed_USB_info = List_All_Mbed_USB_Devices()
for i in range(5):
print(mbed_USB_info[i])
# serial_port = serial.Serial(port=mbed_USB_info[1][0], baudrate=115200, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
# for x in range(1000):
# raw_data = ADC_8x_16_Raw_Read(serial_port)
# # raw_data = serial_port.read(1)
# data = []
# for x in range(8):
# data.append(Convert_ADC_Raw(raw_data[1][x], 16, 5))
# # print(raw_data)
# print(data, raw_data [0])
Bytes_Per_Sample = 32
Number_Samples = 300
Serial_Baudrate = 230400 # 962100
serial_port = serial.Serial(port=mbed_USB_info[1][0], baudrate=Serial_Baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
data = []
for x in range(Number_Samples):
raw = serial_port.read(Bytes_Per_Sample).hex()
data.append(raw)
# print(data)
# print(data)
Formatted = Decode_Raw_Data(data)
print(Formatted[0], Formatted[Number_Samples - 1])
# print(Results[0:2])
#
# Serial_device = serial.Serial(port="COM4", baudrate=9600, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
# Target_Register = "0x40048024"
# Received_String = Read_K64F_Hex_Register(Serial_device, Target_Register, 4)
# print("READ COMMAND (0x30): Requested Register = %s; Contents of Register(Hex) = %s" % (Target_Register , Received_String[:-2]))
| 47.305556 | 224 | 0.641124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,763 | 0.466138 |
fd57c568a71d49ca653bf0ce40af26241330267b
| 190 |
py
|
Python
|
geosolver/text/generate_rules.py
|
mhrmm/geosolver
|
13ae2972c58d5ba4c4878576f9fba8569cc99629
|
[
"Apache-2.0"
] | 83 |
2015-09-14T13:50:42.000Z
|
2022-03-12T10:24:38.000Z
|
geosolver/text/generate_rules.py
|
nehamjadhav/geosolver
|
13ae2972c58d5ba4c4878576f9fba8569cc99629
|
[
"Apache-2.0"
] | 8 |
2021-07-21T09:55:42.000Z
|
2022-02-15T02:31:47.000Z
|
geosolver/text/generate_rules.py
|
nehamjadhav/geosolver
|
13ae2972c58d5ba4c4878576f9fba8569cc99629
|
[
"Apache-2.0"
] | 33 |
2015-06-16T18:52:43.000Z
|
2021-12-16T08:58:27.000Z
|
from geosolver.ontology.ontology_definitions import FunctionSignature, signatures
from geosolver.text.rule import TagRule
from geosolver.utils.num import is_number
__author__ = 'minjoon'
| 23.75 | 81 | 0.847368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.047368 |
fd5911cef504b719bc6cc6d734809ba588ffa54f
| 1,433 |
py
|
Python
|
fhir_dataframes/store.py
|
Tiro-health/fhir-dataframes
|
57086b7bb385ffbce55f57747903eca7a7f84665
|
[
"MIT"
] | 1 |
2022-02-09T08:16:09.000Z
|
2022-02-09T08:16:09.000Z
|
fhir_dataframes/store.py
|
Tiro-health/fhir-dataframes
|
57086b7bb385ffbce55f57747903eca7a7f84665
|
[
"MIT"
] | null | null | null |
fhir_dataframes/store.py
|
Tiro-health/fhir-dataframes
|
57086b7bb385ffbce55f57747903eca7a7f84665
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from itertools import tee
from typing import Iterable, Optional, Sequence, Union
import pandas as pd
from tiro_fhir import Resource
from fhir_dataframes import code_accessor
class LocalFHIRStore:
def __init__(self, resources: Iterable[Resource] = []):
self._resources: Iterable[Resource] = resources
@property
def resources(self):
self._resources, result = tee(self._resources)
return result
def to_pandas(self, keys: Optional[Sequence[str]] = None):
records = [resource.record(keys) for resource in self.resources]
return pd.DataFrame(records)
def __getitem__(self, key: Union[str, Sequence[str]]):
if isinstance(key, str):
return self.to_pandas([key])
else:
return self.to_pandas(key)
def get_by_resource_type(self, resource_type: str) -> LocalFHIRStore:
return LocalFHIRStore(
resources=filter(lambda r: r.resourceType == resource_type, self.resources)
)
def _repr_html_(self) -> str:
return pd.Series(self.resources).to_frame(name="resource")._repr_html_()
@property
def Observation(self):
return self.get_by_resource_type("Observation")
@property
def Procedure(self):
return self.get_by_resource_type("Procedure")
@property
def Condition(self):
return self.get_by_resource_type("Condition")
| 30.489362 | 87 | 0.686671 | 1,221 | 0.852059 | 0 | 0 | 379 | 0.26448 | 0 | 0 | 45 | 0.031403 |
fd5ed16b310aacd62d38f7ed79f88685cc24b454
| 1,189 |
py
|
Python
|
senlerpy/senler.py
|
tezmen/SenlerPy
|
ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82
|
[
"Apache-2.0"
] | 2 |
2019-03-19T08:46:27.000Z
|
2020-11-12T10:55:59.000Z
|
senlerpy/senler.py
|
tezmen/SenlerPy
|
ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82
|
[
"Apache-2.0"
] | 1 |
2021-03-30T16:55:09.000Z
|
2021-03-30T16:55:09.000Z
|
senlerpy/senler.py
|
tezmen/SenlerPy
|
ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82
|
[
"Apache-2.0"
] | 7 |
2019-03-19T08:47:35.000Z
|
2021-08-24T11:47:41.000Z
|
# -*- coding: utf-8 -*-
import json
import logging
from .request import RequestApi
from .exceptions import ApiError, WrongId, HttpError
logger = logging.getLogger(__name__)
class Senler:
def __init__(self, secret, vk_group_id=None):
self.vk_group = vk_group_id
self.__secret = str(secret).strip()
self._rq = RequestApi(self.secret)
def __error_handler(self, response):
if bool(response['success']):
return response
raise ApiError(response)
def __call__(self, method, **kwargs):
if 'vk_group_id' not in kwargs.keys():
if self.vk_group is None:
raise WrongId('vk_group_id is not specified by any of the methods')
kwargs['vk_group_id'] = self.vk_group
response = self._rq.send(str(method), kwargs)
json_response = {}
try:
json_response = json.loads(response.text)
        except Exception:
logger.debug(f'{response.status_code}:{response.text}')
raise HttpError(f'status_code:{response.status_code}, error with decode json')
return self.__error_handler(json_response)
@property
def secret(self):
return self.__secret
@property
def vk_group(self):
return self.__vk_group
@vk_group.setter
def vk_group(self, value):
self.__vk_group = str(value)
| 25.847826 | 81 | 0.735071 | 1,012 | 0.851135 | 0 | 0 | 181 | 0.152229 | 0 | 0 | 212 | 0.178301 |
fd5f3466a377d682676cf2f35cddaec4567f59df
| 11,354 |
py
|
Python
|
robinhoodbot/main.py
|
bpk9/Robinhood-Stock-Trading-Bot
|
c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4
|
[
"MIT"
] | null | null | null |
robinhoodbot/main.py
|
bpk9/Robinhood-Stock-Trading-Bot
|
c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4
|
[
"MIT"
] | null | null | null |
robinhoodbot/main.py
|
bpk9/Robinhood-Stock-Trading-Bot
|
c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4
|
[
"MIT"
] | null | null | null |
import pyotp
import robin_stocks as r
import pandas as pd
import numpy as np
import ta as ta
from pandas.plotting import register_matplotlib_converters
from ta import *
from misc import *
from tradingstats import *
from config import *
#Log in to Robinhood
#Put your username and password in a config.py file in the same directory (see sample file)
totp = pyotp.TOTP(rh_2fa_code).now()
login = r.login(rh_username,rh_password, totp)
# Division helper that returns 0 instead of raising when the denominator is zero
def safe_division(n, d):
return n / d if d else 0
def get_spy_symbols():
"""
Returns: the symbol for each stock in the S&P 500 as a list of strings
"""
symbols = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')[0]['Symbol']
return list(symbols.values.flatten())
def get_watchlist_symbols():
"""
Returns: the symbol for each stock in your watchlist as a list of strings
"""
my_list_names = []
symbols = []
for name in r.get_all_watchlists(info='name'):
my_list_names.append(name)
for name in my_list_names:
list = r.get_watchlist_by_name(name)
for item in list:
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = r.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_position_creation_date(symbol, holdings_data):
"""Returns the time at which we bought a certain stock in our portfolio
Args:
symbol(str): Symbol of the stock that we are trying to figure out when it was bought
holdings_data(dict): dict returned by r.get_open_stock_positions()
Returns:
A string containing the date and time the stock was bought, or "Not found" otherwise
"""
instrument = r.get_instruments_by_symbols(symbol)
url = instrument[0].get('url')
for dict in holdings_data:
if(dict.get('instrument') == url):
return dict.get('created_at')
return "Not found"
def get_modified_holdings():
""" Retrieves the same dictionary as r.build_holdings, but includes data about
when the stock was purchased, which is useful for the read_trade_history() method
in tradingstats.py
Returns:
the same dict from r.build_holdings, but with an extra key-value pair for each
position you have, which is 'bought_at': (the time the stock was purchased)
"""
holdings = r.build_holdings()
holdings_data = r.get_open_stock_positions()
for symbol, dict in holdings.items():
bought_at = get_position_creation_date(symbol, holdings_data)
bought_at = str(pd.to_datetime(bought_at))
holdings[symbol].update({'bought_at': bought_at})
return holdings
def golden_cross(stockTicker, n1, n2, direction=""):
"""Determine if a golden/death cross has occured for a specified stock in the last X trading days
Args:
stockTicker(str): Symbol of the stock we're querying
n1(int): Specifies the short-term indicator as an X-day moving average.
n2(int): Specifies the long-term indicator as an X-day moving average.
(n1 should be smaller than n2 to produce meaningful results, e.g n1=50, n2=200)
direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwaords cross. Optional, used for printing purposes
Returns:
        True if the short-term EMA is currently above the long-term EMA
        False if the short-term EMA is at or below the long-term EMA
price(float): last listed close price
"""
history = get_historicals(stockTicker)
closingPrices = []
dates = []
for item in history:
closingPrices.append(float(item['close_price']))
dates.append(item['begins_at'])
price = pd.Series(closingPrices)
dates = pd.Series(dates)
dates = pd.to_datetime(dates)
ema1 = ta.trend.EMAIndicator(price, int(n1)).ema_indicator()
ema2 = ta.trend.EMAIndicator(price, int(n2)).ema_indicator()
if plot:
show_plot(price, ema1, ema2, dates, symbol=stockTicker, label1=str(n1)+" day EMA", label2=str(n2)+" day EMA")
return ema1.iat[-1] > ema2.iat[-1], closingPrices[len(closingPrices) - 1]
def get_rsi(symbol, days):
"""Determine the relative strength index for a specified stock in the last X trading days
Args:
symbol(str): Symbol of the stock we're querying
        days(int): Look-back window, in trading days, used to compute the RSI
Returns:
rsi(float): Relative strength index value for a specified stock in the last X trading days
"""
history = get_historicals(symbol)
closingPrices = [ float(item['close_price']) for item in history ]
price = pd.Series(closingPrices)
rsi = ta.momentum.RSIIndicator(close=price, window=int(days), fillna=False).rsi()
return rsi.iat[-1]
def get_macd(symbol):
"""Determine the Moving Average Convergence/Divergence for a specified stock
Args:
symbol(str): Symbol of the stock we're querying
Returns:
        macd(float): Moving Average Convergence/Divergence value for the specified stock
"""
history = get_historicals(symbol)
closingPrices = [ float(item['close_price']) for item in history ]
price = pd.Series(closingPrices)
macd = ta.trend.MACD(price).macd_diff()
return macd.iat[-1]
def get_buy_rating(symbol):
"""Determine the listed investor rating for a specified stock
Args:
symbol(str): Symbol of the stock we're querying
Returns:
rating(int): 0-100 rating of a particular stock
"""
ratings = r.get_ratings(symbol=symbol)['summary']
if ratings:
return ratings['num_buy_ratings'] / (ratings['num_buy_ratings'] + ratings['num_hold_ratings'] + ratings['num_sell_ratings']) * 100
return 0
def sell_holdings(symbol, holdings_data):
""" Place an order to sell all holdings of a stock.
Args:
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from get_modified_holdings() method
"""
shares_owned = int(float(holdings_data[symbol].get("quantity")))
if not debug:
r.order_sell_market(symbol, shares_owned)
print("####### Selling " + str(shares_owned) + " shares of " + symbol + " #######")
def buy_holdings(potential_buys, profile_data, holdings_data):
""" Places orders to buy holdings of stocks. This method will try to order
an appropriate amount of shares such that your holdings of the stock will
    roughly match the average for the rest of your portfolio. If the share
price is too high considering the rest of your holdings and the amount of
buying power in your account, it will not order any shares.
Args:
potential_buys(list): List of strings, the strings are the symbols of stocks we want to buy
        profile_data(dict): dict obtained from r.build_user_profile()
holdings_data(dict): dict obtained from r.build_holdings() or get_modified_holdings() method
"""
cash = float(profile_data.get('cash'))
portfolio_value = float(profile_data.get('equity')) - cash
ideal_position_size = (safe_division(portfolio_value, len(holdings_data))+cash/len(potential_buys))/(2 * len(potential_buys))
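    # Worked example of the sizing formula above (illustrative numbers, not from the
    # original source): with equity-minus-cash of 9000, cash of 1000, 9 current holdings
    # and 2 potential buys, ideal_position_size = (9000/9 + 1000/2) / (2*2) = 375,
    # i.e. roughly $375 allocated per new position before the per-price checks below.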
prices = r.get_latest_price(potential_buys)
for i in range(0, len(potential_buys)):
stock_price = float(prices[i])
if(ideal_position_size < stock_price < ideal_position_size*1.5):
num_shares = int(ideal_position_size*1.5/stock_price)
elif (stock_price < ideal_position_size):
num_shares = int(ideal_position_size/stock_price)
else:
print("####### Tried buying shares of " + potential_buys[i] + ", but not enough buying power to do so#######")
break
print("####### Buying " + str(num_shares) + " shares of " + potential_buys[i] + " #######")
if not debug:
r.order_buy_market(potential_buys[i], num_shares)
def scan_stocks():
""" The main method. Sells stocks in your portfolio if their 50 day moving average crosses
    below the 200 day, and buys stocks from the S&P 500 list if the opposite happens.
###############################################################################################
WARNING: Comment out the sell_holdings and buy_holdings lines if you don't actually want to execute the trade.
###############################################################################################
If you sell a stock, this updates tradehistory.txt with information about the position,
how much you've earned/lost, etc.
"""
if debug:
print("----- DEBUG MODE -----\n")
print("----- Starting scan... -----\n")
register_matplotlib_converters()
spy_symbols = get_spy_symbols()
portfolio_symbols = get_portfolio_symbols()
holdings_data = get_modified_holdings()
potential_buys = []
sells = []
stock_data = []
print("Current Portfolio: " + str(portfolio_symbols) + "\n")
# print("Current Watchlist: " + str(watchlist_symbols) + "\n")
print("----- Scanning portfolio for stocks to sell -----\n")
print()
print("PORTFOLIO")
print("-------------------")
print()
print ("{}\t{}\t\t{}\t{}\t{}\t{}".format('SYMBOL', 'PRICE', 'RSI', 'MACD', 'RATING', 'EMA'))
print()
for symbol in portfolio_symbols:
cross, price = golden_cross(symbol, n1=50, n2=200, direction="below")
data = {'symbol': symbol, 'price': price, 'cross': cross, 'rsi': get_rsi(symbol=symbol, days=14), 'macd': get_macd(symbol=symbol), 'buy_rating': get_buy_rating(symbol=symbol)}
stock_data.append(data)
print ("{}\t${:.2f}\t\t{}\t{}\t{}\t{}".format(data['symbol'], data['price'], rsi_to_str(data['rsi']), macd_to_str(data['macd']), rating_to_str(data['buy_rating']), cross_to_str(data['cross'])))
if(cross == False):
sell_holdings(symbol, holdings_data)
sells.append(symbol)
profile_data = r.build_user_profile()
print("\n----- Scanning S&P 500 for stocks to buy -----\n")
for symbol in spy_symbols:
if(symbol not in portfolio_symbols):
cross, price = golden_cross(symbol, n1=50, n2=200, direction="above")
stock_data.append({'symbol': symbol, 'price': price, 'cross': cross, 'rsi': get_rsi(symbol=symbol, days=14), 'macd': get_macd(symbol=symbol), 'buy_rating': get_buy_rating(symbol=symbol)})
if(cross == True):
potential_buys.append(symbol)
if(len(potential_buys) > 0):
buy_holdings(potential_buys, profile_data, holdings_data)
if(len(sells) > 0):
update_trade_history(sells, holdings_data, "tradehistory.txt")
print("----- Scan over -----\n")
print_table(stock_data)
if debug:
print("----- DEBUG MODE -----\n")
#execute the scan
scan_stocks()
| 42.365672 | 201 | 0.656068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,418 | 0.477189 |
fd60adf005e921981d0393064770bc769120bb9d
| 3,401 |
py
|
Python
|
slivka/server/forms/file_proxy.py
|
warownia1/Slivca
|
5491afec63c8cd41d6f1389a5dd0ba9877b888a1
|
[
"Apache-2.0"
] | 5 |
2016-09-01T15:30:46.000Z
|
2019-07-15T12:26:46.000Z
|
slivka/server/forms/file_proxy.py
|
warownia1/Slivca
|
5491afec63c8cd41d6f1389a5dd0ba9877b888a1
|
[
"Apache-2.0"
] | 75 |
2016-08-31T11:32:49.000Z
|
2021-05-12T14:33:17.000Z
|
slivka/server/forms/file_proxy.py
|
warownia1/Slivca
|
5491afec63c8cd41d6f1389a5dd0ba9877b888a1
|
[
"Apache-2.0"
] | 3 |
2017-06-01T10:21:04.000Z
|
2020-06-12T10:32:49.000Z
|
import io
import os
import shutil
from base64 import urlsafe_b64decode
from bson import ObjectId
from slivka.db.documents import UploadedFile, JobRequest
class FileProxy:
_file: io.IOBase = None
closed = property(lambda self: self._file is None or self.file.closed)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
read = property(lambda self: self.file.read)
readable = property(lambda self: self.file.readable)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
seekable = property(lambda self: self.file.seekable)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
writable = property(lambda self: self.file.writable)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
@staticmethod
def from_id(file_id, database):
tokens = file_id.split('/', 1)
if len(tokens) == 1:
# user uploaded file
_id = ObjectId(urlsafe_b64decode(file_id))
uf = UploadedFile.find_one(database, _id=_id)
if uf is None: return None
return FileProxy(path=uf.path)
else:
# job output file
job_uuid, filename = tokens
request = JobRequest.find_one(database, id=job_uuid)
if request is not None:
path = os.path.join(request.job.cwd, filename)
if os.path.isfile(path):
return FileProxy(path=path)
return None
def __init__(self, file=None, path=None):
self.file = file
self.path = path
def __iter__(self):
return iter(self.file)
def __enter__(self):
return self
def __exit__(self, _exc_type, _exc_value, _tb):
self.close()
def reopen(self):
if not self.closed:
self.seek(0)
elif self.path and os.path.exists(self.path):
self.file = open(self.path, 'rb')
else:
raise OSError("can't open the file.")
return self
def _get_file(self):
if self._file is None:
if self.path is None:
raise ValueError("file not set")
self.reopen()
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def save_as(self, path, fp=None):
"""
Saves the file at the specified location. If ``fp`` is specified it
will be used to write the content. Otherwise, a new file at ``path``
will be created.
:param path: Path to the destination file
:param fp: File-like object at the location specified by ``path``
"""
self.reopen()
path = os.path.realpath(path)
if fp:
shutil.copyfileobj(self.file, fp)
else:
with open(path, 'wb') as dst:
shutil.copyfileobj(self.file, dst)
self.path = path
def close(self):
if self._file is not None:
self._file.close()
| 31.490741 | 76 | 0.612173 | 3,242 | 0.953249 | 0 | 0 | 706 | 0.207586 | 0 | 0 | 402 | 0.118201 |
fd618c3a159e9f99d7c6ca6d044db4a500817e13
| 1,160 |
py
|
Python
|
debug_toolbar/panels/profiling.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 36 |
2021-07-22T08:11:31.000Z
|
2022-01-31T13:09:26.000Z
|
debug_toolbar/panels/profiling.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 10 |
2021-07-21T19:39:38.000Z
|
2022-02-26T15:35:35.000Z
|
debug_toolbar/panels/profiling.py
|
chrismaille/fastapi-debug-toolbar
|
76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae
|
[
"BSD-3-Clause"
] | 2 |
2021-07-28T09:55:13.000Z
|
2022-02-18T11:29:25.000Z
|
import typing as t
from fastapi import Request, Response
from pyinstrument import Profiler
from starlette.concurrency import run_in_threadpool
from debug_toolbar.panels import Panel
from debug_toolbar.types import Stats
from debug_toolbar.utils import is_coroutine, matched_endpoint
class ProfilingPanel(Panel):
title = "Profiling"
template = "panels/profiling.html"
async def process_request(self, request: Request) -> Response:
self.profiler = Profiler(**self.toolbar.settings.PROFILER_OPTIONS)
endpoint = matched_endpoint(request)
if endpoint is None:
return await super().process_request(request)
is_async = is_coroutine(endpoint)
async def call(func: t.Callable) -> None:
await run_in_threadpool(func) if not is_async else func()
await call(self.profiler.start)
try:
response = await super().process_request(request)
finally:
await call(self.profiler.stop)
return response
async def generate_stats(self, request: Request, response: Response) -> Stats:
return {"content": self.profiler.output_html()}
| 30.526316 | 82 | 0.70431 | 872 | 0.751724 | 0 | 0 | 0 | 0 | 769 | 0.662931 | 43 | 0.037069 |
fd63367d2463bae216c32c0f3162ba07be04c060
| 3,003 |
py
|
Python
|
test/system/auto/simple/compaction.py
|
marciosilva/accumulo
|
70404cbd1e0a2d2b7c2235009e158979abeef35f
|
[
"Apache-2.0"
] | 3 |
2021-11-11T05:18:23.000Z
|
2021-11-11T05:18:43.000Z
|
test/system/auto/simple/compaction.py
|
jatrost/accumulo
|
6be40f2f3711aaa7d0b68b5b6852b79304af3cff
|
[
"Apache-2.0"
] | 1 |
2021-06-22T09:52:37.000Z
|
2021-06-22T09:52:37.000Z
|
test/system/auto/simple/compaction.py
|
isabella232/accumulo-1
|
70404cbd1e0a2d2b7c2235009e158979abeef35f
|
[
"Apache-2.0"
] | 1 |
2021-06-22T09:33:38.000Z
|
2021-06-22T09:33:38.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
from simple.bulk import SimpleBulkTest
N = 100000
COUNT = 5
log = logging.getLogger('test.auto')
class CompactionTest(SimpleBulkTest):
"Start a clean accumulo, bulk import a lot of map files, read while a multi-pass compaction is happening"
order = 26
tableSettings = SimpleBulkTest.tableSettings.copy()
tableSettings['test_ingest'] = {
'table.compaction.major.ratio': 1.0
}
settings = SimpleBulkTest.settings.copy()
settings.update({
'tserver.compaction.major.files.open.max':4,
'tserver.compaction.major.delay': 1,
'tserver.compaction.major.concurrent.max':1,
})
def createRFiles(self, host):
handle = self.runClassOn(
self.masterHost(),
'org.apache.accumulo.server.test.CreateRFiles',
"testrf 4 0 500000 59".split())
out, err = handle.communicate()
self.assert_(handle.returncode == 0)
def runTest(self):
# initialize the database
self.createTable('test_ingest')
self.execute(self.masterHost(), 'hadoop dfs -rmr /testrf'.split())
self.execute(self.masterHost(), 'hadoop dfs -rmr /testrfFail'.split())
# insert some data
self.createRFiles(self.masterHost())
self.bulkLoad(self.masterHost(), '/testrf')
out, err, code = self.shell(self.masterHost(), "table !METADATA\nscan -b ! -c ~tab,file\n")
self.assert_(code == 0)
beforeCount = len(out.split('\n'))
log.info("Verifying Ingestion")
for c in range(5):
handles = []
for i in range(COUNT):
handles.append(self.verify(self.hosts[i%len(self.hosts)], N, i * N))
for h in handles:
out, err = h.communicate()
self.assert_(h.returncode == 0)
out, err, code = self.shell(self.masterHost(), "table !METADATA\nscan -b ! -c ~tab,file\n")
self.assert_(code == 0)
afterCount = len(out.split('\n'))
self.assert_(afterCount < beforeCount)
self.shutdown_accumulo()
def suite():
result = unittest.TestSuite()
result.addTest(CompactionTest())
return result
| 33.741573 | 109 | 0.656011 | 1,975 | 0.657676 | 0 | 0 | 0 | 0 | 0 | 0 | 1,342 | 0.446886 |
fd651bee99ddf05837b752023ba0b7975d179d63
| 660 |
py
|
Python
|
src/featurehub/problems/__init__.py
|
vishalbelsare/FeatureHub
|
dca517338b6e1359faa47ba309f05691cb96e8f8
|
[
"MIT"
] | 85 |
2017-09-11T22:37:34.000Z
|
2022-02-26T09:07:05.000Z
|
src/featurehub/problems/__init__.py
|
HDI-Project/FeatureFactory
|
dca517338b6e1359faa47ba309f05691cb96e8f8
|
[
"MIT"
] | 5 |
2018-08-08T15:34:36.000Z
|
2018-11-15T04:52:10.000Z
|
src/featurehub/problems/__init__.py
|
HDI-Project/FeatureFactory
|
dca517338b6e1359faa47ba309f05691cb96e8f8
|
[
"MIT"
] | 18 |
2017-11-01T04:14:16.000Z
|
2021-09-27T00:53:32.000Z
|
import imp
import sys
from sqlalchemy.exc import ProgrammingError
from featurehub.user.session import Session
from featurehub.admin.admin import Commands
try:
for _problem in Commands().get_problems():
# Create a session for each problem and make it importable
_commands = Session(_problem)
_module = imp.new_module(_problem)
_module.__dict__['commands'] = _commands
sys.modules['featurehub.problems.' + _problem] = _module
except ProgrammingError:
print("Competition not initialized properly. User commands "
"unavailable. Please contact the competition administrator.",
file=sys.stderr)
| 33 | 71 | 0.725758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.309091 |
b5b97c67425c6b42d928076e5a8d8cb8fc8a23c8
| 12,107 |
py
|
Python
|
python/lexical_analysis.py
|
Compiler-Construction-Uni-Freiburg/lecture-notes-2021
|
56300e6649e32f0594bbbd046a2e19351c57dd0c
|
[
"BSD-3-Clause"
] | 1 |
2022-01-05T07:11:01.000Z
|
2022-01-05T07:11:01.000Z
|
python/lexical_analysis.py
|
Compiler-Construction-Uni-Freiburg/lecture-notes-2021
|
56300e6649e32f0594bbbd046a2e19351c57dd0c
|
[
"BSD-3-Clause"
] | null | null | null |
python/lexical_analysis.py
|
Compiler-Construction-Uni-Freiburg/lecture-notes-2021
|
56300e6649e32f0594bbbd046a2e19351c57dd0c
|
[
"BSD-3-Clause"
] | null | null | null |
from dataclasses import dataclass
from functools import reduce
from typing import Callable, Iterable, Iterator
'''
The first phase of a compiler is called `lexical analysis` implemented by a `scanner` or `lexer`.
It breaks a program into a sequence of `lexemes`:
meaningful substrings of the input.
It also transforms lexemes into `tokens`:
symbolic representations of lexemes with some internalized information.
The classic, state-of-the-art method to specify lexemes is by regular expressions.
'''
'''
1. Representation of regular expressions.
'''
@dataclass
class Regexp:
'abstract class for AST of regular expressions'
def is_null(self):
return False
@dataclass
class Null (Regexp):
'empty set: {}'
def is_null(self):
return True
@dataclass
class Epsilon (Regexp):
'empty word: { "" }'
@dataclass
class Symbol (Regexp):
'single symbol: { "a" }'
sym: str
@dataclass
class Concat(Regexp):
'concatenation: r1.r2'
left: Regexp
right: Regexp
@dataclass
class Alternative(Regexp):
'alternative: r1|r2'
left: Regexp
right: Regexp
@dataclass
class Repeat(Regexp):
'Kleene star: r*'
body: Regexp
## smart constructors for regular expressions
## goal: construct regexps in "normal form"
## * avoid Null() subexpressions
## * Epsilon() subexpressions as much as possible
## * nest concatenation and alternative to the right
null = Null()
epsilon = Epsilon()
symbol = Symbol
def concat(r1, r2):
match (r1, r2):
case (Null(), _) | (_, Null()):
return null
case (Epsilon(), _):
return r2
case (_, Epsilon()):
return r1
case (Concat(r11, r12), _):
return Concat(r11, concat(r12, r2))
case _:
return Concat(r1, r2)
def alternative(r1, r2):
match (r1, r2):
case (Null(), _):
return r2
case (_, Null()):
return r1
case (Alternative(r11, r12), _):
return Alternative(r11, alternative(r12, r2))
case _:
return Alternative(r1, r2)
def repeat(r: Regexp) -> Regexp:
match r:
case Null() | Epsilon():
return epsilon
case Repeat(r1): # r** == r*
return r
case _:
return Repeat(r)
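## Quick illustrations of the normalisation performed by the smart constructors
## (added for clarity):
##   concat(epsilon, symbol('a'))   -> Symbol('a')         (epsilon is absorbed)
##   alternative(null, symbol('a')) -> Symbol('a')         (the empty set is dropped)
##   repeat(repeat(symbol('a')))    -> Repeat(Symbol('a')) (r** collapses to r*)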
## utilities to construct regular expressions
def optional(r : Regexp) -> Regexp:
'construct r?'
return alternative(r, epsilon)
def repeat_one(r : Regexp) -> Regexp:
'construct r+'
return concat(r, repeat(r))
def concat_list(rs : Iterable[Regexp]) -> Regexp:
return reduce(lambda out, r: concat(out, r), rs, epsilon)
def alternative_list(rs : Iterable[Regexp]) -> Regexp:
return reduce(lambda out, r: alternative(out, r), rs, null)
## a few examples for regular expressions (taken from JavaScript definition)
'''
⟨digit⟩ ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
⟨hexdigit⟩ ::= ⟨digit⟩ | A | B | C | D | E | F | a | b | c | d | e | f
⟨hexprefix⟩ ::= 0x | 0X
⟨sign⟩ ::= ⟨empty⟩ | -
⟨empty⟩ ::=
⟨integer-literal⟩ ::= ⟨sign⟩ ⟨digit⟩+ | ⟨sign⟩ ⟨hexprefix⟩ ⟨hexdigit⟩+
⟨letter⟩ ::= A | B | C | ...| Z | a | b | c | ...| z
⟨identifier-start⟩ ::= ⟨letter⟩ | $ | _
⟨identifier-part⟩ ::= ⟨identifier-start⟩ | ⟨digit⟩
⟨identifier⟩ ::= ⟨identifier-start⟩ ⟨identifier-part⟩*
'''
def class_regexp(s: str) -> Regexp:
'returns a regexp for the alternative of all characters in s'
return alternative_list(map(symbol, s))
def string_regexp(s: str) -> Regexp:
'returns a regexp for the concatenation of all characters in s'
return concat_list(map(symbol, s))
def char_range_regexp(c1: str, c2: str) -> Regexp:
return alternative_list(map(symbol, map(chr, range(ord(c1), ord(c2)+1))))
digit = class_regexp("0123456789")
hexdigit = alternative(digit, class_regexp("ABCDEFabcdef"))
hexprefix = alternative(string_regexp("0x"), string_regexp("0X"))
sign = optional(symbol('-'))
integer_literal = concat(sign, repeat_one(digit))
integer_literal_js = alternative( concat(sign, repeat_one(digit)),
concat_list([sign, hexprefix, repeat_one(hexdigit)]))
lc_letter = alternative_list(map(symbol, map(chr, range(ord('a'), ord('z')+1))))
uc_letter = alternative_list(map(symbol, map(chr, range(ord('A'), ord('Z')+1))))
letter = alternative(lc_letter, uc_letter)
identifier_start = alternative_list([letter, symbol('$'), symbol('_')])
identifier_part = alternative(identifier_start, digit)
identifier = concat(identifier_start, repeat(identifier_part))
blank_characters = "\t "
line_end_characters = "\n\r"
white_space = repeat_one(class_regexp(blank_characters + line_end_characters))
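## Sanity checks implied by the definitions above (added for illustration; they use
## the `matches` function defined in section 2 below):
##   matches(identifier, "$foo42") -> True  (identifiers may start with a letter, '$' or '_')
##   matches(identifier, "42foo")  -> False (a digit cannot start an identifier)
##   matches(integer_literal, "-42") -> True; matches(integer_literal, "") -> False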
'''
2. Executing regular expressions
The standard method to 'execute' regular expressions is to transform them into finite automata.
Here we use a different method to execute them directly using `derivatives`.
This method uses regular expressions themselves as states of an automaton without constructing it.
We consider a regexp a final state if it accepts the empty word "".
This condition can be checked by a simple function on the regexp.
'''
def accepts_empty(r : Regexp) -> bool:
'check if r accepts the empty word'
match r:
case Null() | Symbol(_):
return False
case Epsilon() | Repeat(_):
return True
case Concat(r1, r2):
return accepts_empty(r1) and accepts_empty(r2)
case Alternative(r1, r2):
return accepts_empty(r1) or accepts_empty(r2)
'''
The transition function of a (deterministic) finite automaton maps
state `r0` and symbol `s` to the next state, say, `r1`.
If the state `r0` recognizes any words `w` that start with `s` (w[0] == s),
then state `r1` recognizes all those words `w` with the first letter removed (w[1:]).
This construction is called the `derivative` of a language by symbol `s`:
derivative(L, s) = { w[1:] | w in L and w[0] == s }
If L is the language recognized by regular expression `r0`,
then we can effectively compute a regular expression for derivative(L, s)!
As follows:
'''
def after_symbol(s : str, r : Regexp) -> Regexp:
'produces regexp after r consumes symbol s'
match r:
case Null() | Epsilon():
return null
case Symbol(s_expected):
return epsilon if s == s_expected else null
case Alternative(r1, r2):
return alternative(after_symbol(s, r1), after_symbol(s, r2))
case Concat(r1, r2):
return alternative(concat(after_symbol(s, r1), r2),
after_symbol(s, r2) if accepts_empty(r1) else null)
case Repeat(r1):
return concat(after_symbol(s, r1), Repeat(r1))
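# A small worked derivative (added for illustration): consuming 'a' from a.b leaves b,
#   after_symbol('a', concat(symbol('a'), symbol('b'))) == Symbol('b')
# because the leftover Epsilon() is folded away by the smart constructor `concat`.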
## matching against a regular expression
def matches(r : Regexp, ss: str) -> bool:
i = 0
while i < len(ss):
r = after_symbol(ss[i], r)
if r.is_null():
return False
i += 1
# reached end of string
return accepts_empty(r)
########################################################################
'''
3. Lexer descriptions
A lexer (scanner) is different from a finite automaton in several aspects.
1. The lexer must classify the next lexeme from a choice of several regular expressions.
   It cannot just match a single regexp; it has to keep track of, and manage, matches for
   several regexps at the same time.
2. The lexer follows the `maximum munch` rule, which says that the next lexeme is
the longest prefix that matches one of the regular expressions.
3. Once a lexeme is identified, the lexer must turn it into a token and attribute.
Re maximum munch consider this input:
ifoundsalvationinapubliclavatory
Suppose that `if` is a keyword, why should the lexer return <identifier> for this input?
Similarly:
returnSegment
would count as an identifier even though starting with the keyword `return`.
These requirements motivate the following definitions.
A lex_action
* takes some (s : str, i : int position in s, j : int pos in s)
* consumes the lexeme sitting at s[i:j]
* returns (token for s[i:j], some k >= j)
'''
class Token: pass # abstract class of tokens
Position = int # input position
lex_result = tuple[Token, Position]
lex_action = Callable[[str, Position, Position], lex_result]
# a lexer rule attaches a lex_action to a regular expression
@dataclass
class Lex_rule:
re : Regexp
action: lex_action
# a lexer tries to match its input to a list of lex rules
Lex_state = list[Lex_rule]
# reading a symbol advances the regular expression of each lex rule
def next_state(state: Lex_state, ss: str, i: int):
return list(filter(lambda rule: not (rule.re.is_null()),
[Lex_rule(after_symbol(ss[i], rule.re), rule.action)
for rule in state]))
def initial_state(rules: list[Lex_rule]) -> Lex_state:
return rules
def matched_rules(state: Lex_state) -> Lex_state:
return [rule for rule in state if accepts_empty(rule.re)]
def is_stuck(state: Lex_state) -> bool:
return not state
#####################################################################
class ScanError (Exception): pass
@dataclass
class Match:
action: lex_action
final : Position
@dataclass
class Scan:
spec: Lex_state
def scan_one(self) -> Callable[[str, Position], lex_result]:
return lambda ss, i: self.scan_one_token(ss, i)
def scan_one_token(self, ss: str, i: Position) -> lex_result:
state = self.spec
j = i
last_match = None
while j < len(ss) and not is_stuck(state):
state = next_state(state, ss, j); j += 1
all_matches = matched_rules(state)
if all_matches:
this_match = all_matches[0]
last_match = Match(this_match.action, j)
match last_match:
case None:
raise ScanError("no lexeme found:", ss[i:])
case Match(action, final):
return action(ss, i, final)
raise ScanError("internal error: last_match=", last_match)
def make_scanner(scan_one: Callable[[str, Position], lex_result], ss: str) -> Iterator[Token]:
i = 0
while i < len(ss):
(token, i) = scan_one(ss, i)
yield (token)
## example: excerpt from JavaScript scanner
escaped_char = concat(symbol('\\'), alternative(symbol('\\'), symbol('"')))
content_char = alternative_list([symbol(chr(a))
for a in range(ord(' '), 128)
if a not in [ord('\\'), ord('"')]])
string_literal = concat_list([symbol('"'), repeat(alternative(escaped_char, content_char)), symbol('"')])
@dataclass
class Return(Token): pass
@dataclass
class Intlit(Token): value: int
@dataclass
class Ident(Token): name: str
@dataclass
class Lparen(Token): pass
@dataclass
class Rparen(Token): pass
@dataclass
class Slash(Token): pass
@dataclass
class Strlit(Token): value: str
string_spec: Lex_state = [
Lex_rule(escaped_char, lambda ss, i, j: (ss[i+1], j)),
Lex_rule(content_char, lambda ss, i, j: (ss[i], j))
]
string_token = Scan(string_spec).scan_one()
def strlit(ss: str) -> Strlit:
"use subsidiary scanner to transform string content"
return Strlit("".join(make_scanner(string_token, ss)))
js_spec: Lex_state = [
Lex_rule(string_regexp("return"), lambda ss, i, j: (Return(), j)),
Lex_rule(integer_literal, lambda ss, i, j: (Intlit(int(ss[i:j])), j)),
Lex_rule(identifier, lambda ss, i, j: (Ident(ss[i:j]), j)),
Lex_rule(white_space, lambda ss, i, j: js_token(ss, j)),
Lex_rule(symbol("("), lambda ss, i, j: (Lparen(), j)),
Lex_rule(symbol(")"), lambda ss, i, j: (Rparen(), j)),
Lex_rule(symbol("/"), lambda ss, i, j: (Slash(), j)),
Lex_rule(string_literal, lambda ss, i, j: (strlit(ss[i+1:j-1]), j))
]
js_token = Scan(js_spec).scan_one()
def example1():
return js_token(" 42...", 0)
def example2():
sc = make_scanner(js_token, "return Segment (pi / 2)")
for ta in sc:
print(ta)
def example3():
sc = make_scanner(js_token, 'return "foobar\\"..."')
for ta in sc:
print(ta)
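# Added illustration of the `maximum munch` rule discussed above: "returnSegment" is
# scanned as one identifier rather than the keyword `return` followed by "Segment",
# because the scanner keeps extending the lexeme while any rule still matches and
# commits to the longest match found.
def example4():
    sc = make_scanner(js_token, "returnSegment (42)")
    for ta in sc:
        print(ta)  # Ident(name='returnSegment'), Lparen(), Intlit(value=42), Rparen()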
| 32.810298 | 105 | 0.641943 | 1,737 | 0.142435 | 186 | 0.015252 | 1,848 | 0.151538 | 0 | 0 | 4,534 | 0.371792 |
b5ba7ba10498502b304fe0e8be303cfbec8a9050
| 179 |
py
|
Python
|
tapiriik/web/views/dashboard.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 1,445 |
2015-01-01T21:43:31.000Z
|
2022-03-17T13:40:23.000Z
|
tapiriik/web/views/dashboard.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 441 |
2015-01-02T03:37:49.000Z
|
2022-03-31T18:18:03.000Z
|
tapiriik/web/views/dashboard.py
|
prohfesor/tapiriik
|
0c476f8bb6b3d51674f0117b054777405ff2ee0d
|
[
"Apache-2.0"
] | 333 |
2015-01-06T12:14:15.000Z
|
2022-03-27T19:58:48.000Z
|
from django.shortcuts import render
from django.views.decorators.csrf import ensure_csrf_cookie
@ensure_csrf_cookie
def dashboard(req):
return render(req, "dashboard.html")
| 22.375 | 59 | 0.810056 | 0 | 0 | 0 | 0 | 80 | 0.446927 | 0 | 0 | 16 | 0.089385 |
b5bab08cb20bbb7d8d1adf2e537dc3cd96869fbf
| 2,830 |
py
|
Python
|
rcommander/src/rcommander/trigger_tool.py
|
rummanwaqar/rcommander-core
|
7106d5868db76c47dea6ad11118a54351a8bd390
|
[
"BSD-3-Clause"
] | 4 |
2015-04-08T09:57:43.000Z
|
2021-08-12T01:44:37.000Z
|
rcommander/src/rcommander/trigger_tool.py
|
jhu-lcsr-forks/rcommander-core
|
1a0350e9b93687eff6a4407f72b5250be5f56919
|
[
"BSD-3-Clause"
] | 1 |
2015-03-12T09:10:27.000Z
|
2015-03-12T09:10:27.000Z
|
rcommander/src/rcommander/trigger_tool.py
|
jhu-lcsr-forks/rcommander-core
|
1a0350e9b93687eff6a4407f72b5250be5f56919
|
[
"BSD-3-Clause"
] | 3 |
2015-03-12T10:59:17.000Z
|
2021-06-21T02:13:57.000Z
|
import tool_utils as tu
import PyQt4.QtGui as qtg
import PyQt4.QtCore as qtc
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import smach
import rospy
from msg import Trigger
TRIGGER_TOPIC = 'trigger'
class TriggerTool(tu.ToolBase):
def __init__(self, rcommander):
tu.ToolBase.__init__(self, rcommander, 'wait_trigger', 'Trigger', TriggerState)
self.trigger_pub = rospy.Publisher(TRIGGER_TOPIC, Trigger)
def fill_property_box(self, pbox):
formlayout = pbox.layout()
self.time_out_box = qtg.QDoubleSpinBox(pbox)
self.time_out_box.setMinimum(1.)
self.time_out_box.setMaximum(1000.)
self.time_out_box.setSingleStep(1.)
self.trigger_button = qtg.QPushButton(pbox)
self.trigger_button.setText('Send Trigger')
self.rcommander.connect(self.trigger_button, qtc.SIGNAL('clicked()'), self.send_trigger_cb)
formlayout.addRow('&Time out', self.time_out_box)
formlayout.addRow(self.trigger_button)
self.reset()
pbox.update()
def send_trigger_cb(self):
self.trigger_pub.publish(Trigger('a trigger!'))
rospy.sleep(.5)
self.trigger_pub.publish(Trigger('a trigger!'))
def new_node(self, name=None):
if name == None:
nname = self.name + str(self.counter)
else:
nname = name
return TriggerState(nname, self.time_out_box.value())
def set_node_properties(self, node):
self.time_out_box.setValue(node.time_out)
def reset(self):
self.time_out_box.setValue(10.)
class TriggerState(tu.StateBase):
def __init__(self, name, time_out):
tu.StateBase.__init__(self, name)
self.time_out = time_out
def get_smach_state(self):
return TriggerStateSmach(time_out = self.time_out)
class TriggerStateSmach(smach.State):
##
#@param message if self.message is None we will wait for a message, else use provided message
#@param time_out if we have to wait for a message specify how long to wait before timing out
def __init__(self, time_out):
smach.State.__init__(self,
outcomes = ['succeeded', 'preempted', 'timed_out'],
input_keys = [], output_keys = [])
self.time_out = time_out
def execute(self, userdata):
msg = None
t_start = rospy.get_time()
while msg == None:
try:
msg = rospy.wait_for_message(TRIGGER_TOPIC, Trigger, .1)
            except rospy.ROSException:
pass
if self.preempt_requested():
self.service_preempt()
return 'preempted'
if (self.time_out != None) and ((rospy.get_time() - t_start) > self.time_out):
return 'timed_out'
return 'succeeded'
| 29.479167 | 99 | 0.639929 | 2,613 | 0.923322 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.121908 |
b5bace535ed77ddf8b3b03e2ed93c9e75ae9c3a6
| 1,488 |
py
|
Python
|
tests/helpers/fake_tunnel.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 5 |
2018-12-06T15:40:34.000Z
|
2019-06-19T11:22:58.000Z
|
tests/helpers/fake_tunnel.py
|
garstka/idact
|
b9c8405c94db362c4a51d6bfdf418b14f06f0da1
|
[
"MIT"
] | 9 |
2018-12-06T16:35:26.000Z
|
2019-04-28T19:01:40.000Z
|
tests/helpers/fake_tunnel.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 2 |
2019-04-28T19:18:58.000Z
|
2019-06-17T06:56:28.000Z
|
from contextlib import contextmanager
from idact import ClusterConfig
from idact.detail.nodes.node_impl import NodeImpl
from idact.detail.tunnel.tunnel_internal import TunnelInternal
class FakeTunnel(TunnelInternal):
"""Does nothing besides holding the remote port.
Local port is ignored.
:param there: Remote port.
"""
def __init__(self,
here: int,
there: int):
assert here is not None
assert here != 0
self._here = here
self._there = there
@property
def here(self) -> int:
return self._here
@property
def there(self) -> int:
return self._there
def close(self):
pass
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __str__(self):
return "FakeTunnel(here={here},there={there})".format(
here=self._here,
there=self._there)
def __repr__(self):
return str(self)
@property
def config(self) -> ClusterConfig:
raise NotImplementedError()
@contextmanager
def use_fake_tunnel():
"""A context manager that replaces :meth:`.NodeImpl.tunnel` with
a method returning :class:`.FakeTunnel`."""
def fake_tunnel(_, there: int, here: int):
return FakeTunnel(here=here, there=there)
saved_tunnel = NodeImpl.tunnel
try:
NodeImpl.tunnel = fake_tunnel
yield
finally:
NodeImpl.tunnel = saved_tunnel
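# Usage sketch (added for illustration; `some_node` is a hypothetical NodeImpl obtained
# elsewhere in the test suite):
#
#   with use_fake_tunnel():
#       tunnel = some_node.tunnel(there=8080, here=9000)
#       assert tunnel.here == 9000 and tunnel.there == 8080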
| 22.892308 | 68 | 0.625672 | 892 | 0.599462 | 390 | 0.262097 | 616 | 0.413978 | 0 | 0 | 279 | 0.1875 |
b5bbdd0d4bc19356fb3ff4442955d7c4c889b2e9
| 3,226 |
py
|
Python
|
app/airtable/base_school_db/educators_schools.py
|
WildflowerSchools/wf-airtable-api
|
963021e5108462d33efa222fedb00890e1788ad6
|
[
"MIT"
] | null | null | null |
app/airtable/base_school_db/educators_schools.py
|
WildflowerSchools/wf-airtable-api
|
963021e5108462d33efa222fedb00890e1788ad6
|
[
"MIT"
] | null | null | null |
app/airtable/base_school_db/educators_schools.py
|
WildflowerSchools/wf-airtable-api
|
963021e5108462d33efa222fedb00890e1788ad6
|
[
"MIT"
] | null | null | null |
from datetime import date
from typing import Optional, Union
from pydantic import BaseModel, Field, validator
from . import educators as educators_models
from . import schools as schools_models
from app.airtable.response import AirtableResponse
from app.airtable.validators import get_first_or_default_none
class CreateUpdateAirtableEducatorsSchoolsFields(BaseModel):
educator: Optional[list[str]] = Field(alias="Educator")
school: Optional[list[str]] = Field(alias="School")
roles: Optional[list[str]] = Field(alias="Roles (staging)")
currently_active: Optional[bool] = Field(alias="Currently Active")
start_date: Optional[date] = Field(alias="Start Date")
end_date: Optional[date] = Field(alias="End Date")
mark_for_deletion: Optional[bool] = Field(alias="Mark for deletion")
class Config:
allow_population_by_field_name = True
class AirtableFindEducatorsSchoolsFields(CreateUpdateAirtableEducatorsSchoolsFields):
educator_id: Optional[list[str]] = Field(alias="Educator Record ID")
school_id: Optional[list[str]] = Field(alias="School Record ID")
class AirtableEducatorsSchoolsFields(CreateUpdateAirtableEducatorsSchoolsFields):
educator: Optional[Union[str, object]] = Field(alias="Educator")
school: Optional[Union[str, object]] = Field(alias="School")
_get_first_or_default_none = validator("educator", "school", pre=True, allow_reuse=True)(get_first_or_default_none)
def load_educator_relationship(self):
from ..client import AirtableClient
airtable_client = AirtableClient()
if self.educator is None:
return None
_record = None
_id = None
if isinstance(self.educator, educators_models.AirtableEducatorResponse):
_record = self.educator
elif isinstance(self.educator, str):
_id = self.educator
if _record is None:
airtable_educator = airtable_client.get_educator_by_id(educator_id=_id, load_relationships=False)
_record = airtable_educator
self.educator = _record
return _record
def load_school_relationship(self):
from ..client import AirtableClient
airtable_client = AirtableClient()
if self.school is None:
return None
_record = None
_id = None
if isinstance(self.school, schools_models.AirtableSchoolResponse):
_record = self.school
elif isinstance(self.school, str):
_id = self.school
if _record is None:
airtable_school = airtable_client.get_school_by_id(school_id=_id)
_record = airtable_school
self.school = _record
return _record
def load_relationships(self):
self.load_educator_relationship()
self.load_school_relationship()
class AirtableEducatorsSchoolsResponse(AirtableResponse):
fields: AirtableEducatorsSchoolsFields
def load_relationships(self):
self.fields.load_relationships()
class ListAirtableEducatorsSchoolsResponse(BaseModel):
__root__: list[AirtableEducatorsSchoolsResponse]
def load_relationships(self):
for r in self.__root__:
r.load_relationships()
| 32.26 | 119 | 0.712027 | 2,901 | 0.899256 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.052077 |
b5bc0b82b561c3ccd0c214272db1e77e19243f08
| 4,003 |
py
|
Python
|
rad/rest/client/cli/zpool/cmd_zpool_list.py
|
guillermomolina/rad-rest-client
|
c22528764bdf9dddc5ff7d269d7465d34878a7e3
|
[
"Apache-2.0"
] | 1 |
2021-09-17T13:40:13.000Z
|
2021-09-17T13:40:13.000Z
|
rad/rest/client/cli/zpool/cmd_zpool_list.py
|
guillermomolina/rad-rest-client
|
c22528764bdf9dddc5ff7d269d7465d34878a7e3
|
[
"Apache-2.0"
] | null | null | null |
rad/rest/client/cli/zpool/cmd_zpool_list.py
|
guillermomolina/rad-rest-client
|
c22528764bdf9dddc5ff7d269d7465d34878a7e3
|
[
"Apache-2.0"
] | 1 |
2021-09-17T16:26:32.000Z
|
2021-09-17T16:26:32.000Z
|
# Copyright 2021, Guillermo Adrián Molina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import json
import yaml
from rad.rest.client.util import print_table, print_parsable
from rad.rest.client.api.authentication_1 import Session
from rad.rest.client.api.zfsmgr_1 import Zpool
from rad.rest.client.api.zfsmgr_1.zpool_resource import ZpoolResource
LOG = logging.getLogger(__name__)
class CmdZpoolList:
name = 'list'
aliases = ['ls']
@staticmethod
def init_parser(subparsers, parent_parser):
parser = subparsers.add_parser(CmdZpoolList.name,
aliases=CmdZpoolList.aliases,
parents=[parent_parser],
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='List ZFS pools',
help='List ZFS pools')
parser.add_argument('-c', '--columns',
nargs='+',
choices=ZpoolResource.get_property_names(),
default=['name', 'size', 'allocated', 'free',
'capacity', 'dedupratio', 'health', 'altroot'],
help='Specify wich columns to show in the table')
parser.add_argument('-s', '--sort-by',
choices=ZpoolResource.get_property_names(),
default='name',
help='Specify the sort order in the table')
group = parser.add_mutually_exclusive_group()
group.add_argument('-t', '--table',
action='store_true',
default=True,
help='Show output in table format')
group.add_argument('-y', '--yaml',
action='store_true',
help='Show output in yaml format')
group.add_argument('-j', '--json',
action='store_true',
help='Show output in json format')
group.add_argument('-d', '--delimiter',
help='Show output in a parsable format delimited by the string')
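    # Example flag combinations accepted by the parser above (illustrative; the actual
    # executable name depends on how the package's entry point is wired up):
    #   ... zpool list --columns name size health --sort-by size --json
    #   ... zpool ls -d ','    # parsable output, comma-delimited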
def __init__(self, options):
with Session(protocol=options.protocol, hostname=options.hostname, port=options.port) as session:
zpool_instances = session.list_objects(Zpool())
zpool_resources = [instance.get_properties(
options.columns) for instance in zpool_instances]
zpools = []
for zfs_resource in zpool_resources:
resource = {}
for property in zfs_resource.properties:
resource[property.name] = property
zpools.append(resource)
# sort by key
if options.sort_by is not None:
zpools = sorted(zpools, key=lambda i: i[options.sort_by])
if options.json:
resources = [resource.to_json() for resource in zpool_resources]
print(json.dumps(resources, indent=4))
elif options.yaml:
resources = [resource.to_json() for resource in zpool_resources]
print(yaml.dump(resources))
elif options.delimiter is not None:
print_parsable(zpools, options.delimiter)
elif options.table:
print_table(zpools)
| 43.043011 | 105 | 0.572321 | 3,088 | 0.771229 | 0 | 0 | 1,804 | 0.450549 | 0 | 0 | 1,047 | 0.261489 |
b5bcf620df665e14fd0ade4b0917ffe41b1ea768
| 3,736 |
py
|
Python
|
Sofware/main.py
|
Mark-MDO47/PiPod
|
990042ff5ad69d9fc93d1bd5bd684db730156222
|
[
"MIT"
] | 63 |
2018-08-02T20:50:41.000Z
|
2022-03-02T02:42:48.000Z
|
Sofware/main.py
|
Mark-MDO47/PiPod
|
990042ff5ad69d9fc93d1bd5bd684db730156222
|
[
"MIT"
] | 2 |
2018-08-30T16:31:48.000Z
|
2021-12-02T01:28:23.000Z
|
Sofware/main.py
|
Mark-MDO47/PiPod
|
990042ff5ad69d9fc93d1bd5bd684db730156222
|
[
"MIT"
] | 14 |
2018-08-05T04:45:07.000Z
|
2022-02-18T10:56:20.000Z
|
#!/usr/bin/python3
import playback
import display
import navigation
import device
import pygame
done = False
music = playback.music()
view = display.view()
menu = navigation.menu()
PiPod = device.PiPod()
menu.loadMetadata()
status = PiPod.getStatus()
songMetadata = music.getStatus()
displayUpdate = pygame.USEREVENT + 1
pygame.time.set_timer(displayUpdate, 500)
view.update(status, menu.menuDict, songMetadata)
while not done:
music.loop()
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
PiPod.toggleSleep()
elif event.key == pygame.K_u:
music.volumeUp()
elif event.key == pygame.K_d:
music.volumeDown()
elif event.key == pygame.K_UP:
if status[2]:
music.volumeUp()
elif menu.menuDict["current"] == "musicController":
menu.gotomenu()
else:
action = menu.up()
elif event.key == pygame.K_DOWN:
if status[2]:
music.volumeDown()
elif menu.menuDict["current"] == "musicController":
music.shuffle()
menu.menuDict["Queue"] = music.playlist
else:
action = menu.down()
elif event.key == pygame.K_LEFT:
if status[2] or menu.menuDict["current"] == "musicController":
music.prev()
else:
action = menu.left()
elif event.key == pygame.K_RIGHT:
if status[2] or menu.menuDict["current"] == "musicController":
music.next()
else:
action = menu.right()
if action == "updateList":
music.updateList(menu.menuDict["Queue"])
elif event.key == pygame.K_RETURN:
if status[2] or menu.menuDict["current"] == "musicController":
music.playPause()
else:
action = menu.select()
if action == "play":
music.loadList(menu.menuDict["Queue"])
music.play()
elif action == "clearQueue":
menu.menuDict["Queue"] = []
music.clearQueue()
elif action == "updateLibrary":
if music.updateLibrary():
done = True
elif action == "toggleSleep":
PiPod.toggleSleep()
elif action == "shutdown":
while not PiPod.shutdown():
view.popUp("Shutdown")
elif action == "reboot":
while not PiPod.reboot():
view.popUp("Reboot")
elif action == "playAtIndex":
if menu.menuDict["selectedItem"] == 0:
music.clearQueue()
menu.menuDict["Queue"] = []
else:
music.playAtIndex(menu.menuDict["selectedItem"]-1)
status = PiPod.getStatus()
songMetadata = music.getStatus()
view.update(status, menu.menuDict, songMetadata)
# display.update() without arguments updates the entire display just like display.flip()
pygame.time.Clock().tick(
        30)  # Limit the framerate to 30 FPS so the main loop doesn't use all of the CPU resources
| 34.592593 | 103 | 0.482869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 496 | 0.132762 |
b5bd68a22d06b0793abe8bb8a40789d31dac7150
| 3,250 |
py
|
Python
|
cloudshell/cli/session/telnet_session.py
|
test-gh-org-workflow/probable-garbanzo
|
c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088
|
[
"Apache-2.0"
] | 4 |
2017-01-31T14:05:19.000Z
|
2019-04-10T16:35:44.000Z
|
cloudshell/cli/session/telnet_session.py
|
test-gh-org-workflow/probable-garbanzo
|
c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088
|
[
"Apache-2.0"
] | 89 |
2016-05-25T14:17:38.000Z
|
2022-03-17T13:09:59.000Z
|
cloudshell/cli/session/telnet_session.py
|
test-gh-org-workflow/probable-garbanzo
|
c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088
|
[
"Apache-2.0"
] | 6 |
2016-07-21T12:24:10.000Z
|
2022-02-21T06:33:18.000Z
|
import socket
import telnetlib
from collections import OrderedDict
from cloudshell.cli.session.connection_params import ConnectionParams
from cloudshell.cli.session.expect_session import ExpectSession
from cloudshell.cli.session.session_exceptions import (
SessionException,
SessionReadEmptyData,
SessionReadTimeout,
)
class TelnetSessionException(SessionException):
pass
class TelnetSession(ExpectSession, ConnectionParams):
SESSION_TYPE = "TELNET"
AUTHENTICATION_ERROR_PATTERN = "%.*($|\n)"
def __init__(
self,
host,
username,
password,
port=None,
on_session_start=None,
*args,
**kwargs
):
ConnectionParams.__init__(
self, host, port=port, on_session_start=on_session_start
)
ExpectSession.__init__(self, *args, **kwargs)
if hasattr(self, "port") and self.port is None:
self.port = 23
self.username = username
self.password = password
self._handler = None
def __eq__(self, other):
"""Is equal.
:type other: TelnetSession
"""
return (
ConnectionParams.__eq__(self, other)
and self.username == other.username
and self.password == other.password
)
def __del__(self):
self.disconnect()
def _connect_actions(self, prompt, logger):
action_map = OrderedDict()
action_map[
"[Ll]ogin:|[Uu]ser:|[Uu]sername:"
] = lambda session, logger: session.send_line(session.username, logger)
action_map["[Pp]assword:"] = lambda session, logger: session.send_line(
session.password, logger
)
self.hardware_expect(
None,
expected_string=prompt,
timeout=self._timeout,
logger=logger,
action_map=action_map,
)
self._on_session_start(logger)
def _initialize_session(self, prompt, logger):
self._handler = telnetlib.Telnet()
self._handler.open(self.host, int(self.port), self._timeout)
if self._handler.get_socket() is None:
raise TelnetSessionException(
self.__class__.__name__, "Failed to open telnet connection."
)
self._handler.get_socket().send(telnetlib.IAC + telnetlib.WILL + telnetlib.ECHO)
def disconnect(self):
"""Disconnect / close the session."""
if self._handler:
self._handler.close()
self._active = False
def _send(self, command, logger):
"""Send message / command to device.
:param command: message / command to send
:type command: str
"""
byte_command = command.encode()
self._handler.write(byte_command)
def _receive(self, timeout, logger):
"""Read session buffer."""
timeout = timeout if timeout else self._timeout
self._handler.get_socket().settimeout(timeout)
try:
byte_data = self._handler.read_some()
except socket.timeout:
raise SessionReadTimeout()
if not byte_data:
raise SessionReadEmptyData()
data = byte_data.decode()
return data
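# Editor's note: an illustrative, hypothetical sketch of the session lifecycle
# implemented above. The host, credentials and prompt are placeholders, and in
# real use the cloudshell-cli machinery around ExpectSession drives these steps
# rather than user code calling the private methods directly.
def _example_telnet_session(logger):
    session = TelnetSession(host="192.168.1.10", username="admin",
                            password="secret", port=23)
    try:
        session._initialize_session(r"#\s*$", logger)  # open the telnet socket
        session._connect_actions(r"#\s*$", logger)  # answer login/password prompts
    finally:
        session.disconnect()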
| 27.542373 | 88 | 0.614462 | 2,912 | 0.896 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.109538 |
b5bdd10944be47a0eef70a2d5c3fc45fddcfaaf6
| 5,698 |
py
|
Python
|
src/contentbase/auditor.py
|
ClinGen/clincoded
|
5624c74546ce2a44eda00ee632a8de8c2099da10
|
[
"MIT"
] | 30 |
2015-09-23T20:38:57.000Z
|
2021-03-10T03:12:46.000Z
|
src/contentbase/auditor.py
|
ClinGen/clincoded
|
5624c74546ce2a44eda00ee632a8de8c2099da10
|
[
"MIT"
] | 2,132 |
2015-06-08T21:50:35.000Z
|
2022-02-15T22:44:18.000Z
|
src/contentbase/auditor.py
|
ClinGen/clincoded
|
5624c74546ce2a44eda00ee632a8de8c2099da10
|
[
"MIT"
] | 10 |
2015-09-25T20:11:25.000Z
|
2020-12-09T02:58:44.000Z
|
""" Cross-object data auditing
Schema validation allows for checking values within a single object.
We also need to perform higher order checking between linked objects.
"""
from past.builtins import basestring
import logging
import venusian
logger = logging.getLogger(__name__)
def includeme(config):
config.registry['auditor'] = Auditor()
config.add_directive('add_audit_checker', add_audit_checker)
config.add_request_method(audit, 'audit')
# Same as logging
_levelNames = {
0: 'NOTSET',
10: 'DEBUG',
20: 'INFO',
30: 'DCC_ACTION',
40: 'WARNING',
50: 'NOT_COMPLIANT',
60: 'ERROR',
'DEBUG': 10,
'ERROR': 60,
'INFO': 20,
'NOTSET': 0,
'WARNING': 40,
'NOT_COMPLIANT': 50,
'DCC_ACTION': 30,
}
class AuditFailure(Exception):
def __init__(self, category, detail=None, level=0, path=None, name=None):
        super(AuditFailure, self).__init__(category)
self.category = category
self.detail = detail
if not isinstance(level, int):
level = _levelNames[level]
self.level = level
self.path = path
self.name = name
def __json__(self, request=None):
return {
'category': self.category,
'detail': self.detail,
'level': self.level,
'level_name': _levelNames[self.level],
'path': self.path,
'name': self.name,
}
class Auditor(object):
""" Data audit manager
"""
_order = 0
def __init__(self):
self.type_checkers = {}
def add_audit_checker(self, checker, item_type, condition=None, frame='embedded'):
checkers = self.type_checkers.setdefault(item_type, [])
self._order += 1 # consistent execution ordering
if isinstance(frame, list):
frame = tuple(sorted(frame))
checkers.append((self._order, checker, condition, frame))
def audit(self, request, types, path, **kw):
if isinstance(types, basestring):
types = [types]
checkers = set()
checkers.update(*(self.type_checkers.get(item_type, ()) for item_type in types))
errors = []
system = {
'request': request,
'path': path,
'types': types,
}
system.update(kw)
for order, checker, condition, frame in sorted(checkers):
if frame is None:
uri = path
elif isinstance(frame, basestring):
uri = '%s@@%s' % (path, frame)
else:
uri = '%s@@expand?expand=%s' % (path, '&expand='.join(frame))
value = request.embed(uri)
if condition is not None:
try:
if not condition(value, system):
continue
except Exception as e:
detail = '%s: %r' % (checker.__name__, e)
failure = AuditFailure(
'audit condition error', detail, 'ERROR', path, checker.__name__)
errors.append(failure.__json__(request))
logger.warning('audit condition error auditing %s', path, exc_info=True)
continue
try:
try:
result = checker(value, system)
except AuditFailure as e:
e = e.__json__(request)
if e['path'] is None:
e['path'] = path
e['name'] = checker.__name__
errors.append(e)
continue
if result is None:
continue
if isinstance(result, AuditFailure):
result = [result]
for item in result:
if isinstance(item, AuditFailure):
item = item.__json__(request)
if item['path'] is None:
item['path'] = path
item['name'] = checker.__name__
errors.append(item)
continue
raise ValueError(item)
except Exception as e:
detail = '%s: %r' % (checker.__name__, e)
failure = AuditFailure(
'audit script error', detail, 'ERROR', path, checker.__name__)
errors.append(failure.__json__(request))
logger.warning('audit script error auditing %s', path, exc_info=True)
continue
return errors
# Imperative configuration
def add_audit_checker(config, checker, item_type, condition=None, frame='embedded'):
auditor = config.registry['auditor']
config.action(None, auditor.add_audit_checker,
(checker, item_type, condition, frame))
# Declarative configuration
def audit_checker(item_type, condition=None, frame='embedded'):
""" Register an audit checker
"""
def decorate(checker):
def callback(scanner, factory_name, factory):
scanner.config.add_audit_checker(
checker, item_type, condition, frame)
venusian.attach(checker, callback, category='auditor')
return checker
return decorate
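# Editor's note: an illustrative registration sketch for the decorator above.
# The 'my_item' type and the 'title' field are invented for the example; real
# checkers are registered when venusian scans the defining module.
@audit_checker('my_item')
def audit_my_item_has_title(value, system):
    if not value.get('title'):
        return AuditFailure('missing title', 'title is required',
                            level='WARNING', path=system['path'])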
def audit(request, types=None, path=None, context=None, **kw):
auditor = request.registry['auditor']
if path is None:
path = request.path
if context is None:
context = request.context
if types is None:
types = [context.item_type] + context.base_types
return auditor.audit(
request=request, types=types, path=path, root=request.root, context=context,
registry=request.registry, **kw)
| 32.56 | 92 | 0.551071 | 3,792 | 0.665497 | 0 | 0 | 0 | 0 | 0 | 0 | 849 | 0.149 |
b5be5d0470828bf2d8483755e027514f357777f6
| 1,694 |
py
|
Python
|
PyTrinamic/ic/TMC2130/TMC2130.py
|
trinamic-AA/PyTrinamic
|
b054f4baae8eb6d3f5d2574cf69c232f66abb4ee
|
[
"MIT"
] | 37 |
2019-01-13T11:08:45.000Z
|
2022-03-25T07:18:15.000Z
|
PyTrinamic/ic/TMC2130/TMC2130.py
|
trinamic-AA/PyTrinamic
|
b054f4baae8eb6d3f5d2574cf69c232f66abb4ee
|
[
"MIT"
] | 56 |
2019-02-25T02:48:27.000Z
|
2022-03-31T08:45:34.000Z
|
PyTrinamic/ic/TMC2130/TMC2130.py
|
trinamic-AA/PyTrinamic
|
b054f4baae8eb6d3f5d2574cf69c232f66abb4ee
|
[
"MIT"
] | 26 |
2019-01-14T05:20:16.000Z
|
2022-03-08T13:27:35.000Z
|
'''
Created on 14.10.2019
@author: JM
'''
from PyTrinamic.ic.TMC2130.TMC2130_register import TMC2130_register
from PyTrinamic.ic.TMC2130.TMC2130_register_variant import TMC2130_register_variant
from PyTrinamic.ic.TMC2130.TMC2130_fields import TMC2130_fields
from PyTrinamic.helpers import TMC_helpers
class TMC2130():
"""
Class for the TMC2130 IC
"""
def __init__(self, channel):
self.__channel = channel
self.registers = TMC2130_register
self.fields = TMC2130_fields
self.variants = TMC2130_register_variant
self.MOTORS = 2
def showChipInfo(self):
print("TMC2130 chip info: The TMC2130 is a high-performane driver IC for two phase stepper motors. Standard SPI and STEP/DIR simply communication. Voltage supply: 4.75 - 46V")
def writeRegister(self, registerAddress, value, channel):
raise NotImplementedError
def readRegister(self, registerAddress, channel):
raise NotImplementedError
def writeRegisterField(self, field, value):
return self.writeRegister(field[0], TMC_helpers.field_set(self.readRegister(field[0], self.__channel), field[1], field[2], value), self.__channel)
def readRegisterField(self, field):
return TMC_helpers.field_get(self.readRegister(field[0], self.__channel), field[1], field[2])
def moveBy(self, motor, distance, velocity):
if not(0 <= motor < self.MOTORS):
raise ValueError
position = self.readRegister(self.registers.XACTUAL, self.__channel, signed=True)
self.moveTo(motor, position + distance, velocity)
return position + distance
def get_pin_state(self):
pass
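# Editor's note: a minimal sketch of how the abstract register accessors above
# might be satisfied for offline testing. The dictionary-backed registers are
# purely illustrative; a real implementation talks to the IC through a
# PyTrinamic connection interface.
class _InMemoryTMC2130(TMC2130):
    def __init__(self, channel=0):
        super().__init__(channel)
        self._regs = {}
    def writeRegister(self, registerAddress, value, channel):
        self._regs[registerAddress] = value
    def readRegister(self, registerAddress, channel, signed=False):
        return self._regs.get(registerAddress, 0)
def _demo_tmc2130_registers():
    ic = _InMemoryTMC2130()
    ic.writeRegister(ic.registers.XACTUAL, 512, 0)
    return ic.readRegister(ic.registers.XACTUAL, 0)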
| 32.576923 | 183 | 0.707202 | 1,389 | 0.819953 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.14758 |
b5be6e33c1957ff7fe4d9e1d181d17faa43d7603
| 319 |
py
|
Python
|
setup.py
|
space-cadet/tncontract
|
a5503951e218a91e9ba03e11c601b95b6bfcb72a
|
[
"MIT"
] | 39 |
2016-09-19T01:22:43.000Z
|
2022-01-12T07:26:29.000Z
|
setup.py
|
space-cadet/tncontract
|
a5503951e218a91e9ba03e11c601b95b6bfcb72a
|
[
"MIT"
] | 9 |
2016-09-25T22:51:35.000Z
|
2019-07-14T16:56:12.000Z
|
setup.py
|
space-cadet/tncontract
|
a5503951e218a91e9ba03e11c601b95b6bfcb72a
|
[
"MIT"
] | 12 |
2017-02-14T11:55:30.000Z
|
2021-02-01T01:09:31.000Z
|
from setuptools import setup, find_packages
# Get version from tncontract/version.py
exec(open("tncontract/version.py").read())
setup(
name = "tncontract",
version = __version__,
packages = find_packages(),
author = "Andrew Darmawan",
license = "MIT",
install_requires = ["numpy", "scipy"],
)
| 22.785714 | 43 | 0.677116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.347962 |
b5bf153601a744508ecc99c7f24b1fb9627883ce
| 150 |
py
|
Python
|
exampleb.py
|
JFletcher94/tBot
|
051281c81b5712f7ecdb4355b7ea7f6551dec7c7
|
[
"MIT"
] | null | null | null |
exampleb.py
|
JFletcher94/tBot
|
051281c81b5712f7ecdb4355b7ea7f6551dec7c7
|
[
"MIT"
] | null | null | null |
exampleb.py
|
JFletcher94/tBot
|
051281c81b5712f7ecdb4355b7ea7f6551dec7c7
|
[
"MIT"
] | null | null | null |
#exampleb generates a full tweet
#examplet only calls get_string()
def get_string():
'''generate full tweet text'''
return 'example #text'
| 18.75 | 34 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.733333 |
b5bf895845b26e76fb4d05e08f9ee6d0b182cce7
| 37 |
py
|
Python
|
reto_numeros_nones.py
|
Naxred/PensamientoComputacionalPython
|
a19fe394fd8b6265d486d432bbc5774d0cf35368
|
[
"Unlicense"
] | null | null | null |
reto_numeros_nones.py
|
Naxred/PensamientoComputacionalPython
|
a19fe394fd8b6265d486d432bbc5774d0cf35368
|
[
"Unlicense"
] | null | null | null |
reto_numeros_nones.py
|
Naxred/PensamientoComputacionalPython
|
a19fe394fd8b6265d486d432bbc5774d0cf35368
|
[
"Unlicense"
] | null | null | null |
for x in range(1,100,2):
print(x)
| 18.5 | 24 | 0.594595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b5c11f56555074149df0acc7544e0c995e6baf54
| 3,213 |
py
|
Python
|
gryphon/data_service/auditors/trades_volume_auditor.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 1,109 |
2019-06-20T19:23:27.000Z
|
2022-03-20T14:03:43.000Z
|
gryphon/data_service/auditors/trades_volume_auditor.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 63 |
2019-06-21T05:36:17.000Z
|
2021-05-26T21:08:15.000Z
|
gryphon/data_service/auditors/trades_volume_auditor.py
|
qiquanzhijia/gryphon
|
7bb2c646e638212bd1352feb1b5d21536a5b918d
|
[
"Apache-2.0"
] | 181 |
2019-06-20T19:42:05.000Z
|
2022-03-21T13:05:13.000Z
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from delorean import Delorean
from sqlalchemy import and_
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import LoopingCall, deferLater
from twisted.python import log
from gryphon.data_service.auditors.auditor import Auditor
import gryphon.data_service.util as util
from gryphon.lib.models.emeraldhavoc.base import EmeraldHavocBase
from gryphon.lib.twistedbitcoinwisdom import TwistedBitcoinWisdom
metadata = EmeraldHavocBase.metadata
trades = metadata.tables['trade']
EXCHANGES = ['KRAKEN', 'BITSTAMP', 'BITFINEX', 'CAVIRTEX', 'VAULTOFSATOSHI']
class TradesVolumeAuditor(Auditor):
    @defer.inlineCallbacks
    def start(self):
self.redis = yield util.setup_redis()
self.setup_mysql()
self.looping_call = LoopingCall(self.audit).start(30)
@defer.inlineCallbacks
def target_volume_in_trade_list(self, trade_list, target_volume):
n = len(trade_list)
found_match = False
def offline_summation(lower_bound, upper_bound, trade_list, trades, target_volume):
perm_sum = 0
for k in range(lower_bound, upper_bound):
perm_sum += trade_list[k][trades.c.volume]
return perm_sum == target_volume
if not n:
defer.returnValue(target_volume == 0)
elif target_volume == 0:
# TODO Fix this.
defer.returnValue(False)
else:
for i in range(n):
for j in range(n - i, n + 1):
                    found_match = yield deferLater(
                        reactor,
                        0,
offline_summation,
j - (n - i),
j,
trade_list,
trades,
target_volume,
)
if found_match:
break
defer.returnValue(found_match)
@inlineCallbacks
def audit(self, exchanges=EXCHANGES):
now = Delorean().datetime
outer_min = now
outer_max = Delorean(now - timedelta(minutes=9)).datetime
inner_max = Delorean(now - timedelta(minutes=6)).datetime
inner_min = Delorean(now - timedelta(minutes=3)).datetime
for exchange in exchanges:
real_volume = yield TwistedBitcoinWisdom(exchange=exchange)\
.volume_in_period(inner_max, inner_min)
real_volume = real_volume.amount
result = yield self.engine.execute(
trades.select(trades)
.where(and_(
trades.c.exchange.startswith(exchange),
trades.c.timestamp.between(outer_max, outer_min),
))
)
d_trades = yield result.fetchall()
target_in_trades = yield self.target_volume_in_trade_list(
d_trades,
real_volume,
)
if not target_in_trades:
log.err('Trade Volume Audit Failed : %s' % (exchange))
else:
log.msg('Trade Volume Audit Passed : %s' % (exchange))
| 32.13 | 91 | 0.591659 | 2,512 | 0.781824 | 2,412 | 0.7507 | 2,309 | 0.718643 | 0 | 0 | 164 | 0.051043 |
b5c68bd329a9e17d20d1b6dbb51e72b824cb0447
| 370 |
py
|
Python
|
shablbot/__main__.py
|
Blackgard/vk-bot-python
|
5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c
|
[
"MIT"
] | 5 |
2019-11-12T05:15:07.000Z
|
2022-01-20T06:26:55.000Z
|
shablbot/__main__.py
|
Blackgard/vk-bot-python
|
5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c
|
[
"MIT"
] | 1 |
2021-06-02T00:33:47.000Z
|
2021-06-02T00:33:47.000Z
|
shablbot/__main__.py
|
Blackgard/vk-bot-python
|
5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c
|
[
"MIT"
] | 2 |
2021-12-18T17:03:10.000Z
|
2022-01-29T17:08:35.000Z
|
"""
Shablbot manager commands
"""
import sys
def main():
try:
from shablbot.core.manager_commands import manager_command
except ImportError as exc:
raise ImportError(
"Couldn't import Shablbot. Are you sure it's installed and activate venv?"
) from exc
manager_command(sys.argv)
if __name__ == '__main__':
main()
| 18.5 | 86 | 0.654054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.316216 |
b5c6fd5f851fb29a3b78c943cdd438c87f4e64cf
| 726 |
py
|
Python
|
Patterns_Custom_Row-Column_PYTHON.py
|
sukantadas194/Patters.Python
|
14e62b61defca2e1f192f6ac8b1484c0a9745cfb
|
[
"BSD-2-Clause"
] | null | null | null |
Patterns_Custom_Row-Column_PYTHON.py
|
sukantadas194/Patters.Python
|
14e62b61defca2e1f192f6ac8b1484c0a9745cfb
|
[
"BSD-2-Clause"
] | null | null | null |
Patterns_Custom_Row-Column_PYTHON.py
|
sukantadas194/Patters.Python
|
14e62b61defca2e1f192f6ac8b1484c0a9745cfb
|
[
"BSD-2-Clause"
] | null | null | null |
# Print custom row-column patterns.
# e.g. @ @ @ @
#      @ @ @ @
#      @ @ @ @
print("What do you want to print?")
wa = str(input("Answer : "))
try:
    print("How many rows do you want to print?")
    n1 = int(input("Answer : "))
    print("How many columns do you want to print?")
    n2 = int(input("Answer : "))
    if n1 <= 0 or n2 <= 0:
        print("Wrong Input")
        print("Input should be positive & greater than '0'")
        print("Start over again..")
    else:
        for k in range(n1):
            for i in range(n2):
                print(wa, end=" ")
            print()
except ValueError:
    print("Wrong Input")
    print("Only numbers are accepted")
    print("Start over again..")
| 26.888889 | 61 | 0.506887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.508264 |
b5c737a61480861fc56685c1832b7805d5bbd65b
| 17,116 |
py
|
Python
|
app/cluegame.py
|
dabreese00/clue-solver
|
3b99778075882974459e1c75792e7d051b6fe20a
|
[
"MIT"
] | null | null | null |
app/cluegame.py
|
dabreese00/clue-solver
|
3b99778075882974459e1c75792e7d051b6fe20a
|
[
"MIT"
] | null | null | null |
app/cluegame.py
|
dabreese00/clue-solver
|
3b99778075882974459e1c75792e7d051b6fe20a
|
[
"MIT"
] | null | null | null |
"""cluegame.py -- Classes to track Clue game events and make inferences
The board game Clue is also known as Cluedo. This module contains classes that
make it possible to record specific knowledge-generating events that a Clue
player may observe during the course of a game (such as, for example, that
Player A showed Player B one of either Card X, Card Y, or Card Z).
More to the point, Clue is a game about building knowledge through logical
inference. As such, these classes are designed to track not only these events
themselves, but also the sum total of game knowledge that can be logically
inferred from them.
Classes:
ClueCardType -- an Enum of possible card types in the game
ClueRelationType -- an Enum of possible types of Player-Card relation
Player -- a player in the Clue game
Card -- a card in the Clue game
ClueRelation -- an individual Player-Card relation that is known
Game -- a tracker and inference engine for total game knowledge
Functions:
normalize_to_list -- matches an object (or its name) to a list of objects
"""
from app.objectfilter import ObjectFilter
import enum
import pickle
import os
import collections
class ClueCardType(enum.Enum):
"""an Enum of possible card types in a Clue game"""
PERSON = "Person"
WEAPON = "Weapon"
ROOM = "Room"
class ClueRelationType(enum.Enum):
"""an Enum of possible types of Player-Card relation to record in Clue
These represent my own semi-invented terminology for describing a player's
knowledge in a Clue game. In case their meanings are not entirely
self-evident, an explanation of these terms and how they relate to player
knowledge in the game of Clue can be found the package README.md file; see
esp. the "Theory" section.
"""
HAVE = "have"
PASS = "pass"
SHOW = "show"
Player = collections.namedtuple('Player', 'name hand_size')
Player.__doc__ += ': A player in the Clue game'
Player.name.__doc__ = 'A name by which this player is identified'
Player.hand_size.__doc__ = 'Number of cards in hand of this player'
Card = collections.namedtuple('Card', 'name card_type')
Card.__doc__ += ': A card in the Clue game'
Card.name.__doc__ = 'A name by which this card is identified'
Card.card_type.__doc__ = 'Which ClueCardType this card belongs to'
def normalize_to_list(obj, lst):
"""Returns a matching member of a Player or Card list, if possible.
Assumes names of objects in the list are unique, for match by name.
Arguments:
obj -- a Player, Card, or a name (string) representing one
lst -- a list of Players or Cards
Returns:
a Player or Card from the list, matching obj
"""
if obj in lst:
return obj
try:
my_obj = next(o for o in lst if o.name == obj)
except(StopIteration):
raise ValueError("No such Player/Card {} in list {}".format(
obj, lst))
return my_obj
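# Editor's note: a small illustrative check of the name-or-object lookup above;
# the card names are invented for the example.
def _demo_normalize_to_list():
    cards = [Card("Rope", ClueCardType.WEAPON), Card("Hall", ClueCardType.ROOM)]
    assert normalize_to_list("Rope", cards) == cards[0]    # matched by name
    assert normalize_to_list(cards[1], cards) == cards[1]  # already in the list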
class ClueRelation(collections.namedtuple(
'ClueRelation', 'rel_type player cards')):
"""A generalized representation of a piece of Clue game knowledge to record
A ClueRelation instance represents knowledge of a specific type of
relationship between a given Player, and one or more given Cards.
How many Cards are in a relation, depends on the relation's type:
A HAVE or PASS relation has 1 card.
A SHOW relation has 3 cards.
Instance variables:
rel_type -- a ClueRelationType, defining the type of this relation
player -- the Player who is involved in this relation
cards -- a list of the Card(s) involved in this relation
    The __contains__ method is significantly customized (perhaps "bastardized")
to aid querying.
"""
__slots__ = ()
def __repr__(self):
return "ClueRelation(Type={}, Player={}, Cards={})".format(
self.rel_type, self.player, self.cards)
def __contains__(self, obj):
"""Checks for any of several distinct conditions
The inherited method is overridden, in a slight abuse, to check whether
ANY of the following two conditions holds (logical "or"):
- a given Player is self.player
- a given Card is in self.cards
- a given ClueCardType is represented among self.cards
The last condition may seem strange, but I found it a useful trick to
help keep the semantics consistently intuitive for certain queries that
become relevant when making inferences in the game of Clue. If I come
up with a better way (i.e. a simple way to query lists of ClueRelations
with intuitive-enough semantics that avoids the need for it), then I
may consider removing this last condition, as it seems potentially
confusing.
"""
return obj == self.player \
or obj in self.cards \
or obj in [c.card_type for c in self.cards]
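# Editor's note: an illustrative check of the overloaded ``in`` behaviour
# documented above; the player and cards are invented for the example.
def _demo_clue_relation_contains():
    alice = Player("Alice", 3)
    shown = [Card("Plum", ClueCardType.PERSON),
             Card("Rope", ClueCardType.WEAPON),
             Card("Hall", ClueCardType.ROOM)]
    show = ClueRelation(rel_type=ClueRelationType.SHOW, player=alice, cards=shown)
    assert alice in show               # the Player is in the relation
    assert shown[1] in show            # so is each shown Card
    assert ClueCardType.ROOM in show   # and each represented ClueCardType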
class ClueRelationFilter(ObjectFilter):
"""A tool to query a list of ClueRelations
Public methods (besides inherited methods):
match -- recursively tests aspects of a ClueRelation
"""
def match(self, relation):
"""Recursively check if relation matches all filter statements.
Arguments:
relation -- the relation to check
"""
if self.statement == "and":
return self.left.match(relation) and self.right.match(relation)
elif self.statement == "or":
return self.left.match(relation) or self.right.match(relation)
elif self.statement == "not":
return not self.left.match(relation)
elif self.statement == "all":
return True
elif self.statement in list(ClueRelationType):
return relation.rel_type == self.statement
else:
return self.statement in relation
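# Editor's note: a hedged sketch of composing filters the way the Game class
# below does. It assumes ObjectFilter (defined elsewhere) implements ``+`` as
# logical "and", ``/`` as logical "or", and ``get()`` to filter a list of
# relations, exactly as used in the Game methods.
def _demo_clue_relation_filter(relations, player, card):
    query = (ClueRelationFilter(player) +
             ClueRelationFilter(card) +
             (ClueRelationFilter(ClueRelationType.HAVE) /
              ClueRelationFilter(ClueRelationType.PASS)))
    return query.get(relations)  # every HAVE or PASS linking player and card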
class Game:
"""Encapsulates and updates the total state of the game knowledge.
The Game instance knows the following:
- a set of Cards (self.cards)
- a set of Players (self.players)
- all known ClueRelations between Players and Cards (self.relations)
- which Cards are "in the file", meaning the Clue confidential file --
this is how you win folks! (self.cards_in_the_file)
The Game instance provides public methods to record new ClueRelations as
they become known.
These same public methods also recursively check for and record the
consequences of any logical deductions that follow from each
newly-discovered ClueRelation.
There are also save/load/delete methods to handle persisting the game
state.
Public methods:
record_have
record_pass
record_show
save
Class methods:
load
delete
Instance variables:
players
cards
cards_in_the_file
relations
"""
def __init__(self,
clue_cards_persons,
clue_cards_weapons,
clue_cards_rooms,
players):
"""Initializes the game state.
Creates a set of Players and a set of Cards. All other instance
variables are initialized to empty.
Arguments:
clue_cards_persons -- a list of Person card names to play with
clue_cards_weapons -- a list of Weapon card names to play with
clue_cards_rooms -- a list of Room card names to play with
players -- a list of tuples representing all Players in the Game
"""
# Setup the Cards
clue_cards = {
ClueCardType.PERSON: clue_cards_persons,
ClueCardType.WEAPON: clue_cards_weapons,
ClueCardType.ROOM: clue_cards_rooms
}
self.cards = set()
for t in ClueCardType:
for n in clue_cards[t]:
if n not in [c.name for c in self.cards]:
self.cards.add(Card(n, t))
else:
raise ValueError("Duplicate card name: {}".format(n))
# Setup the Players
self.players = set()
for p in players:
new_p = Player._make(p)
if new_p.name not in [q.name for q in self.players]:
self.players.add(new_p)
else:
raise ValueError(
"Duplicate player name: {}".format(new_p.name))
num_cards_in_the_file = len(clue_cards.keys())
num_cards_in_hands = sum([p.hand_size for p in self.players])
if len(self.cards) != num_cards_in_the_file + num_cards_in_hands:
raise ValueError("Player hand sizes and card count don't add up!")
# Setup the Game state knowledge
self.relations = []
@property
def cards_in_the_file(self):
"""If we know that no player has a given card, it is in the file!"""
cards_in_the_file = set()
for c in self.cards:
players_passing = [r.player for r in (
ClueRelationFilter(c) +
ClueRelationFilter(ClueRelationType.PASS)
).get(self.relations)]
if len(players_passing) == len(self.players):
cards_in_the_file.add(c)
return cards_in_the_file
def record_have(self, player, card):
"""Record a HAVE relation, and make deductions accordingly.
This is a thin wrapper for self.__record_clue_relation, solely to
enhance external usability.
"""
self.__record_clue_relation(ClueRelationType.HAVE, player, [card])
def record_pass(self, player, card):
"""Record a PASS relation, and make deductions accordingly.
This is a thin wrapper for self.__record_clue_relation, solely to
enhance external usability.
"""
self.__record_clue_relation(ClueRelationType.PASS, player, [card])
def record_show(self, player, cards):
"""Record a SHOW relation, and make deductions accordingly.
This is a thin wrapper for self.__record_clue_relation, solely to
enhance external usability.
"""
self.__record_clue_relation(ClueRelationType.SHOW, player, cards)
def __record_clue_relation(self, rel_type, player, cards):
"""Record a new ClueRelation, and make deductions accordingly."""
player, cards = self.__normalize_input(player, cards)
if rel_type in [ClueRelationType.HAVE, ClueRelationType.PASS]:
# Check for any redundancy or conflict with an existing HAVE/PASS
matching_haves_passes = (
ClueRelationFilter(player) +
ClueRelationFilter(cards[0]) +
(
ClueRelationFilter(ClueRelationType.HAVE) /
ClueRelationFilter(ClueRelationType.PASS)
)
).get(self.relations)
for h in matching_haves_passes:
if h.rel_type == rel_type:
return # Ignore attempted duplicate.
else:
raise ValueError("Cannot mark Relation {} {} {}; ".format(
rel_type, player, cards[0]) +
"the opposite is already marked!")
rel = ClueRelation(
rel_type=rel_type,
player=player,
cards=cards)
if rel:
self.relations.append(rel)
self.__draw_inferences_from_new_relation(rel)
def __draw_inferences_from_new_relation(self, new_relation):
"""Make and record all possible logical inferences from a ClueRelation
Given a newly-recorded ClueRelation, identify all possible additional
ClueRelations that can be inferred, and record them too (which may in
turn cause additional calls to __draw_inferences_..., and so on).
Arguments:
new_relation -- a (ostensibly newly-recorded) ClueRelation
"""
player = new_relation.player
if new_relation.rel_type == ClueRelationType.HAVE:
card = new_relation.cards[0]
self.__deduce_other_player_passes_from_have(player, card)
self.__deduce_player_passes_from_known_whole_hand(player)
self.__deduce_card_passes_from_cardtype_completion(
card.card_type)
elif new_relation.rel_type == ClueRelationType.PASS:
card = new_relation.cards[0]
matching_shows = (
ClueRelationFilter(player) +
ClueRelationFilter(card) +
ClueRelationFilter(ClueRelationType.SHOW)
).get(self.relations)
for s in matching_shows:
self.__deduce_have_from_show(s)
elif new_relation.rel_type == ClueRelationType.SHOW:
self.__deduce_have_from_show(new_relation)
def __deduce_other_player_passes_from_have(self, player, card):
"""If player has card, we infer all other players do not have card."""
for other_p in self.players:
if other_p != player:
self.record_pass(other_p, card)
def __deduce_player_passes_from_known_whole_hand(self, player):
"""If all player's cards are known, mark passes for all other cards."""
known_cards_in_hand = [r.cards[0] for r in (
ClueRelationFilter(player) +
ClueRelationFilter(ClueRelationType.HAVE)
).get(self.relations)]
if len(known_cards_in_hand) == player.hand_size:
for other_c in self.cards:
if other_c not in known_cards_in_hand:
self.record_pass(player, other_c)
def __deduce_card_passes_from_cardtype_completion(self, cluecardtype):
"""If all cards but 1 of this type are accounted for, mark passes.
If we know which player has every card of this type but 1, mark passes
for all players for the remaining card.
"""
cards_of_type_located = [r.cards[0] for r in (
ClueRelationFilter(cluecardtype) +
ClueRelationFilter(ClueRelationType.HAVE)
).get(self.relations)]
cards_of_type_total = []
for card_name in [c.name for c in
self.cards if c.card_type == cluecardtype]:
cards_of_type_total.append(
normalize_to_list(card_name, self.cards))
if len(cards_of_type_located) == len(cards_of_type_total) - 1:
remaining_card = (
set(cards_of_type_total) -
set(cards_of_type_located)
).pop()
for p in self.players:
self.record_pass(p, remaining_card)
def __deduce_have_from_show(self, show):
"""If given SHOW has 2 PASSed cards for player, infer & record a HAVE.
If show.cards contains exactly 2 cards that show.player has PASSes
recorded for, then infer and record a HAVE for show.player and the 3rd
card.
"""
q = ClueRelationFilter()
for c in show.cards:
q = q / ClueRelationFilter(c)
q = q + ClueRelationFilter(show.player)
if len((q + ClueRelationFilter(ClueRelationType.HAVE)
).get(self.relations)) == 0:
passed_cards = [r.cards[0] for r in
(q + ClueRelationFilter(ClueRelationType.PASS)
).get(self.relations)]
unpassed_cards = set(show.cards) - set(passed_cards)
if len(unpassed_cards) == 1:
for c in unpassed_cards:
self.record_have(show.player, c)
def __normalize_input(self, player, cards):
"""Allow to pass in Players/Cards either as objects, or by name
Assumes that Player and Card names are unique.
Arguments:
player -- a Player object, or name (string) to normalize
cards -- a list of Card objects, or names (strings) to normalize
Returns:
a Player object and a list of Card objects from the current Game
"""
player = normalize_to_list(player, self.players)
my_cards = []
for c in cards:
my_cards.append(
normalize_to_list(c, self.cards))
cards = my_cards
return player, cards
def save(self, path):
"""Persists the Game state to a file.
BEWARE: Overwrites any existing file at 'path'.
"""
with open(path, 'wb') as dbfile:
pickle.dump(self, dbfile)
def load(obj, path):
"""Loads a persisted Game state from a file."""
if os.path.isfile(path):
with open(path, 'rb') as dbfile:
return pickle.load(dbfile)
else:
return None
def delete(obj, path):
"""Deletes a persisted Game state in a file.
BEWARE: Deletes any existing file at 'path'.
"""
os.remove(path)
Game.load = classmethod(Game.load)
Game.delete = classmethod(Game.delete)
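# Editor's note: a short, hypothetical end-to-end sketch of the Game API above.
# All names are invented; hand sizes are chosen so the 9 cards split into two
# hands of 3 plus the 3 cards hidden in the confidential file.
def _demo_game():
    game = Game(
        clue_cards_persons=["Green", "Plum", "Scarlett"],
        clue_cards_weapons=["Rope", "Knife", "Wrench"],
        clue_cards_rooms=["Hall", "Study", "Lounge"],
        players=[("Alice", 3), ("Bob", 3)])
    game.record_have("Alice", "Rope")                   # Alice showed us the Rope
    game.record_pass("Bob", "Hall")                     # Bob could not answer with Hall
    game.record_show("Bob", ["Plum", "Knife", "Hall"])  # Bob showed one of these three
    return game.cards_in_the_file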
| 37.535088 | 79 | 0.62649 | 14,715 | 0.859722 | 0 | 0 | 510 | 0.029797 | 0 | 0 | 8,394 | 0.490418 |
b5cdf29e6b6b8257a8b1c9b388ba9bf3693defbc
| 726 |
py
|
Python
|
config.py
|
adesolagbenga0052/web-app
|
c6d6ca3f998897986ac25a1e93477af0a8bfacf6
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
adesolagbenga0052/web-app
|
c6d6ca3f998897986ac25a1e93477af0a8bfacf6
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
adesolagbenga0052/web-app
|
c6d6ca3f998897986ac25a1e93477af0a8bfacf6
|
[
"Apache-2.0"
] | null | null | null |
"""Flask configuration."""
from os import environ, path
basedir = path.abspath(path.dirname(__file__))
class Config:
"""Base config."""
SECRET_KEY = "qsZ5srBF9-j3tgdMsd11hdbg2VLUyKQYqWFQ1EZyKI6PDVVTLXduxWoM1N0wESR0zFvSPFDs9ogpMjgl9wFxXw"
STATIC_FOLDER = 'static'
TEMPLATES_FOLDER = 'templates'
class ProdConfig(Config):
FLASK_ENV = 'production'
DEBUG = False
TESTING = False
SQLALCHEMY_DATABASE_URI ="sqlite:///databank.sqlite"
SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevConfig(Config):
FLASK_ENV = 'development'
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite:///test.db"
SQLALCHEMY_TRACK_MODIFICATIONS = True
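# Editor's note: a hedged sketch of how these config classes are typically
# handed to a Flask application factory; the factory itself is not part of this
# project and is shown for illustration only.
def _example_create_app(debug=False):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(DevConfig if debug else ProdConfig)
    return app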
| 27.923077 | 106 | 0.698347 | 596 | 0.820937 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.305785 |
b5cff4fdd46f8909e02bbf2707f338423530762f
| 691 |
py
|
Python
|
tests/test_metrics.py
|
tolmanam/python-nomad-alt
|
f93d3f6553cdb1ee16dadabd385208b5cc550024
|
[
"MIT"
] | null | null | null |
tests/test_metrics.py
|
tolmanam/python-nomad-alt
|
f93d3f6553cdb1ee16dadabd385208b5cc550024
|
[
"MIT"
] | null | null | null |
tests/test_metrics.py
|
tolmanam/python-nomad-alt
|
f93d3f6553cdb1ee16dadabd385208b5cc550024
|
[
"MIT"
] | null | null | null |
from nomad_alt import Nomad
import json
import uuid
from pprint import pformat
import os
import pytest
import nomad_alt.exceptions
import tests.common as common
@pytest.fixture
def nomad_setup():
n = Nomad(host=common.IP, port=common.NOMAD_PORT, ssl_verify=False, token=common.NOMAD_TOKEN)
with open(common.EXAMPLE_JOB_JSON) as fh:
n.jobs.create(json.loads(fh.read()))
yield n
n.jobs.stop(common.EXAMPLE_JOB_NAME, purge=True)
def test_metrics(nomad_setup):
res = nomad_setup.metrics.fetch()
assert res is not None
assert 'Counters' in res
assert 'Gauges' in res
assert 'Points' in res
assert 'Samples' in res
assert 'Timestamp' in res
| 24.678571 | 97 | 0.732272 | 0 | 0 | 272 | 0.393632 | 288 | 0.416787 | 0 | 0 | 46 | 0.06657 |
b5d0213de62ed3ea48e3a10bf0cc5d6b41c2e553
| 5,979 |
py
|
Python
|
djproject/pictureupload/views.py
|
missingDown/webForUpload
|
fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3
|
[
"MIT"
] | null | null | null |
djproject/pictureupload/views.py
|
missingDown/webForUpload
|
fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3
|
[
"MIT"
] | null | null | null |
djproject/pictureupload/views.py
|
missingDown/webForUpload
|
fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
import logging
import json
import base64
import time
# Create your views here.
logger = logging.getLogger(__name__)
# File upload: form-data/multipart, POST method
def index(request):
sendfile = request.FILES.items()
haveFiles = False
for key,value in sendfile:
fileData = request.FILES.getlist(key)
if len(fileData):
haveFiles = True
for fl in fileData:
name = fl.name
with open('/home/hych007/project/serverproject/'+name, 'wb') as fp:
fp.write(bytes(fl.read()))
if not haveFiles:
result = {"errcode": 511, "errmsg": "form-data中未读取到图片信息"}
return HttpResponse(json.dumps(result), content_type="application/json")
cameraCode = request.POST.get("cameraCode", "")
if not cameraCode:
result = {"errcode": 501, "errmsg": "cameraCode摄像头编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
doorCode = request.POST.get("i3310A", "")
if not doorCode:
result = {"errcode": 505, "errmsg": "i3310A门禁编码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
userCode = request.POST.get("i3310D", "")
if not userCode:
result = {"errcode": 507, "errmsg": "i3310D用户唯一标识码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
photoCodes = request.POST.get("photoCodes", "")
if not len(photoCodes):
result = {"errcode": 502, "errmsg": "photoCode照片编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
dataTimes = request.POST.get("dataTimes", "")
if not len(dataTimes):
result = {"errcode": 503, "errmsg": "dataTime拍照时间不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
result = {"errcode": 200, "errmsg": "上传成功"}
return HttpResponse(json.dumps(result), content_type="application/json")
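# Editor's note: a hedged, client-side sketch of the multipart request that
# index() above expects. It is not part of the Django app; the URL, field
# values and file name are placeholders.
def _example_formdata_upload():
    import requests
    with open("example.jpeg", "rb") as fh:
        files = {"photo": fh}
        data = {
            "cameraCode": "CAM001",
            "i3310A": "DOOR001",
            "i3310D": "USER001",
            "photoCodes": "P001",
            "dataTimes": "2020-05-22 19:21:00",
        }
        return requests.post("http://localhost:8000/upload/", files=files, data=data)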
# File upload: JSON payload in the request body, POST method
def body(request):
try:
jsData = json.loads(str(request.body, encoding='utf-8'))
except Exception as e:
result = {"errcode": -1, "errmsg": "数据不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if not isinstance(jsData, list):
result = {"errcode": 500, "errmsg": "上传的json数据错误"}
return HttpResponse(json.dumps(result), content_type="application/json")
for data in jsData:
imgData = data.get("base64String", "")
if len(imgData) <= 23:
result = {"errcode": 509, "errmsg": "照片转Base64String 编码后的字符串不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
imgData = imgData[23:]
temp = base64.b64decode(imgData)
picName = time.strftime("%Y%m%d%H%M%S", time.localtime())
        # Image files must be read and written in binary mode
with open("/home/hych007/project/serverproject/"+picName+".jpeg", 'wb') as f:
f.write(temp)
cameraCode = data.get("cameraCode", "")
if not cameraCode:
result = {"errcode": 501, "errmsg": "cameraCode摄像头编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
doorCode = data.get("i3310A", "")
if not doorCode:
result = {"errcode": 505, "errmsg": "i3310A门禁编码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
userCode = data.get("i3310D", "")
if not userCode:
result = {"errcode": 507, "errmsg": "i3310D用户唯一标识码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
photoCode = data.get("photoCode", "")
if not photoCode:
result = {"errcode": 502, "errmsg": "photoCode照片编号不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
dataTime = data.get("dataTime", "")
if not dataTime:
result = {"errcode": 503, "errmsg": "dataTime拍照时间不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
result = {"errcode": 200, "errmsg": "上传成功"}
return HttpResponse(json.dumps(result), content_type="application/json")
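# Editor's note: a hedged, client-side sketch of the JSON body that body()
# above parses. The 23-character data-URI prefix is stripped by the view before
# Base64 decoding; the URL and field values are placeholders.
def _example_json_upload():
    import base64
    import json
    import requests
    with open("example.jpeg", "rb") as fh:
        encoded = "data:image/jpeg;base64," + base64.b64encode(fh.read()).decode()
    payload = [{
        "base64String": encoded,
        "cameraCode": "CAM001",
        "i3310A": "DOOR001",
        "i3310D": "USER001",
        "photoCode": "P001",
        "dataTime": "2020-05-22 19:21:00",
    }]
    return requests.post("http://localhost:8000/upload/body/",
                         data=json.dumps(payload),
                         headers={"Content-Type": "application/json"})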
# Provide image download
def picture(request):
file = open("/home/hych007/project/100001.png", "rb")
response = HttpResponse(content=file.read(), content_type="image/png")
return response
# def writeLog(text):
# with open('/mnt/testlog.txt', 'a+') as fp:
# fp.write(text+'\n')
# File upload: form-data/multipart, PUT method
def puttest(request):
# writeLog('get a request')
if(request.method != "PUT"):
result = {"errcode": 1, "errmsg": "Not put method"}
# writeLog('Not put method')
return HttpResponse(json.dumps(result), content_type="application/json")
put, files = request.parse_file_upload(request.META, request)
# request.FILES.update(files)
# request.POST = put.dict()
fileitems = files.items()
haveFiles = False
for key, value in fileitems:
# fileData = files.get(key)
if value:
haveFiles = True
            # Image files must be read and written in binary mode
name = value.name
with open('/home/hych007/project/serverproject/' + name, 'wb') as fp:
fp.write(bytes(value.read()))
if not haveFiles:
result = {"errcode": 2, "errmsg": "No file data"}
# writeLog('No file data')
return HttpResponse(json.dumps(result), content_type="application/json")
dataInfo = put.get("FaceDataRecord", "")
if not dataInfo:
result = {"errcode": 3, "errmsg": "no FaceDataRecord data"}
# writeLog('no FaceDataRecord data')
return HttpResponse(json.dumps(result), content_type="application/json")
result = {"errcode": 200, "errmsg": dataInfo}
# writeLog('request secceed')
return HttpResponse(json.dumps(result), content_type="application/json")
| 38.082803 | 85 | 0.632882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,267 | 0.355719 |
b5d05429ba74cb0c817d19e6c37641ec569991cf
| 2,191 |
py
|
Python
|
logparser/logs/logs.py
|
rkorte/rticonnextdds-logparser
|
e8d0446c8d1318e68886a58e95c3f1ba4a1fa455
|
[
"Apache-2.0"
] | 11 |
2016-06-28T13:26:01.000Z
|
2021-06-07T09:18:32.000Z
|
logparser/logs/logs.py
|
rkorte/rticonnextdds-logparser
|
e8d0446c8d1318e68886a58e95c3f1ba4a1fa455
|
[
"Apache-2.0"
] | 27 |
2016-10-26T19:57:16.000Z
|
2019-04-12T16:48:11.000Z
|
logparser/logs/logs.py
|
rkorte/rticonnextdds-logparser
|
e8d0446c8d1318e68886a58e95c3f1ba4a1fa455
|
[
"Apache-2.0"
] | 7 |
2016-08-28T17:24:15.000Z
|
2021-12-10T11:28:20.000Z
|
# Log Parser for RTI Connext.
#
# Copyright 2016 Real-Time Innovations, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create the global list of regular expressions and functions.
Functions:
+ add_regex: Compile the regex and add it to the list.
+ create_regex_list: Create the list of regular expressions and functions.
"""
from __future__ import absolute_import
import re
from logparser.logs.custom.logs import get_regex_list as custom_regex
from logparser.logs.debug.logs import get_regex_list as debug_regex
from logparser.logs.events.logs import get_regex_list as events_regex
from logparser.logs.micro.logs import get_regex_list as micro_regex
from logparser.logs.micro.micro import init as init_micro
from logparser.logs.network.logs import get_regex_list as network_regex
from logparser.logs.routing.logs import get_regex_list as routing_regex
def add_regex(log_list, method, regex):
"""Compile the regex and add it to the list."""
log_list.append((method, re.compile(regex)))
def create_regex_list(state):
"""Create the list of regular expressions and functions."""
init_micro(state)
# pylint: disable=W0106
expressions = []
[add_regex(expressions, expr[0], expr[1]) for expr in micro_regex()]
[add_regex(expressions, expr[0], expr[1]) for expr in network_regex()]
[add_regex(expressions, expr[0], expr[1]) for expr in events_regex()]
[add_regex(expressions, expr[0], expr[1]) for expr in routing_regex()]
[add_regex(expressions, expr[0], expr[1]) for expr in custom_regex()]
if state['debug']:
[add_regex(expressions, expr[0], expr[1]) for expr in debug_regex()]
return expressions
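# Editor's note: an illustrative sketch of registering one (function, regex)
# pair with add_regex above. The handler name and its signature are
# hypothetical; real handlers live in the per-category logs modules imported at
# the top of this file.
def _demo_add_regex():
    def _on_example(match, state, logger):  # hypothetical handler signature
        pass
    expressions = []
    add_regex(expressions, _on_example, r"Example message from (\w+)")
    return expressions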
| 40.574074 | 76 | 0.748517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 977 | 0.445915 |
b5d0c034f7242aa14fa3baca13d703e86f187f17
| 276 |
py
|
Python
|
torrents/tests/test_file.py
|
noahgoldman/torwiz
|
213be5cf3b62d2c18c09e2fe4b869c549c263f32
|
[
"MIT"
] | 1 |
2015-03-09T01:58:23.000Z
|
2015-03-09T01:58:23.000Z
|
torrents/tests/test_file.py
|
noahgoldman/torwiz
|
213be5cf3b62d2c18c09e2fe4b869c549c263f32
|
[
"MIT"
] | 3 |
2015-04-01T22:49:58.000Z
|
2015-05-01T19:09:11.000Z
|
torrents/tests/test_file.py
|
noahgoldman/torwiz
|
213be5cf3b62d2c18c09e2fe4b869c549c263f32
|
[
"MIT"
] | null | null | null |
from bson.objectid import ObjectId
from torrents.file import TorrentFile
class TestTorrentFile:
def test_get_output_file(self):
id1 = ObjectId()
file1 = TorrentFile(id1)
assert file1.get_output_file() == 'torrent_files/' + str(id1) + '.torrent'
| 25.090909 | 82 | 0.695652 | 200 | 0.724638 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.094203 |
b5d13876f65729d4efb83ad2b61955efd49a0d23
| 2,444 |
py
|
Python
|
google/cloud/storage/benchmarks/storage_throughput_plots.py
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | 1 |
2021-01-16T02:43:50.000Z
|
2021-01-16T02:43:50.000Z
|
google/cloud/storage/benchmarks/storage_throughput_plots.py
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/storage/benchmarks/storage_throughput_plots.py
|
millerantonio810/google-cloud-cpp
|
71582d922bc22b0dcbc58234f36c726ea3b7c171
|
[
"Apache-2.0"
] | 1 |
2020-05-09T20:12:05.000Z
|
2020-05-09T20:12:05.000Z
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summarize the results from running storage_throughput_benchmark."""
# %%
import argparse
import pandas as pd
import plotnine as p9
from scipy.stats import mannwhitneyu
# %%
pd.set_option("precision", 2)
# %%
def load_benchmark_output(file):
"""Loads the output generated by storage_throughput_benchmark."""
df = pd.read_csv(file, comment="#", names=["Op", "Api", "Bytes", "ElapsedMs"])
df["MiB"] = df.Bytes / 1024 / 1024
df["MiBs"] = df.MiB * 1000 / df.ElapsedMs
return df
# %%
def compare_api(df, op_name, alpha=0.05):
subset = df[df.Op == op_name]
stat, p = mannwhitneyu(
subset[subset.Api == "XML"].MiBs, subset[subset.Api == "JSON"].MiBs
)
print(
"\n\n===== %s XML vs. JSON =====\np-value=%.3f Statistics=%.3f"
% (op_name, p, stat)
)
print(subset.groupby(by="Api").MiBs.describe(percentiles=[0.50, 0.90, 0.95]))
if p > alpha:
print("%s/XML vs. READ/JSON: same distribution (fail to reject H0)" % op_name)
else:
print("%s/XML vs. READ/JSON: different distribution (reject H0)" % op_name)
# %%
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-file",
type=argparse.FileType("r"),
required=True,
help="the benchmark output file to load",
)
parser.add_argument(
"--output-file", type=str, required=True, help="the name for the output plot"
)
args = parser.parse_args()
# %%
data = load_benchmark_output(args.input_file)
# %%
print(data.head())
# %%
print(data.describe())
# %%
(
p9.ggplot(
data=data[(data.Op != "CREATE") & (data.Op != "DELETE")],
mapping=p9.aes(x="Op", y="MiBs", color="Api"),
)
+ p9.facet_wrap(facets="Op", labeller="label_both", scales="free")
+ p9.geom_boxplot()
).save(args.output_file)
# %%
compare_api(data, "READ")
compare_api(data, "WRITE")
| 26.857143 | 86 | 0.657529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,173 | 0.479951 |
b5d20cd2d199da5465fadfa36c7ff94c0bda75f4
| 711 |
py
|
Python
|
djAidESILV/products/migrations/0002_auto_20200522_1921.py
|
Kulumbaf/AidESILV
|
04dad828048edffdd3662b24c415edce22fd3ea3
|
[
"MIT"
] | null | null | null |
djAidESILV/products/migrations/0002_auto_20200522_1921.py
|
Kulumbaf/AidESILV
|
04dad828048edffdd3662b24c415edce22fd3ea3
|
[
"MIT"
] | null | null | null |
djAidESILV/products/migrations/0002_auto_20200522_1921.py
|
Kulumbaf/AidESILV
|
04dad828048edffdd3662b24c415edce22fd3ea3
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.12 on 2020-05-22 19:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='baby_product',
field=models.BooleanField(default=False, verbose_name='Produit pour enfant'),
),
migrations.AddField(
model_name='product',
name='category',
field=models.CharField(choices=[('A', 'Alimentation'), ('P', 'Pharmacie'), ('Q', 'Quotidien'), ('NA', 'Non-attribué')], default='NA', max_length=2, verbose_name='Catégorie'),
),
]
| 29.625 | 186 | 0.590717 | 619 | 0.868163 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.301543 |
b5d2438e72ede4149becee229525d2ab304971e9
| 939 |
py
|
Python
|
vietocr/train.py
|
lzmisscc/vietocr
|
df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3
|
[
"Apache-2.0"
] | null | null | null |
vietocr/train.py
|
lzmisscc/vietocr
|
df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3
|
[
"Apache-2.0"
] | null | null | null |
vietocr/train.py
|
lzmisscc/vietocr
|
df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
from vietocr.model.trainer import Trainer
from vietocr.tool.config import Cfg
import sys
sys.path.insert(0, './')
from char import character
logging.basicConfig(level=logging.INFO, )
def main():
parser = argparse.ArgumentParser()
parser.add_argument('config', help='see example at ')
parser.add_argument('--checkpoint', help='your checkpoint')
args = parser.parse_args()
config_base = Cfg.load_config_from_file("config/base.yml")
config = Cfg.load_config_from_file(args.config)
config_base.update(config)
config = config_base
config['vocab'] = character
trainer = Trainer(config, pretrained=False)
# args.checkpoint = config.trainer["checkpoint"]
# if args.checkpoint:
# trainer.load_checkpoint(args.checkpoint)
# logging.info(f"Load checkpoint form {args.checkpoint}....")
trainer.train()
if __name__ == '__main__':
main()
| 28.454545 | 68 | 0.707135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.28967 |
b5d34bad89b324ae2f55b466eea757d21d9ed3d6
| 363 |
py
|
Python
|
django_eveonline_connector/migrations/0018_remove_evetoken_primary.py
|
KryptedGaming/django-eveonline-connector
|
95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0
|
[
"MIT"
] | 3 |
2020-03-07T13:58:45.000Z
|
2021-02-06T20:16:50.000Z
|
django_eveonline_connector/migrations/0018_remove_evetoken_primary.py
|
KryptedGaming/django-eveonline-connector
|
95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0
|
[
"MIT"
] | 66 |
2019-12-17T20:54:22.000Z
|
2021-06-10T20:39:04.000Z
|
django_eveonline_connector/migrations/0018_remove_evetoken_primary.py
|
KryptedGaming/django-eveonline-connector
|
95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0
|
[
"MIT"
] | 2 |
2020-01-17T20:04:52.000Z
|
2021-07-11T22:11:42.000Z
|
# Generated by Django 2.2.10 on 2020-02-27 18:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_eveonline_connector', '0017_primaryevecharacterassociation'),
]
operations = [
migrations.RemoveField(
model_name='evetoken',
name='primary',
),
]
| 20.166667 | 78 | 0.630854 | 277 | 0.763085 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.363636 |
b5d4e05a5e5fe08d9de941f7f2c1980a53f27d2a
| 598 |
py
|
Python
|
Plug-and-play module/SematicEmbbedBlock.py
|
riciche/SimpleCVReproduction
|
4075de39f9c61f1359668a413f6a5d98903fcf97
|
[
"Apache-2.0"
] | 923 |
2020-01-11T06:36:53.000Z
|
2022-03-31T00:26:57.000Z
|
Plug-and-play module/SematicEmbbedBlock.py
|
riciche/SimpleCVReproduction
|
4075de39f9c61f1359668a413f6a5d98903fcf97
|
[
"Apache-2.0"
] | 25 |
2020-02-27T08:35:46.000Z
|
2022-01-25T08:54:19.000Z
|
Plug-and-play module/SematicEmbbedBlock.py
|
riciche/SimpleCVReproduction
|
4075de39f9c61f1359668a413f6a5d98903fcf97
|
[
"Apache-2.0"
] | 262 |
2020-01-02T02:19:40.000Z
|
2022-03-23T04:56:16.000Z
|
import torch.nn as nn
"""
https://zhuanlan.zhihu.com/p/76378871
arxiv: 1804.03821
ExFuse
"""
class SematicEmbbedBlock(nn.Module):
def __init__(self, high_in_plane, low_in_plane, out_plane):
super(SematicEmbbedBlock, self).__init__()
self.conv3x3 = nn.Conv2d(high_in_plane, out_plane, 3, 1, 1)
self.upsample = nn.UpsamplingBilinear2d(scale_factor=2)
self.conv1x1 = nn.Conv2d(low_in_plane, out_plane, 1)
def forward(self, high_x, low_x):
high_x = self.upsample(self.conv3x3(high_x))
low_x = self.conv1x1(low_x)
return high_x * low_x
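# Editor's note: a minimal shape-check sketch for the block above. The channel
# counts and spatial sizes are invented; the low-level feature map is assumed
# to be exactly twice the spatial size of the high-level map, matching the
# fixed scale_factor=2 upsampling.
def _demo_sematic_embbed_block():
    import torch
    block = SematicEmbbedBlock(high_in_plane=256, low_in_plane=64, out_plane=64)
    high = torch.randn(1, 256, 16, 16)  # deep, semantically rich features
    low = torch.randn(1, 64, 32, 32)    # shallow, high-resolution features
    out = block(high, low)
    return out.shape                    # torch.Size([1, 64, 32, 32])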
| 29.9 | 67 | 0.688963 | 503 | 0.841137 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.117057 |
b5d543bdac2737ffc8b7efa718d7cd3c1a92a7cd
| 1,539 |
py
|
Python
|
utilities.py
|
Fredrik-Oberg/volkswagencarnet
|
877123f4053c66d11d1f99abfc1dc4bbc74effde
|
[
"MIT"
] | null | null | null |
utilities.py
|
Fredrik-Oberg/volkswagencarnet
|
877123f4053c66d11d1f99abfc1dc4bbc74effde
|
[
"MIT"
] | null | null | null |
utilities.py
|
Fredrik-Oberg/volkswagencarnet
|
877123f4053c66d11d1f99abfc1dc4bbc74effde
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime
from base64 import b64encode
from string import ascii_letters as letters, digits
from sys import argv
from os import environ as env
from os.path import join, dirname, expanduser
from itertools import product
import json
import logging
import re
_LOGGER = logging.getLogger(__name__)
def find_path(src, path):
"""Simple navigation of a hierarchical dict structure using XPATH-like syntax.
>>> find_path(dict(a=1), 'a')
1
>>> find_path(dict(a=1), '')
{'a': 1}
>>> find_path(dict(a=None), 'a')
>>> find_path(dict(a=1), 'b')
Traceback (most recent call last):
...
KeyError: 'b'
>>> find_path(dict(a=dict(b=1)), 'a.b')
1
>>> find_path(dict(a=dict(b=1)), 'a')
{'b': 1}
>>> find_path(dict(a=dict(b=1)), 'a.c')
Traceback (most recent call last):
...
KeyError: 'c'
"""
if not path:
return src
if isinstance(path, str):
path = path.split(".")
return find_path(src[path[0]], path[1:])
def is_valid_path(src, path):
"""
>>> is_valid_path(dict(a=1), 'a')
True
>>> is_valid_path(dict(a=1), '')
True
>>> is_valid_path(dict(a=1), None)
True
>>> is_valid_path(dict(a=1), 'b')
False
"""
try:
find_path(src, path)
return True
except KeyError:
return False
def camel2slug(s):
"""Convert camelCase to camel_case.
>>> camel2slug('fooBar')
'foo_bar'
"""
return re.sub("([A-Z])", "_\\1", s).lower().lstrip("_")
| 19.481013 | 82 | 0.587394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.547109 |
b5d85732ed11a9abee1adac3c37bfb5f5d7fe0c2
| 9,874 |
py
|
Python
|
nslsii/__init__.py
|
ke-zhang-rd/nslsii
|
d3f942cda8eac713ac625dbcf4285e108c04f154
|
[
"BSD-3-Clause"
] | null | null | null |
nslsii/__init__.py
|
ke-zhang-rd/nslsii
|
d3f942cda8eac713ac625dbcf4285e108c04f154
|
[
"BSD-3-Clause"
] | null | null | null |
nslsii/__init__.py
|
ke-zhang-rd/nslsii
|
d3f942cda8eac713ac625dbcf4285e108c04f154
|
[
"BSD-3-Clause"
] | null | null | null |
from IPython import get_ipython
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def import_star(module, ns):
def public(name):
return not name.startswith('_')
ns.update({name: getattr(module, name)
for name in dir(module) if public(name)})
def configure_base(user_ns, broker_name, *,
bec=True, epics_context=False, magics=True, mpl=True,
ophyd_logging=True, pbar=True):
"""
Perform base setup and instantiation of important objects.
This factory function instantiates essential objects to data collection
environments at NSLS-II and adds them to the current namespace. In some
cases (documented below), it will check whether certain variables already
exist in the user name space, and will avoid creating them if so. The
following are added:
* ``RE`` -- a RunEngine
This is created only if an ``RE`` instance does not currently exist in
the namespace.
* ``db`` -- a Broker (from "databroker"), subscribe to ``RE``
* ``bec`` -- a BestEffortCallback, subscribed to ``RE``
* ``peaks`` -- an alias for ``bec.peaks``
* ``sd`` -- a SupplementalData preprocessor, added to ``RE.preprocessors``
    * ``pbar_manager`` -- a ProgressBarManager, set as the ``RE.waiting_hook``
And it performs some low-level configuration:
* creates a context in ophyd's control layer (``ophyd.setup_ophyd()``)
    * turns on interactive plotting (``matplotlib.pyplot.ion()``)
* bridges the RunEngine and Qt event loops
(``bluesky.utils.install_kicker()``)
* logs ERROR-level log message from ophyd to the standard out
Parameters
----------
user_ns: dict
a namespace --- for example, ``get_ipython().user_ns``
broker_name : Union[str, Broker]
Name of databroker configuration or a Broker instance.
bec : boolean, optional
True by default. Set False to skip BestEffortCallback.
epics_context : boolean, optional
True by default. Set False to skip ``setup_ophyd()``.
magics : boolean, optional
True by default. Set False to skip registration of custom IPython
magics.
mpl : boolean, optional
True by default. Set False to skip matplotlib ``ion()`` at event-loop
bridging.
ophyd_logging : boolean, optional
True by default. Set False to skip ERROR-level log configuration for
ophyd.
pbar : boolean, optional
True by default. Set false to skip ProgressBarManager.
Returns
-------
names : list
list of names added to the namespace
Examples
--------
Configure IPython for CHX.
>>>> configure_base(get_ipython().user_ns, 'chx');
"""
ns = {} # We will update user_ns with this at the end.
# Set up a RunEngine and use metadata backed by a sqlite file.
from bluesky import RunEngine
from bluesky.utils import get_history
# if RunEngine already defined grab it
# useful when users make their own custom RunEngine
if 'RE' in user_ns:
RE = user_ns['RE']
else:
RE = RunEngine(get_history())
ns['RE'] = RE
# Set up SupplementalData.
# (This is a no-op until devices are added to it,
# so there is no need to provide a 'skip_sd' switch.)
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)
ns['sd'] = sd
if isinstance(broker_name, str):
# Set up a Broker.
from databroker import Broker
db = Broker.named(broker_name)
ns['db'] = db
else:
db = broker_name
RE.subscribe(db.insert)
if pbar:
# Add a progress bar.
from bluesky.utils import ProgressBarManager
pbar_manager = ProgressBarManager()
RE.waiting_hook = pbar_manager
ns['pbar_manager'] = pbar_manager
if magics:
# Register bluesky IPython magics.
from bluesky.magics import BlueskyMagics
get_ipython().register_magics(BlueskyMagics)
if bec:
# Set up the BestEffortCallback.
from bluesky.callbacks.best_effort import BestEffortCallback
_bec = BestEffortCallback()
RE.subscribe(_bec)
ns['bec'] = _bec
ns['peaks'] = _bec.peaks # just as alias for less typing
if mpl:
# Import matplotlib and put it in interactive mode.
import matplotlib.pyplot as plt
ns['plt'] = plt
plt.ion()
# Make plots update live while scans run.
from bluesky.utils import install_kicker
install_kicker()
if epics_context:
# Create a context in the underlying EPICS client.
from ophyd import setup_ophyd
setup_ophyd()
if not ophyd_logging:
# Turn on error-level logging, particularly useful for knowing when
# pyepics callbacks fail.
import logging
import ophyd.ophydobj
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
ophyd.ophydobj.logger.addHandler(ch)
# convenience imports
# some of the * imports are for 'back-compatibility' of a sort -- we have
# taught BL staff to expect LiveTable and LivePlot etc. to be in their
# namespace
import numpy as np
ns['np'] = np
import bluesky.callbacks
ns['bc'] = bluesky.callbacks
import_star(bluesky.callbacks, ns)
import bluesky.plans
ns['bp'] = bluesky.plans
import_star(bluesky.plans, ns)
import bluesky.plan_stubs
ns['bps'] = bluesky.plan_stubs
import_star(bluesky.plan_stubs, ns)
    # special-case the commonly-used mv / mvr and their aliases mov / movr
ns['mv'] = bluesky.plan_stubs.mv
ns['mvr'] = bluesky.plan_stubs.mvr
ns['mov'] = bluesky.plan_stubs.mov
ns['movr'] = bluesky.plan_stubs.movr
import bluesky.preprocessors
ns['bpp'] = bluesky.preprocessors
import bluesky.callbacks.broker
import_star(bluesky.callbacks.broker, ns)
import bluesky.simulators
import_star(bluesky.simulators, ns)
user_ns.update(ns)
return list(ns)
def configure_olog(user_ns, *, callback=None, subscribe=True):
"""
Setup a callback that publishes some metadata from the RunEngine to Olog.
Also, add the public contents of pyOlog.ophyd_tools to the namespace.
This is expected to be run after :func:`configure_base`. It expects to find
an instance of RunEngine named ``RE`` in the user namespace. Additionally,
if the user namespace contains the name ``logbook``, that is expected to be
    an instance of ``pyOlog.SimpleOlogClient``.
Parameters
----------
user_ns: dict
a namespace --- for example, ``get_ipython().user_ns``
callback : callable, optional
a hook for customizing the logbook_cb_factory; if None a default is
used
subscribe : boolean, optional
True by default. Set to False to skip the subscription. (You still get
pyOlog.ophyd_tools.)
Returns
-------
names : list
list of names added to the namespace
Examples
--------
Configure the Olog.
    >>> configure_olog(get_ipython().user_ns);
"""
# Conceptually our task is simple: add a subscription to the RunEngine that
# publishes to the Olog using the Python wrapper of its REST API, pyOlog.
    # In practice this is messy because we have to deal with the many-layered API
# of pyOlog and, more importantly, ensure that slowness or errors from the
# Olog do not affect the run. Historically the Olog deployment has not been
# reliable, so it is important to be robust against these issues. Of
# course, by ignoring Olog errors, we leave gaps in the log, which is not
# great, but since all data is saved to a databroker anyway, we can always
# re-generate them later.
ns = {} # We will update user_ns with this at the end.
from bluesky.callbacks.olog import logbook_cb_factory
from functools import partial
from pyOlog import SimpleOlogClient
import queue
import threading
from warnings import warn
# This is for pyOlog.ophyd_tools.get_logbook, which simply looks for
# a variable called 'logbook' in the global IPython namespace.
if 'logbook' in user_ns:
simple_olog_client = user_ns['logbook']
else:
simple_olog_client = SimpleOlogClient()
ns['logbook'] = simple_olog_client
if subscribe:
if callback is None:
# list of logbook names to publish to
LOGBOOKS = ('Data Acquisition',)
generic_logbook_func = simple_olog_client.log
configured_logbook_func = partial(generic_logbook_func,
logbooks=LOGBOOKS)
callback = logbook_cb_factory(configured_logbook_func)
def submit_to_olog(queue, cb):
while True:
name, doc = queue.get() # waits until document is available
try:
cb(name, doc)
except Exception as exc:
                    warn('This olog is giving errors. This will not be logged. '
                         'Error: ' + str(exc))
olog_queue = queue.Queue(maxsize=100)
olog_thread = threading.Thread(target=submit_to_olog,
args=(olog_queue, callback),
daemon=True)
olog_thread.start()
def send_to_olog_queue(name, doc):
try:
olog_queue.put((name, doc), block=False)
except queue.Full:
warn('The olog queue is full. This will not be logged.')
RE = user_ns['RE']
RE.subscribe(send_to_olog_queue, 'start')
import pyOlog.ophyd_tools
import_star(pyOlog.ophyd_tools, ns)
user_ns.update(ns)
return list(ns)
| 34.404181 | 79 | 0.645331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,370 | 0.543853 |
b5d8aec11bfc5cc12bac4a3e909d08cecced6658 | 6,260 | py | Python | utils/depot.py | Nikronic/Optimized-MDVRP | 92587bf4c110c7e6597cc3120dd0556a6e170ce2 | ["MIT"] | 16 | 2019-09-08T13:04:10.000Z | 2022-03-04T06:52:34.000Z | utils/depot.py | zhangruijuan/Optimized-MDVRP | 92587bf4c110c7e6597cc3120dd0556a6e170ce2 | ["MIT"] | 6 | 2019-09-19T20:38:19.000Z | 2019-10-14T17:35:54.000Z | utils/depot.py | Nikronic/Optimized-MDVRP | 92587bf4c110c7e6597cc3120dd0556a6e170ce2 | ["MIT"] | 4 | 2021-01-15T11:45:16.000Z | 2021-12-18T14:14:54.000Z |
import numpy as np
from typing import List
from copy import deepcopy
from utils.customer import Customer
class Depot:
"""
Depot class represents a list of nodes assigned to this depot which we call a route.
This class is going to be filled by `Customers` class.
"""
def __init__(self, id, x, y, capacity, depot_customers: List[Customer] = None):
"""
:param id: ID assigned to node for tracking
:param x: X coordinate of depot
:param y: Y coordinate of depot
:param capacity: The maximum capacity of the Depot
        (in this project, it is filled by the 'weight' of `Customer`s; in other words, it indicates the vehicle weight limit)
        :param depot_customers: A list of `Customer`s
        :return: None
"""
if depot_customers is None:
depot_customers = []
self.id = id
self.x = x
self.y = y
self.capacity = capacity
self.depot_customers = depot_customers
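        # `routes_ending_indices` stores the positions of "null" customers, which act
        # as separators marking where one route ends and the next one begins.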
self.routes_ending_indices = []
        for i, c in enumerate(depot_customers):
            if c.null:
                self.routes_ending_indices.append(i)
        self.size = len(self.depot_customers)
def route_ending_index(self) -> List[int]:
"""
        Sorts and returns the list of indices of the "null" customers that mark the end
        of each route in the `Depot`.
:return: A sorted list of ints indices
"""
return sorted(self.routes_ending_indices)
def used_capacity(self) -> float:
"""
        Returns a float number indicating how much of the `Depot`'s capacity has been used.
:return: A float number
"""
return sum([customer.cost for customer in self])
def get_all(self) -> List[Customer]:
"""
        Returns all `Customer`s as an independent list using deep copy
:return: A list
"""
return deepcopy(self.depot_customers)
def add(self, customer: Customer):
"""
Adds a `Customer` to the `Depot`
:param customer: Customer class instance
:return: None
"""
        if customer.null:
            self.routes_ending_indices.insert(len(self.routes_ending_indices) - 1, self.len())
self.depot_customers.append(customer)
def clear(self):
"""
        Removes all `Customer`s from the `Depot`
:return: None
"""
self.routes_ending_indices = []
self.depot_customers.clear()
def len(self) -> int:
"""
Return the number of `Customer`s in the `Depot`
:return: int depot size
"""
        return len(self.depot_customers)
def contains(self, customer: Customer) -> bool:
"""
        Looks for the `Customer` in the `Depot` and returns whether it exists
:param customer: A 'Customer` class instance
:return: Bool true or false
"""
        return customer in self.depot_customers
def copy(self) -> List[Customer]:
"""
        A shallow copy of the `Customer`s in the `Depot` using the built-in `copy` method
:return: a list
"""
return self.depot_customers.copy()
def index(self, customer: Customer) -> int:
"""
Returns the index of the `Customer` in the `Depot`
:param customer: A `Customer` class instance
        :return: An int number as the index
"""
return self.depot_customers.index(customer)
def insert(self, index: int, customer: Customer):
"""
        Inserts a new `Customer` at a specific `index`
        :param index: An int position at which to insert
        :param customer: A `Customer` class instance
:return: None
"""
        if customer.null:
            self.routes_ending_indices.insert(len(self.routes_ending_indices) - 1, index)
return self.depot_customers.insert(index, customer)
def remove(self, customer: Customer) -> bool:
"""
Removes a `Customer` from the `Depot`
:param customer: a `Customer` class instance
:return: bool, if `Customer` does not exist returns False, else True
"""
if self.contains(customer):
            if customer.null:
self.routes_ending_indices.remove(self.index(customer))
self.depot_customers.remove(customer)
return True
return False
def remove_at(self, index: int) -> bool:
"""
        Removes the `Customer` at the given `index` from the `Depot`
:param index: an int number
:return: bool, if `Customer` does not exist returns False, else True
"""
        if index < self.len():
            if self.depot_customers[index].null:
self.routes_ending_indices.remove(index)
self.depot_customers.remove(self.depot_customers[index])
return True
return False
def __getitem__(self, index: int):
"""
        Makes the class itself subscriptable
:param index: The index to List
:return: A `Customer` class from List of `Customer`s.
"""
return self.depot_customers[index]
def describe(self, print_members=False, verbosity=False):
"""
Print the specifics of the `Depot`
        :param print_members: Whether or not to print the specifics of the `Customer`s in the `Depot`
:param verbosity: if True, it prints all information of all members, otherwise, only IDs
:return:
"""
print('-=-=-=-=-=-=-=-=-=-=-=- Depot: STARTED -=-=-=-=-=-=-=-=-=-=-=-')
if verbosity:
print('ID:{}, coordinate=[{}, {}], capacity={}, size={}'.format(
self.id, self.x, self.y, self.capacity, self.size))
if print_members:
print('Members: ')
for c in self.depot_customers:
c.describe()
else:
print('ID={}, capacity={}/{}'.format(self.id, self.used_capacity(), self.capacity), sep='')
print('Members IDs=')
print([c.id for c in self])
print('-=-=-=-=-=-=-=-=-=-=-=- Depot: Finished -=-=-=-=-=-=-=-=-=-=-=-')
| 35.367232 | 118 | 0.585304 | 6,151 | 0.982588 | 0 | 0 | 0 | 0 | 0 | 0 | 3,044 | 0.486262 |
b5d915f6cc267b773bbe24b2332fae333a3982c5 | 714 | py | Python | fake.py | Wsky51/dfs-node-restapi | bab7605c609d4b53cd11686a576b74c1ae2871b7 | ["Apache-2.0"] | null | null | null | fake.py | Wsky51/dfs-node-restapi | bab7605c609d4b53cd11686a576b74c1ae2871b7 | ["Apache-2.0"] | null | null | null | fake.py | Wsky51/dfs-node-restapi | bab7605c609d4b53cd11686a576b74c1ae2871b7 | ["Apache-2.0"] | null | null | null |
"""create fake data to the db file"""
from config import data_nodes, get_db
from type import DataNodeStatus, DataNode
from datetime import timedelta
from config import get_second_datetime
def create_fake_data_status(data_node: DataNode):
now = get_second_datetime()
db = get_db()
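    # Generate 100 status samples for this node, spaced one minute apart;
    # every tenth sample is marked dead (see the comment below).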
for i in range(100):
status = DataNodeStatus(
            # mark the node as dead on every tenth sample
dead=i % 10 == 0,
capacity=1000,
used=100 + (i % 7),
datetime=now + timedelta(minutes=i)
)
db.save(data_node.node_id, status)
def create_fake_data():
for data_node in data_nodes:
create_fake_data_status(data_node)
if __name__ == '__main__':
create_fake_data()
| 24.62069 | 49 | 0.648459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.096154 |
b5d9899c07fca487f770f0c61e19c1fd8ac3a831 | 78 | py | Python | config.py | NormanLo4319/Project-1 | a7b6bf6adc681a94cc23be5934ddbed1cf7ab6a6 | ["MIT"] | 1 | 2020-07-19T07:10:01.000Z | 2020-07-19T07:10:01.000Z | config.py | NormanLo4319/Food-Enviroment-Project | a7b6bf6adc681a94cc23be5934ddbed1cf7ab6a6 | ["MIT"] | null | null | null | config.py | NormanLo4319/Food-Enviroment-Project | a7b6bf6adc681a94cc23be5934ddbed1cf7ab6a6 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
gkey="Enter Your Key Here"
| 8.666667 | 26 | 0.602564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.833333 |
b5da3aa622fc6ae31ee50b900da3081886393cc7 | 215 | py | Python | category_tree/helper.py | bharathramh92/shop | 0c5800b2d36fbe1bfffaf555c3dc741d020aa5d7 | ["MIT"] | 1 | 2016-05-27T22:13:37.000Z | 2016-05-27T22:13:37.000Z | category_tree/helper.py | bharathramh92/shop | 0c5800b2d36fbe1bfffaf555c3dc741d020aa5d7 | ["MIT"] | null | null | null | category_tree/helper.py | bharathramh92/shop | 0c5800b2d36fbe1bfffaf555c3dc741d020aa5d7 | ["MIT"] | null | null | null |
from category_tree.categories import data
def get_store_name_from_child(store_id):
while data[store_id]['parent_category'] is not None:
store_id = data[store_id]['parent_category']
return store_id
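# For reference, `data` (imported above) is assumed to map a category id to a dict
# holding at least a 'parent_category' key that is either None (a root store) or the
# id of its parent. With hypothetical ids, e.g.
#   data = {'electronics': {'parent_category': None},
#           'laptops': {'parent_category': 'electronics'}}
# get_store_name_from_child('laptops') would walk up the tree and return 'electronics'.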
| 26.875 | 56 | 0.75814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.15814 |
b5db0b0b72cf05ff56cc67988018bcfa4797221d | 371 | py | Python | tests/pull_keys.py | patleeman/geckoboard_push | 52c05db22b3c630d326a9650551720f583f0168f | ["MIT"] | null | null | null | tests/pull_keys.py | patleeman/geckoboard_push | 52c05db22b3c630d326a9650551720f583f0168f | ["MIT"] | null | null | null | tests/pull_keys.py | patleeman/geckoboard_push | 52c05db22b3c630d326a9650551720f583f0168f | ["MIT"] | null | null | null |
'''
Module to pull keys from test geckoboard widgets.
'''
import os
import json
def get_keys():
settings_folder = os.path.dirname(__file__)
settings_file = os.path.join(settings_folder,'gecko_settings.json')
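    # gecko_settings.json is expected to sit next to this module; its keys (e.g. the
    # Geckoboard API and widget keys) depend on the dashboard setup and are not assumed here.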
with open(settings_file, 'r') as file:
json_data = json.load(file)
return json_data
if __name__ == '__main__':
print(get_keys())
| 23.1875 | 71 | 0.692722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.245283 |
b5db74f8420d00fdc906f19f599f41aad18c69af | 2,596 | py | Python | pajbot/web/common/menu.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | ["MIT"] | 128 | 2015-12-28T01:02:30.000Z | 2019-05-24T21:20:50.000Z | pajbot/web/common/menu.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | ["MIT"] | 277 | 2015-05-03T18:48:57.000Z | 2019-05-23T17:41:28.000Z | pajbot/web/common/menu.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | ["MIT"] | 96 | 2015-08-07T18:49:50.000Z | 2019-05-20T19:49:27.000Z |
from __future__ import annotations
from typing import Any, Dict, List, Union
import logging
from pajbot.web.utils import get_cached_enabled_modules
log = logging.getLogger(__name__)
class MenuItem:
def __init__(
self,
href: Union[str, List[MenuItem]],
menu_id: str,
caption: str,
enabled: bool = True,
level: int = 100,
) -> None:
self.href = href
self.id = menu_id
self.caption = caption
self.enabled = enabled
self.level = level
self.type = "single"
if isinstance(self.href, list):
self.type = "multi"
def init(app):
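    # `app` is assumed to be a Flask application here; registering `menu` as a
    # context processor makes these navigation entries available in every rendered template.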
@app.context_processor
def menu() -> Dict[str, Any]:
enabled_modules = get_cached_enabled_modules()
# Menu items that are shown for normal users
menu_items: List[MenuItem] = [
MenuItem("/", "home", "Home"),
MenuItem("/commands", "commands", "Commands"),
MenuItem("/points", "points", "Points", "chatters_refresh" in enabled_modules),
MenuItem("/stats", "stats", "Stats"),
MenuItem("/decks", "decks", "Decks", "deck" in enabled_modules),
MenuItem("/playsounds", "user_playsounds", "Playsounds", "playsound" in enabled_modules),
]
# Menu items that are shown to admin when in an /admin page
admin_menu_items: List[MenuItem] = [
MenuItem("/", "home", "Home"),
MenuItem("/admin", "admin_home", "Admin Home"),
MenuItem(
[
MenuItem("/admin/banphrases", "admin_banphrases", "Banphrases"),
MenuItem("/admin/links/blacklist", "admin_links_blacklist", "Blacklisted links"),
MenuItem("/admin/links/whitelist", "admin_links_whitelist", "Whitelisted links"),
],
"filters",
"Filters",
),
MenuItem("/admin/commands", "admin_commands", "Commands"),
MenuItem("/admin/timers", "admin_timers", "Timers"),
MenuItem("/admin/moderators", "admin_moderators", "Moderators"),
MenuItem("/admin/modules", "admin_modules", "Modules"),
MenuItem("/admin/playsounds", "admin_playsounds", "Playsounds", "playsound" in enabled_modules),
MenuItem("/admin/streamer", "admin_streamer", "Streamer Info"),
]
data = {
"enabled_modules": enabled_modules,
"nav_bar_header": menu_items,
"nav_bar_admin_header": admin_menu_items,
}
return data
| 35.081081 | 108 | 0.571649 | 446 | 0.171803 | 0 | 0 | 1,939 | 0.746918 | 0 | 0 | 888 | 0.342065 |
b5db8ac1529ed13c3cad056d88e711f36bbfbbe1 | 611 | py | Python | Python/463.py | FlyAndNotDown/LeetCode | 889819ff7f64819e966fc6f9dd80110cf2bf6d3c | ["MIT"] | 4 | 2018-06-18T05:39:25.000Z | 2022-01-04T07:35:52.000Z | Python/463.py | FlyAndNotDown/LeetCode | 889819ff7f64819e966fc6f9dd80110cf2bf6d3c | ["MIT"] | 20 | 2019-11-30T03:42:40.000Z | 2020-05-17T03:25:43.000Z | Python/463.py | FlyAndNotDown/LeetCode | 889819ff7f64819e966fc6f9dd80110cf2bf6d3c | ["MIT"] | 2 | 2020-02-08T14:10:42.000Z | 2021-09-23T13:51:36.000Z |
"""
@no 463
@name Island Perimeter
"""
class Solution:
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
ans = 0
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == 1:
if i - 1 < 0 or grid[i - 1][j] == 0: ans += 1
if i + 1 > len(grid) - 1 or grid[i + 1][j] == 0: ans += 1
if j - 1 < 0 or grid[i][j - 1] == 0: ans += 1
if j + 1 > len(grid[i]) - 1 or grid[i][j + 1] == 0: ans += 1
return ans
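# Illustrative sanity check, not part of the original solution; the grid and expected
# perimeter follow the well-known example for this problem.
if __name__ == '__main__':
    example_grid = [
        [0, 1, 0, 0],
        [1, 1, 1, 0],
        [0, 1, 0, 0],
        [1, 1, 0, 0],
    ]
    print(Solution().islandPerimeter(example_grid))  # expected: 16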
| 30.55 | 80 | 0.392799 | 571 | 0.934534 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.178396 |