blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
283
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
41
| license_type
stringclasses 2
values | repo_name
stringlengths 7
96
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 58
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 12.7k
662M
⌀ | star_events_count
int64 0
35.5k
| fork_events_count
int64 0
20.6k
| gha_license_id
stringclasses 11
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 43
values | src_encoding
stringclasses 9
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
5.88M
| extension
stringclasses 30
values | content
stringlengths 7
5.88M
| authors
sequencelengths 1
1
| author
stringlengths 0
73
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35c79bd5bff7a7463816fcbb46640eb7cb4d5b5f | 272c1920e168c41bc48d10b8d82cd22aa6d82fc7 | /tools/core_tools_WGCI.py | bf6b60658c9e52f1bcf199a9f8371989c2bbe863 | [
"MIT"
] | permissive | passion4energy/Kalman-Filter | e3facd79140d15b3fbeee09b37c4a2c985ac480c | 1f4b567fdf9b6465d27ffcaba8da8404f3f80c62 | refs/heads/master | 2022-03-21T09:48:53.432907 | 2019-12-10T07:27:38 | 2019-12-10T07:27:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,980 | py | """重要工具包 计算 WGCI
Copyright:
----------
Author: AutuanLiu
Date: 2019/2/22
"""
import numpy as np
import scipy.io as sio
from kalman_estimation import (Kalman4ARX, Kalman4FROLS, Selector, get_terms_matrix, get_txt_data, make_func4K4FROLS, make_linear_func, plot_term_ERR,
save_3Darray, torch4FROLS, save_2Darray)
def get_json_data(fname):
    """Load and return JSON data from a file.

    Args:
        fname (str): path (and name) of the file holding the JSON data.

    Returns:
        The deserialized JSON object (typically a dict or list).
    """
    try:
        import ujson as json_mod  # fast path when ujson is installed
    except ImportError:
        import json as json_mod  # stdlib fallback; load() is API-compatible
    # Context manager closes the handle (the original left the file open).
    with open(fname, 'r') as fp:
        return json_mod.load(fp)
def kalman4ARX_pipeline(data_type, configs, n_trial):
    """WGCI pipeline based on the Kalman4ARX filter.

    For each trial: fit the full ARX model, then refit one reduced model per
    channel (5 channels) and accumulate WGCI = log(var(sub error) / var(full
    error)).  Mean/variance over all trials are thresholded at 0.01 and saved
    with ``save_3Darray``.

    types = ['linear', 'longlag_linear']
    Args:
        data_type: dataset type key into ``configs``
        configs (dict): configuration dictionary (data_root, term_path,
            max_lag, uc, threshold, est_fname)
        n_trial: number of trials to process
    """
    config = configs[data_type]
    WGCI100 = []  # one (channels x channels) WGCI matrix per trial
    for trial in range(n_trial):
        # Full (whole) model for this trial.
        term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
        # get data
        normalized_signals = term_selector.make_selection()[0]
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        # Build the Kalman filter for the full ARX model.
        kf = Kalman4ARX(normalized_signals, config['max_lag'], uc=config['uc'])
        # Estimate coefficients (thresholded).
        y_coef, A_coef = kf.estimate_coef(config['threshold'])
        # Per-channel residual variance of the full model.
        whole_y_error = np.var(kf.y_error, 0)
        # Reduced (sub-)models: one per excluded channel.
        terms_mat = sio.loadmat(f"{config['data_root']}{data_type}{config['term_path']}_WGCI{trial+1}.mat")
        sub1 = []
        for ch in range(5):
            data_set = {
                'normalized_signals': terms_mat['normalized_signals'],
                'Hv': terms_mat['Hv'],
                'Kalman_H': terms_mat['Kalman_H'][0, ch],
                'terms_chosen': terms_mat['terms_chosen'][0, ch]
            }
            term_selector = Selector(data_set)
            # get data
            normalized_signals = term_selector.make_selection()[0]
            # Build the Kalman filter for the reduced model.
            kf = Kalman4ARX(normalized_signals, config['max_lag'], uc=config['uc'])
            # Estimate coefficients (thresholded).
            y_coef, A_coef = kf.estimate_coef(config['threshold'])
            # Reduced-model residual variance; WGCI is the log variance ratio.
            sub_y_error = np.var(kf.y_error, 0)
            sub1.append(np.log(sub_y_error / whole_y_error))
        WGCI100.append(np.asarray(sub1).T)
    mean_WGCI = np.mean(WGCI100, 0)
    var_WGCI = np.var(WGCI100, 0)
    print(f"mean_WGCI = {mean_WGCI}, var_WGCI = {var_WGCI}")
    fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}WGCI100.txt"
    # Values below 0.01 are zeroed out before saving.
    save_3Darray(fname1, np.array([mean_WGCI * (mean_WGCI > 0.01), var_WGCI * (var_WGCI > 0.01)]))
def kalman4FROLS_pipeline(data_type, configs, n_trial):
    """WGCI pipeline based on the Kalman4FROLS filter.

    For each trial: fit the full FROLS model, then refit one reduced model per
    channel (5 channels) and accumulate WGCI = log(var(sub error) / var(full
    error)).  Mean/variance over all trials are thresholded at 0.01 and saved.

    Args:
        data_type: dataset type key into ``configs``
        configs (dict): configuration dictionary (data_root, term_path, uc,
            est_fname)
        n_trial: number of trials to process
    """
    config = configs[data_type]
    WGCI100 = []  # one (channels x channels) WGCI matrix per trial
    for trial in range(n_trial):
        term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
        # get data
        normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        # Build the Kalman filter for the full model.
        kf = Kalman4FROLS(normalized_signals, Kalman_H=Kalman_H, uc=config['uc'])
        y_coef = kf.estimate_coef()
        print(y_coef)
        # Per-channel residual variance of the full model.
        whole_y_error = np.var(kf.y_error, 0)
        # Reduced (sub-)models: one per excluded channel.
        terms_mat = sio.loadmat(f"{config['data_root']}{data_type}{config['term_path']}_WGCI{trial+1}.mat")
        sub1 = []
        for ch in range(5):
            data_set = {
                'normalized_signals': terms_mat['normalized_signals'],
                'Hv': terms_mat['Hv'],
                'Kalman_H': terms_mat['Kalman_H'][0, ch],
                'terms_chosen': terms_mat['terms_chosen'][0, ch]
            }
            term_selector = Selector(data_set)
            # get data
            normalized_signals = term_selector.make_selection()[0]
            # NOTE(review): this reuses the FULL model's Kalman_H even though a
            # per-channel Kalman_H was loaded into data_set above and is
            # discarded by taking only make_selection()[0]. Looks like the
            # reduced model should use the per-channel Kalman_H — confirm
            # against Selector.make_selection semantics before changing.
            kf = Kalman4FROLS(normalized_signals, Kalman_H=Kalman_H, uc=config['uc'])
            # Estimate coefficients.
            y_coef = kf.estimate_coef()
            # Reduced-model residual variance; WGCI is the log variance ratio.
            sub_y_error = np.var(kf.y_error, 0)
            sub1.append(np.log(sub_y_error / whole_y_error))
        WGCI100.append(np.asarray(sub1).T)
    mean_WGCI = np.mean(WGCI100, 0)
    var_WGCI = np.var(WGCI100, 0)
    print(f"mean_WGCI = {mean_WGCI}, var_WGCI = {var_WGCI}")
    fname1 = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}WGCI100.txt"
    # Values below 0.01 are zeroed out before saving.
    save_3Darray(fname1, np.array([mean_WGCI * (mean_WGCI > 0.01), var_WGCI * (var_WGCI > 0.01)]))
def torch4FROLS_pipeline(data_type, configs, n_trial):
    """WGCI pipeline based on the torch4FROLS estimator.

    For each trial: fit the full model, then refit one reduced model per
    channel (5 channels) and accumulate WGCI = log(var(sub error) / var(full
    error)).  Mean/variance over all trials are thresholded at 0.01 and saved.

    Args:
        data_type: dataset type key into ``configs``
        configs (dict): configuration dictionary (data_root, term_path,
            n_epoch, est_fname)
        n_trial: number of trials to process
    """
    config = configs[data_type]
    WGCI100 = []  # one (channels x channels) WGCI matrix per trial
    for trial in range(n_trial):
        term_selector = Selector(f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat")
        # get data
        normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        kf = torch4FROLS(normalized_signals, Kalman_H=Kalman_H, n_epoch=config['n_epoch'])
        y_coef = kf.estimate_coef()
        # Per-channel residual variance of the full model.
        whole_y_error = np.var(kf.y_error, 0)
        # Reduced (sub-)models: one per excluded channel.
        terms_mat = sio.loadmat(f"{config['data_root']}{data_type}{config['term_path']}_WGCI{trial+1}.mat")
        sub1 = []
        for ch in range(5):
            data_set = {
                'normalized_signals': terms_mat['normalized_signals'],
                'Hv': terms_mat['Hv'],
                'Kalman_H': terms_mat['Kalman_H'][0, ch],
                'terms_chosen': terms_mat['terms_chosen'][0, ch]
            }
            term_selector = Selector(data_set)
            # get data
            normalized_signals = term_selector.make_selection()[0]
            # NOTE(review): like kalman4FROLS_pipeline, this reuses the FULL
            # model's Kalman_H while the per-channel Kalman_H loaded above is
            # discarded — confirm against Selector.make_selection semantics.
            kf = torch4FROLS(normalized_signals, Kalman_H=Kalman_H, n_epoch=config['n_epoch'])
            # Estimate coefficients.
            y_coef = kf.estimate_coef()
            # Reduced-model residual variance; WGCI is the log variance ratio.
            sub_y_error = np.var(kf.y_error, 0)
            sub1.append(np.log(sub_y_error / whole_y_error))
        WGCI100.append(np.asarray(sub1).T)
    mean_WGCI = np.mean(WGCI100, 0)
    var_WGCI = np.var(WGCI100, 0)
    print(f"mean_WGCI = {mean_WGCI}, var_WGCI = {var_WGCI}")
    # Fixed: the original wrote "kalman4FROLS100" here (copy-paste), which made
    # this pipeline overwrite kalman4FROLS_pipeline's output file.
    fname1 = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}WGCI100.txt"
    # Values below 0.01 are zeroed out before saving.
    save_3Darray(fname1, np.array([mean_WGCI * (mean_WGCI > 0.01), var_WGCI * (var_WGCI > 0.01)]))
| [
"[email protected]"
] | |
039e9d24e8edc1ee061d4c9a63df92916244d2fc | 4a8784c47e5382a33e90792088fa679c6f70ab92 | /P1.py | f78df7fe30f2a35f1ecf73e2adbc1e6cf7dabbf2 | [] | no_license | bobemv/ProjectEulerProblems | 1838ecad4533896b424f611dd136db6f4c18fd90 | 5a3e06a47cf835610a74ad433e7e5c662ed6674a | refs/heads/master | 2021-01-19T04:07:10.268057 | 2016-07-30T19:57:27 | 2016-07-30T19:57:27 | 64,561,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py |
def sumaDivisores(n, numeros = 999):
    """Sum of all multiples of ``n`` in the interval [1, numeros].

    Uses the arithmetic-series closed form n * k * (k + 1) / 2 where
    k = numeros // n is the count of multiples.  (The original wrote the
    explanation as three separate string statements; only the first was an
    actual docstring.)

    Args:
        n: the divisor whose multiples are summed.
        numeros: upper bound of the interval (inclusive), default 999.

    Returns:
        int: the sum of the multiples.
    """
    numDiv = numeros // n
    # Floor division keeps the result an int on both Python 2 and 3; the
    # product k*(k+1) is always even, so // is exact here.
    return n * (numDiv * (numDiv + 1)) // 2
__author__ = 'Roberto Moreno'

# Brute-force solution: scan every number below 1000 and add the multiples
# of 3 or 5.  (The original used Python-2 print statements and bare string
# literals as comments; print(x) is valid on both Python 2 and 3.)
resultado = 0
for i in range(1000):
    # range(1000) runs from 0 to 999 inclusive.
    if ((i % 5) == 0) or ((i % 3) == 0):
        resultado += i
print(resultado)

# Closed-form solution via inclusion-exclusion: multiples of 3 plus multiples
# of 5 minus multiples of 15 (counted twice).
print(sumaDivisores(3) + sumaDivisores(5) - sumaDivisores(15))
| [
"[email protected]"
] | |
882d5cbe8f3e470e18e8bba1a15c94b66f3461e1 | 81463a26ce2e320082d13429cd1cdae68612cdd9 | /tetromino.py | 81e2aef9993093842df07cba794e9713cb1fb42e | [] | no_license | jeonlego012/coding | a09c498bfb32b40d54a393f6c32ee6d17aec5770 | 178a65eb60cd1f9a4041037a168253c2d33813be | refs/heads/master | 2023-04-17T22:36:03.710887 | 2021-05-04T14:57:38 | 2021-05-04T14:57:38 | 352,631,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | def check_range(y, x) :
return y>=0 and y<n and x>=0 and x<m
def calculate(y, x) :
    """Best tetromino sum anchored at board cell (y, x).

    Tries all 19 precomputed shapes from the module-level offset tables
    ``tetrominoY``/``tetrominoX`` (relative moves from the previous cell) and
    returns the maximum sum of covered values in ``paper``.

    NOTE(review): when a move falls outside the board the cell is silently
    skipped (tempY/tempX keep their previous value), so a clipped shape may
    contribute a sum of fewer than 4 cells instead of being discarded —
    presumably harmless for the maximum when values are non-negative; confirm.
    """
    max_sum = 0
    for tetromino in range(19) :
        # Each shape starts at the anchor cell and walks 3 relative moves.
        tempY, tempX = y, x
        tetro_sum = paper[y][x]
        for move in range(3) :
            moveY, moveX = tempY + tetrominoY[tetromino][move], tempX + tetrominoX[tetromino][move]
            if check_range(moveY, moveX) :
                tempY, tempX = moveY, moveX
                tetro_sum += paper[tempY][tempX]
        max_sum = max(max_sum, tetro_sum)
    return max_sum
# Read the board: first line "n m", then n rows of m integers.
n, m = map(int, input().split())
paper = [list(map(int, input().split())) for _ in range(n)]
# Relative move tables for the 19 tetromino orientations (rotations and
# reflections); entry [t][k] is the k-th step of shape t from the previous cell.
tetrominoY = [[0,0,0], [0,1,0], [1,1,0], [1,0,1], [0,0,1],
             [1,1,1], [-1,0,0], [-1,0,0], [0,1,1], [1,1,0],
             [0,1,1], [1,0,0], [1,0,0], [1,0,1], [0,1,0],
             [0,1,0], [1,0,1], [1,1,-1], [0,-1,1]]
tetrominoX = [[1,1,1], [1,0,-1], [0,0,1], [0,1,0], [1,1,-1],
             [0,0,0], [0,1,1], [0,-1,-1], [1,0,0], [0,0,-1],
             [-1,0,0], [0,1,1], [0,-1,-1], [0,-1,0], [-1,0,-1],
             [1,0,1], [0,1,-1], [0,0,-1], [1,0,1]]
# Try every cell as the anchor and report the best achievable sum.
sum_list = []
for i in range(n) :
    for j in range(m) :
        sum_list.append(calculate(i, j))
print(max(sum_list))
"[email protected]"
] | |
37d200a86c845be6d5609336099e569145bc106c | 9ce891fdd998efea19f79dae4ce971f6e2d74528 | /tests/test_apps/test_poller/test_managment_commands/test_poll_dynamics.py | 75ef46f2e0cacc6d23b73546bdbd47d07322fe95 | [
"MIT"
] | permissive | uktrade/legal-basis-api | fba0876e4e2f35223afbf29e5a096e8d38f721c4 | 036f0defc84e485c935d93ecc77c1c2bf60e2477 | refs/heads/master | 2023-08-31T15:17:02.303317 | 2023-08-25T08:41:56 | 2023-08-25T08:41:56 | 227,385,913 | 4 | 1 | MIT | 2023-09-06T18:49:38 | 2019-12-11T14:31:55 | Python | UTF-8 | Python | false | false | 2,535 | py | import io
import pytest
from mixer.backend.django import mixer
from server.apps.main.models import Consent, LegalBasis
from server.apps.poller.management.commands import poll_dynamics
class TestDynamicsCommand:
    """Unit tests for the ``poll_dynamics`` management command helpers
    (``email_consent``, ``_should_update``, ``_should_create``)."""

    pytestmark = pytest.mark.django_db

    def test_email_consent_creates_if_missing(self):
        """Accessing ``email_consent`` creates the consent row on demand."""
        Consent.objects.all().delete()
        out = io.StringIO()
        command = poll_dynamics.Command(stdout=out)
        assert Consent.objects.all().count() == 0
        _ = command.email_consent
        assert Consent.objects.all().count() == 1

    def test_email_consent_noop_if_exists(self):
        """An existing ``email_marketing`` consent is reused, not duplicated."""
        Consent.objects.all().delete()
        out = io.StringIO()
        mixer.blend(Consent, name="email_marketing")
        command = poll_dynamics.Command(stdout=out)
        assert Consent.objects.all().count() == 1
        _ = command.email_consent
        assert Consent.objects.all().count() == 1

    def test_should_update_exists_without_consent(self):
        """A basis stored without any consents must not be updated."""
        mixer.blend(
            LegalBasis, key=None, phone="", key_type="email", email="[email protected]"
        )
        out = io.StringIO()
        command = poll_dynamics.Command(stdout=out)
        assert not command._should_update("[email protected]")

    def test_should_update_exists_with_consent(self):
        """A basis that already carries the email consent should be updated."""
        Consent.objects.all().delete()
        out = io.StringIO()
        command = poll_dynamics.Command(stdout=out)
        c = mixer.blend(Consent, name="email_marketing")
        mixer.blend(
            LegalBasis,
            key=None,
            phone="",
            key_type="email",
            email="[email protected]",
            consents=c,
        )
        assert command._should_update("[email protected]")

    def test_should_update_doesnt_exist(self):
        """An unknown address is reported as updatable by ``_should_update``."""
        out = io.StringIO()
        command = poll_dynamics.Command(stdout=out)
        assert command._should_update("[email protected]")

    def test_should_create_exists(self):
        """An address that already has a consented basis must not be created."""
        Consent.objects.all().delete()
        out = io.StringIO()
        command = poll_dynamics.Command(stdout=out)
        c = mixer.blend(Consent, name="email_marketing")
        mixer.blend(
            LegalBasis,
            key=None,
            phone="",
            key_type="email",
            email="[email protected]",
            consents=c,
        )
        assert not command._should_create("[email protected]")

    def test_should_create_doesnt_exist(self):
        """A brand-new address should be created.

        Fixed: the original asserted ``_should_update`` here (copy-paste from
        the previous test); this test is about ``_should_create``.
        """
        out = io.StringIO()
        command = poll_dynamics.Command(stdout=out)
        assert command._should_create("[email protected]")
"[email protected]"
] | |
ed2d8cceda1c6115ee5238065408039b818e629d | 3b479ffc70ecf55508583913c66e66b3e97fe205 | /mysite/urls.py | 3cd96a7422e975eebc0f097a7ebf1b7efe634ee0 | [] | no_license | divyavutukuri/Text-Utils | f590045f5fbb2dd0096d6cf732c009e74a0149c3 | 6700d58e2b904baa1dccbffa1aef36c95a10c6dc | refs/heads/master | 2023-08-07T22:26:41.474476 | 2020-04-24T12:53:19 | 2020-04-24T12:53:19 | 258,499,241 | 0 | 0 | null | 2021-09-22T18:55:28 | 2020-04-24T11:56:00 | Python | UTF-8 | Python | false | false | 898 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# URL routing table for the project: admin site plus the text-utils views.
urlpatterns = [
    path('admin/', admin.site.urls),          # Django admin interface
    path('',views.index,name='index'),        # landing page / input form
    path('analyze',views.analyze,name='analyze'),  # text analysis endpoint
    path('sites',views.ex1,name='sites')      # extra example view
]
| [
"[email protected]"
] | |
c9b1db4e8471b7360ba7dcf6e6ef8d6a306c89dc | 4e5cb79c8dcc6e78bf8cffc04ef279a2405a0236 | /pyconbalkan/speaker/migrations/0001_initial.py | 1b49b6a1d147dfe0c96c545f04cf0d81862628f3 | [] | no_license | PythonBalkan/pyconbalkan | 907a72abfc6de4ce2d254dbc1d1539a6571911f9 | 833deed488ba3ad2fa80ff144465a5755b5e1f15 | refs/heads/master | 2022-12-12T19:38:13.148694 | 2022-04-22T21:12:24 | 2022-05-02T23:16:30 | 127,753,413 | 32 | 24 | null | 2023-02-06T16:20:00 | 2018-04-02T12:30:08 | Python | UTF-8 | Python | false | false | 1,225 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-26 21:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Speaker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=False)),
('name', models.CharField(max_length=50)),
('job', models.CharField(max_length=100)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SpeakerPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_picture', models.ImageField(upload_to='static/img')),
('speaker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='speaker.Speaker')),
],
),
]
| [
"[email protected]"
] | |
411d1702f625cb2279a9e58d45a57edae9f55cfb | f1ac2187b7a7cd603f0f24c82ff762a9d7d9035b | /cdk/web_stack.py | 394940957968d54338a3069e41627d73552b329e | [] | no_license | Martin605/ITP4104-Cloud-Services-Project | e91d285cf936bda3b6016211820bb50f54c7cff2 | f78e886d20242565394964509302889b573bbbf6 | refs/heads/master | 2020-09-29T08:50:58.355864 | 2020-04-23T01:19:50 | 2020-04-23T01:19:50 | 227,003,923 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 24,563 | py | from aws_cdk import (
aws_ec2 as ec2,
aws_iam as iam,
aws_logs as logs,
aws_cloudformation as cloudformation,
aws_elasticloadbalancingv2 as elasticloadbalancingv2,
aws_ssm as ssm,
core
)
class WebStack(core.Stack):
    """Web tier: two bootstrapped NGINX/uwsgi EC2 instances in private
    subnets, registered behind an HTTP listener on the shared ALB.

    Cross-stack imports (Fn.import_value): VPC, PrivateSubnet1/2,
    WebSecurityGroupOutput, WebServerRoleOutput,
    WebServerInstanceProfileOutput, LoadBalancerArn.

    Fixes over the original template:
      * awslogs.conf wrote a literal ``%s`` as the [yum] log group name
        (leftover %-format placeholder) — now uses the real log group.
      * WebInstance2's cfn-auto-reloader hook watched
        ``Resources.WebInstance1`` metadata — each instance now watches its
        own logical id.
      * The two near-identical instance definitions are built by shared
        private helpers instead of duplicated inline.
    """

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Latest Amazon Linux AMI resolved at deploy time via the public SSM
        # parameter, so no AMI id is hard-coded in the template.
        latest_ami_id = core.CfnParameter(
            self, "LatestAmiId",
            type="AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>",
            default="/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2"
        )

        # Per-account S3 bucket holding the deployable application archive.
        source_bucket = "sourcebucketname%s" % (core.Aws.ACCOUNT_ID)

        # Log group receiving bootstrap and application logs via awslogs.
        cloudformation_logs = logs.LogGroup(
            self, 'CloudFormationLogs',
            retention=logs.RetentionDays('ONE_WEEK'))

        web_instance_1 = self._web_instance(
            logical_id='WebInstance1',
            name_tag='WebServer1',
            subnet_export='PrivateSubnet1',
            image_id=latest_ami_id.value_as_string,
            source_bucket=source_bucket,
            log_group_name=cloudformation_logs.log_group_name)

        web_instance_2 = self._web_instance(
            logical_id='WebInstance2',
            name_tag='WebServer2',
            subnet_export='PrivateSubnet2',
            image_id=latest_ami_id.value_as_string,
            source_bucket=source_bucket,
            log_group_name=cloudformation_logs.log_group_name)

        # Target group health-checks "/" and drains targets quickly (30s).
        default_target_group = elasticloadbalancingv2.CfnTargetGroup(
            self, 'DefaultTargetGroup',
            health_check_interval_seconds=15,
            health_check_path="/",
            health_check_protocol="HTTP",
            health_check_timeout_seconds=10,
            healthy_threshold_count=2,
            unhealthy_threshold_count=2,
            matcher={'httpCode': '200-299'},
            port=80,
            protocol="HTTP",
            vpc_id=core.Fn.import_value("VPC"),
            target_group_attributes=[
                {"key": "deregistration_delay.timeout_seconds",
                 "value": "30"}],
            targets=[
                {"id": web_instance_1.ref, "port": 80},
                {"id": web_instance_2.ref, "port": 80}
            ]
        )

        # Plain-HTTP listener forwarding everything to the target group.
        elasticloadbalancingv2.CfnListener(
            self, 'HttpListener',
            default_actions=[{
                "type": "forward",
                "targetGroupArn": default_target_group.ref
            }],
            load_balancer_arn=core.Fn.import_value("LoadBalancerArn"),
            port=80,
            protocol="HTTP")

    def _web_instance(self, logical_id, name_tag, subnet_export, image_id,
                      source_bucket, log_group_name):
        """Create one bootstrapped web-tier instance.

        Args:
            logical_id: CloudFormation logical id (also used by cfn-init /
                cfn-signal and the cfn-hup reloader hook).
            name_tag: value of the "Name" tag.
            subnet_export: export name of the private subnet to launch into.
            image_id: resolved AMI id.
            source_bucket: bucket holding the deployment archive.
            log_group_name: CloudWatch Logs group for the awslogs agent.

        Returns:
            ec2.CfnInstance: the configured instance construct.
        """
        user_data = core.Fn.base64(
            "#!/bin/bash -ex\n"
            "yum update -y\n"
            "/opt/aws/bin/cfn-init -v --stack {stack} --resource {res} "
            "--configsets InstallAndDeploy --region {region}\n"
            "# Signal the status from cfn-init (via $?)\n"
            "/opt/aws/bin/cfn-signal -e $? --stack {stack} --resource {res} "
            "--region {region}\n".format(
                stack=core.Aws.STACK_NAME, res=logical_id,
                region=core.Aws.REGION))

        instance = ec2.CfnInstance(
            self, logical_id, additional_info=None, affinity=None,
            iam_instance_profile=core.Fn.import_value("WebServerInstanceProfileOutput"),
            image_id=image_id,
            instance_type='t3.micro',
            network_interfaces=[
                {
                    "deviceIndex": "0",
                    "groupSet": [core.Fn.import_value("WebSecurityGroupOutput")],
                    "subnetId": core.Fn.import_value(subnet_export)
                }
            ],
            tags=[core.CfnTag(key="Name", value=name_tag)],
            user_data=user_data)

        instance.cfn_options.metadata = self._bootstrap_metadata(
            logical_id, source_bucket, log_group_name)
        # Stack creation waits up to 10 minutes for the cfn-signal above.
        instance.cfn_options.creation_policy = core.CfnCreationPolicy(
            resource_signal=core.CfnResourceSignal(timeout='PT10M'))
        return instance

    def _bootstrap_metadata(self, logical_id, source_bucket, log_group_name):
        """Build the cfn-init metadata (configsets Install / InstallLogs /
        Deploy) plus the role-based S3 authentication block for one instance."""
        cfn_hup_conf = (
            "[main]\n"
            "stack={}\n"
            "region={}\n"
            "interval=1\n"
            "verbose=true\n".format(core.Aws.STACK_ID, core.Aws.REGION))

        # Each instance watches its OWN metadata (the original pointed
        # WebInstance2's hook at WebInstance1).
        auto_reloader_conf = (
            "[cfn-auto-reloader-hook]\n"
            "triggers=post.update\n"
            "path=Resources.{res}.Metadata.AWS::CloudFormation::Init\n"
            "action=/opt/aws/bin/cfn-init -v --stack {stack} --resource {res} "
            "--configsets InstallAndDeploy --region {region}\n"
            "runas=root\n".format(
                res=logical_id, stack=core.Aws.STACK_NAME,
                region=core.Aws.REGION))

        # awslogs agent configuration: one section per shipped log file. The
        # {hostname}/{instance_id} placeholders are expanded by the agent.
        log_streams = [
            ("yum", "/var/log/yum.log", "yum.log"),
            ("messages", "/var/log/messages", "messages.log"),
            ("cfn-hup", "/var/log/cfn-hup.log", "cfn-hup.log"),
            ("cfn-init", "/var/log/cfn-init.log", "cfn-init.log"),
            ("cfn-init-cmd", "/var/log/cfn-init-cmd.log", "cfn-init-cmd.log"),
            ("cloud-init", "/var/log/cloud-init.log", "cloud-init.log"),
            ("cloud-init-output", "/var/log/cloud-init-output.log", "cloud-init.log"),
            ("handler", "/var/log/handler.log", "handler.log"),
            ("uwsgi", "/var/log/uwsgi.log", "uwsgi.log"),
            ("nginx_access", "/var/log/nginx/access.log", "nginx_access.log"),
            ("nginx_error", "/var/log/nginx/error.log", "nginx_error.log"),
        ]
        awslogs_conf = "[general]\nstate_file= /var/awslogs/state/agent-state\n"
        for section, log_file, stream_suffix in log_streams:
            # Fixed: the original emitted a literal "%s" as the [yum] group.
            awslogs_conf += (
                "[{}]\n"
                "file = {}\n"
                "log_group_name = {}\n"
                "log_stream_name = {{hostname}} - {{instance_id}} {}\n".format(
                    section, log_file, log_group_name, stream_suffix))

        awscli_conf = (
            "[plugins]\n"
            "cwlogs = cwlogs\n"
            "[default]\n"
            "region = {}\n".format(core.Aws.REGION))

        return {
            "AWS::CloudFormation::Authentication": {
                "rolebased": {
                    "type": "S3",
                    "buckets": [source_bucket],
                    "roleName": core.Fn.import_value("WebServerRoleOutput")
                }
            },
            "AWS::CloudFormation::Init": {
                "configSets": {
                    "InstallAndDeploy": ["Install", "InstallLogs", "Deploy"]
                },
                "Install": {
                    "packages": {
                        "yum": {
                            "python36": [],
                            "python36-devel": [],
                            "nginx": [],
                            "gcc": []
                        }
                    },
                    "files": {
                        "/etc/cfn/cfn-hup.conf": {
                            "content": cfn_hup_conf,
                            "mode": "000400",
                            "owner": "root",
                            "group": "root"
                        },
                        "/etc/cfn/hooks.d/cfn-auto-reloader.conf": {
                            "content": auto_reloader_conf,
                            "mode": "000400",
                            "owner": "root",
                            "group": "root"
                        }
                    },
                    "services": {
                        "sysvinit": {
                            "nginx": {
                                "enabled": "true",
                                "ensureRunning": "true"
                            },
                            "cfn-hup": {
                                "enabled": "true",
                                "ensureRunning": "true",
                                "files": [
                                    "/etc/cfn/cfn-hup.conf",
                                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf"
                                ]
                            }
                        }
                    },
                    "commands": {
                        "01_unblock_nginx": {
                            "command": "chkconfig nginx on"
                        },
                        "02_install_xray": {
                            "command": "curl https://s3.dualstack.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-3.x.rpm -o /tmp/xray.rpm && yum install -y /tmp/xray.rpm\n",
                            "cwd": "/tmp",
                            "ignoreErrors": "true"
                        }
                    }
                },
                "InstallLogs": {
                    "packages": {
                        "yum": {
                            "awslogs": []
                        }
                    },
                    "files": {
                        "/etc/awslogs/awslogs.conf": {
                            "content": awslogs_conf,
                            "group": "root",
                            "owner": "root",
                            "mode": "000400"
                        },
                        "/etc/awslogs/awscli.conf": {
                            "content": awscli_conf,
                            "mode": "000444",
                            "owner": "root",
                            "group": "root"
                        }
                    },
                    "commands": {
                        "01_create_state_directory": {
                            "command": "mkdir -p /var/awslogs/state"
                        }
                    },
                    "services": {
                        "sysvinit": {
                            "awslogs": {
                                "enabled": "true",
                                "ensureRunning": "true",
                                "files": ["/etc/awslogs/awslogs.conf"]
                            }
                        }
                    }
                },
                "Deploy": {
                    "sources": {
                        "/photos": "https://s3.amazonaws.com/{}/deploy-app.zip".format(source_bucket)
                    },
                    "commands": {
                        "01_pip_uwsgi": {
                            "command": "pip-3.6 install uwsgi",
                            "cwd": "/photos",
                            "ignoreErrors": "false"
                        },
                        "02_pip_flask_app_requirements": {
                            "command": "pip-3.6 install -r requirements.txt",
                            "cwd": "/photos/FlaskApp",
                            "ignoreErrors": "false"
                        },
                        "03_stop_uwsgi": {
                            "command": "stop uwsgi",
                            "ignoreErrors": "true"
                        },
                        "04_stop_nginx": {
                            "command": "service nginx stop"
                        },
                        "05_copy_config": {
                            "command": "mv -f nginx.conf /etc/nginx/nginx.conf && mv -f uwsgi.conf /etc/init/uwsgi.conf",
                            "cwd": "/photos/Deploy",
                            "ignoreErrors": "false"
                        },
                        "06_create_database": {
                            "command": "python3 database_create_tables.py",
                            "cwd": "/photos/Deploy",
                            "ignoreErrors": "false"
                        },
                        "07_start_uwsgi": {
                            "command": "start uwsgi"
                        },
                        "08_restart_nginx": {
                            "command": "service nginx start"
                        }
                    }
                }
            }
        }
"[email protected]"
] | |
f5c1dd7878185c0c4b4b8389012857aebf4f13cb | 035b1a64a5fb1b839603ace74b2a5cf057d4cc7b | /Conversor moeda/App.py | 0d57be7cf3b2e640ba8c093c2a2d255c8047c691 | [
"MIT"
] | permissive | LuanFaria/Conversor_de_Moedas | c0ef20b9ca19d9c7e850f8f1e6b6ee67d4f1bfa6 | 6273da4739bef1427c63b6cf906b5a90239dd39b | refs/heads/main | 2023-06-15T13:04:10.464329 | 2021-07-10T03:34:52 | 2021-07-10T03:34:52 | 384,600,166 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,364 | py | from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
from interface import *
from Coletor import *
import datetime
class Converter(QMainWindow, Ui_MainWindow):
    """Main window of the currency converter.

    Binds the Qt Designer form (``Ui_MainWindow``) to the conversion logic.
    Every rate is stored as "value of one unit of the currency in BRL", so
    any conversion is simply ``amount * rate[source] / rate[target]``.
    """

    def __init__(self):
        super().__init__(None)
        super().setupUi(self)
        # Fixar o Tamanho (fixed window size)
        self.setFixedSize(313, 164)
        # Widget wiring and initial content.
        self.atualizar.clicked.connect(self.atualiza)
        self.data.setText(str(datetime.date.today()))
        self.dinheiro_1.addItems(['Dollar', 'Euro', 'Iene', 'Real'])
        self.dinheiro_2.addItems(['Dollar', 'Euro', 'Iene', 'Real'])
        self.convert.clicked.connect(self.calcular_conversor)
        # The result field is output-only.
        self.valor_2.setDisabled(True)
        # CSS formatting (fix: the original trailing comma after "20px"
        # made the font-size declaration invalid).
        self.valor_2.setStyleSheet('* {color:#000}')
        self.convert.setStyleSheet('* {background: #32CD32; color:#000}')
        self.setStyleSheet('* {background: #E0FFFF; color:#000; font-size: 20px}')
        # Offline fallback rates: one unit of each currency expressed in BRL.
        self._coletaD = 5.26
        self._coletaR = 1
        self._coletaI = 0.046
        self._coletaE = 6.14

    def atualiza(self):
        """Refresh the exchange rates using the scrapers from ``Coletor``."""
        self._coletaD = coletar_dollar()
        self._coletaR = coletar_real()
        self._coletaI = coletar_iene()
        self._coletaE = coletar_euro()

    def _taxas(self):
        """Map each combo-box currency name to its current rate in BRL."""
        return {
            'Dollar': self._coletaD,
            'Euro': self._coletaE,
            'Iene': self._coletaI,
            'Real': self._coletaR,
        }

    def calcular_conversor(self):
        """Convert the amount typed in ``valor_1`` and show it in ``valor_2``.

        Replaces the original 16-branch if/elif chain with a single
        table-driven computation, and narrows the former bare ``except``
        (which silently swallowed every error) to the only expected
        failure: a non-numeric amount.  Rounding is normalised to two
        decimals for Real targets and four for the other currencies (the
        old code mixed 2 and 4 inconsistently for Euro targets).
        """
        taxas = self._taxas()
        origem = self.dinheiro_1.currentText()
        destino = self.dinheiro_2.currentText()
        try:
            valor = float(self.valor_1.text())
        except ValueError:
            # Same recovery as before: reset the input field to "1".
            self.valor_1.setText(str(1))
            return
        # Convert through BRL: amount * (source in BRL) / (target in BRL).
        resultado = valor * taxas[origem] / taxas[destino]
        casas_decimais = 2 if destino == 'Real' else 4
        resultado = round(resultado, casas_decimais)
        # Display with the Brazilian decimal separator.
        self.valor_2.setText(str(resultado).replace('.', ','))
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window
    # and hand control to the event loop (blocks until the window closes).
    aplicativo = QApplication(sys.argv)
    tela = Converter()
    tela.show()
    aplicativo.exec_()
| [
"[email protected]"
] | |
354576907f5498701abdb951f5c1359827eeacec | 288c275059393a9a858b93530a08226ecf8ecb96 | /test.py | 1fa9b919f2c903d351d804cddc3fb6e68bd258e7 | [] | no_license | L3-S6-Projet/test_server | cfae24b6106d8e593f403b24617397a8b6d17684 | bd3811d320f3decc98d1c4043f729291b695ce35 | refs/heads/master | 2022-08-23T19:47:49.920740 | 2020-05-09T11:08:27 | 2020-05-09T11:08:27 | 266,430,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,276 | py | import unittest
import requests
BASE_URL = 'http://127.0.0.1:3030/api/'
# Tests assume the following account are created, and will be re-created as-is with a POST to /api/reset:
# {'first_name': 'Admin', 'last_name': 'User', 'username': 'admin', 'password': 'admin'}
# {'first_name': 'Teacher', 'last_name': 'User', 'username': 'teacher', 'password': 'teacher'}
# {'first_name': 'Student', 'last_name': 'User', 'username': 'student', 'password': 'student'}
# Tests are not safe to run in parallel - they might manipulate the same accounts at the same time.
def validate_object(data, schema, _path=''):
    """Recursively assert that *data* matches *schema*.

    A schema node may be a dict (validated key by key), a list (every
    element checked against the first schema element), a type (exact type
    equality) or any other value (literal equality).
    """
    schema_kind = type(schema)
    if schema_kind is dict:
        assert type(data) == dict, f'{_path} is not a dict'
        for field, field_schema in schema.items():
            validate_object(data[field], field_schema, _path=f'{_path}.{field}')
        return
    if schema_kind is list:
        assert type(data) == list, f'{_path} is not a list'
        for idx, element in enumerate(data):
            validate_object(element, schema[0], _path=f'{_path}[{idx}]')
        return
    if schema_kind is type:
        assert type(
            data) == schema, f'{_path} is of type {type(data)}, but should have been {schema}'
        return
    assert data == schema, f'{_path} equals "{data}", where it should have been "{schema}"'
def get_token(username, password=None):
    """Log in as *username* and return the session token.

    When *password* is omitted it defaults to the username, matching the
    fixture accounts created by /api/reset.
    """
    if password is None:
        password = username
    payload = {'username': username, 'password': password}
    res = requests.post(BASE_URL + 'session', json=payload)
    assert res.status_code == 200, res.status_code
    return res.json()['token']
def validate_error_response(res, code):
    """Assert that *res* is a JSON error body carrying error *code*."""
    expected = {'status': 'error', 'code': code}
    validate_object(res, expected)
def validate_simple_success_response(res):
    """Assert that *res* is the minimal success JSON body."""
    expected = {'status': 'success'}
    validate_object(res, expected)
def reset():
    """Ask the test server to restore the three fixture accounts."""
    response = requests.get(BASE_URL + 'reset')
    assert response.text == '"ok"'
class TestSessionRoutes(unittest.TestCase):
    """Integration tests for the /api/session login and logout endpoints."""

    @classmethod
    def setUpClass(cls):
        # Restore the three fixture accounts once before this suite runs.
        reset()

    def test_post_session(self):
        # Expected profile payload returned for each fixture account.
        expected_users = {
            'admin': {'first_name': 'Admin', 'last_name': 'User', 'kind': 'administrator'},
            'teacher': {'first_name': 'Teacher', 'last_name': 'User', 'kind': 'teacher'},
            'student': {'first_name': 'Student', 'last_name': 'User', 'kind': 'student'},
        }
        for username, user_schema in expected_users.items():
            with self.subTest(msg='Checks that the user can login', username=username):
                res = requests.post(BASE_URL + 'session', json={
                    'username': username,
                    'password': username,
                })
                assert res.status_code == 200, res.status_code
                validate_object(res.json(), {
                    'status': 'success',
                    'token': str,
                    'user': user_schema,
                })

    def test_post_session_invalid_credentials(self):
        res = requests.post(BASE_URL + 'session', json={
            'username': 'not-found',
            'password': 'invalid-password',
        })
        assert res.status_code == 403, res.status_code
        validate_error_response(res.json(), 'InvalidCredentials')

    def test_delete_session(self):
        session_token = get_token('admin')
        res = requests.delete(BASE_URL + 'session',
                              headers={'Authorization': 'Bearer ' + session_token})
        assert res.status_code == 200, res.status_code
        validate_simple_success_response(res.json())

    def test_delete_session_invalid_token(self):
        res = requests.delete(BASE_URL + 'session',
                              headers={'Authorization': 'Bearer somerandomfaketoken'})
        assert res.status_code == 403, res.status_code
        validate_error_response(res.json(), 'InvalidCredentials')
class TestProfileRoutes(unittest.TestCase):
    """Integration tests for PUT /api/profile."""

    def setUp(self):
        # Every test mutates accounts, so restore the fixtures each time.
        reset()

    @staticmethod
    def _put_profile(token, payload):
        """Issue a PUT /api/profile with the given bearer token and JSON body."""
        return requests.put(BASE_URL + 'profile',
                            headers={'Authorization': 'Bearer ' + token},
                            json=payload)

    def test_put_profile_not_authorized(self):
        res = self._put_profile('somerandomfaketoken', {})
        assert res.status_code == 403, res.status_code
        validate_error_response(res.json(), 'InvalidCredentials')

    def test_put_profile_first_name_last_name_without_admin(self):
        for prop in ('first_name', 'last_name'):
            for username in ('teacher', 'student'):
                with self.subTest(msg='Checks that the property cant be edited', prop=prop, username=username):
                    res = self._put_profile(get_token(username), {prop: 'new value'})
                    assert res.status_code == 401, res.status_code
                    validate_error_response(
                        res.json(), 'InsufficientAuthorization')

    def test_put_profile_password_without_old(self):
        res = self._put_profile(get_token('admin'), {'password': 'newpassword'})
        assert res.status_code == 400, res.status_code
        validate_error_response(res.json(), 'MalformedData')

    def test_put_profile_old_password_without_new(self):
        res = self._put_profile(get_token('admin'), {'old_password': 'oldpassword'})
        assert res.status_code == 400, res.status_code
        validate_error_response(res.json(), 'MalformedData')

    def test_put_profile_wrong_old_password(self):
        payload = {'old_password': 'wrongpassword', 'password': 'newpassword'}
        res = self._put_profile(get_token('admin'), payload)
        assert res.status_code == 403, res.status_code
        validate_error_response(res.json(), 'InvalidOldPassword')

    def test_put_profile_can_change_password(self):
        for account in ('admin', 'teacher', 'student'):
            with self.subTest(msg='Checks that the user can change their password', account=account):
                res = self._put_profile(get_token(account), {
                    'old_password': account,
                    'password': 'newpassword',
                })
                assert res.status_code == 200, res.status_code
                validate_simple_success_response(res.json())
                # The old credentials must now be rejected...
                res = requests.post(BASE_URL + 'session', json={
                    'username': account,
                    'password': account,
                })
                assert res.status_code == 403, res.status_code
                validate_error_response(res.json(), 'InvalidCredentials')
                # ...and the new password must work.
                get_token(account, 'newpassword')

    def test_put_profile_admin_can_change_first_name_last_name(self):
        res = self._put_profile(get_token('admin'), {
            'first_name': 'NewAdminFirstName',
            'last_name': 'NewAdminLastName',
        })
        assert res.status_code == 200, res.status_code
        validate_simple_success_response(res.json())
        res = requests.post(BASE_URL + 'session', json={
            'username': 'admin',
            'password': 'admin',
        })
        assert res.status_code == 200, res.status_code
        assert res.json()['user']['first_name'] == 'NewAdminFirstName'
        assert res.json()['user']['last_name'] == 'NewAdminLastName'
if __name__ == '__main__':
    # Run the whole integration suite against the server at BASE_URL.
    unittest.main()
| [
"[email protected]"
] | |
9a96327791724d1b20140f28c67f7738ba9fefab | 04a344a58278fb01a4cfda67624403b5a0870e26 | /Divisible_Sum_Pairs.py | 41fa8dfb1d4141a93cb7413d7fc6c855a3c2889f | [] | no_license | aleksiheikkila/HackerRank_Python_Problems | ed33f4a161e72003f45eabd3df58d7d537767934 | c758911abede97196883b0909d21879616d12d33 | refs/heads/master | 2023-01-09T02:55:48.952599 | 2022-12-27T19:33:00 | 2022-12-27T19:33:00 | 201,996,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | '''
HackerRank problem
Domain : Python
Author : Aleksi Heikkilä
Created : Aug 2019
Problem : https://www.hackerrank.com/challenges/divisible-sum-pairs/problem
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the divisibleSumPairs function below.
def divisibleSumPairs(n, k, ar):
    """
    Count the index pairs (i, j), i < j, whose element sum is divisible by k.

    Instead of materialising every pair (the original was O(n^2) in both
    time and memory, building a list only to take its length), bucket the
    elements by remainder modulo k and count pairs combinatorially in
    O(n + k): a pair sums to a multiple of k exactly when its remainders
    add up to 0 (mod k).

    divisibleSumPairs has the following parameter(s):
        n:  the integer length of the array (kept for interface
            compatibility with the original stub)
        k:  the integer to divide the pair sum by
        ar: an array of integers

    Returns the integer count of qualifying pairs.
    """
    remainder_counts = [0] * k
    for value in ar[:n]:
        remainder_counts[value % k] += 1

    # Pairs inside the zero-remainder bucket: C(c, 2).
    pairs = remainder_counts[0] * (remainder_counts[0] - 1) // 2
    for r in range(1, k // 2 + 1):
        if r == k - r:
            # Even k: the middle bucket pairs with itself.
            pairs += remainder_counts[r] * (remainder_counts[r] - 1) // 2
        else:
            # Complementary buckets r and k - r.
            pairs += remainder_counts[r] * remainder_counts[k - r]
    return pairs
if __name__ == '__main__':
    # HackerRank harness: read "n k" and the array from stdin, then write
    # the answer to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nk = input().split()
    n = int(nk[0])
    k = int(nk[1])
    ar = list(map(int, input().rstrip().split()))
    result = divisibleSumPairs(n, k, ar)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
"[email protected]"
] | |
f56bfe1662a5f51d41b144269ef27b7a458bc3ee | 47a18d699d29f8a617f8c53c67208df2739ac4f4 | /home/migrations/0006_forgetpass.py | f9da5622bc3dc3a349aded8e147e968027888b11 | [] | no_license | achieverz-sport/Achieverz | c79b154ae3814c14a84acd55b6817c0279303680 | 09eead46159b8434b8718aa7b2aab51a7e26c4db | refs/heads/main | 2022-12-24T23:57:23.156024 | 2020-10-05T10:46:27 | 2020-10-05T10:46:27 | 301,354,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | # Generated by Django 3.1.1 on 2020-09-24 13:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the ``forgetpass`` table used
    # to associate a password-reset key with an e-mail address.  Applied
    # migrations must not be edited by hand; add a new migration instead.

    # Must run after the previous migration of the ``home`` app.
    dependencies = [
        ('home', '0005_auto_20200924_1155'),
    ]

    operations = [
        migrations.CreateModel(
            name='forgetpass',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length=20 looks short for an e-mail
                # address -- confirm against the form validation.
                ('email', models.CharField(max_length=20)),
                ('key', models.CharField(max_length=20)),
            ],
        ),
    ]
| [
"[email protected]"
] | |
0a8e2bc6532dbb507eb9601893e5e6fd4777f437 | 3f5a7cd3b55349e63344972829abe47cf8b26987 | /canshu_a.py | ccf5f05c9711bd8eab63aa5fc9f46e2ef04f0fec | [] | no_license | cruzereiz/- | 266ef5ef0db66d69a592e46c66459aebf9ae4abd | 9d7826b203e2e1d345a863961be09d1116edb370 | refs/heads/main | 2023-04-15T21:55:54.633605 | 2021-04-28T01:26:27 | 2021-04-28T01:26:27 | 362,299,900 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | import json
def canshu_a(id):
    """Build the JSON request payload for the comment API of song *id*.

    The original parsed a hard-coded JSON template with ``json.loads`` and
    then overwrote two keys on every call; building the dict directly is
    simpler and produces the exact same ``json.dumps`` output (keys keep
    insertion order).  The parameter name ``id`` shadows the builtin but is
    kept for interface compatibility.
    """
    payload = {
        "rid": "R_SO_4_{}".format(id),
        "threadId": "R_SO_4_{}".format(id),
        "pageNo": "1",
        "pageSize": "50",
        "cursor": "-1",
        "offset": "0",
        "orderType": "1",
        "csrf_token": "",
    }
    return json.dumps(payload)
| [
"[email protected]"
] | |
deb2567b32d956ed4e674ed4a6c3b42550b00a4f | 21b965f430ee6fd5772d65b2ce54287d461479ee | /python/setup.temp.py | 23c01c8b4b42c53bc870791a1788a41415ce5b6b | [
"MIT"
] | permissive | hoojaoh/taichi | 9610266386229957a2c0bb7f6afcaaad3c83a746 | 641c4b83bcf98e7354b135964cd23759b0110c6b | refs/heads/master | 2023-08-31T07:33:58.991208 | 2019-11-01T20:01:46 | 2019-11-01T20:01:52 | 78,141,490 | 0 | 0 | MIT | 2019-11-01T22:20:18 | 2017-01-05T19:24:06 | C++ | UTF-8 | Python | false | false | 1,142 | py | import setuptools
import glob
# NOTE: this file is a template (setup.temp.py) -- ``project_name`` and
# ``version`` are expected to be injected by the build script before this
# code runs; they are deliberately undefined here.
classifiers = [
    'Development Status :: 1 - Planning',
    'Topic :: Multimedia :: Graphics',
    'Topic :: Games/Entertainment :: Simulation',
    'Intended Audience :: Science/Research',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: C++',
]
# Ship every shared library under python/lib with the package.
data_files = glob.glob('python/lib/*')
print(data_files)
packages = setuptools.find_packages()
print(packages)
setuptools.setup(
    name=project_name,
    packages=packages,
    version=version,
    description='The Taichi Programming Language',
    author='Yuanming Hu',
    author_email='[email protected]',
    url='https://github.com/yuanming-hu/taichi',
    install_requires=[
        'numpy', 'Pillow', 'scipy', 'pybind11', 'colorama', 'setuptools', 'astor', 'matplotlib'
    ],
    data_files=[('lib', data_files)],
    keywords=['graphics', 'simulation'],
    license='MIT',
    platforms=['Linux'],
    include_package_data=True,
    classifiers=classifiers,
    # Mark the distribution as containing native extensions so installers
    # treat it as a platform-specific (non-pure) package.
    has_ext_modules=lambda: True
)
| [
"[email protected]"
] | |
7b185abe18bf28f13c874b2a769fb91bd47c5b9b | b2f01680e78cf5b64c779425790e10b31ab82fc0 | /A_plot_Bacteria/taxonomy_tree.py | ff79577f0b8644bddf8d8b65a47b89bcd7dce471 | [] | no_license | stormlovetao/rnaSeq | fcdfca39434aaa26dc5ef17ed84ea99b8743f7ff | 61b5051024f701a983acb52278fef4a9a03af1dc | refs/heads/master | 2021-01-10T03:30:55.952913 | 2017-03-07T21:22:35 | 2017-03-07T21:22:35 | 47,236,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | #export PATH=~/anaconda_ete/bin:$PATH
from ete3 import NCBITaxa
ncbi = NCBITaxa()
####### Helpers (the same read/resolve/write logic was triplicated below,
####### and the output used the Python-2-only ``print>>fp`` syntax) ########
def _read_names(in_path):
    """Read one taxon name per line, stripped of surrounding whitespace."""
    with open(in_path) as fp_in:
        return [line.strip() for line in fp_in]


def _write_taxonomy_tree(names, out_path):
    """Resolve *names* to NCBI taxids and write their topology as ASCII art."""
    name2taxid = ncbi.get_name_translator(names)
    # get_name_translator maps each name to a list of taxids; keep the first.
    taxids = [ids[0] for ids in name2taxid.values()]
    tree = ncbi.get_topology(taxids)
    with open(out_path, 'w') as fp:
        # write() + '\n' replaces the Python-2-only ``print>>fp`` statement.
        fp.write(str(tree.get_ascii(attributes=['sci_name', 'rank'])) + '\n')


####### BRAINCODE viruses taxonomy tree ########
viruses1 = _read_names("/PHShome/tw786/localView/overview/Tree/BRAINCODE_viruses.txt")
_write_taxonomy_tree(viruses1, "/PHShome/tw786/localView/overview/Tree/BRAINCODE_viruses_tree.txt")

####### GTEx viruses taxonomy tree ########
viruses2 = _read_names("/PHShome/tw786/localView/overview/Tree/GTEx_viruses.txt")
_write_taxonomy_tree(viruses2, "/PHShome/tw786/localView/overview/Tree/GTEx_viruses_tree.txt")

####### BRAINCODE + GTEx viruses taxonomy tree ########
viruses_merge = list(set(viruses1 + viruses2))
_write_taxonomy_tree(viruses_merge, "/PHShome/tw786/localView/overview/Tree/BRAINCODE+GTEx_viruses_tree.txt")
"[email protected]"
] | |
f5108df754338fad2dcde10f801f7e363935eaff | 20035d7de00f51adbfd43b6c08ecc4f651f45ab4 | /backend/services/text_similarity/application/configuration.py | eb0d765bae34d702cf38ee30afffda3ab96d6fe9 | [
"MIT"
] | permissive | R-aryan/Text-Similarity-Using-BERT | 3f40d2deb8b8b73f5428f47be37662251fa0d436 | edba09c0da2b21d50532facc1237c2e7ffae10bd | refs/heads/main | 2023-08-07T19:50:13.727391 | 2021-09-27T06:06:37 | 2021-09-27T06:06:37 | 409,261,165 | 0 | 0 | null | 2021-09-27T06:06:38 | 2021-09-22T15:38:08 | Python | UTF-8 | Python | false | false | 639 | py | from injector import Module, singleton
from common.logging.console_logger import ConsoleLogger
from services.text_similarity.application.ai.inference.prediction import PredictionManager
from services.text_similarity.application.ai.training.src.preprocess import Preprocess
from services.text_similarity.settings import Settings
class Configuration(Module):
    """Injector module wiring the text-similarity service's singletons."""

    def configure(self, binder):
        """Register the shared console logger and the prediction manager."""
        console_logger = ConsoleLogger(filename=Settings.LOGS_DIRECTORY)
        binder.bind(ConsoleLogger, to=console_logger, scope=singleton)
        prediction_manager = PredictionManager(preprocess=Preprocess(), logger=console_logger)
        binder.bind(PredictionManager, to=prediction_manager, scope=singleton)
| [
"[email protected]"
] | |
b4377f15b5571bc63d5c60b1a5df7257600bce43 | a479a5773fd5607f96c3b84fed57733fe39c3dbb | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/__init__.py | 868a2ec6db7e8ae231952e4752d89bc0dcf1bd68 | [
"Apache-2.0"
] | permissive | napalm-automation/napalm-yang | 839c711e9294745534f5fbbe115e0100b645dbca | 9148e015b086ebe311c07deb92e168ea36fd7771 | refs/heads/develop | 2021-01-11T07:17:20.226734 | 2019-05-15T08:43:03 | 2019-05-15T08:43:03 | 69,226,025 | 65 | 64 | Apache-2.0 | 2019-05-15T08:43:24 | 2016-09-26T07:48:42 | Python | UTF-8 | Python | false | false | 40,677 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa/types-of-service/type-of-service/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Per-TOS parameters for the LSA
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__forwarding_address",
"__external_route_tag",
"__tos",
"__metric",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__forwarding_address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
self.__external_route_tag = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
self.__tos = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__metric = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-ospf-types:ospf-metric",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
    def _path(self):
        """Return this node's path within the YANG data tree."""
        # When attached to a parent container, build the path recursively;
        # otherwise fall back to this node's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "lsdb",
                "lsa-types",
                "lsa-type",
                "lsas",
                "lsa",
                "as-external-lsa",
                "types-of-service",
                "type-of-service",
                "state",
            ]
    def _get_forwarding_address(self):
        """
        Getter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/forwarding_address (inet:ipv4-address-no-zone)

        YANG Description: The destination to which traffic for the external prefix
should be advertised. When this value is set to 0.0.0.0 then
traffic should be forwarded to the LSA's originator
        """
        # Auto-generated pyangbind accessor: returns the YANGDynClass-wrapped
        # leaf value (read-only: this is operational state).
        return self.__forwarding_address
def _set_forwarding_address(self, v, load=False):
"""
Setter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/forwarding_address (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_forwarding_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_forwarding_address() directly.
YANG Description: The destination to which traffic for the external prefix
should be advertised. When this value is set to 0.0.0.0 then
traffic should be forwarded to the LSA's originator
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """forwarding_address must be of a type compatible with inet:ipv4-address-no-zone""",
"defined-type": "inet:ipv4-address-no-zone",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="forwarding-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
}
)
self.__forwarding_address = t
if hasattr(self, "_set"):
self._set()
def _unset_forwarding_address(self):
self.__forwarding_address = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
is_leaf=True,
yang_name="forwarding-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ipv4-address-no-zone",
is_config=False,
)
    def _get_external_route_tag(self):
        """
        Getter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/external_route_tag (uint32)

        YANG Description: An opaque tag that set by the LSA originator to carry
information relating to the external route
        """
        # Auto-generated pyangbind accessor: returns the YANGDynClass-wrapped
        # uint32 leaf value.
        return self.__external_route_tag
def _set_external_route_tag(self, v, load=False):
"""
Setter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/external_route_tag (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_external_route_tag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_external_route_tag() directly.
YANG Description: An opaque tag that set by the LSA originator to carry
information relating to the external route
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """external_route_tag must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="external-route-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
}
)
self.__external_route_tag = t
if hasattr(self, "_set"):
self._set()
def _unset_external_route_tag(self):
self.__external_route_tag = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="external-route-tag",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
    def _get_tos(self):
        """
        Getter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/tos (uint8)

        YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
        """
        # Auto-generated pyangbind accessor: returns the YANGDynClass-wrapped
        # uint8 leaf value.
        return self.__tos
def _set_tos(self, v, load=False):
"""
Setter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/tos (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_tos is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tos() directly.
YANG Description: OSPF encoding of the type of service referred to by this
LSA. Encoding for OSPF TOS are described in RFC2328.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="tos",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """tos must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
}
)
self.__tos = t
if hasattr(self, "_set"):
self._set()
def _unset_tos(self):
    # Reset tos to a fresh, empty YANGDynClass-wrapped default (uint8 leaf, state data).
    self.__tos = YANGDynClass(
        base=RestrictedClassType(
            base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
        ),
        is_leaf=True,
        yang_name="tos",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="uint8",
        is_config=False,
    )
def _get_metric(self):
    """
    Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
    YANG Description: The metric value to be used for the TOS specified. This value
    represents the cost of use of the link for the specific type
    of service.
    """
    # Simple accessor for the privately stored leaf.
    return self.__metric
def _set_metric(self, v, load=False):
    """
    Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_metric is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_metric() directly.
    YANG Description: The metric value to be used for the TOS specified. This value
    represents the cost of use of the link for the specific type
    of service.
    """
    # Union-typed values coerce themselves before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap in the pyangbind dynamic class enforcing the uint16 (0..65535) range.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
            ),
            is_leaf=True,
            yang_name="metric",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-ospf-types:ospf-metric",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
                "defined-type": "oc-ospf-types:ospf-metric",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
            }
        )
    self.__metric = t
    # Notify any registered change hook after a successful assignment.
    if hasattr(self, "_set"):
        self._set()
def _unset_metric(self):
    # Reset metric to a fresh, empty YANGDynClass-wrapped default (uint16 leaf, state data).
    self.__metric = YANGDynClass(
        base=RestrictedClassType(
            base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
        ),
        is_leaf=True,
        yang_name="metric",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="oc-ospf-types:ospf-metric",
        is_config=False,
    )
# Read-only property handles exposed by pyangbind (state container: getters only).
forwarding_address = __builtin__.property(_get_forwarding_address)
external_route_tag = __builtin__.property(_get_external_route_tag)
tos = __builtin__.property(_get_tos)
metric = __builtin__.property(_get_metric)

# Ordered registry of YANG leaves used by pyangbind's container machinery.
_pyangbind_elements = OrderedDict(
    [
        ("forwarding_address", forwarding_address),
        ("external_route_tag", external_route_tag),
        ("tos", tos),
        ("metric", metric),
    ]
)
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa/types-of-service/type-of-service/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Per-TOS parameters for the LSA
    """

    # NOTE: machine-generated by pyangbind. Change the YANG model and
    # regenerate rather than editing this class by hand.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__forwarding_address",
        "__external_route_tag",
        "__tos",
        "__metric",
    )

    _yang_name = "state"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Initialize every leaf to an empty YANGDynClass-wrapped default.
        self._path_helper = False
        self._extmethods = False
        self.__forwarding_address = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_dict={
                        "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                    },
                ),
                restriction_dict={"pattern": "[0-9\\.]*"},
            ),
            is_leaf=True,
            yang_name="forwarding-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="inet:ipv4-address-no-zone",
            is_config=False,
        )
        self.__external_route_tag = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="external-route-tag",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
        self.__tos = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            is_leaf=True,
            yang_name="tos",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=False,
        )
        self.__metric = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
            ),
            is_leaf=True,
            yang_name="metric",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-ospf-types:ospf-metric",
            is_config=False,
        )

        # Optionally copy leaves from a single source object passed positionally.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy leaves that were explicitly changed on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return
        # this container's absolute YANG path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "lsdb",
                "lsa-types",
                "lsa-type",
                "lsas",
                "lsa",
                "as-external-lsa",
                "types-of-service",
                "type-of-service",
                "state",
            ]

    def _get_forwarding_address(self):
        """
        Getter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/forwarding_address (inet:ipv4-address-no-zone)
        YANG Description: The destination to which traffic for the external prefix
        should be advertised. When this value is set to 0.0.0.0 then
        traffic should be forwarded to the LSA's originator
        """
        return self.__forwarding_address

    def _set_forwarding_address(self, v, load=False):
        """
        Setter method for forwarding_address, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/forwarding_address (inet:ipv4-address-no-zone)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_forwarding_address is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_forwarding_address() directly.
        YANG Description: The destination to which traffic for the external prefix
        should be advertised. When this value is set to 0.0.0.0 then
        traffic should be forwarded to the LSA's originator
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                        },
                    ),
                    restriction_dict={"pattern": "[0-9\\.]*"},
                ),
                is_leaf=True,
                yang_name="forwarding-address",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="inet:ipv4-address-no-zone",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """forwarding_address must be of a type compatible with inet:ipv4-address-no-zone""",
                    "defined-type": "inet:ipv4-address-no-zone",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="forwarding-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
                }
            )
        self.__forwarding_address = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_forwarding_address(self):
        self.__forwarding_address = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_dict={
                        "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                    },
                ),
                restriction_dict={"pattern": "[0-9\\.]*"},
            ),
            is_leaf=True,
            yang_name="forwarding-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="inet:ipv4-address-no-zone",
            is_config=False,
        )

    def _get_external_route_tag(self):
        """
        Getter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/external_route_tag (uint32)
        YANG Description: An opaque tag that set by the LSA originator to carry
        information relating to the external route
        """
        return self.__external_route_tag

    def _set_external_route_tag(self, v, load=False):
        """
        Setter method for external_route_tag, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/external_route_tag (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_external_route_tag is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_external_route_tag() directly.
        YANG Description: An opaque tag that set by the LSA originator to carry
        information relating to the external route
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="external-route-tag",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """external_route_tag must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="external-route-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
                }
            )
        self.__external_route_tag = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_external_route_tag(self):
        self.__external_route_tag = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="external-route-tag",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )

    def _get_tos(self):
        """
        Getter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/tos (uint8)
        YANG Description: OSPF encoding of the type of service referred to by this
        LSA. Encoding for OSPF TOS are described in RFC2328.
        """
        return self.__tos

    def _set_tos(self, v, load=False):
        """
        Setter method for tos, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/tos (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_tos is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_tos() directly.
        YANG Description: OSPF encoding of the type of service referred to by this
        LSA. Encoding for OSPF TOS are described in RFC2328.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                is_leaf=True,
                yang_name="tos",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """tos must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="tos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
                }
            )
        self.__tos = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_tos(self):
        self.__tos = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            is_leaf=True,
            yang_name="tos",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=False,
        )

    def _get_metric(self):
        """
        Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
        YANG Description: The metric value to be used for the TOS specified. This value
        represents the cost of use of the link for the specific type
        of service.
        """
        return self.__metric

    def _set_metric(self, v, load=False):
        """
        Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service/type_of_service/state/metric (oc-ospf-types:ospf-metric)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_metric is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_metric() directly.
        YANG Description: The metric value to be used for the TOS specified. This value
        represents the cost of use of the link for the specific type
        of service.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
                ),
                is_leaf=True,
                yang_name="metric",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-ospf-types:ospf-metric",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """metric must be of a type compatible with oc-ospf-types:ospf-metric""",
                    "defined-type": "oc-ospf-types:ospf-metric",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-ospf-types:ospf-metric', is_config=False)""",
                }
            )
        self.__metric = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_metric(self):
        self.__metric = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
            ),
            is_leaf=True,
            yang_name="metric",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-ospf-types:ospf-metric",
            is_config=False,
        )

    # Read-only property handles (state container: getters only).
    forwarding_address = __builtin__.property(_get_forwarding_address)
    external_route_tag = __builtin__.property(_get_external_route_tag)
    tos = __builtin__.property(_get_tos)
    metric = __builtin__.property(_get_metric)

    _pyangbind_elements = OrderedDict(
        [
            ("forwarding_address", forwarding_address),
            ("external_route_tag", external_route_tag),
            ("tos", tos),
            ("metric", metric),
        ]
    )
| [
"[email protected]"
] | |
fc77fc4b71fbf255ed35ce57035c54569e418768 | 9617203960dccb356b775c69d901ad815b9282bb | /racf_audit/racf_audit.py | a33f5d2417d86bffc64a7f1a41158eb9d84e8230 | [
"MIT"
] | permissive | smlbiobot/SML-Cogs | 5b97e7df0b1126a6daf22829f82adb84c3e35ee0 | 31e8553183b87dce76fdf7744fc9fb1434ccc67f | refs/heads/master | 2021-10-27T13:02:38.591310 | 2021-10-26T15:56:17 | 2021-10-26T15:56:17 | 80,660,432 | 17 | 18 | MIT | 2018-06-15T00:05:54 | 2017-02-01T20:23:30 | Python | UTF-8 | Python | false | false | 64,137 | py | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import argparse
import asyncio
import csv
import datetime as dt
import io
import itertools
import json
import os
import re
from collections import OrderedDict
from collections import defaultdict
from collections import namedtuple
import aiohttp
import discord
import humanfriendly
import unidecode
import yaml
from addict import Dict
from cogs.utils import checks
from cogs.utils.chat_formatting import box
from cogs.utils.chat_formatting import inline
from cogs.utils.chat_formatting import pagify
from cogs.utils.chat_formatting import underline
from cogs.utils.dataIO import dataIO
from discord.ext import commands
from tabulate import tabulate
# Data directory and file locations used by this cog.
PATH = os.path.join("data", "racf_audit")
JSON = os.path.join(PATH, "settings.json")
PLAYERS = os.path.join("data", "racf_audit", "player_db.json")
# RACF_SERVER_ID = '218534373169954816'
RACF_SERVER_ID = '528327242875535372'  # Discord server id that gates Bot Commander checks
SML_SERVER_ID = '275395656955330560'
# Role names treated as "family member" roles during audits.
MEMBER_ROLE_NAMES = [
    'Member',
    'Tourney',
    'Practice',
    'CW',
    'Diary',
    'Alpha',
    'Bravo',
    'Coca',
    'Delta',
    'Echo',
    'Fox',
    'Golf',
    'Hotel',
    'Ice',
    'One',
    'Trade',
    'Zen',
    'Mini',
    'OG',
]
class NoPlayerRecord(Exception):
    """Raised when no player record exists for a requested tag/user."""
    pass
def nested_dict():
    """Recursively nested defaultdict.

    Missing keys create further nested defaultdicts, so callers can write
    d[a][b][c] = v without creating intermediate levels first.
    """
    return defaultdict(nested_dict)
def server_role(server, role_name):
    """Return the Discord role object with the given name, or None if absent."""
    return discord.utils.get(server.roles, name=role_name)
def member_has_role(server, member, role_name):
    """Return True if *member* has the role named *role_name* on *server*.

    Reuses the server_role helper for the lookup (consistency fix: this
    previously duplicated discord.utils.get inline). A missing role yields
    None, which is never in member.roles, so the result is False.
    """
    role = server_role(server, role_name)
    return role in member.roles
def grouper(n, iterable, fillvalue=None):
    """Collect *iterable* into fixed-length tuples of size *n*.

    The final tuple is padded with *fillvalue* when the iterable does not
    divide evenly, e.g. grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx.
    """
    # A single shared iterator repeated n times yields consecutive chunks.
    repeated = (iter(iterable),) * n
    return itertools.zip_longest(*repeated, fillvalue=fillvalue)
async def check_manage_roles(ctx, bot):
    """Check for permissions to run command since no one has manage roles anymore.

    Returns True when the author is allowed to run role-management commands;
    otherwise sends an explanatory message to the channel and returns False.
    """
    server = ctx.message.server
    author = ctx.message.author
    channel = ctx.message.channel
    # For 100T server, only allow command to run if user has the "Bot Commander" role
    if server.id == RACF_SERVER_ID:
        bc_role = discord.utils.get(server.roles, name="Bot Commander")
        if bc_role not in author.roles:
            await bot.send_message(
                channel,
                "Only Bot Commanders on this server can run this command.")
            return False
        else:
            return True
    # For other servers, only allow to run if user has manage role permissions
    if not author.server_permissions.manage_roles:
        await bot.send_message(
            channel,
            "You don’t have the manage roles permission.")
        return False
    return True
class RACFAuditException(Exception):
    """Base exception for the RACF audit cog."""
    pass


class CachedClanModels(RACFAuditException):
    # NOTE(review): purpose inferred from the name — appears to signal that
    # clan models came from a cache rather than the live API; confirm at
    # the raise sites elsewhere in the file.
    pass


# Result triple produced by the audit routine: computed results, rendered
# output, and any error encountered.
AuditResult = namedtuple("AuditResult", "audit_results output error")
class RACFClan:
    """RACF clan record tying Clash Royale API data to a Discord role.

    Attributes:
        name: Clan name.
        tag: Clan tag (without '#').
        role: Discord role object associated with the clan.
        membership_type: Family membership type (e.g. 'Member').
        model: Clan model from the Clash Royale API; must expose .members,
            each member having a .name.
    """

    def __init__(self, name=None, tag=None, role=None, membership_type=None, model=None):
        """Init."""
        self.name = name
        self.tag = tag
        self.role = role
        self.membership_type = membership_type
        self.model = model

    @property
    def repr(self):
        """Multi-line representation of the clan. Used for debugging."""
        o = []
        o.append('RACFClan object')
        o.append(
            "{0.name} #{0.tag} | {0.role.name}".format(self)
        )
        # Sort members case-insensitively for a stable, readable listing.
        members = sorted(self.model.members, key=lambda m: m.name.lower())
        member_names = [m.name for m in members]
        # Bug fix: removed stray debug `print(member_names)` that spammed stdout.
        o.append(', '.join(member_names))
        return '\n'.join(o)
class DiscordUser:
    """Discord user = player tag association."""

    def __init__(self, user=None, tag=None):
        """Store the Discord member object and its Clash Royale player tag."""
        self.user = user
        self.tag = tag
class DiscordUsers:
    """Lazily-built list of Discord user / player-tag associations for a server."""

    def __init__(self, crclan_cog, server):
        """Init.

        Args:
            crclan_cog: CRClan cog providing the player tag registry.
            server: Discord server whose members are resolved.
        """
        self.crclan_cog = crclan_cog
        self.server = server
        self._user_list = None

    @property
    def user_list(self):
        """Create multiple DiscordUser from a list of tags (built once, cached).

        players format:
        '99688854348369920': '22Q0VGUP'
        discord_member_id: CR player tag
        """
        if self._user_list is None:
            players = self.crclan_cog.manager.get_players(self.server)
            out = []
            for member_id, player_tag in players.items():
                user = self.server.get_member(member_id)
                # Skip registrations for users no longer on the server.
                if user is not None:
                    out.append(DiscordUser(user=user, tag=player_tag))
            self._user_list = out
        return self._user_list

    def tag_to_member(self, tag):
        """Return the Discord member associated with a player tag, or None."""
        for u in self.user_list:
            if u.tag == tag:
                return u.user
        return None

    def tag_to_member_id(self, tag):
        """Return the Discord member *id* for a player tag, or None.

        Bug fix: this previously returned the member object (an exact
        duplicate of tag_to_member) despite its name promising an id.
        """
        for u in self.user_list:
            if u.tag == tag:
                return u.user.id
        return None
def clean_tag(tag):
    """Normalize a Clash Royale tag.

    Uppercases, converts the common look-alikes B->8 and O->0, and strips
    any character outside the valid tag alphabet. Falsy input (None, '')
    is returned unchanged.
    """
    if not tag:
        return tag
    normalized = tag.upper().replace('B', '8').replace('O', '0')
    return re.sub(r'[^0289CGJLPQRUVY]+', '', normalized)
def get_role_name(role):
    """Map a raw clan role string to its canonical display name.

    Matching is case-insensitive; unknown roles and None return ''.
    (Idiom fix: replaced the `in keys()` membership test followed by a
    second lookup with a single dict.get with a default.)
    """
    if role is None:
        return ''
    roles_dict = {
        'leader': 'Leader',
        'coleader': 'Co-Leader',
        'elder': 'Elder',
        'member': 'Member',
    }
    return roles_dict.get(role.lower(), '')
class ClashRoyaleAPIError(Exception):
    """Error raised when a Clash Royale API request fails."""

    def __init__(self, status=None, message=None):
        super().__init__()
        self._status = status
        self._message = message

    @property
    def status(self):
        """HTTP status code of the failed request, if known."""
        return self._status

    @property
    def message(self):
        """Human-readable error description, if any."""
        return self._message

    @property
    def status_message(self):
        """Combine status and message into one display string ('' when empty)."""
        parts = [str(part) for part in (self._status, self._message) if part is not None]
        return '. '.join(parts)
class ClashRoyaleAPI:
    """Thin async client for the official Clash Royale REST API."""

    def __init__(self, token):
        # Bearer token used for the Authorization header on every request.
        self.token = token

    async def fetch_with_session(self, session, url, timeout=30.0):
        """Perform the actual fetch with the session object.

        Raises ClashRoyaleAPIError for any non-200 response.
        NOTE(review): `timeout` is accepted but never applied to the
        request — confirm whether it was meant to be passed to session.get.
        """
        headers = {
            'Authorization': 'Bearer {}'.format(self.token)
        }
        async with session.get(url, headers=headers) as resp:
            body = await resp.json()
            if resp.status != 200:
                raise ClashRoyaleAPIError(status=resp.status, message=resp.reason)
            return body

    async def fetch(self, url):
        """Fetch request."""
        error_msg = None
        try:
            async with aiohttp.ClientSession() as session:
                body = await self.fetch_with_session(session, url)
        except asyncio.TimeoutError:
            error_msg = 'Request timed out'
            raise ClashRoyaleAPIError(message=error_msg)
        except aiohttp.ServerDisconnectedError as err:
            error_msg = 'Server disconnected error: {}'.format(err)
            raise ClashRoyaleAPIError(message=error_msg)
        except (aiohttp.ClientError, ValueError) as err:
            error_msg = 'Request connection error: {}'.format(err)
            raise ClashRoyaleAPIError(message=error_msg)
        except json.JSONDecodeError:
            error_msg = "Non JSON returned"
            raise ClashRoyaleAPIError(message=error_msg)
        else:
            return body
        finally:
            # NOTE(review): when an except clause above raises, this finally
            # raises a second ClashRoyaleAPIError that *replaces* the first;
            # the error_msg guard makes the raise-in-except redundant.
            # Confirm intended flow before simplifying.
            if error_msg is not None:
                raise ClashRoyaleAPIError(message=error_msg)

    async def fetch_multi(self, urls):
        """Perform parallel fetch"""
        # Despite the docstring, URLs are fetched sequentially over one
        # shared session (the sleep(0) only yields control between fetches).
        results = []
        error_msg = None
        try:
            async with aiohttp.ClientSession() as session:
                for url in urls:
                    await asyncio.sleep(0)
                    body = await self.fetch_with_session(session, url)
                    results.append(body)
        except asyncio.TimeoutError:
            error_msg = 'Request timed out'
            raise ClashRoyaleAPIError(message=error_msg)
        except aiohttp.ServerDisconnectedError as err:
            error_msg = 'Server disconnected error: {}'.format(err)
            raise ClashRoyaleAPIError(message=error_msg)
        except (aiohttp.ClientError, ValueError) as err:
            error_msg = 'Request connection error: {}'.format(err)
            raise ClashRoyaleAPIError(message=error_msg)
        except json.JSONDecodeError:
            error_msg = "Non JSON returned"
            raise ClashRoyaleAPIError(message=error_msg)
        else:
            return results
        finally:
            # NOTE(review): same double-raise pattern as fetch() — see above.
            if error_msg is not None:
                raise ClashRoyaleAPIError(message=error_msg)

    async def fetch_clan(self, tag):
        """Get a clan."""
        tag = clean_tag(tag)
        # %23 is the URL-encoded '#' prefix required by the API.
        url = 'https://api.clashroyale.com/v1/clans/%23{}'.format(tag)
        body = await self.fetch(url)
        return body

    async def fetch_clan_list(self, tags):
        """Get multiple clans."""
        tags = [clean_tag(tag) for tag in tags]
        tasks = [self.fetch_clan(tag) for tag in tags]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for index, r in enumerate(results):
            if isinstance(r, ClashRoyaleAPIError):
                print(r.status_message)
                results[index] = {}
                # NOTE(review): raising here aborts on the first failed clan,
                # so the {} placeholder above never reaches callers — confirm
                # whether partial results were intended instead.
                raise ClashRoyaleAPIError(status=r.status, message=r.message)
        return results

    async def fetch_clan_leaderboard(self, location=None):
        """Get clan leaderboard"""
        # NOTE(review): `location` is accepted but the URL is hard-coded to
        # the global rankings — confirm intent.
        url = 'https://api.clashroyale.com/v1/locations/global/rankings/clans'
        body = await self.fetch(url)
        return body

    async def fetch_clan_war(self, tag=None):
        # Fetch the current clan war for the given clan tag.
        url = 'https://api.clashroyale.com/v1/clans/%23{}/currentwar'.format(tag)
        body = await self.fetch(url)
        return body
class RACFAudit:
"""RACF Audit.
Requires use of additional cogs for functionality:
SML-Cogs: crclan : CRClan
SML-Cogs: mm : MemberManagement
"""
required_cogs = ['crclan', 'mm']
def __init__(self, bot):
    """Load settings, the player database, family config, and start the audit loop."""
    self.bot = bot
    self.settings = dataIO.load_json(JSON)
    self._clan_roles = None
    players_path = PLAYERS
    # Fall back to the backup database when the primary file is missing,
    # then persist whatever was loaded back to the primary path.
    if not os.path.exists(players_path):
        players_path = os.path.join(PATH, "player_db_bak.json")
    self._players = dataIO.load_json(players_path)
    dataIO.save_json(PLAYERS, self._players)
    with open('data/racf_audit/family_config.yaml') as f:
        self.config = yaml.load(f, Loader=yaml.FullLoader)
    # Kick off the periodic background audit task.
    loop = asyncio.get_event_loop()
    self.task = loop.create_task(self.loop_task())
def __unload(self):
    """Remove task when unloaded."""
    try:
        if self.task:
            self.task.cancel()
    except Exception:
        # Best-effort cancel; the task may already be done or never created.
        pass
async def loop_task(self):
    """Background loop: schedule an audit run every four hours."""
    try:
        while True:
            # Only act while this instance is still the registered cog
            # (guards against a reload leaving a stale loop running).
            if self == self.bot.get_cog("RACFAudit"):
                loop = asyncio.get_event_loop()
                # run_audit_task is defined elsewhere in this cog — TODO confirm.
                loop.create_task(self.run_audit_task())
            interval = int(dt.timedelta(hours=4).total_seconds())
            await asyncio.sleep(interval)
    except asyncio.CancelledError:
        pass
@property
def players(self):
"""Player dictionary, userid -> tag"""
return self._players
# players = dataIO.load_json(PLAYERS)
# return players
@commands.group(aliases=["racfas"], pass_context=True, no_pm=True)
# @checks.mod_or_permissions(manage_roles=True)
async def racfauditset(self, ctx):
    """RACF Audit Settings."""
    # Runtime permission gate replaces the commented-out decorator above.
    verified = await check_manage_roles(ctx, self.bot)
    if not verified:
        return
    if ctx.invoked_subcommand is None:
        await self.bot.send_cmd_help(ctx)
async def update_server_settings(self, ctx, key, value):
    """Set a per-server setting and persist it to disk.

    Robustness fix: creates the per-server dict on first use via
    setdefault instead of raising KeyError for a server that has no
    settings entry yet.
    """
    server = ctx.message.server
    self.settings.setdefault(server.id, {})[key] = value
    dataIO.save_json(JSON, self.settings)
    await self.bot.say("Updated settings.")
async def set_player_tag(self, tag, member: discord.Member, force=False):
    """Allow external programs to set player tags. (RACF)

    Associates *tag* with *member*. Without force, refuses (returns False)
    when either the tag or the member already has an association; with
    force, conflicting entries are removed first. Returns True on success
    and persists the database to disk.
    """
    await asyncio.sleep(0)
    players = self.players
    # clean tags
    tag = clean_tag(tag)
    # ensure unique tag
    tag_in_db = False
    if tag in players.keys():
        tag_in_db = True
        if not force:
            return False
    # ensure unique user ids
    user_id_in_db = False
    for k, v in players.items():
        if v.get('user_id') == member.id:
            user_id_in_db = True
            if not force:
                return False
    # if force override, remove the entries
    if tag_in_db:
        players.pop(tag, None)
    if user_id_in_db:
        # Collect keys first, then pop, to avoid mutating while iterating.
        _ks = None
        for k, v in players.items():
            if v.get('user_id') == member.id:
                if _ks is None:
                    _ks = []
                _ks.append(k)
        if _ks is not None:
            for k in _ks:
                players.pop(k, None)
    players[tag] = {
        "tag": tag,
        "user_id": member.id,
        "user_name": member.display_name
    }
    dataIO.save_json(PLAYERS, players)
    return True
async def get_player_tag(self, tag):
    """Return the stored player record for *tag*, or None."""
    # Yield control once so the coroutine stays cooperative even though
    # the lookup itself is synchronous.
    await asyncio.sleep(0)
    return self.players.get(tag)
async def rm_player_tag(self, tag):
    """Remove player tag from settings."""
    # TODO: not implemented — currently a no-op stub.
    pass
@racfauditset.command(name="auth", pass_context=True)
@checks.is_owner()
async def racfauditset_auth(self, ctx, token):
    """Set API Authentication token."""
    self.settings["auth"] = token
    dataIO.save_json(JSON, self.settings)
    await self.bot.say("Updated settings.")
    # Delete the invoking message so the token is not left visible in chat.
    await self.bot.delete_message(ctx.message)
@racfauditset.command(name="settings", pass_context=True)
@checks.is_owner()
async def racfauditset_settings(self, ctx):
    """Show the stored settings dict (owner only)."""
    # Docstring fixed: it was copy-pasted from the auth command and
    # wrongly claimed to set the token; this command only displays settings.
    await self.bot.say(box(self.settings))
@property
def auth(self):
    """API authentication token."""
    # Bearer token consumed by ClashRoyaleAPI; None when not configured.
    return self.settings.get("auth")
@commands.group(aliases=["racfa"], pass_context=True, no_pm=True)
async def racfaudit(self, ctx):
    """RACF Audit."""
    # Show sub-command help when the group is invoked bare.
    if ctx.invoked_subcommand is None:
        await self.bot.send_cmd_help(ctx)
@racfaudit.command(name="config", pass_context=True, no_pm=True)
@checks.mod_or_permissions()
async def racfaudit_config(self, ctx):
    """Show config."""
    # tabulate the configured clan list; pagify stays under message limits
    for page in pagify(box(tabulate(self.config['clans'], headers="keys"))):
        await self.bot.say(page)
def clan_tags(self):
    """Return tags of all configured clans whose type is 'Member'."""
    # feeder/mini clans (other types) are excluded on purpose
    return [
        clan.get('tag')
        for clan in self.config.get('clans')
        if clan.get('type') == 'Member'
    ]
@property
def clan_roles(self):
    """Dictionary mapping clan name to clan role names (built lazily, cached)."""
    if self._clan_roles is None:
        member_clans = (c for c in self.config.get('clans') if c['type'] == 'Member')
        self._clan_roles = {c['name']: c['role_name'] for c in member_clans}
    return self._clan_roles
def search_args_parser(self):
    """Build the argparse parser backing [p]racfaudit search."""
    parser = argparse.ArgumentParser(prog='[p]racfaudit search')
    # '_' sentinel default means "no name filter"
    parser.add_argument('name', nargs='?', default='_', help='IGN')
    parser.add_argument('-c', '--clan', nargs='?', help='Clan')
    parser.add_argument('-n', '--min', nargs='?', type=int, default=0,
                        help='Min Trophies')
    parser.add_argument('-m', '--max', nargs='?', type=int, default=10000,
                        help='Max Trophies')
    parser.add_argument('-l', '--link', action='store_true', default=False)
    return parser
@property
def api(self):
    """A fresh ClashRoyaleAPI client built from the stored auth token."""
    return ClashRoyaleAPI(self.auth)
async def family_member_models(self):
    """All family member models.

    Fetches every configured Member clan and flattens their member lists.
    Each returned member dict gets a cleaned 'tag' and a back-reference to
    its clan model under 'clan'.
    """
    api = ClashRoyaleAPI(self.auth)
    tags = self.clan_tags()
    clan_models = await api.fetch_clan_list(tags)
    # print(clan_models)
    members = []
    for clan_model in clan_models:
        for member_model in clan_model.get('memberList', []):
            tag = member_model.get('tag')
            if tag:
                member_model['tag'] = clean_tag(tag)
            member_model['clan'] = clan_model
            members.append(member_model)
    return members
@racfaudit.command(name="tag2member", pass_context=True, aliases=['t2m'])
async def racfaudit_tag2member(self, ctx, tag):
    """Find member by tag in DB."""
    verified = await check_manage_roles(ctx, self.bot)
    if not verified:
        return
    tag = clean_tag(tag)
    user_id = None
    # linear scan of the player DB for the first matching tag
    for _key, m in self.players.items():
        m_tag = m.get('tag')
        if m_tag is not None and m_tag == tag:
            user_id = m['user_id']
            break
    if user_id is None:
        await self.bot.say("Member not found.")
    else:
        server = ctx.message.server
        member = server.get_member(user_id)
        # member may have left the server; fall back to the raw user id
        await self.bot.say("{} ({}) is associated with #{}".format(
            member.mention if member is not None else 'Unknown user',
            user_id, tag)
        )
@racfaudit.command(name="tag", pass_context=True)
# @checks.mod_or_permissions(manage_roles=True)
async def racfaudit_tag(self, ctx, member: discord.Member):
    """Find member tag in DB."""
    verified = await check_manage_roles(ctx, self.bot)
    if not verified:
        return
    found = False
    # a user may own multiple tags; report every match
    for tag, m in self.players.items():
        if m["user_id"] == member.id:
            await self.bot.say("RACF Audit database: `{}` is associated to `#{}`".format(member, tag))
            found = True
    if not found:
        await self.bot.say("RACF Audit database: Member is not associated with any tags.")
@racfaudit.command(name="rmtag", pass_context=True)
# @checks.mod_or_permissions(manage_roles=True)
async def racfaudit_rm_tag(self, ctx, tag):
    """Remove tag in DB."""
    verified = await check_manage_roles(ctx, self.bot)
    if not verified:
        return
    tag = clean_tag(tag)
    # Bug fix: dict.pop(tag, None) never raises KeyError, so the previous
    # try/except always reported success. Test the returned value instead.
    if self.players.pop(tag, None) is None:
        await self.bot.say("Tag not found in DB.")
    else:
        dataIO.save_json(PLAYERS, self.players)
        await self.bot.say("Removed tag from DB.")
@racfaudit.command(name="search", pass_context=True, no_pm=True)
# @checks.mod_or_permissions(manage_roles=True)
async def racfaudit_search(self, ctx, *args):
    """Search for member.

    usage: [p]racfaudit search [-h] [-t TAG] name

    positional arguments:
    name        IGN

    optional arguments:
    -h, --help            show this help message and exit
    -c CLAN, --clan CLAN  Clan name
    -n MIN --min MIN      Min Trophies
    -m MAX --max MAX      Max Trophies
    -l --link             Display link to cr-api.com
    """
    verified = await check_manage_roles(ctx, self.bot)
    if not verified:
        return
    parser = self.search_args_parser()
    try:
        pargs = parser.parse_args(args)
    except SystemExit:
        # argparse exits on bad args/-h; show the command help instead
        await self.bot.send_cmd_help(ctx)
        return
    results = []
    await self.bot.type()
    try:
        member_models = await self.family_member_models()
    except ClashRoyaleAPIError as e:
        await self.bot.say(e.status_message)
        return
    # '_' is the "no name filter" sentinel from search_args_parser
    if pargs.name != '_':
        for member_model in member_models:
            # simple search
            if pargs.name.lower() in member_model.get('name').lower():
                results.append(member_model)
            else:
                # unidecode search: fold accents, keep word chars only
                s = unidecode.unidecode(member_model.get('name'))
                s = ''.join(re.findall(r'\w', s))
                if pargs.name.lower() in s.lower():
                    results.append(member_model)
    else:
        results = member_models
    # filter by clan name
    if pargs.clan:
        results = [m for m in results if pargs.clan.lower() in m.get('clan', {}).get('name', '').lower()]
    # filter by trophies
    results = [m for m in results if pargs.min <= m.get('trophies') <= pargs.max]
    limit = 10
    if len(results) > limit:
        await self.bot.say(
            "Found more than {0} results. Returning top {0} only.".format(limit)
        )
        results = results[:limit]
    if len(results):
        out = []
        for member_model in results:
            clan = member_model.get('clan')
            clan_name = None
            if clan is not None:
                clan_name = clan.get('name')
            out.append("**{name}** #{tag}, {clan_name}, {role}, {trophies}".format(
                name=member_model.get('name'),
                tag=member_model.get('tag'),
                clan_name=clan_name,
                role=get_role_name(member_model.get('role')),
                trophies=member_model.get('trophies'),
            ))
            if pargs.link:
                out.append('<http://royaleapi.com/player/{}>'.format(member_model.get('tag')))
                out.append('<http://royaleapi.com/player/{}/battles>'.format(member_model.get('tag')))
        for page in pagify('\n'.join(out)):
            await self.bot.say(page)
    else:
        await self.bot.say("No results found.")
def run_args_parser(self):
    """Build the argparse parser backing [p]racfaudit run."""
    parser = argparse.ArgumentParser(prog='[p]racfaudit run')
    parser.add_argument('-x', '--exec', action='store_true', default=False,
                        help='Execute add/remove roles')
    parser.add_argument('-d', '--debug', action='store_true', default=False,
                        help='Debug')
    parser.add_argument('-c', '--clan', nargs='+', help='Clan(s) to show')
    parser.add_argument('-s', '--settings', action='store_true', default=False,
                        help='Settings')
    return parser
@racfaudit.command(name="run", pass_context=True, no_pm=True)
# @checks.mod_or_permissions(manage_roles=True)
async def racfaudit_run(self, ctx, *args):
    """Audit the entire RACF family.

    [p]racfaudit run [-h] [-x] [-d] [-c CLAN [CLAN ...]]

    optional arguments:
      -h, --help            show this help message and exit
      -x, --exec            Execute add/remove roles
      -d, --debug           Debug
      -c CLAN [CLAN ...], --clan CLAN [CLAN ...]
                            Clan(s) to show
    """
    verified = await check_manage_roles(ctx, self.bot)
    if not verified:
        return
    server = ctx.message.server
    parser = self.run_args_parser()
    try:
        pargs = parser.parse_args(args)
    except SystemExit:
        await self.bot.send_cmd_help(ctx)
        return
    # Show settings
    if pargs.settings:
        await ctx.invoke(self.racfaudit_config)
    await self.bot.type()
    clan_filters = []
    if pargs.clan:
        clan_filters = pargs.clan
    result = await self.run_racfaudit(server, clan_filters=clan_filters)
    for page in pagify('\n'.join(result.output)):
        await self.bot.say(page)
    if pargs.exec:
        channel = ctx.message.channel
        await self.bot.type()
        if result.error:
            # Bug fix: self.bot.send(...) is not a discord.py 0.16 API;
            # send_message is used everywhere else in this cog.
            await self.bot.send_message(channel, "Audit aborted because of Clash Royale API error.")
        else:
            await self.exec_racf_audit(channel=channel, audit_results=result.audit_results, server=server)
        await self.bot.say("Audit finished.")
async def run_racfaudit(self, server: discord.Server, clan_filters=None) -> AuditResult:
    """Run audit and return results.

    Collects audit findings (needed promotions, members without Discord,
    missing clan/member roles, outsiders holding member roles) plus the
    display lines. ``error`` is True when the Clash Royale API failed.
    """
    default_audit_results = {
        "elder_promotion_req": [],
        "coleader_promotion_req": [],
        "leader_promotion_req": [],
        "no_discord": [],
        "no_clan_role": [],
        "no_member_role": [],
        "not_in_our_clans": [],
    }
    audit_results = default_audit_results.copy()
    error = False
    out = []
    # Bug fix: member_models must be bound even when the API call raises,
    # otherwise the `if member_models:` test below raises NameError.
    member_models = None
    try:
        member_models = await self.family_member_models()
    except ClashRoyaleAPIError as e:
        # await self.bot.say(e.status_message)
        error = True
    else:
        out.append("**RACF Family Audit**")
    # something went wrong e.g. clash royale error
    if member_models:
        # associate Discord user to member
        for member_model in member_models:
            tag = clean_tag(member_model.get('tag'))
            try:
                discord_id = self.players[tag]["user_id"]
            except KeyError:
                pass
            else:
                member_model['discord_member'] = server.get_member(discord_id)
        # find member_models mismatch
        discord_members = []
        for member_model in member_models:
            has_discord = member_model.get('discord_member')
            if has_discord is None:
                audit_results["no_discord"].append(member_model)
            if has_discord:
                discord_member = member_model.get('discord_member')
                discord_members.append(discord_member)
                # promotions: compare Discord roles against in-game role
                is_elder = False
                is_coleader = False
                is_leader = False
                for r in discord_member.roles:
                    if r.name.lower() == 'elder':
                        is_elder = True
                    if r.name.lower() == 'coleader':
                        is_coleader = True
                    if r.name.lower() == 'leader':
                        is_leader = True
                if is_elder:
                    if member_model.get('role').lower() != 'elder':
                        audit_results["elder_promotion_req"].append(member_model)
                if is_coleader:
                    if member_model.get('role').lower() != 'coleader':
                        audit_results["coleader_promotion_req"].append(member_model)
                if is_leader:
                    if member_model.get('role').lower() != 'leader':
                        audit_results["leader_promotion_req"].append(member_model)
                # no clan role
                clan_name = member_model['clan']['name']
                clan_role_name = self.clan_roles[clan_name]
                if clan_role_name not in [r.name for r in discord_member.roles]:
                    audit_results["no_clan_role"].append({
                        "discord_member": discord_member,
                        "member_model": member_model
                    })
                # no member role
                discord_role_names = [r.name for r in discord_member.roles]
                if 'Member' not in discord_role_names:
                    audit_results["no_member_role"].append(discord_member)
        # find discord users holding Member role but not in any clan
        for user in server.members:
            user_roles = [r.name for r in user.roles]
            if 'Member' in user_roles:
                if user not in discord_members:
                    audit_results['not_in_our_clans'].append(user)

        # show results
        def list_member(member_model):
            """member row"""
            clan = member_model.get('clan')
            clan_name = None
            if clan is not None:
                clan_name = clan.get('name')
            row = "**{name}** #{tag}, {clan_name}, {role}, {trophies}".format(
                name=member_model.get('name'),
                tag=member_model.get('tag'),
                clan_name=clan_name,
                role=get_role_name(member_model.get('role')),
                trophies=member_model.get('trophies')
            )
            return row

        for clan in self.config['clans']:
            # a clan is displayed when no filter was given, or one of the
            # requested filters matches its configured filter aliases
            display_output = False
            clan_name_filters = []
            for f in clan.get('filters', []):
                clan_name_filters.append(str(f).lower())
            if clan_filters:
                for c in clan_filters:
                    if str(c).lower() in clan_name_filters:
                        display_output = True
            else:
                display_output = True
            if not display_output:
                continue
            if clan['type'] == 'Member':
                out.append("-" * 40)
                out.append(inline(clan.get('name')))
                # no discord
                out.append(underline("Members without discord"))
                for member_model in audit_results["no_discord"]:
                    try:
                        if member_model['clan']['name'] == clan.get('name'):
                            out.append(list_member(member_model))
                    except KeyError:
                        pass
                # elders
                out.append(underline("Elders need promotion"))
                for member_model in audit_results["elder_promotion_req"]:
                    try:
                        if member_model['clan']['name'] == clan.get('name'):
                            out.append(list_member(member_model))
                    except KeyError:
                        pass
                # coleaders
                out.append(underline("Co-Leaders need promotion"))
                for member_model in audit_results["coleader_promotion_req"]:
                    try:
                        if member_model['clan']['name'] == clan.get('name'):
                            out.append(list_member(member_model))
                    except KeyError:
                        pass
                # clan role
                out.append(underline("No clan role"))
                for result in audit_results["no_clan_role"]:
                    try:
                        if result["member_model"]['clan']['name'] == clan.get('name'):
                            out.append(result['discord_member'].mention)
                    except KeyError:
                        pass
        # not in our clans
        out.append("-" * 40)
        out.append(underline("Discord users not in our clans but with member roles"))
        for result in audit_results['not_in_our_clans']:
            out.append('`{}` {}'.format(result, result.id))
    else:
        out.append('Error from Clash Royale API. Aborting audit…')
    return AuditResult(
        audit_results=audit_results,
        output=out,
        error=error
    )
async def exec_racf_audit(self, channel: discord.Channel = None, audit_results=None, server=None):
    """Execute audit and output to specific channel.

    Applies the role changes computed by run_racfaudit: fixes clan roles,
    grants the Member role, and strips member roles from people who are
    not in our clans or are visitors.
    """
    await self.bot.send_message(channel, "**RACF Family Audit**")
    await self.bot.send_typing(channel)

    async def exec_add_roles(d_member, roles, channel=None):
        # best-effort add; Discord errors (permissions, hierarchy) are ignored
        # print("add roles", d_member, [r.name for r in roles])
        # await asyncio.sleep(0)
        try:
            await self.bot.add_roles(d_member, *roles)
        except discord.DiscordException as e:
            pass
        else:
            if channel is not None:
                await self.bot.send_message(channel,
                                            "Add {} to {}".format(", ".join([r.name for r in roles]), d_member))

    async def exec_remove_roles(d_member, roles, channel=None):
        # best-effort remove; Discord errors are ignored
        # print("remove roles", d_member, [r.name for r in roles])
        # await asyncio.sleep(0)
        try:
            await self.bot.remove_roles(d_member, *roles)
        except discord.DiscordException as e:
            pass
        else:
            if channel is not None:
                await self.bot.send_message(channel,
                                            "Remove {} from {}".format(", ".join([r.name for r in roles]),
                                                                       d_member))

    # change clan roles: strip other clans' roles, then add the correct one
    for result in audit_results["no_clan_role"]:
        try:
            member_model = result['member_model']
            discord_member = result['discord_member']
            clan_role_name = self.clan_roles[member_model['clan']['name']]
            other_clan_role_names = [r for r in self.clan_roles.values() if r != clan_role_name]
            for rname in other_clan_role_names:
                role = discord.utils.get(discord_member.roles, name=rname)
                if role is not None:
                    await exec_remove_roles(discord_member, [role], channel=channel)
            role = discord.utils.get(server.roles, name=clan_role_name)
            if role is not None:
                await exec_add_roles(discord_member, [role], channel=channel)
        except KeyError:
            pass

    # Add member role
    member_role = discord.utils.get(server.roles, name='Member')
    for discord_member in audit_results["no_member_role"]:
        try:
            if member_role is not None:
                await exec_add_roles(discord_member, [member_role], channel=channel)
        except KeyError:
            pass

    # remove member roles from people who are not in our clans
    for result in audit_results['not_in_our_clans']:
        result_role_names = [r.name for r in result.roles]
        # ignore people with special / exempt roles
        if 'Special' in result_role_names:
            continue
        if 'Keep-Member' in result_role_names:
            continue
        if 'Leader-Emeritus' in result_role_names:
            continue
        if 'FamilyLead' in result_role_names:
            continue
        if 'BS-FamilyLead' in result_role_names:
            continue
        if 'Bot Commander' in result_role_names:
            continue
        if 'MOD' in result_role_names:
            continue
        to_remove_role_names = []
        for role_name in MEMBER_ROLE_NAMES:
            if role_name in result_role_names:
                to_remove_role_names.append(role_name)
        to_remove_roles = [discord.utils.get(server.roles, name=rname) for rname in to_remove_role_names]
        to_remove_roles = [r for r in to_remove_roles if r is not None]
        if len(to_remove_roles):
            await exec_remove_roles(result, to_remove_roles, channel=channel)
            # if visitor_role is not None:
            #     await exec_add_roles(result, [visitor_role], channel=channel)

    # Remove clan roles from visitors
    member_role = discord.utils.get(server.roles, name='Member')
    server_members = list(server.members).copy()
    for user in server_members:
        # not a member
        if member_role not in user.roles:
            user_role_names = [r.name for r in user.roles]
            user_member_role_names = set(user_role_names) & set(MEMBER_ROLE_NAMES)
            # union of user roles with member role names -> user has member roles which need to be removed
            if user_member_role_names:
                to_remove_roles = [discord.utils.get(server.roles, name=rname) for rname in user_member_role_names]
                to_remove_roles = [r for r in to_remove_roles if r is not None]
                if to_remove_roles:
                    try:
                        await exec_remove_roles(user, to_remove_roles, channel=channel)
                    except Exception as e:
                        pass
    await self.bot.send_message(channel, "Audit finished.")
async def search_player(self, tag=None, user_id=None):
    """Search for players.

    Return player dict
    {'tag': '200CYRVCU', 'user_id': '295317904633757696', 'user_name': 'Ryann'}
    """
    def first_match(field, value):
        # first player record whose *field* equals *value*, else None
        for record in self.players.values():
            if record.get(field) == value:
                return record
        return None

    if tag is not None:
        record = first_match('tag', tag)
        if record is not None:
            return record
    if user_id is not None:
        record = first_match('user_id', user_id)
        if record is not None:
            return record
    return None
@racfaudit.command(name="audit", pass_context=True)
async def audit_member(self, ctx, member: discord.Member):
    """Run audit against specific user."""
    try:
        player = await self.search_player(user_id=member.id)
        if player is None:
            raise NoPlayerRecord()
        tag = player.get('tag')
        if tag is None:
            raise NoPlayerRecord()
    except NoPlayerRecord as e:
        await self.bot.say("Your tag is not set. Please ask a Co-Leader for help.")
        return
    # delegate the actual verification to the RACF cog
    racf = self.bot.get_cog("RACF")
    await ctx.invoke(racf.racf_verify, member, tag)
@commands.command(name="auditme", pass_context=True)
async def audit_self(self, ctx):
    """Run audit against self."""
    author = ctx.message.author
    try:
        player = await self.search_player(user_id=author.id)
        if player is None:
            raise NoPlayerRecord()
        tag = player.get('tag')
        if tag is None:
            raise NoPlayerRecord()
    except NoPlayerRecord as e:
        await self.bot.say("Your tag is not set. Please ask a Co-Leader for help.")
        return
    # delegate to the RACF cog; grant_permission lets users self-verify
    racf = self.bot.get_cog("RACF")
    await ctx.invoke(racf.racf_verify, author, tag, grant_permission=True)
@racfaudit.command(name="rank", pass_context=True)
async def racfaudit_rank(self, ctx, *names):
    """Look up member rank within the family.

    Options:
    -startswith search names from the start only
    -link link to profiles
    -mini exclude mini clan
    """
    await self.bot.type()
    try:
        member_models = await self.family_member_models()
    except ClashRoyaleAPIError as e:
        await self.bot.say(e.status_message)
        return
    results = []
    member_models = sorted(member_models, key=lambda x: x['trophies'], reverse=True)
    option_startwith = '-startswith' in names
    option_link = '-link' in names
    option_mini = '-mini' in names
    if option_startwith:
        names = list(names)
        names.remove('-startswith')
    # NOTE(review): '-link' and '-mini' are left in *names* and therefore
    # also treated as search terms (they just match nothing) — confirm intent.
    if option_mini:
        member_models = [m for m in member_models if m.get('clan', {}).get('tag') != '#9R8G9290']
    for index, member_model in enumerate(member_models, 1):
        # simple search
        for name in names:
            add_it = False
            if name.lower() in member_model.get('name').lower():
                if option_startwith:
                    m_name = member_model.get('name', '').lower()
                    # strip color code
                    m_name = re.sub(r'^(\<.+\>)', '', m_name)
                    # strip all non alphanumeric characters
                    m_name = ''.join(re.findall(r'[a-z0-9]+', m_name))
                    if m_name.startswith(name):
                        add_it = True
                else:
                    add_it = True
            else:
                # unidecode search: fold accents before matching
                s = unidecode.unidecode(member_model.get('name'))
                s = ''.join(re.findall(r'\w', s))
                if name.lower() in s.lower():
                    add_it = True
            if add_it:
                results.append({
                    'index': index,
                    'member': member_model,
                    'clan_name': member_model.get('clan', {}).get('name', '')
                })
    limit = 20
    if len(results) > limit:
        await self.bot.say("More than {0} results found, showing top {0}…".format(limit))
        results = results[:limit]
    if results:
        em = await self.result_embed(results, option_link=option_link)
        await self.bot.say(embed=em)
    else:
        await self.bot.say("No results")
async def result_embed(self, results, option_link=False, groups_of=5):
    """Format a list of member models into a single embed.

    Each result dict needs 'index', 'member' and 'clan_name'; rows are
    grouped into fields of *groups_of* lines to stay under field limits.
    Side effect: annotates each result with its rendered 'line'/'links'.
    """
    em = discord.Embed(
        title="RoyaleAPI Clan Family",
        url="https://royaleapi.com/f/royaleapi",
        color=discord.Color.blue()
    )
    result_groups = grouper(groups_of, results)
    for group_id, group in enumerate(result_groups):
        out = []
        for result in group:
            # grouper pads the final group with None fillers
            if not result:
                continue
            index = result['index']
            member = result['member']
            member_name = member.get('name', '')
            trophies = member.get('trophies', 0)
            clan_name = result.get('clan_name', '')
            links = ''
            if option_link:
                tag = member.get('tag', '')
                links = (
                    '\n'
                    '[Profile](https://royaleapi.com/player/{tag})'
                    ' • [Log](https://royaleapi.com/player/{tag}/battles)'
                    ' • [Decks](https://royaleapi.com/player/{tag}/decks)'
                ).format(tag=tag)
            # \u2800 is a braille blank used for monospace alignment
            line = '`\u2800{rank: >3} {trophies:<4}\u2800`**{member_name}** {clan_name} {links}'.format(
                rank=index,
                member_name=member_name,
                clan_name=clan_name,
                trophies=trophies,
                links=links,
            )
            result['line'] = line
            result['links'] = links
            out.append(line)
        r_index_first = group_id * groups_of + 1
        r_index_last = r_index_first + len(out) - 1
        field_name = "{} - {}".format(r_index_first, r_index_last)
        em.add_field(
            name=field_name,
            value='\n'.join(out),
            inline=False
        )
    return em
@commands.command(name="racfaudit_top", aliases=["rtop"], pass_context=True)
async def racfaudit_top(self, ctx, count: int):
    """Show top N members in family."""
    await self.bot.type()
    try:
        member_models = await self.family_member_models()
    except ClashRoyaleAPIError as e:
        await self.bot.say(e.status_message)
        return
    author = ctx.message.author
    # non-moderators may request at most 10 rows
    if not author.server_permissions.manage_roles:
        count = min(count, 10)
    member_models = sorted(member_models, key=lambda x: x['trophies'], reverse=True)
    results = []
    for index, m in enumerate(member_models[:count]):
        r = dict(
            index=index + 1,
            member=m,
            clan_name=m.get('clan', {}).get('name', '')
        )
        results.append(r)
    if results:
        em = await self.result_embed(results)
        await self.bot.say(embed=em)
    else:
        await self.bot.say("No results")
@racfaudit.command(name="csv", pass_context=True)
async def racfaudit_csv(self, ctx):
    """Output membership in CSV format."""
    await self.bot.type()
    try:
        member_models = await self.family_member_models()
    except ClashRoyaleAPIError as e:
        await self.bot.say(e.status_message)
        return
    filename = "members-{:%Y%m%d-%H%M%S}.csv".format(dt.datetime.utcnow())
    # NOTE(review): tmp_file is unused — the CSV is built fully in memory below
    tmp_file = os.path.join("data", "racf_audit", "member_csv.csv")
    fieldnames = [
        'name',
        'tag',
        'role',
        'expLevel',
        'trophies',
        'clan_tag',
        'clan_name',
        'clanRank',
        'previousClanRank',
        'donations',
        'donationsReceived',
    ]
    members = []
    for model in member_models:
        # keep only whitelisted columns, then add derived/cleaned fields
        member = {k: v for k, v in model.items() if k in fieldnames}
        member['clan_tag'] = clean_tag(model.get('clan', {}).get('tag', ''))
        member['clan_name'] = model.get('clan', {}).get('name', '')
        member['tag'] = clean_tag(model.get('tag', ''))
        members.append(member)
    channel = ctx.message.channel
    with io.StringIO() as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for member in members:
            writer.writerow(member)
        s = f.getvalue()
        # send_file needs a binary buffer, so re-encode the CSV text
        with io.BytesIO(s.encode()) as fb:
            await self.bot.send_file(
                channel, fb, filename=filename)
def calculate_clan_trophies(self, trophies):
    """Compute the clan trophy score from a list of member trophies.

    The top 10 members count 50%, ranks 11-20 count 25%, 21-30 count 12%,
    31-40 count 10%, 41-50 count 3%, and anyone beyond rank 50 counts 0.
    """
    # (bracket upper bound, weight), ascending — first bracket >= rank wins
    weights = ((10, 0.5), (20, 0.25), (30, 0.12), (40, 0.1), (50, 0.03))
    total = 0
    for rank, value in enumerate(sorted(trophies, reverse=True), 1):
        for limit, weight in weights:
            if rank <= limit:
                total += value * weight
                break
    return int(total)
@racfaudit.command(name="season", pass_context=True)
async def racfaudit_season(self, ctx, *args):
    """Find top 50 RACF not in Alpha.

    Shows (a) family top-50 members not in the Alpha clan and (b) Alpha
    members outside the top 50, plus projected clan trophies/rank. With
    '-id' / '-mention' (Bot Commander only) also emits IDs or pings.
    """
    await self.bot.type()
    server = ctx.message.server
    author = ctx.message.author
    try:
        member_models = await self.family_member_models()
    except ClashRoyaleAPIError as e:
        await self.bot.say(e.status_message)
        return
    top50_results = []
    non_top50_results = []
    ALPHA_CLAN_TAG = '#9PJ82CRC'
    # ALPHA_CLAN_TAG = '#PV98LY0P'
    member_models = sorted(member_models, key=lambda x: x['trophies'], reverse=True)
    alpha_trophies = [m.get('trophies') for m in member_models if m.get('clan', {}).get('tag') == ALPHA_CLAN_TAG]
    alpha_clan_trophies = self.calculate_clan_trophies(alpha_trophies)
    top50_trophies = [m.get('trophies') for m in member_models[:50]]
    top50_clan_trophies = self.calculate_clan_trophies(top50_trophies)
    trophy_50 = int(member_models[49].get('trophies', 0))
    # Find alpha rank if top 50 in alpha
    api = ClashRoyaleAPI(self.auth)
    alpha_global_rank = 0
    possible_alpha_rank = 0
    ranks = []
    try:
        lb = await api.fetch_clan_leaderboard()
    except ClashRoyaleAPIError as e:
        await self.bot.say("Error: {}".format(e.message))
    else:
        items = lb.get('items', [])
        for item in items:
            if item.get('tag') == ALPHA_CLAN_TAG:
                alpha_global_rank = item.get('rank')
        # how many leaderboard clans would still outrank a top-50 Alpha
        clan_scores = [item.get('clanScore') for item in items]
        possible_alpha_rank = len([score for score in clan_scores if score > top50_clan_trophies])
    # Summary
    o_summary = [
        '50th = {:,} :trophy: '.format(trophy_50),
        'Alpha Clan Trophies: {:,} :trophy:'.format(alpha_clan_trophies),
        'Global Rank: {:,}'.format(alpha_global_rank),
        'Top 50 Clan Trophies: {:,} :trophy:'.format(top50_clan_trophies),
        'Possible Rank: {:,}'.format(possible_alpha_rank),
    ]
    # await self.bot.say('\n'.join(o))
    # logic calc
    for index, member_model in enumerate(member_models, 1):
        # non alpha in top 50
        clan_tag = member_model.get('clan', {}).get('tag')
        if index == 50:
            trophy_50 = member_model.get('trophies')
        if index <= 50:
            if clan_tag != ALPHA_CLAN_TAG:
                top50_results.append({
                    'index': index,
                    'member': member_model,
                })
        # alphas not in top 50
        else:
            if clan_tag == ALPHA_CLAN_TAG:
                non_top50_results.append({
                    'index': index,
                    'member': member_model
                })

    def append_result(result, out, out_members):
        # render one member row (rank, trophies, delta vs the 50th member)
        index = result['index']
        member = result['member']
        clan_name = member.get('clan', {}).get('name').replace('RoyaleAPI', '').strip()
        trophies = member.get('trophies')
        delta = trophies - trophy_50
        if delta > 0:
            delta = '+{}'.format(delta)
        elif delta == 0:
            delta = ' {}'.format(delta)
        line = '`\u2800{rank: >3} {trophies:<4}\u2800`**{member}** {clan_name} {delta:<4}'.format(
            rank=index,
            member=member.get('name')[:15],
            clan_name=clan_name,
            trophies=trophies,
            delta=delta
        )
        out.append(line)
        out_members.append(member)

    # top 50 not in alpha
    # out = ['Top 50 not in Alpha']
    out = []
    out_members = []
    for result in top50_results:
        append_result(result, out, out_members)
    # for page in pagify('\n'.join(out)):
    #     await self.bot.say(box(page, lang='py'))
    # alphas not in top 50
    # out_2 = ['Alphas not in top 50']
    out_2 = []
    out_2_members = []
    for result in non_top50_results:
        append_result(result, out_2, out_2_members)
    # for page in pagify('\n'.join(out_2)):
    #     await self.bot.say(box(page, lang='py'))
    # embed display
    em = discord.Embed(
        title="End of season",
        description="\n".join(o_summary),
        color=discord.Color.blue()
    )
    em.add_field(
        name="Top 50 not in Alpha",
        value='\n'.join(out),
        inline=False
    )
    em.add_field(
        name="Alphas not in Top 50",
        value='\n'.join(out_2),
        inline=False
    )
    await self.bot.say(embed=em)

    def append_discord_member(member_list, member):
        # resolve an in-game member to a Discord member via the players DB
        tag = clean_tag(member.get('tag'))
        try:
            discord_id = self.players[tag]["user_id"]
        except KeyError:
            pass
        else:
            discord_member = server.get_member(discord_id)
            if discord_member is not None:
                member_list.append(discord_member)

    # options for output mentions
    if 'Bot Commander' in [r.name for r in author.roles]:
        top50_discord_members = []
        non_top50_discord_members = []
        for member in out_members:
            append_discord_member(top50_discord_members, member)
        for member in out_2_members:
            append_discord_member(non_top50_discord_members, member)
        if '-id' in args:
            out = ['Top 50 not in Alpha']
            for discord_member in top50_discord_members:
                out.append(discord_member.id)
            out.append('Alphas not in Top 50')
            for discord_member in non_top50_discord_members:
                out.append(discord_member.id)
            for page in pagify('\n'.join(out)):
                await self.bot.say(box(page, lang='py'))
        if '-mention' in args:
            out = []
            for discord_member in top50_discord_members:
                out.append(discord_member.mention)
            out.append(
                'Congratulations! You are top 50 in the RACF. '
                'Please move to Alpha by end of season to help us with the global rank!'
            )
            for discord_member in non_top50_discord_members:
                out.append(discord_member.mention)
            out.append(
                'You are not within top 50 in the RACF right now. '
                'Please move to Bravo unless you are certain that you can un-tilt.'
            )
            out.append(
                '\n\nNote: If you are _very_ close to where the 50th is, you can stay put. '
                'Just be conscious of that number. Thank you! :heart:'
            )
            for page in pagify(' '.join(out)):
                await self.bot.say(page)
@racfaudit.command(name="auto", pass_context=True, no_pm=True)
# Bug fix: the permission kwarg was misspelled 'manager_server', which is
# not a Discord permission name; 'manage_server' is the intended check.
@checks.admin_or_permissions(manage_server=True)
async def racfaudit_auto(self, ctx, channel: discord.Channel = None):
    """Auto audit server.

    Pass a channel to enable periodic audits reporting to that channel;
    omit the channel to disable.
    """
    server = ctx.message.server
    enabled = channel is not None
    channel_id = None
    if channel is not None:
        channel_id = channel.id
    if not self.settings.get('auto_audit_servers'):
        self.settings['auto_audit_servers'] = dict()
    self.settings['auto_audit_servers'][server.id] = dict(
        enabled=enabled,
        channel_id=channel_id
    )
    dataIO.save_json(JSON, self.settings)
    if enabled:
        await self.bot.say("Auto audit enabled")
    else:
        await self.bot.say("Auto audit disabled")
async def run_audit_task(self):
    """Auto run audit.

    For each whitelisted server with auto-audit enabled, run the audit
    and execute the role changes, reporting into the configured channel.
    """
    for server_id, v in self.settings.get('auto_audit_servers', {}).items():
        # only the known RACF / SML servers may auto-audit
        if server_id in [RACF_SERVER_ID, SML_SERVER_ID]:
            channel_id = v.get('channel_id')
            enabled = v.get('enabled')
            if enabled:
                channel = self.bot.get_channel(channel_id)
                server = self.bot.get_server(server_id)
                if channel is not None:
                    try:
                        result = await self.run_racfaudit(server)
                    except ClashRoyaleAPIError:
                        pass
                    else:
                        if result.error:
                            # Bug fix: self.bot.send(...) is not a discord.py 0.16
                            # API; use send_message as elsewhere in this cog.
                            await self.bot.send_message(channel, "Audit aborted because of Clash Royale API error.")
                        else:
                            await self.exec_racf_audit(channel=channel, audit_results=result.audit_results,
                                                       server=server)
@racfaudit.command(name="nudge", pass_context=True, no_pm=True)
@checks.mod_or_permissions(kick_members=True)
async def racfaudit_nudge(self, ctx, query):
    """Nudge members for CW battles.

    Looks up the clan whose configured filter aliases match *query*, then
    pings Discord users with unplayed clan-war battles for the current
    collection or war day.
    """
    api = ClashRoyaleAPI(self.auth)
    server = ctx.message.server
    # find clan tag based on config filters
    tag = None
    for clan in self.config.get('clans'):
        if query.lower() in [f.lower() for f in clan.get('filters', [])]:
            tag = clan.get('tag')
            break
    if tag is None:
        await self.bot.say("Cannot find clan tag")
        return
    await self.bot.say("Clan tag: {}".format(tag))
    cw = await api.fetch_clan_war(tag)
    cwd = Dict(cw)
    c = await api.fetch_clan(tag)
    cd = Dict(c)
    # tag to member name
    member_tag_to_name = {clean_tag(m.get('tag', '')): m.get('name', '') for m in cd.memberList}

    # helper: send message
    async def send_message(war_day="Collection Day", timedelta_human="0 minutes", discord_users=None):
        if discord_users:
            # NOTE(review): member_tags kwarg has no matching placeholder in the
            # format string; it is evaluated but never rendered.
            msg = "{mentions} {timedelta} til end of {war_day} and you have battles remaining!!".format(
                war_day=war_day,
                mentions=" ".join([u.mention for u in discord_users]),
                timedelta=timedelta_human,
                member_tags=", ".join(member_tags)
            )
            await self.bot.say(msg)
            if war_day == 'War Day':
                await self.bot.say("Note: we cannot detect members who have more than one battle to play.")

    # helper: not on discord:
    async def send_not_on_discord(member_tag):
        await self.bot.say(
            "{member_name} #{member_tag} is not on Discord.".format(
                member_name=member_tag_to_name.get(member_tag, ''),
                member_tag=member_tag
            )
        )

    # collection day nudge
    now = dt.datetime.utcnow()
    member_tags = []
    if cwd.state == 'collectionDay':
        # time remaining
        end_time = dt.datetime.strptime(cwd.collectionEndTime, '%Y%m%dT%H%M%S.%fZ')
        # end_time = dt.datetime.strptime('20190126T102716.230Z', '%Y%m%dT%H%M%S.%fZ')
        timedelta = end_time - now
        minutes = timedelta // dt.timedelta(minutes=1)
        timedelta_human = "{}".format(humanfriendly.format_timespan(dt.timedelta(minutes=minutes).total_seconds()))
        # battle remaining
        for p in cwd.participants:
            if p.collectionDayBattlesPlayed != 3:
                member_tags.append(clean_tag(p.tag))
        # not yet participating
        participant_tags = [p.get('tag') for p in cwd.participants]
        for m in cd.memberList:
            if m.get('tag') not in participant_tags:
                member_tags.append(clean_tag(m.get('tag')))
        # discord user
        discord_users = []
        tag2discord = {v.get('tag'): v.get('user_id') for k, v in self.players.items()}
        for member_tag in member_tags:
            if member_tag in tag2discord:
                discord_id = tag2discord.get(member_tag)
                discord_user = server.get_member(discord_id)
                if discord_user is not None:
                    discord_users.append(discord_user)
                else:
                    await send_not_on_discord(member_tag)
            else:
                await send_not_on_discord(member_tag)
        await send_message(war_day="Collection Day", timedelta_human=timedelta_human, discord_users=discord_users)
        return
    if cwd.state == 'warDay':
        # time remaining
        end_time = dt.datetime.strptime(cwd.warEndTime, '%Y%m%dT%H%M%S.%fZ')
        timedelta = end_time - now
        minutes = timedelta // dt.timedelta(minutes=1)
        timedelta_human = "{}".format(humanfriendly.format_timespan(dt.timedelta(minutes=minutes).total_seconds()))
        # battles remaining
        for p in cwd.participants:
            if p.battlesPlayed == 0:
                member_tags.append(clean_tag(p.tag))
        # discord user
        players = [self.players.get(member_tag) for member_tag in member_tags]
        players = [p for p in players if p]
        discord_users = []
        for player in players:
            discord_id = player.get('user_id')
            discord_user = server.get_member(discord_id)
            if discord_user is None:
                member_tag = player.get('tag')
                await send_not_on_discord(member_tag)
            else:
                discord_users.append(discord_user)
        await send_message(war_day="War Day", timedelta_human=timedelta_human, discord_users=discord_users)
        return
    if cwd.state.lower() == 'matchmaking':
        await self.bot.say("Clan is matchmaking… aborted.")
        return
    if cwd.state.lower() == 'notinwar':
        await self.bot.say("Clan is not in war… aborted.")
        return
    # not in war or collection
    await self.bot.say("Clan is not in a known war state… aborted.")
def check_folder():
    """Ensure the cog's data directories exist (PATH and PATH/clans)."""
    for folder in (PATH, os.path.join(PATH, "clans")):
        os.makedirs(folder, exist_ok=True)
def check_file():
    """Create the settings JSON with an empty payload when missing or invalid."""
    if dataIO.is_valid_json(JSON):
        return
    dataIO.save_json(JSON, {})
def setup(bot):
    """Extension entry point: prepare on-disk storage, then register the cog."""
    check_folder()
    check_file()
    bot.add_cog(RACFAudit(bot))
| [
"[email protected]"
] | |
5b05fd125f8e4a01b95fc570dc9749b52d706066 | dc8809c77c53af6b9e5f66f52fe9d8e9e32ece51 | /finalCDSTest.py | 51b7a48e9f5374683351c66ed3f1a98a52c8765b | [] | no_license | psykodan/FYP | afb2263e862aad0640722fcd546a39a73653c79a | 6cba87fab509a78355e20f177192bfcc148c2918 | refs/heads/master | 2021-05-10T13:05:18.635337 | 2018-05-04T12:54:21 | 2018-05-04T12:54:21 | 118,461,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,127 | py | import numpy as np
import cv2
import os
from advancedHaarColourThreshold import casualtyDetectionImageStream
def main():
    """Evaluate the Haar-cascade detector on each hold-out split."""
    # Positive hold-out image folders, one per cross-validation split.
    holdout_sets = [
        "/home/daniel/Documents/FYP/FYP/data/CloudyChopSurfFanore/holdout/1/positive/",
        "/home/daniel/Documents/FYP/FYP/data/CloudyChopSurfFanore/holdout/2/positive/",
        "/home/daniel/Documents/FYP/FYP/data/CloudyChopSurfFanore/holdout/3/positive/",
    ]
    # Ground-truth object annotations matching each hold-out set above.
    object_locations = [
        "/home/daniel/Documents/FYP/FYP/data/CloudyChopSurfFanore/holdout/1/info.dat",
        "/home/daniel/Documents/FYP/FYP/data/CloudyChopSurfFanore/holdout/2/info.dat",
        "/home/daniel/Documents/FYP/FYP/data/CloudyChopSurfFanore/holdout/3/info.dat",
    ]
    cascade = cv2.CascadeClassifier("cascade.xml")
    threshold = 3.6
    box = 40
    # Run detection on each split, then score it against the annotations.
    for holdout, info in zip(holdout_sets, object_locations):
        casualtyDetectionImageStream(holdout, cascade, threshold, box)
        processResults(info, holdout)
def processResults(info, dirIn):
    """Score the detections in results.txt against the ground truth in *info*.

    *info* is an OpenCV-style info.dat: each row is
    ``image num_objects x y w h [x y w h ...]``. results.txt rows are either
    ``image x y`` (one detection) or ``image NONE``. A detection counts as a
    true positive when its (x, y) lands within 50 pixels of any annotated
    object on the same image. Confusion-matrix counts and derived metrics are
    appended to resultsFull.txt / resultsRaw.txt, and results.txt is truncated
    for the next run.
    """
    tp = 0
    fp = 0
    tn = 0
    fn = 0
    p = 0
    n = 0
    # Load the detector output and the ground-truth annotations.
    with open('results.txt', 'r') as r:
        results = r.read()
    resFormatted = [row.split(' ') for row in results.split('\n')]
    with open(info, 'r') as d:
        data = d.read()
    dataFormatted = [row.split(' ') for row in data.split('\n')]
    images = []
    # Score every detection on images that carry annotations.
    for x in dataFormatted:
        if len(x) == 1:  # blank/trailing row from the final newline
            continue
        images.append(x[0])
        num_objects = int(x[1])
        p = p + num_objects
        for y in resFormatted:
            if y[0] != x[0]:
                continue
            if y[1] == "NONE":
                # Annotated image with no detection -> missed object(s).
                fn = fn + 1
            elif num_objects >= 1:
                # Object k's coordinates sit at columns 2+4k / 3+4k; a detection
                # is correct if within 50 px of any object (generalizes the old
                # hard-coded 1/2/3-object branches to any count).
                hit = False
                for k in range(num_objects):
                    gt_x = int(x[2 + 4 * k])
                    gt_y = int(x[3 + 4 * k])
                    if abs(int(y[1]) - gt_x) <= 50 and abs(int(y[2]) - gt_y) <= 50:
                        hit = True
                        break
                if hit:
                    tp = tp + 1
                else:
                    fp = fp + 1
            else:
                print("nothing to see here")
    # Results for images with no annotation: NONE -> true negative,
    # any detection -> false positive not caught above.
    for z in resFormatted:
        if len(z) == 1:
            continue
        if z[1] == "NONE" and z[0] not in images:
            tn = tn + 1
        if z[1] != "NONE" and z[0] not in images:
            fp = fp + 1
    count = len([name for name in os.listdir(dirIn) if os.path.isfile(os.path.join(dirIn, name))])
    # NOTE(review): len(dataFormatted) includes the blank trailing row, so n may
    # be off by one when info ends with a newline -- preserved from the original.
    n = count - len(dataFormatted)
    # Metrics, guarding every division against empty denominators.
    accuracy = (tp + tn) / (tp + fp + fn + tn) if (tp + fp + fn + tn) != 0 else 0
    precision = tp / (tp + fp) if (tp + fp) != 0 else 0
    sensitivity = tp / (tp + fn) if (tp + fn) != 0 else 0
    specificity = tn / (tn + fp) if (tn + fp) != 0 else 0
    print("True positive count: " + str(tp))
    print("False positive count: " + str(fp))
    print("True negative count: " + str(tn))
    print("False negative count: " + str(fn))
    print("Positive count: " + str(p))
    print("Negative count: " + str(n))
    # Append a human-readable confusion matrix ...
    with open('resultsFull.txt', 'a') as rF:
        rF.write("Confusion Matrix for black Colour Thresholding\n")
        rF.write(" Actual \n")
        rF.write(" \n")
        rF.write(" P(" + str(p) + ") N(" + str(n) + ")\n")
        rF.write("P --------------------\n")
        rF.write("r P | [" + str(tp) + "] | [" + str(fp) + "] |\n")
        rF.write("e (" + str(int(tp + fp)) + ") -------------------\n")
        rF.write("d N | [" + str(fn) + "] | [" + str(tn) + "] |\n")
        rF.write(" (" + str(int(tn + fn)) + ") -------------------\n\n")
        rF.write("Accuracy = " + str(accuracy) + "\n")
        rF.write("Precision = " + str(precision) + "\n")
        rF.write("Sensitivity = " + str(sensitivity) + "\n")
        rF.write("Specificity = " + str(specificity) + "\n")
        rF.write("-------------- NEXT RUN ------------\n")
    # ... and a machine-readable CSV row.
    with open('resultsRaw.txt', 'a') as rR:
        rR.write(str(p) + "," + str(n) + "," + str(tp) + "," + str(tn) + "," + str(fp) + "," + str(fn) + "," + str(accuracy) + "," + str(precision) + "," + str(sensitivity) + "," + str(specificity) + "\n")
    # Truncate results.txt so the next detection run starts clean.
    with open('results.txt', 'w') as r:
        r.write("")
main() | [
"[email protected]"
] | |
703c14122a9d7e68d954f28abd98d67fb76e61b7 | b7eed26cf8a0042a61f555eed1e9bf0a3227d490 | /students/medrek_tomasz/lesson_06_dicts_tuples_sets_args_kwargs/uppercase.py | 5b34196d30840304fa45a907fd4681223a0cee82 | [] | no_license | jedzej/tietopythontraining-basic | e8f1ac5bee5094c608a2584ab19ba14060c36dbe | a68fa29ce11942cd7de9c6bbea08fef5541afa0f | refs/heads/master | 2021-05-11T11:10:05.110242 | 2018-08-20T12:34:55 | 2018-08-20T12:34:55 | 118,122,178 | 14 | 84 | null | 2018-08-24T15:53:04 | 2018-01-19T12:23:02 | Python | UTF-8 | Python | false | false | 604 | py | #!/usr/bin/env python3
def capitalize(lower_case_word):
    """Return the word with a leading a-z letter upper-cased.

    Only an ASCII lowercase first character is changed (matching the original
    ord-range check); anything else -- including the empty string, which used
    to crash on ord('') -- is returned unchanged.
    """
    if not lower_case_word:
        return lower_case_word
    first = lower_case_word[0]
    if 'a' <= first <= 'z':
        return first.upper() + lower_case_word[1:]
    return lower_case_word
def capitalize_message(message):
    """Capitalize every whitespace-separated word and rejoin with single spaces."""
    return ' '.join(capitalize(word) for word in message.split())
def main():
    """Read a message from stdin and echo it with each word capitalized."""
    message = input("Please enter your message\n")
    print(capitalize_message(message))
# Run interactively only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
09f5a82abfe71544df6b67e4dceb7dd68152b887 | 4b2ed102f94d4e96a423300c789d31d49e6ee3cb | /app/rrhh/migrations/0005_alter_detallerolpersonal_options.py | 2423db787f8e3b7d271329466ee6dcfda0481d84 | [] | no_license | peilflores/controlasistencia | 34fedda4f052d8892eca328687ce5718dcb86266 | dcb99e2687d32abe896c66c68719bc3d8296b7f1 | refs/heads/main | 2023-06-05T01:56:13.466041 | 2021-06-30T06:56:50 | 2021-06-30T06:56:50 | 381,582,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # Generated by Django 3.2 on 2021-06-21 04:51
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: set the admin display names (verbose_name /
    verbose_name_plural) for the DetalleRolPersonal model."""

    dependencies = [
        ('rrhh', '0004_alter_detallerol_rol'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='detallerolpersonal',
            options={'verbose_name': 'Detalle Rol Personal', 'verbose_name_plural': 'Detalles Roles Personales'},
        ),
    ]
| [
"[email protected]"
] | |
efcad0606abc2253c9cdfa4e89175cfb0feee50e | ebdc72111b5140919e8ac7736c597e33e31b190b | /ImprovedVersion/comparison.py | fa27d4806b8c72c41c4f68bf93bb208859c73d28 | [] | no_license | lordChipotle/ML-SIM-Improved | c031ee733b65d083f585fa73f806494b8550a48c | e756b6e427af37c25a4eacd761510880e43023d2 | refs/heads/master | 2023-01-07T12:43:45.451340 | 2020-11-05T15:09:34 | 2020-11-05T15:09:34 | 280,270,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,322 | py | import torch
from torch import nn
import pandas as pd
import os
from datetime import date, datetime
import matplotlib.pyplot as plt
import numpy as np
import json
import matplotlib
font = {'size' : 18}
matplotlib.rc('font', **font)
from datetime import timedelta
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from run import run_opt as run_opt_china, initHubei
from components.utils import codeDict, flowHubei, get_data_path, get_important_date, get_seed_num
from run_foreign import run_opt as run_opt_foreign
from show_simulation_foreign import run_simulation as run_simulation_foreign
from res_analysis import load_and_save
import argparse
# Experiment switch: True evaluates South Korea (900003), False the Chinese provinces.
korea_flag = True
# Length of the sliding input window (days of history per LSTM prediction step).
foresee_size = 3
def parse_args():
    """Parse command-line options.

    ``--load_inter``: when true, skip retraining and reuse intermediate results.
    Note: plain ``type=bool`` would treat any non-empty string (including the
    literal "False") as True, so an explicit string-to-bool converter is used.
    """
    def _str2bool(value):
        # argparse hands us the raw string; accept common truthy spellings.
        return str(value).lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--load_inter', type=_str2bool, default=False)
    args = parser.parse_args()
    return args
# Dataset-dependent date configuration.
# start_date: first day of the plotted series; date_it: end of the training
# window (dashed vertical line in the plots); period_it: training length in days.
if korea_flag:
    start_date = date(2020, 1, 26)
    day_num = []
    date_it = date(2020, 3, 5)
    period_it = (date_it - start_date).days
    training_end_date = date_it
else:
    date_it = date(2020, 2, 13)
    start_date = date(2020, 1, 14)
    period_it = (date_it - start_date).days
    training_end_date = date_it
def ChinaSEIRData(city=420000):
    """Return the SEIR baseline series for a Chinese province code.

    Reads ./data/data-seir.json; unknown cities get an all-zero series of the
    same length as Hubei's (key '420000'). The original kept a dead, always-empty
    lookup dict and leaked the file handle; both removed.
    """
    with open('./data/data-seir.json', 'r') as f:
        data_all = json.load(f)
    if str(city) in data_all:
        return data_all[str(city)]
    return [0] * len(data_all[str(420000)])
def HanSEIRData():
    """Return South Korea's SEIR baseline series (key '900003').

    Uses a context manager so the JSON file is closed (the original leaked it).
    """
    with open('./data/data-seir-korea.json', 'r') as f:
        return json.load(f)['900003']
def plot1(ours, real, simulated, seir, title, savefile, date_it=None, loc='upper left', y_max2=None, interval=12):
    """Plot the real series against MLSim/LSTM/SEIR curves and save the figure.

    *date_it* draws a dashed vertical line marking the train/test split;
    *y_max2* optionally caps the y-axis; *interval* is the x-tick spacing in
    days. The *title* argument is currently unused (plt.title is commented out).
    """
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
    plt.gca().xaxis.set_major_locator(mdates.DayLocator())
    y_max = -1000
    y_max = max(np.max(ours), np.max(real), np.max(simulated), np.max(seir))
    # Every series is anchored at the module-level start_date; lengths may differ.
    xs = [start_date + timedelta(days=i) for i in range(len(real))]
    plt.plot(xs, real, color="indianred", linewidth=2.5, linestyle="-", label="real")
    xs = [start_date + timedelta(days=i) for i in range(len(simulated))]
    plt.plot(xs, simulated, color="orange", linewidth=2.5, linestyle="-", label="LSTM")
    xs = [start_date + timedelta(days=i) for i in range(len(ours))]
    plt.plot(xs, ours, color="cornflowerblue", linewidth=2.5, linestyle="-", label="MLSim")
    xs = [start_date + timedelta(days=i) for i in range(len(seir))]
    plt.plot(xs, seir, color="forestgreen", linewidth=2.5, linestyle="-", label="SEIR")
    if date_it is not None:
        # Dashed marker at the end of the training window.
        plt.vlines(date_it, 0, y_max, colors="r", linestyles="dashed")
    plt.gcf().autofmt_xdate()
    #plt.tick_params(axis='both', which='major', labelsize=14)
    plt.xlabel('Date', fontsize=18)
    plt.ylabel('Cases number', fontsize=18)
    ax = plt.gca()
    ax.spines['top'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.xaxis.set_major_locator(ticker.MultipleLocator(interval))
    plt.ylim(bottom=0)
    if y_max2 is not None:
        plt.ylim(top=y_max2)
    #ax.xaxis.set_major_locator(x_major_locator)
    plt.legend(loc=loc)
    #plt.title(title)
    plt.savefig(savefile, bbox_inches='tight')
    plt.clf()
def get_total(data, type, cities):
    """Element-wise sum of the per-city series ``data[type][str(city)]``.

    (The parameter name ``type`` shadows the builtin but is kept for interface
    compatibility.) All cities' series are assumed to share the length of the
    first one.
    """
    series_length = len(data[type][str(cities[0])])
    return [sum(data[type][str(city)][i] for city in cities)
            for i in range(series_length)]
def get_COVID_new_confirm(type='real_confirmed', cities=420000, file='./data_run_lstm_china0.json'):
    """Load one series (*type*) for a single city from a simulation-result JSON.

    Returns a numpy array. The file handle is now closed via a context manager
    (the original relied on the garbage collector).
    """
    with open(file, 'r') as f:
        data = json.load(f)
    return np.array(get_total(data, type, [int(cities)]))
def prepare_data():
    """Return the placeholder epidemic curve: 116 days of zero new cases.

    Originally this loaded ./data/sars.json; the load is disabled and an
    all-zero series of the same length is used instead.
    """
    raw_series = [0] * 116
    return np.array([np.around(day_count) for day_count in raw_series])
def train_china():
    """Fit MLSim for Hubei first, then for six other Chinese provinces.

    Hubei (420000) is optimised first because the other provinces consume its
    estimated unobserved outflow (unob_flow_num from initHubei). Fitted
    parameters go to params_lstm_china{i}.csv and the per-run JSON results are
    consolidated by load_and_save at the end.
    """
    province_travel_dict = flowHubei()
    real_data = pd.read_csv(get_data_path())['adcode'].unique()
    province_code_dict = codeDict()
    for i in range(1):
        all_param = {}
        # Hubei: larger optimisation budget, no upstream flow.
        x = run_opt_china(420000, 200000, start_date=date(2020, 1, 11), important_dates=[get_important_date(420000)],
                          repeat_time=3, training_date_end=training_end_date, seed=i,
                          json_name='data_run_lstm_china{}.json'.format(int(i)), loss_ord=4., touch_range=[0, 0.33])
        unob_flow_num = initHubei(x, start_date=date(2020, 1, 11), important_date=[get_important_date(420000)],
                                  travel_from_hubei=province_travel_dict)
        all_param[420000] = x
        # NOTE: overwrites the adcode list read above with a fixed selection.
        real_data = [110000, 440000, 330000, 310000, 320000, 120000]
        for ind, item in enumerate(real_data):
            print(i, ind, item, province_code_dict[item])
            if item == 420000:
                continue
            x = run_opt_china(item, 40000, start_date=date(2020, 1, 11), important_dates=[get_important_date(420000)],
                              infectratio_range=[0.0, 0.05], dummy_range=[0, 0.000001], unob_flow_num=unob_flow_num, repeat_time=2,
                              training_date_end=training_end_date, json_name='data_run_lstm_china{}.json'.format(int(i)),
                              loss_ord=4., touch_range=[0.0, 0.33], iso_range=[0.03, 0.12])
            all_param[item] = x
        all_param_df = pd.DataFrame(all_param)
        all_param_df.to_csv('params_lstm_china{}.csv'.format(int(i)), index=False)
    load_and_save('data_run_lstm_china{}.json', 'data_run_lstm_china_{}.json', 1)
def train_korea():
    """Fit MLSim for South Korea (code 900003) and dump results to JSON/CSV."""
    province_code_dict = {900003: 'South Korea'}
    for i in range(1):
        all_param = {}
        for ind, item in enumerate([900003]):
            print(i, ind, item, province_code_dict[item])
            x = run_opt_foreign(item, 40000, start_date=date(2020, 1, 24), important_dates=[get_important_date(900003)],
                                infectratio_range=[0.0, 0.05], dummy_range=[0, 100], unob_flow_num=None, repeat_time=3,
                                training_end_date=training_end_date, seed=i, json_name='data_run_lstm_korea{}.json'.format(int(i)),
                                touch_range=[0, 0.333])
            # Extend the fitted run via simulation (the two 60s are presumably
            # simulation horizons -- see run_simulation_foreign's signature).
            run_simulation_foreign(x, item, 60, 60,
                                   start_date, get_important_date(item), json_name='data_run_lstm_korea{}.json'.format(int(i)))
            all_param[item] = x
        all_param_df = pd.DataFrame(all_param)
        all_param_df.to_csv('params_lstm_korea{}.csv'.format(int(i)), index=False)
    load_and_save('data_run_lstm_korea{}.json', 'data_run_lstm_korea_{}.json', 1)
def construct_data_set(data, size=3):
    """Slide a window of length *size* over *data* to build (X, y) pairs.

    X[i] holds size consecutive values starting at i; y[i] is the value that
    immediately follows. Returns (ndarray of windows, column vector of targets).
    """
    n_samples = len(data) - size
    windows = [data[start:start + size] for start in range(n_samples)]
    targets = [data[start + size] for start in range(n_samples)]
    dataX = np.array(windows)
    dataY = np.array(targets).reshape((-1, 1))
    return dataX, dataY
class lstm_reg(nn.Module):
    """Small LSTM regressor: Linear -> tanh -> LSTM -> Linear -> tanh.

    Predicts the next value of a (normalised) daily case series from a sliding
    window of `input_size` previous values.
    """

    def __init__(self, input_size, hidden_size, output_size=1, num_layers=1):
        super(lstm_reg, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.rnn = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x, mem_in=None):
        """One pass; returns (outputs, LSTM state) so callers can continue the
        sequence by passing the state back in as *mem_in*."""
        x = torch.tanh(self.fc1.forward(x))
        x, mem = self.rnn(x, mem_in)
        x = torch.tanh(self.fc2(x))
        return x, mem

    def forward_and_pred(self, x, pred=0):
        """Encode *x*, then roll forward *pred* autoregressive steps.

        Each predicted value is fed back into the sliding input window while
        the LSTM state is carried across steps. Returns the in-sample outputs
        concatenated with the *pred* extrapolated steps along the time axis.
        """
        output, mem = self.forward(x)
        if pred > 0:
            # Last observed input window and last in-sample prediction.
            current_x = list(x[0][-1].data.numpy())
            current_y = output[0][-1].data.numpy()[0]
            res = []
            for i in range(pred):
                # Slide the window: drop the oldest value, append the prediction.
                current_x.pop(0)
                current_x.append(current_y)
                net_x = torch.from_numpy(np.array(current_x).reshape((1, 1, -1)))
                ot, mem = self.forward(net_x, mem)
                res.append(ot)
                current_y = ot[0][-1].data.numpy()[0]
            out_new = torch.cat(res, -2)
            output = torch.cat([output, out_new], -2)
        return output
def train_and_pred(trainX, trainY, testX, pred_num=30, den=4500, load_model=False, mask=None):
    """Fit the LSTM on (trainX, trainY) and extrapolate pred_num steps past testX.

    All series are divided by *den* before training; callers rescale the output.
    When *mask* is given, the squared error is averaged only over masked-in
    entries. Returns (in-sample predictions on trainX, predictions for testX
    plus the pred_num autoregressive steps), both still in the scaled domain.
    """
    net = lstm_reg(foresee_size, 30, 1, 1)
    if load_model:
        net.load_state_dict(torch.load('params.pkl'))
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    train_x = torch.from_numpy(trainX).to(torch.float32) / den
    train_y = torch.from_numpy(trainY).to(torch.float32) / den
    test_x = torch.from_numpy(testX).to(torch.float32).unsqueeze(0) / den
    if mask is not None:
        mask_tensor = torch.from_numpy(mask).to(torch.float32).detach()
    if not load_model:
        for _ in range(500):
            pred, _mem = net.forward(train_x)
            if mask is None:
                loss = criterion(pred, train_y)
            else:
                # Masked MSE: only training-window entries contribute.
                # (The original also built d1/d2/d3 debug lists and ran a full
                # forward_and_pred every iteration, discarding the results;
                # that dead work is removed.)
                loss = (((pred - train_y) ** 2) * mask_tensor).sum() / mask_tensor.sum()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    # torch.save(net.state_dict(), 'params.pkl')
    pred_train = net.forward_and_pred(train_x, 0).data.numpy()
    pred_test = net.forward_and_pred(test_x, pred_num).data.numpy()
    return pred_train, pred_test[0]
def main(load_inter_flag=False):
    """Compare MLSim, an LSTM baseline and an SEIR baseline on case curves.

    Unless *load_inter_flag* indicates the intermediate JSON results already
    exist, the MLSim simulators are retrained first. For each region an LSTM
    is then fit on the new-confirmed series, the three models are plotted
    against the real data, and RMSE tables for the training and testing
    windows are printed as LaTeX-style rows.
    """
    if not load_inter_flag:
        # Produce the MLSim intermediate JSON files for the selected dataset.
        if korea_flag:
            train_korea()
        else:
            train_china()
    fmt = 'pdf'
    #all_city = pd.read_csv(get_data_path())['adcode'].unique()
    all_city = [420000, 110000, 440000, 330000, 310000, 320000, 120000]
    if korea_flag:
        all_city = [900003]
    mlsim_loss_array = []
    seir_loss_array = []
    lstm_loss_array = []
    for city in all_city:
        print(city)
        torch.manual_seed(6)
        if not os.path.exists('./img/{}{}/'.format('./lstm', str(city))):
            os.mkdir('./img/{}{}/'.format('./lstm', str(city)))
        if not korea_flag:
            # Chinese provinces: train on a placeholder series plus the first
            # div_idx (training-window) days of the city's data, masked loss.
            div_idx = period_it
            dataX_list, dataY_list = [], []
            mask_list = []
            _DataX, _DataY = construct_data_set(prepare_data(), foresee_size)
            _DataX, _DataY = np.expand_dims(_DataX, 0), np.expand_dims(_DataY, 0)
            dataX_list.append(_DataX)
            dataY_list.append(_DataY)
            mask_list.append(np.zeros((1, np.shape(_DataY)[1], 1)))
            TestDataX, TestDataY = construct_data_set(get_COVID_new_confirm(cities=city), foresee_size)
            mask_list.append(np.zeros((1, np.shape(_DataY)[1], 1)))
            dataX_list.append(np.zeros((1, np.shape(_DataY)[1], foresee_size)))
            dataY_list.append(np.zeros((1, np.shape(_DataY)[1], 1)))
            for i in range(div_idx):
                # Only the training window contributes to the masked loss.
                mask_list[-1][0][i][0] = 1
                dataY_list[-1][0][i][0] = TestDataY[i][0]
                for ii in range(foresee_size):
                    dataX_list[-1][0][i][ii] = TestDataX[i][ii]
            DataX = np.concatenate(dataX_list, 0)
            DataY = np.concatenate(dataY_list, 0)
            mask = np.concatenate(mask_list, 0)
            TestDataX, TestDataY = construct_data_set(get_COVID_new_confirm(cities=city), foresee_size)
            _, real_cum = construct_data_set(get_COVID_new_confirm('real_cum_confirmed', cities=city), foresee_size)
            _, sim_cum = construct_data_set(get_COVID_new_confirm('sim_cum_confirmed_deduction_s1', cities=city), foresee_size)
            _, sim_inc = construct_data_set(get_COVID_new_confirm('sim_confirmed_deduction_s1', cities=city), foresee_size)
            load_model = False
            _, sim_inc_seir = construct_data_set(ChinaSEIRData(city=city)[:-3], foresee_size)
        else:
            # South Korea: train on every Chinese province's full series plus
            # the masked prefix of the Korean series.
            div_idx = period_it
            dataX_list, dataY_list = [], []
            mask_list = []
            for item in pd.read_csv(get_data_path())['adcode'].unique():
                _DataX, _DataY = construct_data_set(get_COVID_new_confirm(cities=int(item)), foresee_size)
                _DataX, _DataY = np.expand_dims(_DataX, 0), np.expand_dims(_DataY, 0)
                dataX_list.append(_DataX)
                dataY_list.append(_DataY)
                mask_list.append(np.ones((1, np.shape(_DataY)[1], 1)))
            TestDataX, TestDataY = construct_data_set(get_COVID_new_confirm(file='data_run_lstm_korea0.json', cities=900003), foresee_size)
            mask_list.append(np.zeros((1, np.shape(_DataY)[1], 1)))
            dataX_list.append(np.zeros((1, np.shape(_DataY)[1], foresee_size)))
            dataY_list.append(np.zeros((1, np.shape(_DataY)[1], 1)))
            for i in range(div_idx):
                mask_list[-1][0][i][0] = 1
                dataY_list[-1][0][i][0] = TestDataY[i][0]
                for ii in range(foresee_size):
                    dataX_list[-1][0][i][ii] = TestDataX[i][ii]
            DataX = np.concatenate(dataX_list, 0)
            DataY = np.concatenate(dataY_list, 0)
            mask = np.concatenate(mask_list, 0)
            TestDataX, TestDataY = construct_data_set(get_COVID_new_confirm(file='data_run_lstm_korea0.json', cities=900003), foresee_size)
            _, real_cum = construct_data_set(get_COVID_new_confirm(type='real_cum_confirmed', file='data_run_lstm_korea0.json', cities=900003), foresee_size)
            _, sim_cum = construct_data_set(get_COVID_new_confirm('sim_cum_confirmed_deduction_s1', file='data_run_lstm_korea0.json', cities=900003), foresee_size)
            _, sim_inc = construct_data_set(get_COVID_new_confirm('sim_confirmed_deduction_s1', file='data_run_lstm_korea0.json', cities=900003), foresee_size)
            _, sim_inc_seir = construct_data_set(HanSEIRData()[:], foresee_size)
            load_model = False
        # Normalisation constant for the LSTM (case counts are divided by den).
        den = 4500
        if city < 900000 and not city == 420000:
            den = 200
        # print(TestDataX)
        # NOTE(review): both branches currently issue the identical call.
        if korea_flag:
            pred_train, pred_test = train_and_pred(DataX, DataY, TestDataX[:div_idx][:], len(sim_cum) - div_idx, den, load_model=load_model, mask=mask)
        else:
            pred_train, pred_test = train_and_pred(DataX, DataY, TestDataX[:div_idx][:], len(sim_cum) - div_idx, den, load_model=load_model, mask=mask)
        # Undo the normalisation and clip negative case predictions to zero.
        pred_test = pred_test * den
        pred_test[pred_test < 0] = 0
        if not os.path.exists('./img/lstm'):
            os.mkdir('./img/lstm')
        max_len = len(TestDataY)
        plot1(sim_inc[:max_len], TestDataY, pred_test[:max_len], sim_inc_seir[:max_len],
              'The number of newly confirmed cases ',
              './img/{}{}/newly_real_sim.{}'.format('./lstm', str(city), fmt),
              date_it=date_it,
              loc='upper right',
              )
        plot1(sim_cum, real_cum, np.cumsum(pred_test), np.cumsum(sim_inc_seir),
              'The cumulative number of newly confirmed cases ',
              'img/{}{}/cum_real_sim.{}'.format('./lstm', str(city), fmt),
              date_it=date_it,
              loc='lower right')
        # Hubei and Korea get re-plotted with slightly different axis options.
        if city == 420000:
            plot1(sim_inc, TestDataY, pred_test, sim_inc_seir,
                  'The number of newly confirmed cases ',
                  './img/{}{}/newly_real_sim.{}'.format('./lstm', str(city), fmt),
                  date_it=date_it,
                  loc='upper right',
                  #y_max2=7000
                  )
        if city == 900003:
            plot1(sim_inc[:max_len], TestDataY, pred_test[:max_len], sim_inc_seir[:max_len],
                  'The number of newly confirmed cases ',
                  './img/{}{}/newly_real_sim.{}'.format('./lstm', str(city), fmt),
                  date_it=date_it,
                  loc='upper left',
                  interval=12
                  )
        # RMSE over the training window for each model.
        train_size = (date_it - start_date).days
        print(train_size, len(TestDataY))
        test_label = np.array([TestDataY[item] for item in range(train_size)])
        loss_ours = np.sqrt(np.mean(np.square(test_label - np.array([sim_inc[item] for item in range(train_size)]))))
        loss_seir = np.sqrt(
            np.mean(np.square(test_label - np.array([sim_inc_seir[item] for item in range(train_size)]))))
        loss_lstm = np.sqrt(np.mean(np.square(test_label - np.array([pred_test[item] for item in range(train_size)]))))
        print('training, ', city, ' ours: ', loss_ours,
              'seir: ', loss_seir,
              'lstm: ', loss_lstm)
        mlsim_loss_array.append(loss_ours)
        seir_loss_array.append(loss_seir)
        lstm_loss_array.append(loss_lstm)
        # RMSE over the held-out (post-training) window.
        test_label = np.array([TestDataY[item] for item in range(train_size, len(TestDataY))])
        loss_ours = np.sqrt(np.mean(np.square(test_label - np.array([sim_inc[item] for item in range(train_size, len(TestDataY))]))))
        loss_seir = np.sqrt(np.mean(np.square(test_label - np.array([sim_inc_seir[item] for item in range(train_size, len(TestDataY))]))))
        loss_lstm = np.sqrt(np.mean(np.square(test_label - np.array([pred_test[item] for item in range(train_size, len(TestDataY))]))))
        print('testing, ours: ', loss_ours,
              'seir: ', loss_seir,
              'lstm: ', loss_lstm)
        mlsim_loss_array.append(loss_ours)
        seir_loss_array.append(loss_seir)
        lstm_loss_array.append(loss_lstm)
    # LaTeX table rows: alternating training/testing RMSE per region.
    print('MLSim ', sep='', end='')
    for lsim in mlsim_loss_array:
        print('& ${:.2f}$ '.format(lsim), sep='', end='')
    print('\\\\')
    print('SEIR ', sep='', end='')
    for lsim in seir_loss_array:
        print('& ${:.2f}$ '.format(lsim), sep='', end='')
    print('\\\\')
    print('LSTM ', sep='', end='')
    for lsim in lstm_loss_array:
        print('& ${:.2f}$ '.format(lsim), sep='', end='')
    print('\\\\')
    pass
# Script entry point: parse CLI flags and run the full comparison pipeline.
if __name__ == '__main__':
    args = parse_args()
    main(args.load_inter)
| [
"[email protected]"
] | |
c5bbcb3afe3dfc342a8782caf16175ea2abeb6cc | ade52c685a0d5faf1b28d256cd4372e7082b3401 | /gradient_boosting_trees/gradient_boosting_regressor.py | a707d71d18c26b572c93da98f68ba857b03c4acc | [] | no_license | AfekIlayAdler/BootStrapDesicionTree | 7eb5244536e9eda0736207b432d6906256001918 | fb57ef8bc05c674123b883ae2c7d22b76d3f9a04 | refs/heads/master | 2020-09-05T09:17:13.200553 | 2020-04-13T13:43:28 | 2020-04-13T13:43:28 | 220,054,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,032 | py | from numpy import mean
from pandas import DataFrame, Series
from Tree.node import Leaf
from Tree.tree import CartRegressionTree, CartRegressionTreeKFold, MIN_SAMPLES_LEAF, MAX_DEPTH, MIN_IMPURITY_DECREASE, \
MIN_SAMPLES_SPLIT
from Tree.tree_feature_importance import node_based_feature_importance
from gradient_boosting_trees.gradient_boosting_abstract import GradientBoostingMachine, N_ESTIMATORS, LEARNING_RATE
class GradientBoostingRegressor(GradientBoostingMachine):
    """Least-squares gradient boosting over an injectable regression-tree class.

    Each boosting round fits *base_tree* to the current residuals y - f and
    adds its learning-rate-scaled predictions to the running estimate f.
    """

    def __init__(self, base_tree,
                 n_estimators,
                 learning_rate,
                 min_samples_leaf,
                 max_depth,
                 min_impurity_decrease,
                 min_samples_split):
        # n_estimators is the maximum number of rounds; n_trees counts the
        # rounds actually completed (early stop may end fitting sooner).
        self.n_estimators = n_estimators
        self.n_trees = 0
        self.learning_rate = learning_rate
        self.min_samples_split = min_samples_split
        self.min_impurity_decrease = min_impurity_decrease
        self.max_depth = max_depth
        self.min_samples_leaf = min_samples_leaf
        self.tree = base_tree
        self.base_prediction = None
        self.features = None
        self.trees = []

    def compute_gradient(self, x, y):
        """Fit one tree to the residuals *y* and return its predictions on *x*.

        (For least squares the negative gradient equals the residual, hence
        the name.) The fitted tree is appended to self.trees as a side effect.
        """
        temp_x = x.copy()
        temp_y = y.copy()
        tree = self.tree(
            min_samples_leaf=self.min_samples_leaf,
            max_depth=self.max_depth,
            min_impurity_decrease=self.min_impurity_decrease,
            min_samples_split=self.min_samples_split)
        tree.fit(temp_x, temp_y)
        gradients = tree.predict(x.to_dict('records'))
        self.trees.append(tree)
        return gradients

    def fit(self, X, y):
        """Boost up to n_estimators rounds, stopping early on a degenerate tree."""
        self.features = X.columns
        self.base_prediction = mean(y)
        f = mean(y)
        for m in range(self.n_estimators):
            if m > 0 and isinstance(self.trees[-1].root, Leaf):  # if the previous tree was a bark then we stop
                return
            pseudo_response = y - f
            gradients = self.compute_gradient(X, pseudo_response)
            f += self.learning_rate * gradients
            self.n_trees += 1

    def predict(self, data: DataFrame):
        """Return base prediction plus the scaled sum of every tree's output."""
        X = data.copy()
        if not isinstance(X, DataFrame):
            X = DataFrame(X, columns=self.features)
        X['prediction'] = self.base_prediction
        for tree in self.trees:
            X['prediction'] += self.learning_rate * tree.predict(X.to_dict('records'))
        return X['prediction']

    def compute_feature_importance(self, method='gain'):
        """Sum each tree's node-based feature importance across the ensemble."""
        gbm_feature_importances = {feature: 0 for feature in self.features}
        # TODO : deal with the case that a tree is a bark
        for tree in self.trees:
            tree_feature_importance = node_based_feature_importance(tree, method=method)
            for feature, feature_importance in tree_feature_importance.items():
                gbm_feature_importances[feature] += feature_importance
        return gbm_feature_importances
class CartGradientBoostingRegressor(GradientBoostingRegressor):
    """Gradient booster whose weak learners are plain CART regression trees."""

    def __init__(self,
                 n_estimators=N_ESTIMATORS,
                 learning_rate=LEARNING_RATE,
                 min_samples_leaf=MIN_SAMPLES_LEAF,
                 max_depth=MAX_DEPTH,
                 min_impurity_decrease=MIN_IMPURITY_DECREASE,
                 min_samples_split=MIN_SAMPLES_SPLIT):
        super().__init__(
            base_tree=CartRegressionTree,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            min_samples_leaf=min_samples_leaf,
            max_depth=max_depth,
            min_impurity_decrease=min_impurity_decrease,
            min_samples_split=min_samples_split)
class CartGradientBoostingRegressorKfold(GradientBoostingRegressor):
    """Gradient booster using the K-fold variant of the CART regression tree."""

    def __init__(self,
                 n_estimators=N_ESTIMATORS,
                 learning_rate=LEARNING_RATE,
                 min_samples_leaf=MIN_SAMPLES_LEAF,
                 max_depth=MAX_DEPTH,
                 min_impurity_decrease=MIN_IMPURITY_DECREASE,
                 min_samples_split=MIN_SAMPLES_SPLIT):
        super().__init__(
            base_tree=CartRegressionTreeKFold,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            min_samples_leaf=min_samples_leaf,
            max_depth=max_depth,
            min_impurity_decrease=min_impurity_decrease,
            min_samples_split=min_samples_split)
if __name__ == '__main__':
    # Manual experiment driver: fits the (optionally K-fold) gradient booster
    # on either the Boston housing CSV or a synthetic categorical dataset,
    # then plots trees and prints feature importances.
    import numpy as np
    import pandas as pd
    from pathlib import Path
    import time

    from Tree.tree_visualizer import TreeVisualizer

    # def create_x_y(regression=True):
    #     df = pd.DataFrame()
    #     n_rows = 10 ** 3
    #     n_numeric_cols = 0
    #     n_categorical_cols = 50
    #     n_categorical_values = 50
    #     for col in range(n_numeric_cols):
    #         df[col] = np.random.random(n_rows)
    #     for col in range(n_categorical_cols):
    #         df[col + n_numeric_cols] = np.random.randint(n_categorical_values, size=n_rows)
    #         df[col + n_numeric_cols] = df[col + n_numeric_cols].astype('category')
    #     y = np.random.random(n_rows) if regression else np.random.randint(2, size=n_rows)
    #     return df, pd.Series(y)

    # def create_x_y(category_size=52):
    #     A1 = 3
    #     A2 = 2
    #     SIGMA = 10
    #     N_ROWS = 10 ** 3
    #     CATEGORY_COLUMN_NAME = 'random_category'
    #     VAL_RATIO = 0.15
    #     X = pd.DataFrame()
    #     X['x1'] = np.random.randn(N_ROWS)
    #     X['x2'] = np.random.randn(N_ROWS)
    #     sigma = SIGMA * np.random.randn(N_ROWS)
    #     y = A1 * X['x1'] + A2 * X['x2'] + sigma
    #     X[CATEGORY_COLUMN_NAME] = np.random.randint(0, category_size, N_ROWS)
    #     X[CATEGORY_COLUMN_NAME] = X[CATEGORY_COLUMN_NAME].astype('category')
    #     return X, y

    def create_x_y():
        # Synthetic data: the target mixes a numeric threshold (weight a) with
        # membership in the lower half of a categorical column (weight 1 - a).
        a = 0.1
        a = float(a)
        N_ROWS = 1000
        category_size = 10
        CATEGORY_COLUMN_NAME = 'category'
        X = pd.DataFrame()
        X[CATEGORY_COLUMN_NAME] = np.random.randint(0, category_size, N_ROWS)
        X[CATEGORY_COLUMN_NAME] = X[CATEGORY_COLUMN_NAME].astype('category')
        X['x1'] = np.random.randn(N_ROWS)
        sigma = 0.1 * np.random.randn(N_ROWS)
        left_group = [i for i in range(category_size // 2)]
        y = a * (X['x1'] > 0) * 1 + (1 - a) * X[CATEGORY_COLUMN_NAME].isin(left_group) + sigma
        return X, y

    np.seterr(all='raise')
    EXP = 'simulation'  # 'simulation'
    KFOLD = True
    MAX_DEPTH = 3
    reg = CartGradientBoostingRegressorKfold(max_depth=3) if KFOLD else CartGradientBoostingRegressor(max_depth=3)
    if EXP == 'boston':
        input_path = Path.cwd().parent / "Datasets/boston_house_prices/boston_house_prices.csv"
        dtypes = {'CRIM': 'float64',
                  'ZN': 'float64',
                  'INDUS': 'float64',
                  'CHAS': 'category',
                  'NOX': 'float64',
                  'RM': 'float64',
                  'AGE': 'float64',
                  'DIS': 'float64',
                  'RAD': 'category',
                  'TAX': 'float64',
                  'PTRATIO': 'float64',
                  'B': 'float64',
                  'LSTAT': 'float64',
                  'y': 'float64'}
        df = pd.read_csv(input_path, dtype=dtypes)
        start = time.time()
        y = df['y']
        X = df.drop(columns=['y'])
        reg.fit(X, y)
        end = time.time()
        print(end - start)
        tree_vis = TreeVisualizer()
        tree_vis.plot(reg.trees[-1].root)
        print(reg.compute_feature_importance())
    else:
        np.random.seed(3)
        X, y = create_x_y()
        start = time.time()
        reg.fit(X, y)
        end = time.time()
        print(end - start)
        tree_vis = TreeVisualizer()
        tree_vis.plot(reg.trees[0].root)
        tree_vis.plot(reg.trees[1].root)
        tree_vis.plot(reg.trees[2].root)
        print(reg.n_trees)
        print(reg.compute_feature_importance())
| [
"[email protected]"
] | |
e3933c8422621cc69a76c38c56832a4776a372b5 | 610466c56a623c3e02af88f637f586a278786f94 | /Chapter-6-Trees/6.4-Minimum-Sum-Level.py | 94579af346476bfb31773d84c4c87791ce7604e7 | [] | no_license | HypeDis/DailyCodingProblem-Book | b8ed52e62531d2e12a8a92c00138f64361c473fb | 771f513c165ae9d7c3c2e36eddd8e3ba22e62ad2 | refs/heads/master | 2020-04-30T02:13:54.045570 | 2020-04-09T16:00:27 | 2020-04-09T16:00:27 | 176,553,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # find the level with the smallest sum in a binary tree
import collections
deque = collections.deque
defaultdict = collections.defaultdict
from TreeNode import Node
# Sample tree exercised by the demo at the bottom of the file:
#             1
#         -10     5
#      -11   -1  4   15
root = Node(1)
root.left = Node(-10)
root.right = Node(5)
root.left.left = Node(-11)
root.left.right = Node(-1)
root.right.left = Node(4)
root.right.right = Node(15)
def find_smallest_level(root):
    """Return the depth (root = 0) whose node values sum to the smallest total.

    Breadth-first traversal accumulating a per-level sum; ties resolve to the
    shallower level because levels are inserted in increasing-depth order.
    """
    sums_by_level = defaultdict(int)
    pending = deque([(root, 0)])
    while pending:
        node, level = pending.popleft()
        sums_by_level[level] += node.value
        # Enqueue children one level deeper (order within a level is irrelevant).
        for child in (node.left, node.right):
            if child:
                pending.append((child, level + 1))
    return min(sums_by_level, key=sums_by_level.get)
# Demo: the sample tree's level sums are 1, -5 and 7, so this prints 1.
minVal = find_smallest_level(root)
print(minVal)
| [
"[email protected]"
] | |
c7e7f8cef351cbec96b2f59afb5bcf996fd9fe7c | 70beea16c1086732ae8f08b6e2f07739daf6f861 | /python/cac_tripplanner/destinations/migrations/0032_auto_20171220_1407.py | 18fa060f7b57db4b62d1e10c4f54ec67a5a9300e | [
"Apache-2.0"
] | permissive | flibbertigibbet/cac-tripplanner | 63bd0d744f85097e07cc27eb5aec7844c21b2cf2 | 53b493361f05db7af08df24039c9e5c0968a2fd7 | refs/heads/develop | 2021-05-23T04:49:47.815711 | 2020-10-21T16:21:16 | 2020-10-21T16:21:16 | 31,267,987 | 2 | 1 | null | 2017-01-03T18:23:53 | 2015-02-24T16:00:30 | JavaScript | UTF-8 | Python | false | false | 623 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-20 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('destinations', '0031_remove_relax_category'),
]
operations = [
migrations.AlterField(
model_name='destination',
name='watershed_alliance',
field=models.BooleanField(default=False, help_text=b'\n Does this location belong to the <a target="_blank"\n href="https://www.watershedalliance.org/centers/">\n Alliance for Watershed Education</a>?'),
),
]
| [
"[email protected]"
] | |
3a2b365370c237b7be1266a276c875045c6571b9 | 480e33f95eec2e471c563d4c0661784c92396368 | /HeterogeneousCore/CUDAServices/scripts/cudaPreallocate.py | 331ddd30f73bd19b66fea42b3c13512c272e1f22 | [
"Apache-2.0"
] | permissive | cms-nanoAOD/cmssw | 4d836e5b76ae5075c232de5e062d286e2026e8bd | 4eccb8a758b605875003124dd55ea58552b86af1 | refs/heads/master-cmsswmaster | 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 | Apache-2.0 | 2022-05-23T07:58:09 | 2017-09-08T14:03:57 | C++ | UTF-8 | Python | false | false | 1,285 | py | #!/usr/bin/env python
from __future__ import print_function
import re
import sys
import argparse
def main(opts):
device = []
host = []
device_re = re.compile("Device.*allocated new device block.*\((?P<bytes>\d+) bytes")
host_re = re.compile("Host.*allocated new host block.*\((?P<bytes>\d+) bytes")
f = open(opts.file)
for line in f:
m = device_re.search(line)
if m:
device.append(m.group("bytes"))
continue
m = host_re.search(line)
if m:
host.append(m.group("bytes"))
f.close()
print("process.CUDAService.allocator.devicePreallocate = cms.untracked.vuint32(%s)" % ",".join(device))
print("process.CUDAService.allocator.hostPreallocate = cms.untracked.vuint32(%s)" % ",".join(host))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Extract CUDAService preallocation parameters from a log file.
To use, run the job once with "process.CUDAService.allocator.debug =
True" and direct the output to a file. Then run this script by passing
the file as an argument, and copy the output of this script back to
the configuration file.""")
parser.add_argument("file", type=str, help="Log file to parse")
opts = parser.parse_args()
main(opts)
| [
"[email protected]"
] | |
a69a28824ea7ca56ddf8ed693760d7af22bb38c5 | 48ffc4798649c5f80ca1796768d27f7387e314c9 | /ex45.py | 49f4c7e2fe57fa03c3b22ced4c8b790b18279047 | [] | no_license | DanielDavid48/Exercicios-PY | fb4e633b3bd61eebc6f6b98772015d2752f4e674 | 7e5bc9f1cb6586a376de7f398b9b6093558eb1a5 | refs/heads/main | 2023-04-19T09:41:42.828417 | 2021-05-06T17:31:15 | 2021-05-06T17:31:15 | 355,333,494 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | import emoji
from random import choice
print('Seja bem vindo ao JOKENPO"')
print('\033[32mVocê terá 3 opções para jogar: pedra; papel e tesoura.\033[m')
print("""Opções:
pedra
papel
tesoura""")
opcao = str(input('\033[32mInsira uma opção: \033[m'))
comandos = ['pedra', 'papel', 'tesoura']
sorteio = choice(comandos)
if sorteio == 'pedra':
if sorteio == opcao:
print(f'\033[33mAs jogadas foram iguais, tente novamente!\033[m')
if opcao == 'papel':
print(f"""Jogada da máquina: {sorteio}
Sua jogada: {opcao}
\033[32mPois então você ganhou!\033[m""")
if opcao == 'tesoura':
print(f"""Jogada da máquina: {sorteio}
Sua jogada: {opcao}
\033[31mPois então você perdeu!\033[m""")
elif sorteio == 'papel':
if sorteio == opcao:
print(f'\033[33mAs jogadas foram iguais, tente novamente!\033[m')
if opcao == 'pedra':
print(f"""Jogada da máquina: {sorteio}
Sua jogada: {opcao}
\033[31mPois então você perdeu!\033[m""")
if opcao == 'tesoura':
print(f"""Jogada da máquina: {sorteio}
Sua jogada: {opcao}
\033[32mPois então você ganhou!\033[m""")
elif sorteio == 'tesoura':
if sorteio == opcao:
print('\033[33mAs jogadas foram iguais, tente novamente!\033[m')
if opcao == 'pedra':
print(f"""Jogada da máquina: {sorteio}
Sua jogada: {opcao}
\033[32mPois então você ganhou!\033[m""")
if opcao == 'papel':
print(f"""Jogada da máquina: {sorteio}
Sua jogada: {opcao}
\033[31mPois então você perdeu!\033[m""")
else:
print('\033[31mVocê não inseriu uma opção válida!\033[m') | [
"[email protected]"
] | |
162d8945cb75bcdebb266800438923f8d7ffb325 | 384d0be5ac54b306b945cf38c10d9b0a44c975ea | /stack/glance/glance/tests/functional/keystone_utils.py | 397fae9f593ff18fb7d69ebb6075a5c3d45caf71 | [
"Apache-2.0"
] | permissive | ashokcse/openstack-bill | 05ae313637b3cfecba946d2a9b32e8c7609fc721 | 1a3d7575d4b341f64fa1764ed47e47a7504a9bcc | refs/heads/master | 2021-01-18T14:05:24.696165 | 2012-09-12T11:29:20 | 2012-09-12T11:29:20 | 5,424,267 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,329 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test class for keystone-related tests"""
import datetime
import os
import shutil
import sys
import time
from glance.tests import functional
from glance.tests.utils import execute, find_executable
pattieblack_token = '887665443383'
froggy_token = '383344566788'
admin_token = '999888777666'
bacon_token = '111111111111'
prosciutto_token = '222222222222'
class KeystoneServer(functional.Server):
"""
Class used to easily manage starting and stopping a keystone
server during functional test runs.
"""
def __init__(self, server_control, server_name, test_dir, port,
auth_port, admin_port):
super(KeystoneServer, self).__init__(test_dir, port)
self.no_venv = True
self.server_control = server_control
self.server_name = server_name
self.auth_port = auth_port
self.admin_port = admin_port
default_sql_connection = 'sqlite:///%s/keystone.db' % test_dir
self.sql_connection = os.environ.get('GLANCE_TEST_KEYSTONE_SQL',
default_sql_connection)
self.pid_file = os.path.join(self.test_dir, '%s.pid' % server_name)
self.log_file = os.path.join(self.test_dir, '%s.log' % server_name)
self.conf_base = """[DEFAULT]
verbose = %(verbose)s
debug = %(debug)s
default_store = sqlite
log_file = %(log_file)s
backends = keystone.backends.sqlalchemy
service-header-mappings = {
'nova' : 'X-Server-Management-Url',
'swift' : 'X-Storage-Url',
'cdn' : 'X-CDN-Management-Url'}
service_host = 0.0.0.0
service_port = %(auth_port)s
admin_host = 0.0.0.0
admin_port = %(admin_port)s
keystone-admin-role = Admin
keystone-service-admin-role = KeystoneServiceAdmin
[keystone.backends.sqlalchemy]
sql_connection = %(sql_connection)s
backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant',
'User', 'Credentials', 'EndpointTemplates', 'Token',
'Service']
sql_idle_timeout = 30
[pipeline:admin]
pipeline = urlrewritefilter admin_api
[pipeline:keystone-legacy-auth]
pipeline = urlrewritefilter legacy_auth RAX-KEY-extension service_api
[app:service_api]
paste.app_factory = keystone.server:service_app_factory
[app:admin_api]
paste.app_factory = keystone.server:admin_app_factory
[filter:urlrewritefilter]
paste.filter_factory = keystone.middleware.url:filter_factory
[filter:legacy_auth]
paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory
[filter:RAX-KEY-extension]
paste.filter_factory =
keystone.contrib.extensions.raxkey.frontend:filter_factory
"""
class AuthServer(KeystoneServer):
"""
Server object that starts/stops/manages the keystone auth server
"""
def __init__(self, server_control, test_dir, auth_port, admin_port):
super(AuthServer, self).__init__(server_control, 'auth',
test_dir, auth_port,
auth_port, admin_port)
class AdminServer(KeystoneServer):
"""
Server object that starts/stops/manages the keystone admin server
"""
def __init__(self, server_control, test_dir, auth_port, admin_port):
super(AdminServer, self).__init__(server_control, 'admin',
test_dir, admin_port,
auth_port, admin_port)
def conf_patch(server, **subs):
# First, pull the configuration file
conf_base = server.conf_base.split('\n')
# Need to find the pipeline
for idx, text in enumerate(conf_base):
if text.startswith('[pipeline:glance-'):
# OK, the line to modify is the next one...
modidx = idx + 1
break
# Now we need to replace the default context field...
conf_base[modidx] = conf_base[modidx].replace('context',
'tokenauth keystone_shim')
# Put the conf back together and append the keystone pieces
server.conf_base = '\n'.join(conf_base) + """
[filter:tokenauth]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
service_protocol = http
service_host = 127.0.0.1
service_port = %%(bind_port)s
auth_host = 127.0.0.1
auth_port = %(admin_port)s
auth_protocol = http
auth_uri = http://127.0.0.1:%(admin_port)s/
admin_token = 999888777666
delay_auth_decision = 1
[filter:keystone_shim]
paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory
""" % subs
class KeystoneTests(functional.FunctionalTest):
"""
Base test class for keystone-related tests.
"""
inited = False
KEYSTONE = None
def setUp(self):
"""
Look up keystone-control.
"""
if not self.inited:
KeystoneTests.inited = True
# Make sure we can import keystone
tmp_path = sys.path
if 'GLANCE_TEST_KEYSTONE_PATH' in os.environ:
sys.path.insert(1, os.environ['GLANCE_TEST_KEYSTONE_PATH'])
try:
import keystone
except ImportError, e:
# Can't import keystone
KeystoneTests.inited = True
KeystoneTests.disabled = True
KeystoneTests.disabled_message = (
"Cannot import keystone. Set "
"GLANCE_TEST_KEYSTONE_PATH to the directory containing "
"the keystone package.")
finally:
sys.path = tmp_path
# Try looking up the keystone executable
if self.KEYSTONE is None:
bindir = os.environ.get('GLANCE_TEST_KEYSTONE_BIN')
if bindir is not None:
cmdname = os.path.join(bindir, 'keystone-control')
else:
cmdname = 'keystone-control'
KeystoneTests.KEYSTONE = find_executable(cmdname)
# If we don't have keystone-control, disable ourself
if self.disabled is not True and self.KEYSTONE is None:
KeystoneTests.disabled = True
KeystoneTests.disabled_message = (
"Cannot find keystone-control. Set "
"GLANCE_TEST_KEYSTONE_BIN to the directory containing "
"the keystone binaries.")
# Make sure to call superclass
super(KeystoneTests, self).setUp()
# Also need keystone auth and admin ports...
self.auth_port = functional.get_unused_port()
self.admin_port = functional.get_unused_port()
# Set up the servers
self.auth_server = AuthServer(self.KEYSTONE, self.test_dir,
self.auth_port, self.admin_port)
self.admin_server = AdminServer(self.KEYSTONE, self.test_dir,
self.auth_port, self.admin_port)
# Include their pid files, too
self.pid_files.extend([self.auth_server.pid_file,
self.admin_server.pid_file])
# Have to patch the api and registry config files for keystone
# integration
conf_patch(self.api_server, auth_port=self.auth_port,
admin_port=self.admin_port)
conf_patch(self.registry_server, auth_port=self.auth_port,
admin_port=self.admin_port)
self.registry_server.conf_base += (
'context_class = glance.registry.context.RequestContext\n')
# Need to add keystone python path to the environment for
# glance
self.exec_env = None
if 'GLANCE_TEST_KEYSTONE_PATH' in os.environ:
def augment_python_path(currpath):
if not currpath:
return os.environ['GLANCE_TEST_KEYSTONE_PATH']
return (os.environ['GLANCE_TEST_KEYSTONE_PATH'] +
':' + currpath)
self.exec_env = dict(PYTHONPATH=augment_python_path)
self.api_server.exec_env = self.exec_env
self.registry_server.exec_env = self.exec_env
# Keystone can take care of itself on this score
def tearDown(self):
super(KeystoneTests, self).tearDown()
if not self.disabled:
self._reset_database(self.auth_server.sql_connection)
def start_servers(self, **kwargs):
"""
Starts the authentication and admin servers (keystone-auth and
keystone-admin) on unused ports, in addition to the Glance API
and Registry servers.
Any kwargs passed to this method will override the
configuration value in the conf file used in starting the
servers.
"""
# Start with the Glance servers
super(KeystoneTests, self).start_servers(**kwargs)
# Set up the data store
keystone_conf = self.auth_server.write_conf(**kwargs)
if not os.path.isdir(self.test_dir):
os.mkdir(self.test_dir)
datafile = os.path.join(os.path.dirname(__file__), 'data',
'keystone_data.py')
execute("python %s -c %s" % (datafile, keystone_conf),
no_venv=True, exec_env=self.exec_env)
# Start keystone-auth
exitcode, out, err = self.auth_server.start(**kwargs)
self.assertEqual(0, exitcode,
"Failed to spin up the Auth server. "
"Got: %s" % err)
self.assertTrue("Starting keystone-auth with" in out)
# Now keystone-admin
exitcode, out, err = self.admin_server.start(**kwargs)
self.assertEqual(0, exitcode,
"Failed to spin up the Admin server. "
"Got: %s" % err)
self.assertTrue("Starting keystone-admin with" in out)
self.wait_for_keystone_servers()
def wait_for_keystone_servers(self, timeout=3):
"""
Tight loop, waiting for both Auth and Admin server to be
available on the ports. Returns when both are pingable.
There is a timeout on waiting for the servers to come up.
:param timeout: Optional, defaults to 3 seconds
"""
now = datetime.datetime.now()
timeout_time = now + datetime.timedelta(seconds=timeout)
while (timeout_time > now):
if (self.ping_server(self.auth_port) and
self.ping_server(self.admin_port)):
return
now = datetime.datetime.now()
time.sleep(0.05)
self.assertFalse(True, "Failed to start keystone servers.")
def stop_servers(self):
"""
Called to stop the started servers in a normal fashion. Note
that cleanup() will stop the servers using a fairly draconian
method of sending a SIGTERM signal to the servers. Here, we
use the glance-control and keystone-control stop method to
gracefully shut the servers down. This method also asserts
that the shutdown was clean, and so it is meant to be called
during a normal test case sequence.
"""
# Spin down the auth server...
exitcode, out, err = self.auth_server.stop()
self.assertEqual(0, exitcode,
"Failed to spin down the Auth server. "
"Got: %s" % err)
# ...and the admin server...
exitcode, out, err = self.admin_server.stop()
self.assertEqual(0, exitcode,
"Failed to spin down the Admin server. "
"Got: %s" % err)
# Now on to everything else...
super(KeystoneTests, self).stop_servers()
| [
"[email protected]"
] | |
f4c60a6b9683e8bff777b2c1770eff89ad84842e | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/datasetup_20210414095452.py | f5a12d33d825aade94f6256ddefae15b5c5a1929 | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,652 | py | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import cv2
import os
import glob
from pathlib import Path
def cleanTestDirs():
for f in Path('/Users/Natalie/Desktop/cs1430/CV-final-project/data/test').glob('*.jpg'):
try:
f.unlink()
except OSError as e:
print("Error: %s : %s" % (f, e.strerror))
def cleanTrainDirs():
for f in Path('/Users/Natalie/Desktop/cs1430/CV-final-project/data/train').glob('*.jpg'):
try:
f.unlink()
except OSError as e:
print("Error: %s : %s" % (f, e.strerror))
def cleanAll():
cleanTestDirs()
cleanTrainDirs()
def createPixelArray(arr):
arr = list(map(int, arr.split()))
array = np.array(arr, dtype=np.uint8)
array = array.reshape((48, 48))
return array
def equalize_hist(img):
img = cv2.equalizeHist(img)
return img
def showImages(imgs):
_, axs = plt.subplots(1, len(imgs), figsize=(20, 20))
axs = axs.flatten()
for img, ax in zip(imgs, axs):
ax.imshow(img,cmap=plt.get_cmap('gray'))
plt.show()
def augmentIMG(img, task):
imgs = [img]
img1 = equalize_hist(img)
imgs.append(img1)
if(task == 3):
img2 = cv2.bilateralFilter(img1, d=9, sigmaColor=75, sigmaSpace=75)
imgs.append(img2)
img6 = cv2.flip(img, 1) # flip horizontally
imgs.append(img6)
return imgs
def saveIMG(arr, num, folderLoc):
im = Image.fromarray(arr)
filename = folderLoc + "image_"+ num+".jpg"
im.save(filename)
def createTrain(emotion_dict, task):
df = pd.read_csv('/Users/elizabethwang/Desktop/CS1430/CV-final-project/data/train.csv') # CHANGE ME
base_filename = /Users/Natalie/Desktop/cs1430/CV-final-project/data/train' # CHANGE ME
for index, row in df.iterrows():
if (row[' Usage'] == "Training"):
px = row[' pixels']
emot = int(row['emotion'])
emot_loc = emotion_dict[emot]
filename = base_filename + emot_loc
img = createPixelArray(px)
img_arr = augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
saveIMG(i, num, filename)
def createTest(emotion_dict , task):
df = pd.read_csv('/Users/elizabethwang/Desktop/CS1430/CV-final-project/data/icml_face_data.csv') # CHANGE ME
base_filename = "/Users/elizabethwang/Desktop/CS1430/CV-final-project/data/test/" # CHANGE ME
for index, row in df.iterrows():
if (row[' Usage'] == "PublicTest"):
px = row[' pixels']
emot = int(row['emotion'])
emot_loc = emotion_dict[emot]
filename = base_filename + emot_loc
img = createPixelArray(px)
img_arr = augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
saveIMG(i, num, filename)
def createEmotionDict():
emotionDict = {}
emotionDict[0]="angry/"
emotionDict[1]="disgust/"
emotionDict[2]="fear/"
emotionDict[3]="happy/"
emotionDict[4]="sad/"
emotionDict[5]="surprise/"
emotionDict[6] = "neutral/"
return emotionDict
def createSimpleData():
cleanAll()
emot_dict = createEmotionDict()
createTrain(emot_dict, 1)
createTest(emot_dict, 1)
def createComplexData():
cleanAll()
emot_dict = createEmotionDict()
createTrain(emot_dict, 3)
createTest(emot_dict, 3)
def main():
emot_dict = createEmotionDict()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
89e45a5cc761cc4a4a3c992bcb906b1d81f2e26d | e0b5a869c687fea3c9dda138734d25b3c5e68b88 | /venv/Lib/site-packages/hupper/cli.py | b6c0f6a609fb989443332a91ab55d53443a9b502 | [] | no_license | asen-krasimirov/Python-OOP-Course | b74de5f83fb3e287cb206d48c3db79d15657c902 | c6df3830168d8b8d780d4fb4ccfe67d1bb350f7e | refs/heads/main | 2023-02-01T04:09:33.796334 | 2020-12-15T14:56:59 | 2020-12-15T14:56:59 | 309,389,119 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | import argparse
import runpy
import sys
from .logger import LogLevel
from .reloader import start_reloader
def interval_parser(string):
"""Parses the shutdown or reload interval into an int greater than 0."""
msg = "Interval must be an int greater than 0"
try:
value = int(string)
if value <= 0:
raise argparse.ArgumentTypeError(msg)
return value
except ValueError:
raise argparse.ArgumentTypeError(msg)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", dest="module", required=True)
parser.add_argument("-w", dest="watch", action="append")
parser.add_argument("-x", dest="ignore", action="append")
parser.add_argument("-v", dest="verbose", action='store_true')
parser.add_argument("-q", dest="quiet", action='store_true')
parser.add_argument("--shutdown-interval", type=interval_parser)
parser.add_argument("--reload-interval", type=interval_parser)
args, unknown_args = parser.parse_known_args()
if args.quiet:
level = LogLevel.ERROR
elif args.verbose:
level = LogLevel.DEBUG
else:
level = LogLevel.INFO
# start_reloader has defaults for some values so we avoid passing
# arguments if we don't have to
reloader_kw = {}
if args.reload_interval is not None:
reloader_kw['reload_interval'] = args.reload_interval
if args.shutdown_interval is not None:
reloader_kw['shutdown_interval'] = args.shutdown_interval
reloader = start_reloader(
"hupper.cli.main",
verbose=level,
ignore_files=args.ignore,
**reloader_kw
)
sys.argv[1:] = unknown_args
sys.path.insert(0, "")
if args.watch:
reloader.watch_files(args.watch)
return runpy.run_module(args.module, alter_sys=True, run_name="__main__")
| [
"[email protected]"
] | |
03db626ae713ce0b0c3f4ee45b458daf28cdd3b7 | f764ceff10c7f4a636d8bb7002075cb1b543426b | /py/tests/day05_test.py | c3ef7c38e54ffb9edcc66f09f1d8ef8197e82686 | [] | no_license | gunnihinn/advent-2018 | 73eec4457c0b4abc4b0f871a42e0e58770ac457f | d4e2e05e2c6af11c28a07ec9d9178bc9f353c67b | refs/heads/master | 2020-04-09T07:27:48.009540 | 2018-12-12T14:00:50 | 2018-12-12T14:00:50 | 160,156,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import unittest
from py.day05 import *
class TestExample(unittest.TestCase):
def test_reduce1(self):
self.assertEqual(reduce('aA'), [])
def test_reduce2(self):
self.assertEqual(reduce('abBA'), [])
def test_reduce3(self):
self.assertEqual(reduce('abAB'), list('abAB'))
def test_reduce4(self):
self.assertEqual(reduce('aabAAB'), list('aabAAB'))
def test_reduce5(self):
self.assertEqual(reduce('dabAcCaCBAcCcaDA'), list('dabCBAcaDA'))
| [
"[email protected]"
] | |
3ae3c619c3f36bab8818e6ecf5b6d3e41d1ddced | 6e5c625136c36d845c72f7a4fdea482f05384590 | /venv/lib/python3.9/site-packages/setuptools/command/bdist_egg.py | 87a325c16db5f0dc5ea237611fd9406af7ea49b9 | [] | no_license | DvdCp/BattleWebApp | 26fe9e714c090add337d951a1672ef7747a1db33 | f8eeeccdb0e73bd4bcc9529adfe74436d8cf5c13 | refs/heads/master | 2023-07-08T09:53:10.640382 | 2021-08-08T18:09:13 | 2021-08-08T18:09:13 | 379,716,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,183 | py | """setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import re
import textwrap
import marshal
from setuptools.extern import six
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.extension import Library
from setuptools import Command
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def sorted_walk(dir):
"""Do os.walk in a reproducible way,
independent of indeterministic filesystem readdir order
"""
for base, dirs, files in os.walk(dir):
dirs.sort()
files.sort()
yield base, dirs, files
def write_stub(resource, pyfile):
_stub_template = textwrap.dedent("""
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, %r)
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
""").lstrip()
with open(pyfile, 'w') as f:
f.write(_stub_template % resource)
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p', "platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files, []
for item in old:
if isinstance(item, tuple) and len(item) == 2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized == site_packages or normalized.startswith(
site_packages + os.sep
):
item = realpath[len(site_packages) + 1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s", self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s", self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root
instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p, ext_name) in enumerate(ext_outputs):
filename, ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s", ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep, '/')
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root, 'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s", script_dir)
self.call_command('install_scripts', install_dir=script_dir,
no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s", native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s", native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_egg', get_python_version(), self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base, dirs, files in walk_egg(self.bdist_dir):
for name in files:
path = os.path.join(base, name)
if name.endswith('.py'):
log.debug("Deleting %s", path)
os.unlink(path)
if base.endswith('__pycache__'):
path_old = path
pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
m = re.match(pattern, name)
path_new = os.path.join(
base, os.pardir, m.group('name') + '.pyc')
log.info(
"Renaming file from [%s] to [%s]"
% (path_old, path_new))
try:
os.remove(path_new)
except OSError:
pass
os.rename(path_old, path_new)
def zip_safe(self):
safe = getattr(self.distribution, 'zip_safe', None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation', {}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable app point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = '{}.{}'.format(*sys.version_info)
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info, '')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir: ''}
for base, dirs, files in sorted_walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] +
filename + '/')
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext, Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir, filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = sorted_walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
    """Return the egg's zip-safety verdict: a pre-existing flag, else a scan."""
    # check for existing flag in EGG-INFO
    for flag, fn in safety_flags.items():
        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
            return flag
    if not can_scan():
        # compiled code can't be analyzed on this platform: assume unsafe
        return False
    safe = True
    for base, dirs, files in walk_egg(egg_dir):
        for name in files:
            if name.endswith('.py') or name.endswith('.pyw'):
                continue
            elif name.endswith('.pyc') or name.endswith('.pyo'):
                # always scan, even if we already know we're not safe
                safe = scan_module(egg_dir, base, name, stubs) and safe
    return safe
def write_safety_flag(egg_dir, safe):
    """Create or delete the zip-safety marker file(s) under egg_dir."""
    for flag_value, marker_name in safety_flags.items():
        marker_path = os.path.join(egg_dir, marker_name)
        marker_exists = os.path.exists(marker_path)
        # the marker should exist exactly when a verdict was reached and
        # matches this flag; otherwise any stale marker is removed
        should_exist = safe is not None and bool(safe) == flag_value
        if marker_exists and not should_exist:
            os.unlink(marker_path)
        elif should_exist and not marker_exists:
            marker = open(marker_path, 'wt')
            marker.write('\n')
            marker.close()
# marker filenames written into EGG-INFO to record the zip-safety verdict
safety_flags = {
    True: 'zip-safe',
    False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
    """Check whether module possibly uses unsafe-for-zipfile stuff.

    Loads the compiled module's code object via marshal and inspects the
    symbols it references; returns False when anything filesystem-dependent
    (__file__, __path__, source-introspecting inspect functions) is found.
    """
    filename = os.path.join(base, name)
    if filename[:-1] in stubs:
        return True  # Extension module
    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
    # the .pyc header size differs between interpreter generations
    if six.PY2:
        skip = 8  # skip magic & date
    elif sys.version_info < (3, 7):
        skip = 12  # skip magic & date & file size
    else:
        skip = 16  # skip magic & reserved? & date & file size
    # context manager guarantees the file is closed even if marshal fails
    with open(filename, 'rb') as f:
        f.read(skip)
        code = marshal.load(f)
    safe = True
    symbols = dict.fromkeys(iter_symbols(code))
    for bad in ['__file__', '__path__']:
        if bad in symbols:
            log.warn("%s: module references %s", module, bad)
            safe = False
    if 'inspect' in symbols:
        for bad in [
            # BUG FIX: a missing comma after 'getfile' concatenated it with
            # 'getsourcelines', so neither name was ever matched
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
            'getinnerframes', 'getouterframes', 'stack', 'trace'
        ]:
            if bad in symbols:
                log.warn("%s: module MAY be using inspect.%s", module, bad)
                safe = False
    return safe
def iter_symbols(code):
    """Yield names and strings used by `code` and its nested code objects"""
    for name in code.co_names:
        yield name
    for const in code.co_consts:
        if isinstance(const, six.string_types):
            yield const
        elif isinstance(const, CodeType):
            # recurse into functions/classes compiled inside this code object
            for name in iter_symbols(const):
                yield name
def can_scan():
    """Return True when compiled bytecode can be analyzed on this platform.

    Jython ('java*') and IronPython ('cli') don't produce CPython .pyc files,
    so the safety scan is impossible there.
    """
    if not sys.platform.startswith('java') and sys.platform != 'cli':
        # CPython, PyPy, etc.
        return True
    log.warn("Unable to analyze compiled code on this platform.")
    log.warn("Please ask the author to include a 'zip_safe'"
             " setting (either True or False) in the package's setup.py")
    # previously fell off the end returning an implicit None; make the
    # falsy result explicit for callers that test the return value
    return False
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
                 mode='w'):
    """Create a zip file from all the files under 'base_dir'.  The output
    zip file will be named 'base_dir' + ".zip".  Uses the "zipfile" Python
    module; when `dry_run` is set, the tree is only walked (and logged) and
    nothing is written.  Returns the name of the output zip file.
    """
    import zipfile
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    def visit(z, dirname, names):
        # add every regular file under dirname, archived relative to base_dir
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                p = path[len(base_dir) + 1:]
                if not dry_run:
                    z.write(path, p)
                log.debug("adding '%s'", p)

    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    if not dry_run:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        for dirname, dirs, files in sorted_walk(base_dir):
            visit(z, dirname, files)
        z.close()
    else:
        # dry run: walk with no archive so the same debug output is produced
        for dirname, dirs, files in sorted_walk(base_dir):
            visit(None, dirname, files)
    return zip_filename
| [
"[email protected]"
] | |
77e75cde2e9917f1df2476fea3c616112c507065 | d3919225e9a0af652eddd333cb5540ca065d6512 | /scirex/predictors/__init__.py | 5d33845887b44b1580dc4a24ed467e098d594d7d | [
"Apache-2.0"
] | permissive | viswavi/SciREX | ed230bf16f142950b60fbe4cd0bd4a49be65bb16 | 8e4b402e95d438c92eeecee315d389903a963b8d | refs/heads/master | 2023-05-19T08:38:16.709227 | 2021-05-24T15:50:30 | 2021-05-24T15:56:30 | 294,549,661 | 0 | 1 | Apache-2.0 | 2021-06-09T16:30:06 | 2020-09-11T00:02:13 | Python | UTF-8 | Python | false | false | 97 | py | # Because Allennlp decoding api won't work.
# Need to combine paragraph stuff into document stuff | [
"[email protected]"
] | |
ffaa0033881a2acb5b23949e5974638c8cc10895 | 64bc2537c5a05dd1c53eaf0684383c8f841f2cf3 | /src/player/MediaPlayer.py | 62fde0cb7743dd8c8b7708d28bd1c62cc916af99 | [] | no_license | fvcastellanos/pi-player | a8865221988f6658b77b8059458096a2b737a6d7 | 99edd22644d09974e62eb4161b20cea3aaa4401b | refs/heads/main | 2023-08-29T12:10:32.096032 | 2021-10-24T05:44:39 | 2021-10-24T05:44:39 | 417,582,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py | import vlc
import logging
import discid
from player.AudioDisc import AudioDisc
from player.Track import Track
logger = logging.getLogger(__name__)
class MediaPlayer:
    """Audio-CD playback built on python-vlc's media-list player.

    Track metadata is read with discid; one vlc.Media per track is queued
    into a MediaList that the MediaListPlayer plays through.
    """
    __mediaPath: str
    __player : vlc.MediaPlayer
    __mediaList : vlc.MediaList
    __mediaListPlayer : vlc.MediaListPlayer
    __eventManager: vlc.EventManager
    __media: vlc.Media
    # disc metadata loaded by loadDisk(); None until a disc has been read
    audioDisc: AudioDisc
    def __init__(self, path = "cdda:///dev/sr0"):
        # `path` is a VLC MRL; the default targets the first CD-ROM device
        self.__mediaPath = path
        self.audioDisc = None
        self.__createMediaPlayer()
    def play(self):
        logger.info("Play track")
        self.__mediaListPlayer.play()
    def stop(self):
        logger.info("Stop track")
        # stopping an idle player is skipped to avoid redundant VLC calls
        if self.__mediaListPlayer.is_playing():
            self.__mediaListPlayer.stop()
    def pause(self):
        logger.info("Pause track")
        self.__mediaListPlayer.pause()
    def next(self):
        logger.info("Next track")
        self.__mediaListPlayer.next()
    def prev(self):
        logger.info("Previous track")
        self.__mediaListPlayer.previous()
    def loadDisk(self):
        logger.info("Load disc")
        self.__loadDisk(self.__mediaPath)
    # ----------------------------------------------------------------------
    def __loadDisk(self, path: str):
        """Read disc metadata and queue one vlc.Media per track."""
        logger.info(f"Load media player resource: {path}")
        self.__getDiscInformation(path)
        if (self.audioDisc != None):
            for track in self.audioDisc.tracks:
                # track.playerName is passed as a media option — presumably a
                # cdda track selector; TODO confirm against Track
                self.__media = vlc.Media(path, (track.playerName))
                self.__mediaList.add_media(self.__media)
            self.__mediaListPlayer.set_media_list(self.__mediaList)
        self.__eventManager = self.__player.event_manager()
        # self.__eventManager__.event_attach(vlc.EventType.MediaParsedChanged, self.__getDiscInformation__)
        # self.__eventManager.event_attach(vlc.EventType.MediaPlayerTimeChanged, self.__mediaPlayerTimeChanged)
    def __mediaPlayerTimeChanged(self, event):
        # currently unused handler (see the commented event_attach above)
        logger.info(f"time changed: {event}")
    def __getDiscInformation(self, path: str):
        """Populate self.audioDisc via discid; leaves it unchanged on error."""
        try:
            # strip the leading 'cdda://' scheme to get the raw device path
            discDevice = path[7:]
            disc : discid.Disc = discid.read(device = discDevice)
            logger.info(f"disc id: {disc.id}")
            tracks = [Track(name = track.number, duration = track.seconds) for track in disc.tracks]
            self.audioDisc = AudioDisc(id = disc.id, tracks = tracks)
        except Exception as exception:
            logger.exception("Unable to load disc information", exception)
    def __createMediaPlayer(self):
        """Build the player / media-list / list-player trio from one instance."""
        vlcInstance = vlc.Instance()
        self.__player = vlcInstance.media_player_new()
        self.__mediaList = vlcInstance.media_list_new()
        self.__mediaListPlayer = vlcInstance.media_list_player_new()
        self.__mediaListPlayer.set_media_player(self.__player)
"[email protected]"
] | |
4cb2ac17b39afa644185ebcf5031f1894d4ba203 | c64713cee6f47b5ac3448ff5ecb358d9f4bd5542 | /todo_list/todo_list/urls.py | b18144eab99eb3d5218d3d86ecef2e95a6f2d8ea | [
"MIT"
] | permissive | ptyadana/django3-todo-app | ce902681d07e1f0ce47be6def70196a9e25168ef | 0cc753e25f2ace219e51539ee511e5ed844a1d7c | refs/heads/master | 2021-03-25T23:23:04.626284 | 2020-03-16T12:52:12 | 2020-03-16T12:52:12 | 247,654,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | """todo_list URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from todo_app import views
# from django.conf.urls import handler404
# handler404 = views.handler404
urlpatterns = [
    path('admin/', admin.site.urls),
    # Auth: account creation and session login/logout
    path('signup/',views.signupuser, name="signupuser"),
    path('login/',views.loginuser, name="loginuser"),
    path('logout/',views.logoutuser, name="logoutuser"),
    # To Do list views; '' and 'home/' both resolve to the same home view
    path('',views.home, name='home'),
    path('home/',views.home, name='home'),
    path('current/',views.currenttodos, name='currenttodos'),
    path('completed/',views.completedtodos, name='completedtodos'),
    path('create/',views.createtodos, name='createtodos'),
    # per-item actions, keyed by the todo's primary key
    path('todo/<int:todo_pk>',views.viewtodo, name='viewtodo'),
    path('todo/<int:todo_pk>/complete',views.completetodo, name='completetodo'),
    path('todo/<int:todo_pk>/delete',views.deletetodo, name='deletetodo'),
]
| [
"[email protected]"
] | |
193c66f9c3f80d05c19b0f91adaa16272a94ff58 | 8ffaaedd45732f077a3b20dcfd1e6a11c8cd779b | /deck_chores/parsers.py | 236d855ccb0ee5589e938b8724f8ba9455fdd29f | [
"ISC"
] | permissive | mumblepins-docker/deck-chores | 84dc72684767dad5d31fbf9e481b9e248da44ca2 | 36ee7501837154581c19d1ad1cead401539eb568 | refs/heads/master | 2020-04-05T18:53:29.369373 | 2018-10-25T19:04:58 | 2018-10-25T19:04:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,708 | py | from functools import lru_cache
from collections import defaultdict
import logging
from typing import DefaultDict, Dict, Optional, Tuple, Type, Union # noqa: F401
from apscheduler.triggers.cron import CronTrigger # type: ignore
from apscheduler.triggers.date import DateTrigger # type: ignore
from apscheduler.triggers.interval import IntervalTrigger # type: ignore
import cerberus # type: ignore
from pytz import all_timezones
from deck_chores.config import cfg
from deck_chores.exceptions import ParsingError
from deck_chores.utils import generate_id, split_string
####
Trigger = Union[CronTrigger, DateTrigger, IntervalTrigger]
####
NAME_INTERVAL_MAP = {
'weekly': (1, 0, 0, 0, 0),
'daily': (0, 1, 0, 0, 0),
'hourly': (0, 0, 1, 0, 0),
'every minute': (0, 0, 0, 1, 0),
'every second': (0, 0, 0, 0, 1),
}
####
log = logging.getLogger('deck_chores')
####
class JobConfigValidator(cerberus.Validator):
    """Cerberus validator for job definitions parsed from container labels.

    The ``_normalize_coerce_*`` methods turn a label string into a
    ``(trigger_class, args)`` tuple; ``_validator_trigger`` then
    instantiates the trigger once to prove the arguments are valid.
    """

    @staticmethod
    @lru_cache(128)
    def _fill_args(value: str, length: int, filling: str) -> Tuple[str, ...]:
        """Split `value` on whitespace and left-pad with `filling` to `length`.

        BUG FIX: the previous whitespace-collapsing loop
        (``while ' ' in value: value = value.replace(' ', ' ')``) replaced a
        space with a space under a condition that stays true for any spaced
        input, i.e. it never terminated.  ``str.split()`` with no argument
        strips and collapses all whitespace runs in one step.
        """
        tokens = value.split()
        return tuple([filling] * (length - len(tokens)) + tokens)

    def _normalize_coerce_cron(self, value: str) -> Tuple[Type, Tuple[str, ...]]:
        # TODO there's a constant here
        args = self._fill_args(value, len(CronTrigger.FIELD_NAMES), '*')
        return CronTrigger, args

    def _normalize_coerce_date(self, value: str) -> Tuple[Type, Tuple[str]]:
        return DateTrigger, (value,)

    def _normalize_coerce_interval(
        self, value: str
    ) -> Tuple[Type, Optional[Tuple[int, int, int, int, int]]]:
        # named intervals ('daily', 'hourly', ...) map directly; anything
        # else is parsed as up to five numbers separated by . : / or spaces
        args = NAME_INTERVAL_MAP.get(value)
        if args is None:
            for c in '.:/':
                value = value.replace(c, ' ')
            args = self._fill_args(value, 5, '0')  # type: ignore
            args = tuple(int(x) for x in args)  # type: ignore
        return IntervalTrigger, args

    def _validator_trigger(self, field, value):
        """Instantiate the coerced trigger once to validate its arguments."""
        if isinstance(value, str):  # normalization failed
            return
        trigger_class, args = value[0], value[1]
        try:
            trigger_class(*args, timezone=self.document.get('timezone', cfg.timezone))
        except Exception as e:
            message = "Error while instantiating a {trigger} with '{args}'.".format(
                trigger=trigger_class.__name__, args=args
            )
            if cfg.debug:
                message += "\n%s" % e
            self._error(field, message)
# Cerberus schema for a single job definition: exactly one of cron / date /
# interval must be present (they exclude each other), and max / user /
# timezone fall back to the global configuration defaults.
job_def_validator = JobConfigValidator(
    {
        'command': {'required': True},
        'cron': {
            'coerce': 'cron',
            'validator': 'trigger',
            'required': True,
            'excludes': ['date', 'interval'],
        },
        'date': {
            'coerce': 'date',
            'validator': 'trigger',
            'required': True,
            'excludes': ['cron', 'interval'],
        },
        'interval': {
            'coerce': 'interval',
            'validator': 'trigger',
            'required': True,
            'excludes': ['cron', 'date'],
        },
        'max': {'coerce': int, 'default_setter': lambda x: cfg.default_max},
        'name': {'regex': r'[a-z0-9.-]+'},
        'timezone': {
            'default_setter': lambda x: cfg.timezone,
            'allowed': all_timezones,
            'required': True,
        },
        'user': {'default_setter': lambda x: cfg.default_user},
    }
)
####
def labels(*args, **kwargs) -> Tuple[str, str, dict]:
    """Parse a container's labels into (service_id, options, job_definitions).

    On a ParsingError the payload is logged and empty values are returned so
    the caller can continue with other containers.
    """
    # don't call this from unittests (results are cached per container id)
    try:
        return _parse_labels(*args, **kwargs)
    except ParsingError as e:
        payload = e.args[0]
        if isinstance(payload, str):
            lines = payload.splitlines()
        elif isinstance(payload, list):
            lines = payload
        else:
            # a ParsingError is expected to carry a str or list payload
            raise RuntimeError('unexpected ParsingError payload: %r' % (payload,))
        for line in lines:
            if isinstance(line, str):
                log.error(line)
            elif isinstance(line, Exception):
                log.exception(line)
        return '', '', {}
    # NOTE: the former ``except Exception as e: raise e`` clause was a no-op
    # that only rewrote the traceback origin; unexpected errors now propagate
    # with their original traceback intact.
@lru_cache()
def _parse_labels(container_id: str) -> Tuple[str, str, dict]:
    """Fetch a container's labels and parse them into (service_id, options, jobs)."""
    _labels = cfg.client.containers.get(container_id).labels
    log.debug('Parsing labels: %s' % _labels)
    # only labels inside the deck-chores namespace are relevant
    filtered_labels = {k: v for k, v in _labels.items() if k.startswith(cfg.label_ns)}
    options = _parse_options(_labels.get(cfg.label_ns + 'options', None))
    service_id = _parse_service_id(_labels)
    if 'image' in options:
        # with the 'image' option the image's labels form the base layer and
        # the container's own labels override them
        _labels = _image_definition_labels_of_container(container_id).copy()
        _labels.update(filtered_labels)
    else:
        _labels = filtered_labels
    job_definitions = _parse_job_defintion(_labels)
    if service_id:
        log.debug('Assigning service id: %s' % service_id)
        for definition in job_definitions.values():
            # this is informative, not functional
            definition['service_id'] = service_id
    return service_id, options, job_definitions
@lru_cache(4)
def _parse_options(options: str) -> str:
    """Merge a container's `options` label with the configured defaults.

    Tokens prefixed with ``no`` negate the corresponding default option.
    Returns the effective options as a sorted, comma-joined string.
    """
    result = set(cfg.default_options)
    if options is not None:
        for option in split_string(options):
            if option.startswith('no'):
                # discard (not remove) so negating an option that isn't part
                # of the defaults doesn't raise a KeyError
                result.discard(option[2:])
            else:
                result.add(option)
    result_string = ','.join(sorted(x for x in result if x))
    log.debug('Parsed options: %s' % result_string)
    return result_string
def _parse_service_id(_labels: dict) -> str:
    """Derive a service id from the configured identity labels, or ''."""
    wanted = cfg.service_identifiers
    present = {key: value for key, value in _labels.items() if key in wanted}
    log.debug('Considering labels for service id: %s' % present)
    if not present:
        return ''
    missing = set(wanted) - set(present)
    if missing:
        # some, but not all, identity labels are set — that's a config error
        log.critical(
            'Missing service identity labels: {}'.format(', '.join(missing))
        )
        return ''
    # identifier order must match the configured order for a stable id
    return generate_id(*(present[key] for key in wanted))
@lru_cache()
def _image_definition_labels_of_container(container_id: str) -> Dict[str, str]:
    """Return the deck-chores-namespaced labels defined on the container's image."""
    image_labels = cfg.client.containers.get(container_id).image.labels
    prefix = cfg.label_ns
    return {key: value for key, value in image_labels.items() if key.startswith(prefix)}
def _parse_job_defintion(_labels: dict) -> dict:
    """Group namespaced labels by job name and validate each definition."""
    # NOTE(review): "defintion" is a typo in the function name; kept because
    # callers in this module use the misspelled name
    log.debug('Considering labels for job definitions: %s' % _labels)
    name_grouped_definitions = defaultdict(
        dict
    )  # type: DefaultDict[str, Dict[str, str]]
    for key, value in _labels.items():
        if key == cfg.label_ns + 'options':
            continue
        # '<ns><name>.<attribute>' -> name_grouped_definitions[name][attribute]
        name, attribute = key[len(cfg.label_ns):].rsplit('.', 1)
        name_grouped_definitions[name][attribute] = value
    log.debug('Definitions: %s' % name_grouped_definitions)
    result = {}
    for name, definition in name_grouped_definitions.items():
        log.debug('Processing %s' % name)
        definition['name'] = name
        if not job_def_validator(definition):
            # invalid definitions are logged and dropped, not fatal
            log.error('Misconfigured job definition: %s' % definition)
            log.error('Errors: %s' % job_def_validator.errors)
        else:
            job = job_def_validator.document
            # collapse whichever trigger key was used into a single 'trigger'
            for trigger_name in ('cron', 'date', 'interval'):
                trigger = job.pop(trigger_name, None)
                if trigger is None:
                    continue
                job['trigger'] = trigger
            log.debug('Normalized definition: %s' % job)
            result[name] = job
    return result
####
__all__ = [labels.__name__]
| [
"[email protected]"
] | |
5223dd247001b502827063554b4efad3205d446e | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4441501.3.spec | e67e344e2f6159571cddc2df4d0a27d1a2583ac7 | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,964 | spec | {
"id": "mgm4441501.3",
"metadata": {
"mgm4441501.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 99268,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 11981,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 304,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 451,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1538,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 100017,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 123,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 306,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 18311,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 304,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 608028,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 49,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 12619,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 176834,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 7233374,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 1724,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 38,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 2114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 3028,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2044327,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 91,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 349,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 26,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 572,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 542,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 473,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 207,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 23714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 81,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 949,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4441501.3/file/999.done.species.stats"
}
},
"id": "mgm4441501.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4441501.3"
}
},
"raw": {
"mgm4441501.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4441501.3"
}
}
} | [
"[email protected]"
] | |
4b8cc3d566e7d58bc9404bd86ae945be8cf6d1cd | 5f1c96bd362e5344210aa828e92f33f3382974bd | /src/core/table.py | 03410095bd7a8d5de9080f36584cf484967d9160 | [] | no_license | huagor/doudizhu-3 | d7fc8408b068a86179ce96f35a85ad1733ab4b7c | ca3ba87f7b0d2553a47584e03c499b191f471563 | refs/heads/master | 2021-06-20T11:02:40.254917 | 2017-07-05T15:39:04 | 2017-07-05T15:39:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,977 | py | import logging
import random
from typing import List
from tornado.ioloop import IOLoop
from core.robot import AiPlayer
from net.protocol import Protocol as Pt
logger = logging.getLogger('ddz')
class Table(object):
    """One dou-dizhu table: three seats plus dealing, bidding and scoring."""
    # lifecycle constants mirrored by the numeric `state` attribute
    WAITING = 0
    PLAYING = 1
    END = 2
    CLOSED = 3
    def __init__(self):
        self.uid = Table.gen_id()
        # three fixed seats; None marks an empty seat
        self.players = [None, None, None]
        self.state = 0  # 0 waiting 1 playing 2 end 3 closed
        # undealt card ids (0..53); after dealing, the last 3 go to the landlord
        self.pokers: List[int] = []
        self.multiple = 1
        self.call_score = 0
        # seat index (0-2) of the acting player
        self.whose_turn = 0
        self.last_shot_seat = 0
        self.last_shot_poker = []
        # base stake used by calc_coin
        self.room = 100
        # fill empty seats with AI players shortly after the table is created
        IOLoop.current().call_later(0.1, self.ai_join, nth=1)
    def ai_join(self, nth=1):
        """Add AI players so the table reaches three occupants."""
        size = self.size()
        if size == 0 or size == 3:
            return
        # NOTE(review): with two humans seated this also re-schedules itself
        # once before adding an AI — confirm the intended join timing
        if size == 2 and nth == 1:
            IOLoop.current().call_later(1, self.ai_join, nth=2)
        p1 = AiPlayer(11, 'AI-1', self.players[0])
        p1.to_server([Pt.REQ_JOIN_TABLE, self.uid])
        if size == 1:
            p2 = AiPlayer(12, 'AI-2', self.players[0])
            p2.to_server([Pt.REQ_JOIN_TABLE, self.uid])
    def sync_table(self):
        """Broadcast every seat's (uid, name) to all seated players."""
        ps = []
        for p in self.players:
            if p:
                ps.append((p.uid, p.name))
            else:
                # placeholder for an empty seat
                ps.append((-1, ''))
        response = [Pt.RSP_JOIN_TABLE, self.uid, ps]
        for player in self.players:
            if player:
                player.send(response)
    def deal_poker(self):
        """Shuffle 54 cards, deal 17 per seat and keep 3 for the landlord."""
        # if not all(p and p.ready for p in self.players):
        #     return
        self.state = 1
        self.pokers = [i for i in range(54)]
        random.shuffle(self.pokers)
        for i in range(51):
            self.players[i % 3].hand_pokers.append(self.pokers.pop())
        # a random seat starts the bidding
        self.whose_turn = random.randint(0, 2)
        for p in self.players:
            p.hand_pokers.sort()
            response = [Pt.RSP_DEAL_POKER, self.turn_player.uid, p.hand_pokers]
            p.send(response)
    def call_score_end(self, score):
        """Finish bidding: the acting player becomes landlord at `score`."""
        self.call_score = score
        self.turn_player.role = 2  # 2 marks the landlord role
        # landlord receives the three remaining face-down cards
        self.turn_player.hand_pokers += self.pokers
        response = [Pt.RSP_SHOW_POKER, self.turn_player.uid, self.pokers]
        for p in self.players:
            p.send(response)
        logger.info('Player[%d] IS LANDLORD[%s]', self.turn_player.uid, str(self.pokers))
    def go_next_turn(self):
        """Advance the turn to the next seat, wrapping 2 -> 0."""
        self.whose_turn += 1
        if self.whose_turn == 3:
            self.whose_turn = 0
    @property
    def turn_player(self):
        # the player object seated at the acting seat
        return self.players[self.whose_turn]
    def calc_coin(self, winner):
        """Compute per-seat coin deltas at game end (flat 100-coin tax each)."""
        self.state = 2
        coins = []
        tax = 100
        for p in self.players:
            p.ready = False
            coin = self.room * self.call_score * self.multiple
            if p == winner:
                coins.append(coin - tax)
            else:
                coins.append(-coin - tax)
        return coins
    def add(self, player):
        """Seat `player` at the first empty seat; False when the table is full."""
        for i, p in enumerate(self.players):
            if not p:
                player.seat = i
                self.players[i] = player
                return True
        logger.error('Player[%d] JOIN Table[%d] FULL', player.pid, self.uid)
        return False
    def remove(self, player):
        """Unseat `player`; close the table (state 3) once all seats empty."""
        # NOTE(review): the else-branch logs "NOT IN" once for every *other*
        # seat, even when the player is found — confirm whether intended
        for i, p in enumerate(self.players):
            if p and p.pid == player.pid:
                self.players[i] = None
            else:
                logger.error('Player[%d] NOT IN Table[%d]', player.pid, self.uid)
        if all(p is None for p in self.players):
            self.state = 3
            logger.error('Table[%d] close', self.uid)
            return True
        return False
    def size(self):
        # number of occupied seats
        return sum([p is not None for p in self.players])
    def __str__(self):
        return '[{}: {}]'.format(self.uid, self.players)
    # process-wide incrementing table id source
    counter = 0
    @classmethod
    def gen_id(cls):
        cls.counter += 1
        return cls.counter
| [
"[email protected]"
] | |
c45ce4f83dd33f28f821f06e41f9651f9bd68f9c | daedd7d771bcb3179b9c9bada700a15f20bdad66 | /preprocess.py | 0d0efa59213f1e187d5364cf7b31b3633440d948 | [] | no_license | BenaichaA/MURA-Deep-Learning-Project | ba15ceb8417fbe9d992672e76400354853844bdd | a188c5c88943bc1b25032f1c9f9752450e7bf006 | refs/heads/master | 2020-05-14T13:36:45.450858 | 2019-05-01T20:46:29 | 2019-05-01T20:46:29 | 181,816,148 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | import csv
import numpy as np
import tensorflow as tf
def format_labels():
    """Join image paths with per-study labels and save both as .npy arrays."""
    # hardcoded to the MURA v1.1 validation split
    paths = "MURA-v1.1/valid_image_paths.csv"
    labels = "MURA-v1.1/valid_labeled_studies.csv"
    filePaths = []
    with open(paths) as pathsfile:
        readPaths = csv.reader(pathsfile, delimiter=',')
        for row in readPaths:
            filePaths.append(row[0])
    filePaths = np.array(filePaths)
    fileLabels = np.zeros(filePaths.size)
    with open(labels) as labelsfile:
        readLabels = csv.reader(labelsfile, delimiter=',')
        for row in readLabels:
            # mark every image whose path contains this study directory
            # with the study's label
            # NOTE(review): np.core.defchararray is deprecated in modern
            # NumPy; np.char.find is the supported spelling
            fileLabels[np.flatnonzero(np.core.defchararray.find(filePaths, row[0])!=-1)] = row[1]
    np.save("valid_labels_array.npy", fileLabels)
    np.save("valid_paths_array.npy", filePaths)
def format_studies():
    """Read the study path/label CSV and persist it as a numpy array file."""
    csv_path = "MURA-v1.1/valid_labeled_studies.csv"
    with open(csv_path) as handle:
        # each CSV record is (study directory, integer label)
        records = [
            [record[0], int(record[1])]
            for record in csv.reader(handle, delimiter=',')
        ]
    np.save("valid_studies_array.npy", records)
def parse_and_augment(filename, label, row, col):
    """Load a PNG, pad-resize to (row, col) and apply train-time augmentation.

    Augmentation: random rotation of up to +/-0.3 rad and a random
    horizontal flip.  Returns (image, label) as TF1 graph tensors.
    """
    image = tf.read_file(filename)
    image = tf.image.decode_png(image, channels=1)  # single-channel input
    image = tf.cast(tf.image.resize_image_with_pad(image, row, col), tf.float32)
    # rotation angle drawn uniformly from [-0.3, 0.3] radians
    image = tf.contrib.image.rotate(image, angles=tf.random.uniform([1], minval=-0.3, maxval=0.3)[0], interpolation='BILINEAR')
    image = tf.image.random_flip_left_right(image)
    # replicate the gray channel to RGB — presumably for a pretrained
    # 3-channel backbone; TODO confirm against the model definition
    image = tf.image.grayscale_to_rgb(image)
    return image, label
def parse_file(filename, label, row, col):
    """Load a PNG, pad-resize to (row, col) and convert to RGB — no augmentation.

    Evaluation-time counterpart of the augmenting loader in this module;
    returns (image, label) as TF1 graph tensors.
    """
    image = tf.read_file(filename)
    image = tf.image.decode_png(image, channels=1)
    image = tf.cast(tf.image.resize_image_with_pad(image, row, col), tf.float32)
    image = tf.image.grayscale_to_rgb(image)
    return image, label
def shuffle(a, b):
    """Apply one random permutation to both arrays, keeping rows paired."""
    order = np.random.permutation(len(a))
    return a[order], b[order]
| [
"[email protected]"
] | |
ef70822fe7bc5e5ed1a9943009eca03accec250d | 485f4cd1643b38dcdca46510a32f333b31742b61 | /src/simple-potentiometer.py | 64cc615a673357c493fb2efcf546663807e26694 | [
"Apache-2.0"
] | permissive | athenian-programming/cpx-crickit-tutorial | c3b66ab27295ab53b5882955d2fccf4afc9963d6 | 0c7c6b82e44e4389fcee0977e064e651c7cb3678 | refs/heads/master | 2020-04-09T01:18:48.578300 | 2018-12-02T18:58:19 | 2018-12-02T18:58:19 | 159,899,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | import time
import board
from analogio import AnalogIn
# A2 is used here, but A1 and A3 are also candidates
analogin = AnalogIn(board.A2)
while True:
    # raw reading is 16-bit (0-65535); dividing yields a 0.0-1.0 fraction
    # NOTE(review): despite the name this is a fraction of the reference
    # voltage, not volts — multiply by the reference voltage to get volts
    voltage = analogin.value / 65536
    print((voltage,))  # tuple form so a serial plotter can graph the value
    time.sleep(0.1)
| [
"[email protected]"
] | |
396232a34fba1ca17fea228d8ca0ede2baf546f0 | 81549b7fb2464574c275cc4c6f71e42c3018be4e | /packager/file.py | 339a541d69dd4851413fcd84098d185d845dddd3 | [] | no_license | jeffbowman/packager | e4a499ef6d3b2db74ff213c9db4f685a88867588 | 60bea84b916a0c83fce8087ff72d12a508c283d0 | refs/heads/master | 2021-01-10T09:02:21.147680 | 2016-04-05T15:38:22 | 2016-04-05T15:38:22 | 55,028,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | from abc import ABCMeta, abstractmethod
from command import Command
import shutil
import os
import logging
class File(Command):
    """Base class for commands that operate on the file system.

    Abstract: instantiate one of the concrete subclasses (`Move`, `Copy`).

    Expected ``opts`` mapping::

        source:
          name: String              # required
          contents: [String, ...]   # optional entries under source
        target:
          name: String              # required

    Raises:
        Exception: if the 'source' or 'target' tag is missing, or if
            'contents' is present but is not a list.
    """
    # NOTE(review): __metaclass__ only has an effect on Python 2; on Python 3
    # the class is not actually abstract unless declared with
    # `metaclass=ABCMeta` in the class statement.
    __metaclass__ = ABCMeta

    def __init__(self, opts):
        # `key in dict` replaces dict.has_key(), which was removed in
        # Python 3; it behaves identically on Python 2.
        if 'source' not in opts:
            raise Exception('source tag must be provided in ', opts)
        else:
            self.source = opts['source']['name']
        if 'target' not in opts:
            raise Exception('target tag must be provided in ', opts)
        else:
            self.target = opts['target']['name']
        if 'contents' in opts['source']:
            self.src_contents = opts['source']['contents']
            # isinstance also accepts list subclasses (type(..) == list did not).
            if not isinstance(self.src_contents, list):
                raise Exception('source contents must be a list in ', opts)
        else:
            self.src_contents = None

    @abstractmethod
    def do(self):
        """Perform the file operation; implemented by subclasses."""
        pass
class Move(File):
    """Command to move files or directories.

    Will recursively move a file or directory to a target location.

    Required:
        source
        target
    Optional:
        contents

    yaml example:
    ---
    - move:
        source:
          name: String
          contents: [list of String]
        target:
          name: String
    """

    def __init__(self, opts):
        super(Move, self).__init__(opts)
        # Optional 'execute' hook; when set, do() delegates to the base class.
        # `in` replaces the Python 2-only dict.has_key().
        if 'execute' in opts:
            self.execute = opts['execute']
        else:
            self.execute = None

    def do(self):
        """Move the selected entries from source into target, creating the
        target directory when needed."""
        if self.execute is not None:
            super(Move, self).do()
            return
        src = os.path.abspath(self.source)
        target = os.path.abspath(self.target)
        if not os.path.exists(target):
            print('making target: ' + target)
            os.makedirs(target)
        # Either the explicit 'contents' list or everything under source.
        if self.src_contents is not None:
            files = self.src_contents
        else:
            files = os.listdir(src)
        for f in [src + '/' + f for f in files]:
            logging.info('move ' + f + ' -> ' + target)
            shutil.move(f, target)
class Copy(File):
    """Command to copy files or directories.

    Will recursively copy a file or directory to a target location.

    Required:
        source
        target
    Optional:
        contents

    yaml example:
    ---
    - copy:
        source:
          name: String
          contents: [list of String]
        target:
          name: String
    """

    def __init__(self, opts):
        super(Copy, self).__init__(opts)
        # Optional 'execute' hook; when set, do() delegates to the base class.
        # `in` replaces the Python 2-only dict.has_key().
        if 'execute' in opts:
            self.execute = opts['execute']
        else:
            self.execute = None

    def do(self):
        """Copy the selected entries from source into target, creating the
        target directory when needed."""
        if self.execute is not None:
            # BUGFIX: was `super(File, self).do()` with no `return`, which
            # skipped File in the MRO and then fell through into the copy
            # loop. Mirror Move.do(): delegate to the parent hook and stop.
            super(Copy, self).do()
            return
        src = os.path.abspath(self.source)
        target = os.path.abspath(self.target)
        if not os.path.exists(target):
            os.makedirs(target)
        if self.src_contents is not None:
            files = [src + '/' + f for f in self.src_contents]
        else:
            files = [src + '/' + f for f in os.listdir(src)]
        for f in files:
            logging.info('copy ' + f + ' -> ' + target)
            shutil.copy(f, target)
| [
"[email protected]"
] | |
b551f6a67c562c1ba60365cb11afee1bb21d6d0e | f902bfafd93fe96515f5b12fa8630b365cf71ce9 | /tests/events/test_group.py | c0a25f25ff814c12d580094ebf7f0a857a69ae97 | [] | no_license | hickb/Houdini-Toolbox | 69e999a924f387313382c254de2771d66c649f64 | 5309b2608f86b12aab8e90e754269ed2d2a44653 | refs/heads/master | 2022-06-18T23:08:45.481414 | 2020-05-06T19:25:02 | 2020-05-06T19:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | """Tests for ht.events.group module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Houdini Toolbox Imports
import ht.events.group
# =============================================================================
# TESTS
# =============================================================================
class Test_HoudiniEventGroup(object):
    """Test ht.events.group.HoudiniEventGroup class."""

    def test___init__(self):
        """A freshly built group starts with an empty data dict and event map."""
        group = ht.events.group.HoudiniEventGroup()

        assert group._data == {}
        assert group._event_map == {}

    # Properties

    def test_data(self, mocker):
        """The 'data' property exposes the private _data mapping."""
        mocker.patch.object(
            ht.events.group.HoudiniEventGroup, "__init__", lambda x: None
        )

        group = ht.events.group.HoudiniEventGroup()

        sentinel = mocker.MagicMock(spec=dict)
        group._data = sentinel

        assert group.data == sentinel

    def test_event_map(self, mocker):
        """The 'event_map' property exposes the private _event_map mapping."""
        mocker.patch.object(
            ht.events.group.HoudiniEventGroup, "__init__", lambda x: None
        )

        group = ht.events.group.HoudiniEventGroup()

        sentinel = mocker.MagicMock(spec=dict)
        group._event_map = sentinel

        assert group.event_map == sentinel
| [
"[email protected]"
] | |
2d0dc2c09a870c30cd508203de167546dc1deb35 | aff27667091f8f9217784d1918722caeb2ec124c | /runp-heroku.py | ee2fc08e61973a8d081df9faf97c92ed730f96a2 | [] | no_license | PritishC/flask-microblog | 2353d74ba3268c1ec6aa4ee1e314b359d347d361 | 00f87d4112a63f0eedb14293c34363c93e90078d | refs/heads/master | 2016-09-05T20:26:48.161299 | 2014-09-02T07:11:42 | 2014-09-02T07:11:42 | 22,011,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | #!/home/pritishc/Envs/ircflask/bin/python
from app import app
| [
"[email protected]"
] | |
8093a2ae2e4f724c10c39558d775be3dd8e508b5 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=32/sched.py | a11fb9d9a8e363d961dbdb7bb6a5f953e71cec31 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | -X FMLP -Q 0 -L 1 66 250
-X FMLP -Q 0 -L 1 57 200
-X FMLP -Q 0 -L 1 57 400
-X FMLP -Q 0 -L 1 53 250
-X FMLP -Q 0 -L 1 45 200
-X FMLP -Q 1 -L 1 42 150
-X FMLP -Q 1 -L 1 40 125
-X FMLP -Q 1 -L 1 34 125
-X FMLP -Q 1 -L 1 32 150
-X FMLP -Q 1 -L 1 31 150
-X FMLP -Q 2 -L 1 29 100
-X FMLP -Q 2 -L 1 26 200
-X FMLP -Q 2 -L 1 20 250
-X FMLP -Q 2 -L 1 20 125
-X FMLP -Q 3 -L 1 16 125
-X FMLP -Q 3 -L 1 14 100
-X FMLP -Q 3 -L 1 13 125
-X FMLP -Q 3 -L 1 4 100
| [
"[email protected]"
] | |
67600faad0af4b5d359fc64839e03d55c8fa8155 | dce438aec547bb36a48c5f1667db20f6c877e4a6 | /work.py | 501af3eeafcbd13b6f7f2a248a92eadfa26b8b7c | [] | no_license | tuhin47/Signature-Recognition | 139d9ce808dc1bb23ca68155010ebf625b6eb656 | 483f8e114c824b81a748c66edb585813000a1e7b | refs/heads/master | 2020-03-07T13:56:55.717869 | 2018-03-31T08:40:32 | 2018-03-31T08:40:32 | 127,514,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,349 | py | import prepross
import tensorflow as tf
import os
# Pin the NumPy and TensorFlow RNG seeds up front so weight initialisation
# and shuffling are reproducible across runs.
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
# Images per gradient step.
batch_size = 30
#classes = ['A','B','C', 'D', 'E', 'F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','W','X','Y','Z']
# Class labels — presumably one folder per signer under data/train; confirm.
classes =["1","10","11","12","13","14","15",
"16","17","18","19","2","3","4","5","6","7","8"]
#,"ch001","ch002","ch003","ch004","ch005","ch006","ch007",
#"ch008","ch009","ch010","dut001","dut002","dut003","dut004","dut006","dut009","dut012","dut014","dut015",
#"dut016"]
#"001","002","003","004","005","006","007","008","009","010","011","012",
num_classes = len(classes)
# History buffers — declared here but never appended to below.
train_accuracy=[]
validation_accuracy=[]
validation_loss=[]
epoc_list=[]
# Fraction of the data held out for validation.
validation_size = 0.2
# Input images are resized to img_size x img_size with num_channels channels.
img_size = 128
num_channels = 3
train_path='data/train'
data = prepross.read_train_sets(train_path, img_size, classes, validation_size=validation_size)
print("SUCCESSFULLY READED")
print("===================")
print("TRAINING:\t\t{}".format(len(data.train.labels)))
print("VALIDATION:\t{}".format(len(data.valid.labels)))
# TF1-style graph session and input/label placeholders.
session = tf.Session()
x = tf.placeholder(tf.float32, shape=[None, img_size,img_size,num_channels], name='x')
print(x)
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)
# Convolution hyper-parameters: 3x3 kernels with 32/32/64 filters.
filter_size_conv1 = 3
num_filters_conv1 = 32
filter_size_conv2 = 3
num_filters_conv2 = 32
filter_size_conv3 = 3
num_filters_conv3 = 64
#
# filter_size_conv4 = 3
# num_filters_conv4 = 64
# Width of the first fully connected layer.
fc_layer_size = 128
def create_weights(shape):
    """Return a trainable weight tensor drawn from N(0, 0.05), truncated."""
    init = tf.truncated_normal(shape, stddev=0.05)
    return tf.Variable(init)
def create_biases(size):
    """Return a trainable bias vector of length `size`, initialised to 0.05."""
    init = tf.constant(0.05, shape=[size])
    return tf.Variable(init)
def create_convolutional_layer(input,
                               num_input_channels,
                               conv_filter_size,
                               num_filters):
    """Conv (stride 1, SAME padding) + bias, then 2x2 max-pool, then ReLU."""
    kernel = create_weights(
        shape=[conv_filter_size, conv_filter_size, num_input_channels, num_filters])
    bias = create_biases(num_filters)

    conv = tf.nn.conv2d(input=input,
                        filter=kernel,
                        strides=[1, 1, 1, 1],
                        padding='SAME') + bias
    pooled = tf.nn.max_pool(value=conv,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='SAME')
    return tf.nn.relu(pooled)
def create_flatten_layer(layer):
    """Reshape a 4-D feature map to (batch, features) for the dense layers."""
    num_features = layer.get_shape()[1:4].num_elements()
    return tf.reshape(layer, [-1, num_features])
def create_fc_layer(input,
                    num_inputs,
                    num_outputs,
                    use_relu=True):
    """Fully connected layer with a sigmoid activation.

    NOTE: `use_relu` is accepted but ignored — a sigmoid is always applied,
    matching the original implementation.
    """
    w = create_weights(shape=[num_inputs, num_outputs])
    b = create_biases(num_outputs)

    # Fully connected layer
    return tf.nn.sigmoid(tf.matmul(input, w) + b)
# Stack two conv/pool/ReLU blocks on the input placeholder.
layer_conv1 = create_convolutional_layer(input=x,
               num_input_channels=num_channels,
               conv_filter_size=filter_size_conv1,
               num_filters=num_filters_conv1)
layer_conv2 = create_convolutional_layer(input=layer_conv1,
               num_input_channels=num_filters_conv1,
               conv_filter_size=filter_size_conv2,
               num_filters=num_filters_conv2)
# layer_conv3= create_convolutional_layer(input=layer_conv2,
#                num_input_channels=num_filters_conv2,
#                conv_filter_size=filter_size_conv3,
#                num_filters=num_filters_conv3)
# layer_conv4= create_convolutional_layer(input=layer_conv3,
#                num_input_channels=num_filters_conv3,
#                conv_filter_size=filter_size_conv4,
#                num_filters=num_filters_conv4)
layer_flat = create_flatten_layer(layer_conv2)
# Two dense layers; create_fc_layer applies a sigmoid inside each.
layer_fc1 = create_fc_layer(input=layer_flat,
                     num_inputs=layer_flat.get_shape()[1:4].num_elements(),
                     num_outputs=fc_layer_size
					)
layer_fc2 = create_fc_layer(input=layer_fc1,
                     num_inputs=fc_layer_size,
                     num_outputs=num_classes
                     )
y_pred = tf.nn.softmax(layer_fc2,name='y_pred')
y_pred_cls = tf.argmax(y_pred, dimension=1)
session.run(tf.global_variables_initializer())
# NOTE(review): layer_fc2 has already passed through a sigmoid, yet it is fed
# to softmax_cross_entropy_with_logits, which expects raw logits — confirm.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
                                                    labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Second init run also covers the Adam slot variables created by minimize().
session.run(tf.global_variables_initializer())
def show_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
    """Print train/validation accuracy and validation loss for one epoch."""
    train_acc = session.run(accuracy, feed_dict=feed_dict_train)
    valid_acc = session.run(accuracy, feed_dict=feed_dict_validate)
    template = " EPOCH {0} === TRAIN Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%}, Validation-Loss: {3:.3f}"
    print(template.format(epoch + 1, train_acc, valid_acc, val_loss))
# Running count of optimisation steps across train() calls.
total_iterations = 0
saver = tf.train.Saver()
def train(num_iteration):
    """Run num_iteration optimisation steps; once per epoch, report metrics
    and save a checkpoint next to the working directory."""
    global total_iterations
    print ('PATH: ' +os.getcwd())
    for i in range(total_iterations,
                   total_iterations + num_iteration):
        x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
        x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)
        feed_dict_tr = {x: x_batch,
                           y_true: y_true_batch}
        feed_dict_val = {x: x_valid_batch,
                              y_true: y_valid_batch}
        session.run(optimizer, feed_dict=feed_dict_tr)
        # Epoch boundary: one epoch == num_examples / batch_size steps.
        if i % int(data.train.num_examples/batch_size) == 0:
            val_loss = session.run(cost, feed_dict=feed_dict_val)
            epoch = int(i / int(data.train.num_examples/batch_size))
            show_progress(epoch, feed_dict_tr, feed_dict_val, val_loss)
            saver.save(session, os.getcwd()+'/F-Model-CharacterRecognition')
    total_iterations += num_iteration
train(num_iteration=1200)
| [
"[email protected]"
] | |
94b75392a59f3e245de35e339c21bdd3032fb6b9 | 1162986e591732abb8e165046e838a23ba7c1614 | /Final_Project/object-detection/detect.py | 76d9118d15bcd50d4329ea251711e5d0f2c58ebc | [] | no_license | ering0427/Interactive-Lab-Hub | 6dc69da247c7e78626e2d3b4c2d4bc58a8086178 | f0492b6428ae30fe8163e96060e95b4e38374c59 | refs/heads/Spring2021 | 2023-04-20T16:14:10.593078 | 2021-05-16T18:16:05 | 2021-05-16T18:16:05 | 337,768,758 | 0 | 0 | null | 2021-03-24T13:20:38 | 2021-02-10T15:36:47 | Python | UTF-8 | Python | false | false | 7,917 | py | # How to load a Tensorflow model using OpenCV
# Jean Vitor de Paulo Blog - https://jeanvitor.com/tensorflow-object-detecion-opencv/
# David edited some stuff
from __future__ import print_function
import numpy as np
import cv2
import sys
from pyimagesearch.notifications import TwilioNotifier
from pyimagesearch.utils import Conf
from imutils.video import VideoStream
from imutils.io import TempFile
from datetime import datetime
from datetime import date
import argparse
import imutils
import signal
import time
import pygame
# Prepare the alert sound played when an intruder (dog) is detected.
pygame.mixer.init()
pygame.mixer.music.load("recording.ogg")
# --conf points at the configuration file consumed by Conf/TwilioNotifier.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
    help="Path to the input configuration file")
args = vars(ap.parse_args())
# load the configuration file and initialize the Twilio notifier
conf = Conf(args["conf"])
tn = TwilioNotifier(conf)
# Detection state shared across frames of the main loop below.
catAppear = False
notifSent = False
intruderFound = False
writer = None
# W and H are never used later in this script.
W = None
H = None
# Load a model imported from Tensorflow
# SSD MobileNet v2 (COCO) via OpenCV's DNN module; both files must be in CWD.
tensorflowNet = cv2.dnn.readNetFromTensorflow('frozen_inference_graph.pb', 'ssd_mobilenet_v2_coco_2018_03_29.pbtxt')
# COCO label map: a detection's class id indexes into CLASSES.
CLASSES = ["background", "person", "bicycle", "car", "motorcycle",
    "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant",
    "unknown", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse",
    "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "unknown", "backpack",
    "umbrella", "unknown", "unknown", "handbag", "tie", "suitcase", "frisbee", "skis",
    "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
    "surfboard", "tennis racket", "bottle", "unknown", "wine glass", "cup", "fork", "knife",
    "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog",
    "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "unknown", "dining table",
    "unknown", "unknown", "toilet", "unknown", "tv", "laptop", "mouse", "remote", "keyboard",
    "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "unknown",
    "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush" ]
# One random BGR colour per class for drawing boxes.
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
img = None
webCam = False
# Disabled legacy path: read an image given on the command line.
#if(len(sys.argv)>1 and not sys.argv[-1]== "noWindow"):
#    try:
#        print("I'll try to read your image");
#        img = cv2.imread(sys.argv[1])
#        if img is None:
#            print("Failed to load image file:", sys.argv[1])
#    except:
#        print("Failed to load the image are you sure that:", sys.argv[1],"is a path to an image?")
#else:
# Fall back to a static test image when no camera can be opened.
try:
    print("Trying to open the Webcam.")
    cap = cv2.VideoCapture(0)
    if cap is None or not cap.isOpened():
        # NOTE(review): raising a plain string is a TypeError on Python 3;
        # the bare except below still catches it, so the fallback works.
        raise("No camera")
    webCam = True
except:
    img = cv2.imread("../data/test.jpg")
    print("Using default image.")
# Main capture/detect loop: one iteration per frame.
while(True):
    if webCam:
        ret, img = cap.read()
    # Snapshot last frame's state so appear/leave edges can be detected.
    catPrevAppear = catAppear
    dogPrevAppear = intruderFound
    rows, cols, channels = img.shape
    # Run the SSD detector on a 300x300 blob built from the current frame.
    tensorflowNet.setInput(cv2.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))
    # Runs a forward pass to compute the net output
    networkOutput = tensorflowNet.forward()
    # Loop on the outputs; box coordinates are normalised to 0..1.
    for detection in networkOutput[0,0]:
        score = float(detection[2])
        idx = int(detection[1])
        # A dog is treated as the food-stealing intruder.
        if CLASSES[idx]=='dog':
            if score > 0.2:
                intruderFound = True
                # Scale the normalised box to pixel coordinates.
                left = detection[3] * cols
                top = detection[4] * rows
                right = detection[5] * cols
                bottom = detection[6] * rows
                cv2.imwrite('intruder.jpg',img)
                if intruderFound and not dogPrevAppear:
                    startT = datetime.now()
                elif dogPrevAppear:
                    # time difference
                    # BUGFIX: the elapsed time was stored in `timeD` but the
                    # condition below tested `timeDiff` (the cat timer, which
                    # may be unset). Use one consistent name.
                    timeDiff = (datetime.now() - startT).seconds
                    # if dog appears for more than 2 seconds
                    if intruderFound and timeDiff > 2:
                        print("Intruder detected!")
                        dateA = date.today().strftime("%A, %B %d %Y")
                        msg = "Intruder detected on {} at {}! Intruder is stealing food! Please take action!".format(dateA,datetime.now().strftime("%I:%M%p"))
                        tn.send(msg, 'intruder.jpg')
                #draw a red rectangle around detected objects
                cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), COLORS[idx], thickness=2)
                y = int(top) - 15 if int(top) - 15 > 15 else int(top) + 15
                cv2.putText(img, 'intruder', (int(left), y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                pygame.mixer.music.play()
        elif CLASSES[idx] == 'cat':
            if score > 0.2:
                # BUGFIX: rows/cols were swapped here (left/top scaled by the
                # wrong axis); match the dog branch above.
                left = detection[3] * cols
                top = detection[4] * rows
                right = detection[5] * cols
                bottom = detection[6] * rows
                #draw a red rectangle around detected objects
                cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), COLORS[idx], thickness=2)
                y = int(top) - 15 if int(top) - 15 > 15 else int(top) + 15
                cv2.putText(img, CLASSES[idx], (int(left), y),cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                # NOTE(review): catAppear is never reset to False once set, so
                # appear-edges only fire for the first visit — confirm intent.
                catAppear = True
                # if cat just appeared: start timing and recording.
                if catAppear and not catPrevAppear:
                    startTime = datetime.now()
                    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
                    writer = cv2.VideoWriter('/home/pi/Interactive-Lab-Hub/Final_Project/object-detection/output.mp4', fourcc, 30, (cols, rows),True)
                    print("Cat appeared!")
                # if cat previously appeared
                elif catPrevAppear:
                    timeDiff = (datetime.now() - startTime).seconds
                    # if cat appears for more than 20 seconds
                    if catAppear and timeDiff > 20:
                        if not notifSent:
                            msg = "Bella is still eating!"
                            writer.release()
                            writer = None
                            #tn.send(msg, 'output.mp4')
                            notifSent = True
                            print("Bella is still eating!")
            else:
                # Bella left
                if catPrevAppear:
                    catPrevAppear = False
                    if notifSent:
                        notifSent = False
                    else:
                        endTime = datetime.now()
                        totalSeconds = (endTime - startTime).seconds
                        if totalSeconds >= 2:
                            dateAppeared = date.today().strftime("%A, %B %d %Y")
                            msg = "Bella ate her food on {} at {} for {} " \
                                "seconds.".format(dateAppeared,
                                startTime.strftime("%I:%M%p"), totalSeconds)
                            if writer is not None:
                                writer.release()
                                writer = None
                            #tn.send(msg, 'output.mp4')
    if webCam:
        # Headless mode: dump the annotated frame to disk instead of a window.
        if sys.argv[-1] == "noWindow":
            print("Finished a frame")
            cv2.imwrite('detected_out.jpg',img)
            continue
        cv2.imshow('detected (press q to quit)',img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cap.release()
            break
    else:
        # Static-image mode: process a single frame, then leave the loop.
        break
    if writer is not None:
        writer.write(img)
# Flush any open recording and save the last annotated frame.
if writer is not None:
    writer.release()
cv2.imwrite('detected_out.jpg',img)
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
c1fd06e65d5b13122d537fbad5112f1dbf409c67 | d98ee572bfc1bb67415f791603ad912b33f08aa0 | /main.py | ee1850cefde429b4bcb73521b38113218cadfd49 | [] | no_license | august-Garra96/ejemploGit2 | 0f5066606c5163f14c2e1ad3591a502f98d9ad9b | e35038c72caeb7cf692075130f8b7b77bb1af141 | refs/heads/master | 2022-12-13T14:04:51.578019 | 2020-09-13T20:46:51 | 2020-09-13T20:46:51 | 295,235,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # This is a sample Python script.
# Press Mayús+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
    """Print a greeting for *name* on stdout."""
    # Press Ctrl+F8 to toggle the breakpoint.
    greeting = f'Hi, {name}'
    print(greeting)
# Press the green button in the gutter to run the script.
# Entry-point guard: greet only when run as a script, not on import.
if __name__ == '__main__':
    print_hi('Dennis Augusto Garrafa Solis')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| [
"[email protected]"
] | |
3e499bdabe692c7f2499dfa1097f3d676afb1eb0 | 2bdb914b6e3fe80ffaeeb2b4e1c0acacba9c94fd | /2019/42/Oppgave 8.py | 4ece68e605b32991928c59fb18da44ba0f4986f2 | [] | no_license | havardnyboe/ProgModX | 9a38c104d09c356a3146f6920e21567d3910569b | 222dcb5bf354953e22ab9fb2d1b8ddd2422a427f | refs/heads/master | 2022-10-06T17:19:22.895947 | 2020-06-10T16:02:05 | 2020-06-10T16:02:05 | 261,474,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | liste = ["Tekst", 42, 3.1415, True, [1, 2], (2, 3)]
neste = 0
for i in liste:
print(i, "er en", type(liste[neste]))
neste+=1 | [
"[email protected]"
] | |
369a652da5dd92413eb7e654dfd2414a1d5eb23f | ad5c146994748e0605e9c17e34f2777b02c3acfc | /progress/todos/urls.py | ae46e61f33eaf83a13fcbbb7cc22ccd94c0f0a78 | [] | no_license | riteshghorse/progress-tracker | 173435c7d693bb1eafdc30048f8742bf79d736be | 1c6e2e0246781caac1b9e8cbf0a1a16165b06f63 | refs/heads/master | 2021-05-16T01:31:05.095964 | 2020-06-12T23:16:12 | 2020-06-12T23:16:12 | 107,129,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^newEntry$', views.newEntry, name='newEntry'),
] | [
"[email protected]"
] | |
d30883a494d959001c238d295767929e6d16f5eb | b3c4ec7e3a8db4c18f81a2ada9274497211eb2b1 | /E2Ppy/plot_field.py | 5fda0a880ca6885aeefb50be84808446c684a5d1 | [] | no_license | PNMZR/Python3_FEM_Codes_Recomposed_from_Other_Programming_Languages | fe1de520ba642881e4eebaa8a3e8b41edb3f688b | 28c20d3204a3d3aa9500e85bf7944c193037a8d1 | refs/heads/master | 2022-12-11T16:14:00.309967 | 2020-09-09T03:56:31 | 2020-09-09T03:56:31 | 288,892,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 09:50:08 2020
@author: lenovo
"""
import matplotlib.tri as mtri
import mesh_region as mesh
import matplotlib as mpl
import numpy as np
def plot_field(pt1, pt2, pt3, pt4, numx, numy, fac, u_x, u_y, field,
fig, ax, elemType='T3'):
"""
Forms a color dependent finite element mesh for plotting outputs
"""
node, triangles = mesh.mesh_region(pt1, pt2, pt3, pt4, numx, numy, elemType)
node = node + fac*np.vstack([u_x, u_y]).T
x = node[:, 0]
y = node[:, 1]
z = field
triang = mtri.Triangulation(x, y, triangles-1)
# 绘制有限元网格以及等值线图
p = ax.tricontourf(triang, z, 150, cmap=mpl.cm.jet)
#ax.triplot(triang, 'ko-')
fig.colorbar(p, ax=ax)
ax.axis('equal')
| [
"[email protected]"
] | |
d6985200f3c6aa0ed01ddf7d13cc1ecf1ef31fec | d70cdc9a46676acb4c4396cd69f1e8253921858b | /migrations/versions/e0f3844e1176_.py | f86933a8560816039d7de1598b57efd9fad11afd | [] | no_license | Lemon000/Blog | c57105b1260bcfa13bb413713fb8e2bd57400b15 | 4654a94a72039c8b4664c15787ca6d5a56c67d61 | refs/heads/master | 2022-12-13T04:31:46.968468 | 2020-01-08T08:41:45 | 2020-01-08T08:41:45 | 232,509,848 | 0 | 0 | null | 2022-12-08T01:07:18 | 2020-01-08T07:57:39 | HTML | UTF-8 | Python | false | false | 983 | py | """empty message
Revision ID: e0f3844e1176
Revises:
Create Date: 2020-01-02 17:16:06.788196
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e0f3844e1176'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``users`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=32), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('confirmed', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``users`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('users')
    # ### end Alembic commands ###
| [
"[email protected]"
] | |
9681745e47d6259063c31935e92c174f3594e7ae | dc8aa9a5735611ef1cf4002a3bf589bbf8499978 | /coursera.org/ciencia-computacao-python-conceitos/semana4/digitos_adjacentes.py | 1bb4f4b6905b34cfcfcd9ae3f14bd958db24194b | [] | no_license | pilgerowski/bricabraque | 03d7d4abffcfc206c776b96d84eca8b48a3663f0 | a2570adcc39bdfcbda6c6f434f6de698ceb0611d | refs/heads/master | 2021-01-22T22:13:36.899246 | 2017-07-27T00:38:35 | 2017-07-27T00:38:35 | 92,764,006 | 0 | 1 | null | 2017-07-27T00:38:36 | 2017-05-29T18:10:45 | Python | UTF-8 | Python | false | false | 330 | py | iNumero = int(input("Digite um número inteiro: "))
bHaAdjacenteIgual = False
iAnterior = iNumero % 10
while(iNumero > 0):
iNumero = iNumero // 10
iDigito = iNumero % 10
if(iDigito == iAnterior):
bHaAdjacenteIgual = True
iAnterior = iDigito
if(bHaAdjacenteIgual):
print("sim")
else:
print("não")
| [
"[email protected]"
] | |
dc16006d123ff7fe0d618a47f7c7bc1f3b5d8f56 | 99bfe0755549458de6431e6a7376e164d56ddb46 | /test.py | 20025aec21bd0990db3190ed2192e720dbf706be | [] | no_license | wisch628/spotify-python | 8deebbe4843cdcb570426f21ffa482f356d49928 | 41e3bbd9025e77a67191b9da896ab6d3bbca1938 | refs/heads/main | 2023-06-24T17:21:16.194499 | 2021-07-28T07:29:55 | 2021-07-28T07:29:55 | 390,256,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | import requests
import os
# Spotify app credentials come from the environment, never hard-coded.
CLIENT_ID=os.environ.get('CLIENT_ID')
CLIENT_SECRET=os.environ.get('CLIENT_SECRET')
AUTH_URL = 'https://accounts.spotify.com/api/token'
# Client-credentials OAuth flow: exchange id/secret for a bearer token.
auth_response = requests.post(AUTH_URL, {
    'grant_type': 'client_credentials',
    'client_id': CLIENT_ID,
    'client_secret': CLIENT_SECRET,
})
auth_response_data = auth_response.json()
access_token = auth_response_data['access_token']
BASE_URL = 'https://api.spotify.com/v1/'
headers = {
    'Authorization': 'Bearer {token}'.format(token=access_token)
}
# Spotify artist ID; fetch the artist object and print its display name.
artist = '0TnOYISbd1XYRBk9myaseg'
r = requests.get(BASE_URL + 'artists/' + artist, headers=headers)
r = r.json()
print(r['name'])
"[email protected]"
] | |
b160afb6ee8b864232cf72c473b0df3fc5fed880 | 0bc334d987b3742d26fb9b8a78d9efc4ca39e926 | /main.py | b1e6b63ee826d9cc85b07d949084fb02672b522f | [] | no_license | LiuZH-enjoy/word2vec | 822f0b8b302b869dfa0d1c01c2ce2aaa5a5cda0f | 0edd078da3b48eecde2bc5765219af6250ab86ff | refs/heads/main | 2023-05-13T21:32:51.343580 | 2021-06-11T07:03:15 | 2021-06-11T07:03:15 | 375,935,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import torch
import argparse
import train
import preprocess
import module
import matplotlib.pyplot as plt
# Command-line hyper-parameters (help strings are in Chinese; defaults shown).
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=8, help='batch大小(默认为8)')
parser.add_argument('--epochs', type=int, default=2000, help='epoch数(默认为2000)')
parser.add_argument('--embedding_size', type=int, default=2, help='隐藏层维度(默认为2)')
parser.add_argument('--window_size', type=int, default=2, help='背景词长度(默认为2)')
args = parser.parse_args()
# Force CPU-only training.
args.device = torch.device('cpu')
args.cuda = False
dtype = torch.FloatTensor
# Toy corpus; the vocabulary is every distinct whitespace-separated token.
sentences = ["jack like dog", "jack like cat", "jack like animal",
             "dog cat animal", "banana apple cat dog like", "dog fish milk like",
             "dog cat animal like", "jack like apple", "apple like", "jack like banana",
             "apple banana jack movie book music like", "cat dog hate", "cat dog like"]
word_sequence = " ".join(sentences).split() # ['jack', 'like', 'dog', 'jack', 'like', 'cat', 'animal',...]
vocab = list(set(word_sequence)) # build words vocabulary
word2idx = {w: i for i, w in enumerate(vocab)} # {'jack':0, 'like':1,...}
args.vocab_size=len(vocab)
model = module.Word2vec(args)
loader = preprocess.make_data(word_sequence, word2idx, args)
train.train(loader, model, args)
# Scatter-plot the learned 2-D embeddings, one labelled point per word.
# (W is presumably the input embedding matrix — confirm module.Word2vec.)
for i, label in enumerate(vocab):
    W, WT = model.parameters()
    x,y = float(W[i][0]), float(W[i][1])
    plt.scatter(x, y)
    plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')
plt.show()
"[email protected]"
] | |
5a475209b7b16ea0b71a631ccc0dbad6bbfff20f | bf668891c4f8f26b746f83a9c412b7ed9addc835 | /demo.py | 2fdd67c0cd0428326f993fc06c7307e8f4bd91f0 | [
"MIT"
] | permissive | AlbertXu233/RDIE-for-SR-IQA | 94f4ab59574b29045c687ba2d25dd713630d1ecd | 814831af09f6d166b4b7ad24730499fe2b1a164e | refs/heads/main | 2023-06-21T02:05:08.494621 | 2021-07-09T01:18:18 | 2021-07-09T01:18:18 | 384,162,586 | 0 | 0 | MIT | 2021-07-08T15:19:08 | 2021-07-08T15:03:38 | null | UTF-8 | Python | false | false | 1,150 | py | import os
import cv2
from RDIE import *
# Pair ground-truth (hr) and super-resolved (sr) images by sorted filename.
hrImgNames = os.listdir('imgs/hr')
srImgNames = os.listdir('imgs/sr')
# NOTE(review): sorting on the name *string*, so '10' orders before '2' —
# confirm the hr and sr listings always pair up correctly.
hrImgNames.sort(key=lambda x:x.split('.')[0])
srImgNames.sort(key=lambda x:x.split('.')[0])
hrImgNames = ['imgs/hr/'+name for name in hrImgNames]
srImgNames = ['imgs/sr/'+name for name in srImgNames]
#for calculate RDIE
# Score every hr/sr pair with the RDIE metric; save the score array.
metric = RDIE()
values = []
for hrName,srName in zip(hrImgNames,srImgNames):
    hr = cv2.imread(hrName)
    sr = cv2.imread(srName)
    val = metric.call(hr,sr)
    print(val)
    values.append(val)
values = np.array(values)
np.save('imgs/values.npy',values)
#for calculate and save RIE maps
# Compute per-image RIE maps and write them out as 8-bit images.
RIEModel = RIE3C()
os.makedirs('imgs/RIE_hr',exist_ok=True)
os.makedirs('imgs/RIE_sr',exist_ok=True)
for hrName,srName in zip(hrImgNames,srImgNames):
    hr = cv2.imread(hrName)
    sr = cv2.imread(srName)
    # hr[None,:] adds a batch dimension; the transpose/[0] below presumably
    # extracts a single-channel map scaled to 0-255 — confirm RIE3C's layout.
    hrMap = RIEModel(hr[None,:])
    srMap = RIEModel(sr[None,:])
    hrMap = 255*tf.transpose(hrMap,[3,1,2,0])[0]
    srMap = 255*tf.transpose(srMap,[3,1,2,0])[0]
    cv2.imwrite('imgs/RIE_hr/'+hrName.split('/')[-1],np.array(hrMap,np.uint8))
    cv2.imwrite('imgs/RIE_sr/'+srName.split('/')[-1],np.array(srMap,np.uint8))
"[email protected]"
] | |
2bc9e59e83813ef8d9d7d458526f16966d131ff3 | 33a424e56d9f843c5052464d035cb1cca807ddca | /agent/temboardagent/version.py | bb3bb25db908f2a69980f1fecc915047114ac13e | [
"PostgreSQL"
] | permissive | pgiraud/temboard | 9dd6cfb5d6cb5ad5cacd21b079138e9f4b7b7c49 | 4967cf27de8f731a05167d56a6d3bbd7550c4a3b | refs/heads/master | 2022-10-12T18:34:26.950034 | 2022-09-23T14:25:06 | 2022-09-23T14:25:06 | 84,056,172 | 0 | 0 | NOASSERTION | 2021-05-21T14:37:21 | 2017-03-06T09:45:21 | Python | UTF-8 | Python | false | false | 28 | py | __version__ = "8.0b1.post9"
| [
"[email protected]"
] | |
748cba2b3befffb2fb4003ded47dc999b5531b0a | 95d7f71d2467d48e91f9e9b2ee6b668794d92b66 | /official/core/base_trainer_test.py | a346173a587b1c4e65d6d4d62c357debcafb6a81 | [
"Apache-2.0"
] | permissive | ChulanZhang/models | e130d43033d2f34a05fc31f345e3364ad69b4590 | 341fe24b69d5f9a15872641dbec6aab8a09e1d8e | refs/heads/master | 2021-06-24T08:02:58.397775 | 2021-06-20T15:48:15 | 2021-06-20T15:48:15 | 218,371,200 | 1 | 0 | Apache-2.0 | 2019-10-29T19:50:47 | 2019-10-29T19:50:46 | null | UTF-8 | Python | false | false | 13,715 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_models.core.trainers.trainer."""
# pylint: disable=g-direct-tensorflow-import
import multiprocessing
import os
import sys
from absl.testing import parameterized
import numpy as np
import portpicker
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.core import base_trainer as trainer_lib
from official.core import config_definitions as cfg
from official.core import train_lib
from official.utils.testing import mock_task
# The test binary's name (argv[0]) encodes the platform it targets:
# *_test_tpu and *_test_gpu builds toggle these flags.
TPU_TEST = 'test_tpu' in sys.argv[0]
GPU_TEST = 'test_gpu' in sys.argv[0]
def all_strategy_combinations():
  """Parameter combinations over the distribution strategies under test."""
  strategies = [
      strategy_combinations.default_strategy,
      strategy_combinations.cloud_tpu_strategy,
      strategy_combinations.one_device_strategy_gpu,
  ]
  return combinations.combine(distribution=strategies)
def create_in_process_cluster(num_workers, num_ps):
"""Creates and starts local servers and returns the cluster_resolver."""
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {}
cluster_dict['worker'] = ['localhost:%s' % port for port in worker_ports]
if num_ps > 0:
cluster_dict['ps'] = ['localhost:%s' % port for port in ps_ports]
cluster_spec = tf.train.ClusterSpec(cluster_dict)
# Workers need some inter_ops threads to work properly.
worker_config = tf.compat.v1.ConfigProto()
if multiprocessing.cpu_count() < num_workers + 1:
worker_config.inter_op_parallelism_threads = num_workers + 1
for i in range(num_workers):
tf.distribute.Server(
cluster_spec,
job_name='worker',
task_index=i,
config=worker_config,
protocol='grpc')
for i in range(num_ps):
tf.distribute.Server(
cluster_spec, job_name='ps', task_index=i, protocol='grpc')
cluster_resolver = tf.distribute.cluster_resolver.SimpleClusterResolver(
cluster_spec, rpc_layer='grpc')
return cluster_resolver
def dataset_fn(input_context=None):
del input_context
def dummy_data(_):
return tf.zeros((1, 1), dtype=tf.float32)
dataset = tf.data.Dataset.range(1)
dataset = dataset.repeat()
dataset = dataset.map(
dummy_data, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
class MockAsyncTrainer(trainer_lib._AsyncTrainer):
"""Mock AsyncTrainer to test the _AsyncTrainer class."""
def __init__(self):
self._strategy = tf.distribute.get_strategy()
self.init_async()
self.global_step = tf.Variable(
0,
dtype=tf.int64,
name='global_step',
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
self.eval_global_step = tf.Variable(
0,
dtype=tf.int64,
name='eval_global_step',
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
train_dataset = self.distribute_dataset(dataset_fn)
trainer_lib.orbit.StandardTrainer.__init__(
self, train_dataset, options=trainer_lib.orbit.StandardTrainerOptions())
eval_dataset = self.distribute_dataset(dataset_fn)
trainer_lib.orbit.StandardEvaluator.__init__(
self,
eval_dataset,
options=trainer_lib.orbit.StandardEvaluatorOptions(
use_tf_while_loop=True))
def train_loop_begin(self):
self.global_step.assign(0)
def train_step(self, iterator):
def replica_step(_):
self.global_step.assign_add(1)
self._strategy.run(replica_step, args=(next(iterator),))
def train_loop_end(self):
self.join()
return self.global_step.numpy()
def eval_begin(self):
self.eval_global_step.assign(0)
def eval_step(self, iterator):
def replica_step(_):
self.eval_global_step.assign_add(1)
self._strategy.run(replica_step, args=(next(iterator),))
def eval_end(self):
self.join()
return self.eval_global_step.numpy()
class TrainerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
def create_test_trainer(self, config, model_dir=None, task=None):
task = task or mock_task.MockTask(config.task, logging_dir=model_dir)
ckpt_exporter = train_lib.maybe_create_best_ckpt_exporter(config, model_dir)
trainer = trainer_lib.Trainer(
config,
task,
model=task.build_model(),
optimizer=task.create_optimizer(config.trainer.optimizer_config,
config.runtime),
checkpoint_exporter=ckpt_exporter)
return trainer
@combinations.generate(all_strategy_combinations())
def test_trainer_train(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(self._config)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
def test_base_async_trainer(self):
if TPU_TEST or GPU_TEST:
self.skipTest('Aysnc training is not available on GPU/GPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
trainer = MockAsyncTrainer()
trainer.init_async()
self.assertIsInstance(
trainer._coordinator,
tf.distribute.experimental.coordinator.ClusterCoordinator)
self.assertEqual(trainer.train(tf.constant(10)), 10)
self.assertEqual(trainer.evaluate(tf.constant(11)), 11)
def test_async_trainer_train(self):
if TPU_TEST or GPU_TEST:
self.skipTest('Aysnc training is not available on GPU/GPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
config = cfg.ExperimentConfig(**self._config.as_dict())
config.trainer.eval_tf_while_loop = True
trainer = self.create_test_trainer(config)
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
self.assertIn('learning_rate', logs)
def test_async_trainer_validate(self):
if TPU_TEST or GPU_TEST:
self.skipTest('Aysnc training is not available on GPU/GPU.')
num_workers = 3
num_ps = 2
cluster_resolver = create_in_process_cluster(num_workers, num_ps)
distribution = tf.distribute.experimental.ParameterServerStrategy(
cluster_resolver)
with distribution.scope():
config = cfg.ExperimentConfig(**self._config.as_dict())
config.trainer.eval_tf_while_loop = True
trainer = self.create_test_trainer(config)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('acc', logs)
self.assertIn('validation_loss', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_validate(self, distribution):
with distribution.scope():
trainer = self.create_test_trainer(self._config)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync)
self.assertIn('validation_loss', logs)
@combinations.generate(all_strategy_combinations())
def test_trainer_validate_without_loss(self, distribution):
class MockTaskWithoutValidationLoss(mock_task.MockTask):
def validation_step(self, inputs, model, metrics=None):
# Disable validation loss.
logs = super().validation_step(inputs, model)
del logs[self.loss]
return logs
with distribution.scope():
task = MockTaskWithoutValidationLoss()
trainer = self.create_test_trainer(self._config, task=task)
logs = trainer.evaluate(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertEqual(logs['counter'], 5. * distribution.num_replicas_in_sync)
self.assertNotIn('validation_loss', logs)
@combinations.generate(
combinations.combine(
mixed_precision_dtype=['float32', 'bfloat16', 'float16'],
loss_scale=[None, 'dynamic', 128, 256],
))
def test_configure_optimizer(self, mixed_precision_dtype, loss_scale):
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(
mixed_precision_dtype=mixed_precision_dtype, loss_scale=loss_scale),
trainer=cfg.TrainerConfig(
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
},
})))
trainer = self.create_test_trainer(config)
if mixed_precision_dtype != 'float16':
self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
elif mixed_precision_dtype == 'float16' and loss_scale is None:
self.assertIsInstance(trainer.optimizer, tf.keras.optimizers.SGD)
else:
self.assertIsInstance(trainer.optimizer,
tf.keras.mixed_precision.LossScaleOptimizer)
metrics = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', metrics)
def test_export_best_ckpt(self):
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
best_checkpoint_export_subdir='best_ckpt',
best_checkpoint_eval_metric='acc',
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
model_dir = self.get_temp_dir()
trainer = self.create_test_trainer(config, model_dir=model_dir)
trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
trainer.evaluate(tf.convert_to_tensor(1, dtype=tf.int32))
self.assertTrue(
tf.io.gfile.exists(os.path.join(model_dir, 'best_ckpt', 'info.json')))
def test_recovery(self):
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
loss_upper_bound=0.5,
recovery_max_trials=2,
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
model_dir = self.get_temp_dir()
trainer = self.create_test_trainer(config, model_dir=model_dir)
checkpoint_manager = tf.train.CheckpointManager(
trainer.checkpoint, self.get_temp_dir(), max_to_keep=2)
checkpoint_manager.save()
trainer.add_recovery(config.trainer, checkpoint_manager=checkpoint_manager)
before_weights = trainer.model.get_weights()
_ = trainer.train(tf.convert_to_tensor(1, dtype=tf.int32))
# The training loss is 1.0 and upper_bound is 0.5, so the recover happens.
after_weights = trainer.model.get_weights()
for left, right in zip(before_weights, after_weights):
self.assertAllEqual(left, right)
# Let's the loss be NaN and max_trials = 0 to see RuntimeError.
config = cfg.ExperimentConfig(
trainer=cfg.TrainerConfig(
recovery_max_trials=0,
optimizer_config=cfg.OptimizationConfig({
'optimizer': {
'type': 'sgd'
},
'learning_rate': {
'type': 'constant'
}
})))
task = mock_task.MockTask(config.task, logging_dir=model_dir)
def build_losses(labels, model_outputs, aux_losses=None):
del labels, model_outputs
return tf.constant([np.nan], tf.float32) + aux_losses
task.build_losses = build_losses
trainer = trainer_lib.Trainer(
config,
task,
model=task.build_model(),
optimizer=task.create_optimizer(config.trainer.optimizer_config,
config.runtime))
trainer.add_recovery(config.trainer, checkpoint_manager=checkpoint_manager)
with self.assertRaises(RuntimeError):
_ = trainer.train(tf.convert_to_tensor(2, dtype=tf.int32))
def test_model_with_compiled_loss(self):
task = mock_task.MockTask()
model = task.build_model()
model.compile(loss=tf.keras.losses.CategoricalCrossentropy())
trainer = trainer_lib.Trainer(
self._config,
task,
model=model,
optimizer=task.create_optimizer(self._config.trainer.optimizer_config))
logs = trainer.train(tf.convert_to_tensor(5, dtype=tf.int32))
self.assertIn('training_loss', logs)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
28c329cdb87b9bf4ba6938afa96149d6b26b45ee | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/compareVersions_20200909131716.py | d622cfb2cc1588914ad6545a8a07d595eeab93c4 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | def compare(version1,version2):
# split where there are ,
# then loop through both of them
# if v1 > v2 return 1
# if v1 < v2 return -1
# otherwise return 0
v1 = version1.split(".")
v2 = version2.split(".")
v1 = [int(i) for i in v1]
v2= [int(i) for i in v2]
for a,b in zip(v1,v2):
if a > b:
return 1
elif a < b:
return -1
return 0
compare("0.1","1.1")
| [
"[email protected]"
] | |
323eafb63b51c67a491bec0c7b176cf634a5ceee | e2fe70d710cc50c5400a26f13281c0b9d68c772f | /module10/les1/first.py | 4a98916e3ea0617a633c0a87d29c30efb9290352 | [] | no_license | ronylitv/GOIT-Tasks | cca9d95329bbc5810fa1a2be7dc1ab16ac6efaf2 | cfeaedefc11a986bd9b146972ef576eadd851abf | refs/heads/master | 2023-07-17T05:54:17.694438 | 2021-08-30T13:15:31 | 2021-08-30T13:15:31 | 381,319,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | class Rectangle:
def __init__(self, high, width):
self.width = width
self.height = high
def perimeter(self):
return 2 * (self.height + self.width)
def square(self):
return self.height * self.width
r = Rectangle(2, 10)
print(r.perimeter())
print(r.square())
| [
"[email protected]"
] | |
64645b490d8995485103c138e3f412e789ddca2e | 017b699d94c0365e3d390bbe9549a0804fa8978c | /Ticket.py | 6319e3f93e919ca123da06534d536e82dea3256a | [] | no_license | AlchemistsLab/CovalentSupportBot | 4db0e8036ff9034b3c11fba9071c6046620b4b33 | 8204bad64dbe025dcda1c225ce2513d935e7337c | refs/heads/main | 2023-06-20T11:05:10.362695 | 2021-07-14T09:25:55 | 2021-07-14T09:25:55 | 386,060,622 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | import datetime
class Ticket():
def __init__(self, ticketnumber, userid):
self._ticketnumber = ticketnumber
self._staffmember = None
self._userid = userid
self._questions_and_response = None
self.open()
def assign(self, staff):
self._staffmember = staff
self._status = 'Handling'
def open(self):
self._status = 'Open'
self._date = datetime.datetime.now()
self._duration = None
def close(self):
self._status = 'Closed'
self._duration = datetime.datetime.now() - self._date
@property
def status(self):
return self._status
@property
def ticketnumber(self):
return self._ticketnumber
@property
def userid(self):
return self._userid
@userid.setter
def userid(self, discord_id):
self._userid = discord_id
@property
def staffid(self):
if self._staffmember == None:
return None
return self._staffmember.staffid
@property
def date(self):
return self._date
@property
def duration(self):
return self._duration
@property
def questions(self):
return self._questions_and_response
@questions.setter
def questions(self, questions):
self._questions_and_response = questions
| [
"[email protected]"
] | |
be0321b94907337ffa106f6da1095856ad39c9fa | b66c276111707d8f2444e75e66b8a4fb5b6da733 | /backend/base/urls/post_urls.py | 446387b5ec901fe4e97d6f966c27b9168c8886ca | [] | no_license | anuupadhyay/CodingHub | 27ea15faa68c7d4421c077277d85d7904656cc74 | c7aefd7ce751201fde59cfbc8f100d9d5ef7e0cc | refs/heads/master | 2023-07-06T20:01:33.701908 | 2021-08-16T06:48:00 | 2021-08-16T06:48:00 | 381,011,689 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from django.urls import path
from base.views import post_views as views
urlpatterns = [
path('', views.getPosts, name="posts"),
path('<str:pk>/', views.getPost, name="post"),
]
| [
"[email protected]"
] | |
75bf6c3354e8a55ffb564b64b2e850e75902a4ce | 3790aba36d7f266a31e1b21d11e2f238e968166b | /XGB/constants.py | a8ea2fa352ee7458a3e12676c7476570524db6be | [] | no_license | gautamverma/MLAnalysisGH | 4a6aaad21f3e9f9f9ba560011661857c92163c77 | 7c46e2ca3358367ae8cf0cdf801f43352295910e | refs/heads/master | 2020-12-04T03:42:32.339804 | 2020-05-06T19:16:50 | 2020-05-06T19:16:50 | 231,595,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | # Batch size of 10000
CHUNKSIZE = 250000
REGRESSION = 'regression'
CLASSIFICATION = 'classification'
# Default value ACTUAL SIZE 75%
TRAIN_ITERATION = 30
TRAIN_MOD_COUNT = 10
CONSTANT_FILLER = 'missing'
NUMERIC_FILLER = 0
# All Customer filler
ALL_CONSUMER = 'allCustomer'
## Data Input Keys
IBUCKET_KEY = 'input_bucket'
IPREFIX_KEY = 'input_prefix'
IRESULT_PREFIX_KEY = 'result_prefix'
IFILES_KEY = 'files'
IFOLDER_KEY = 'base_folder'
ITRAINING_FP = 'training_file_name'
INUMERICAL_COLS = 'numerical_cols'
ICATEGORICAL_COLS = 'categorical_cols'
IRESULT_COL_KEY = 'result_col'
IRESULT_FUNCTION = 'result_function'
ISTARTEGY_KEY = 'startegy'
IPARAMS_KEY = 'learning_params'
ITRAIN_ITERATIONS = 'iterations'
ICHUNKSIZE_KEY = 'chunksize'
IOBJECTIVE_KEY = 'objective_key'
IFILE_PREFIX = "file_prefix"
IMODEL_FP = "model_filepath"
IMODEL_FN = "model_filename"
IMULTIPLE_METRICS_FILES = 'multiple_metric_files'
IMULTIPLE_TRAINING_FILE = 'multiple_training_file'
NON_MARKETING_FILTERED_FILE = 'non_marketing_filtered_file'
PROD_ENVIROMENT_FILTERED_FILE = 'prod_data_filtered_file'
PROD_ENVIROMENT_NON_MA_FILTERED_FILE = 'prod_data_and_non_marketing_filtered_file'
LEARNING_TYPE = 'learning_type'
# Example data imput
# data_input = {}
# data_input[IBUCKET_KEY] = 'gautam.placement-metrics-prod'
# data_input[IFILES_KEY] = [
# 'deep-learning-ds/january/18Jan25JanPMetrics000',
# 'deep-learning-ds/january/18Jan25JanPMetadata000',
# 'deep-learning-ds/january/18Jan25JanCM000',
# 'deep-learning-ds/january/18Jan25JanPP000',
# 'deep-learning-ds/january/18Jan25JanCreative000'
# ]
# data_input[INUMERICAL_COLS] = []
# data_input[ICATEGORICAL_COLS] = []
#
# data_input[IFOLDER_KEY] = base_folder
# data_input[ITRAINING_FP] = base_folder + '/training_file'
| [
"[email protected]"
] | |
40c5ca732a65d5ae956e19fbce0b726834470f36 | 80690771010aced98d399c40eb92e109c0624076 | /Ros5E_GlobalAlignment.py | 8d880f98f02efe48f09f7ccb7f402594df104e09 | [] | no_license | ahmdeen/Bioinformatic-Algorthims | f9624a44108ce3978641c168ee30ca1a6ec6fd5d | 7cae61bb45535911f220da290f9ecf609b964058 | refs/heads/master | 2021-01-18T13:58:32.126215 | 2015-03-25T21:03:59 | 2015-03-25T21:03:59 | 27,619,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,248 | py | """
Global Alignment Problem
Find the highest-scoring alignment between two strings using a scoring matrix.
Given: Two protein strings.
Return: The maximum alignment score of these strings followed by an
alignment achieving this maximum score.
"""
import os
'''Classes'''
class blosum62(object):
'''Class to score proteins using the BLOSUM62 matrix'''
def __init__(self):
'''Initalize BLOSUM62 Matrix'''
with open(os.path.join(os.path.dirname(__file__), 'DATA/BLOSUM62.txt')) as inFile:
lines = [line.strip().split() for line in inFile.readlines()]
self.S = {(item[0], item[1]): int(item[2]) for item in lines}
def __getitem__(self, pair):
'''Returns the score of a given pair'''
return self.S[pair[0],pair[1]]
'''Functions'''
def globalAlignment(seq1, seq2, ScoreMatrix, sig):
'''Returns the global alignment of the input sequences utilizing the given ScoringMatrix and indel penalty'''
len1, len2 = len(seq1), len(seq2)
S = [[0]*(len2+1) for s in xrange(len1+1)]
backtrack = [[0]*(len2+1) for bt in xrange(len1+1)]
for i in xrange(1, len1+1):
S[i][0] = -i*sig
for j in xrange(1, len2+1):
S[0][j] = -j*sig
for i in xrange(1, len1+1):
for j in xrange(1, len2+1):
scoreList = [S[i-1][j] - sig, S[i][j-1] - sig, S[i-1][j-1] + ScoreMatrix[seq1[i-1], seq2[j-1]]]
S[i][j] = max(scoreList)
backtrack[i][j] = scoreList.index(S[i][j])
#-----
indelInsertFcn = lambda seq, i: seq[:i] + '-' + seq[i:]
#-----
align1, align2 = seq1, seq2
a, b = len1, len2
maxScore = str(S[a][b])
while a*b != 0:
if backtrack[a][b] == 0:
a -= 1
align2 = indelInsertFcn(align2, b)
elif backtrack[a][b] == 1:
b -= 1
align1 = indelInsertFcn(align1, a)
else:
a -= 1
b -= 1
for i in xrange(a):
align2 = indelInsertFcn(align2, 0)
for j in xrange(b):
align1 = indelInsertFcn(align1, 0)
return maxScore, align1, align2
'''Input/Output'''
"""
seq1 = 'PLEASANTLY'
seq2 = 'MEANLY'
"""
with open('Data/rosalind_5e.txt') as infile:
seq1, seq2 = [line.strip() for line in infile.readlines()]
indelPenalty = 5
alignment = globalAlignment(seq1,seq2, blosum62(), indelPenalty)
answer = '\n'.join(alignment)
print answer
with open('Ros5E_Answer.txt', 'w') as outfile:
outfile.write(answer)
| [
"[email protected]"
] | |
cae9000152e64553d4650ba35f9b7dbeed67727c | f2a0cbbe779ce69743757d95e3bba39c8c10fc78 | /中国诗词大会/txt导入数据库.py | 8fbad9dbc6ebaea9f1552d0f3cd8611d4075a6da | [] | no_license | xffffffffffff/nickYang | 43077a214ed550bb0e2fd7eb41196883551b1909 | dfbe3942babd7843e159a9e2b569c975a93bb34c | refs/heads/master | 2022-08-04T07:40:17.978237 | 2020-05-25T04:58:35 | 2020-05-25T04:58:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | import sys
import re
import os,time
import pymysql
conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='newbegin',db='lsj',charset='utf8')
cur = conn.cursor(cursor=pymysql.cursors.DictCursor)
class WordCreate():
def __init__(self,path):
'''
导入文件进行解析
'''
with open(path,encoding="utf-8") as file:
content = file.read()
x = re.findall(r'[(|(](.*?)[)|)]', content)
print(x)
for i in x:
zifu = '('+i+')'
content = content.replace(zifu,'')
content = re.split(r'\d', content)
content = [i for i in content if i != '']
self.sc_list = []
for i in content:
weak_list = re.split(r'[、。;!?\n\s*]',i)
weak_list = [i for i in weak_list if i != '']
self.sc_list.append(weak_list)
# print(self.sc_list)
def OperationSql(self):
num=130
for i in self.sc_list:
for _ in range(62-len(i)): #导入诗词为62 导入赏析为61
i.append('NULL')
# print(len(i))
num2 = 1
for _ in range(61):
sql = "UPDATE poetry SET yiwen_{}='{}' WHERE id ={}".format(num2, i[num2-1], num)
print(sql)
cur.execute(sql)
num2 += 1
# for _ in range(61):
# sql = "UPDATE poetry SET sentense_{}='{}' WHERE id ={}".format(num2, i[num2], num)
# print(sql)
# cur.execute(sql)
# num2 += 1
conn.commit()
num+=1
def main():
generator = WordCreate('D:\桌面\诗词库-2-赏析.txt') #选择合适的路径
generator.OperationSql()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
40eca71d9ce52576b62b9e0810fe6b9f28fd475f | dd045d8cfe5acb54c23ef6d5c677333a596f15ea | /visAnalytics/urls.py | 64e92f761f64bbbf10e9b16fd6825871c6ebc880 | [] | no_license | Septien/VA_WebService | e1007c7b6bda3c02c7830bb87b1a9ded71a42060 | cd54f3c163d57fa51f320e6f735a4d26f133761a | refs/heads/master | 2020-03-21T20:53:07.738341 | 2018-05-28T16:00:11 | 2018-05-28T16:00:11 | 139,034,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from django.urls import path
from . import views
urlpatterns = [
#https://stackoverflow.com/questions/3711349/django-and-query-string-parameters
# The main entry point
path('', views.index),
# For requesting a script
path('script', views.getScript),
# For teh ||-coord
path(r'pcoordinates', views.parallelCoordinatesHandler),
# For the histogram
path(r"histogram", views.HistogramHandler),
]
| [
"[email protected]"
] | |
80b6dd5d4f076bb909efb9384c58ede6d4315e41 | 38d8d88063bd44c34c5c448eafbb343632fe9dfd | /PythonNumpy/array.py | c073f7fb7e70556cd26e991a9d824cee6dee92a5 | [] | no_license | ChocolateOriented/mechainLearning | 3d1af54e898fc7eb49e1cb94c04e5c22c48458ff | d9242bd1c2552917b0f1c61bb7d97e56eb379fa8 | refs/heads/master | 2020-03-11T02:23:38.557891 | 2018-04-20T10:56:12 | 2018-04-20T10:56:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | import numpy as np
randMat = np.mat(np.random.rand(4,4));
invRandMat = randMat.I
print(randMat * invRandMat)
np.tile() | [
"[email protected]"
] | |
16c159d6f2c02f6dc602a3c379faeef88a8d5337 | b032efc400f540d31e1728df5817c9779d036387 | /venv/bin/python-config | b6ea19279a56f966b4146179e7d7899347706819 | [] | no_license | mathv96/bebidas | 718f480408a92b00a46559d5cb87084d8b5dac7b | 4a469af1373995d6946a95c99907f49f5af38191 | refs/heads/master | 2021-01-20T21:46:23.738484 | 2017-08-29T21:55:45 | 2017-08-29T21:55:45 | 101,790,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | #!/home/matheus96/Documentos/FATEC/Lab.Eng/bebidas/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"[email protected]"
] | ||
aebaec9b986b01f8e3d1909282c5c696ec7684f6 | c2964a5777ab2add5e5c7dddabcd9ee0555482e7 | /dash-2019-coronavirus/history_version/app_replaced_20200326.py | 770a3e978bcb36c823a998bca156acc2732e45bb | [
"MIT"
] | permissive | lizeyujack/data-visualisation-scripts | b3fdf7e175307493fc6b23e79b63fc42a34cd6a6 | d6af7f9a141331378c02417267c4a5f3e114f711 | refs/heads/master | 2022-04-22T11:12:26.412377 | 2020-04-25T02:04:20 | 2020-04-25T02:04:20 | 258,697,271 | 1 | 0 | MIT | 2020-04-25T05:45:56 | 2020-04-25T05:45:55 | null | UTF-8 | Python | false | false | 84,070 | py | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import math
import os
import base64
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
###################################
# Private function
###################################
def make_country_table(countryName):
    '''Build the Province/State summary table for a single country.

    Filters the module-level ``df_latest`` down to one country, derives the
    active-case count, and returns the rows sorted for display in the app.
    '''
    table = df_latest.loc[df_latest['Country/Region'] == countryName]
    # Temporarily silence SettingWithCopyWarning while deriving columns on the slice
    pd.options.mode.chained_assignment = None
    table['Remaining'] = table['Confirmed'] - table['Recovered'] - table['Deaths']
    keep_cols = ['Province/State', 'Remaining', 'Confirmed', 'Recovered', 'Deaths', 'lat', 'lon']
    table = (table[keep_cols]
             .sort_values(by=['Remaining', 'Confirmed'], ascending=False)
             .reset_index(drop=True))
    # Mirror the display name into the index so Dash can report selected_row_ids
    table['id'] = table['Province/State']
    table.set_index('id', inplace=True, drop=False)
    # Restore the default pandas warning behaviour
    pd.options.mode.chained_assignment = 'warn'
    return table
def make_europe_table(europe_list):
    '''Build the per-country summary table for a list of European countries.

    Same shape as :func:`make_country_table`, but keyed on ``Country/Region``
    and filtered with an ``isin`` membership test over *europe_list*.
    '''
    table = df_latest.loc[df_latest['Country/Region'].isin(europe_list)]
    # Temporarily silence SettingWithCopyWarning while deriving columns on the slice
    pd.options.mode.chained_assignment = None
    table['Remaining'] = table['Confirmed'] - table['Recovered'] - table['Deaths']
    keep_cols = ['Country/Region', 'Remaining', 'Confirmed', 'Recovered', 'Deaths', 'lat', 'lon']
    table = (table[keep_cols]
             .sort_values(by=['Remaining', 'Confirmed'], ascending=False)
             .reset_index(drop=True))
    # Mirror the country name into the index so Dash can report selected_row_ids
    table['id'] = table['Country/Region']
    table.set_index('id', inplace=True, drop=False)
    # Restore the default pandas warning behaviour
    pd.options.mode.chained_assignment = 'warn'
    return table
def make_dcc_country_tab(countryName, dataframe):
    '''Build a dcc.Tab wrapping an interactive DataTable for one country/region.

    The first five columns of *dataframe* are shown; the lat/lon columns are
    kept in the table data (for map interactivity) but not displayed.
    '''
    # Per-column width/colour tweaks; text is centred everywhere.
    conditional_styles = [
        {'if': {'column_id': 'Province/State'}, 'width': '28%'},
        {'if': {'column_id': 'Country/Region'}, 'width': '28%'},
        {'if': {'column_id': 'Remaining'}, 'width': '18%'},
        {'if': {'column_id': 'Confirmed'}, 'width': '18%'},
        {'if': {'column_id': 'Recovered'}, 'width': '18%'},
        {'if': {'column_id': 'Deaths'}, 'width': '18%'},
        {'if': {'column_id': 'Remaining'}, 'color': '#e36209'},
        {'if': {'column_id': 'Confirmed'}, 'color': '#d7191c'},
        {'if': {'column_id': 'Recovered'}, 'color': '#1a9622'},
        {'if': {'column_id': 'Deaths'}, 'color': '#6c6c6c'},
        {'textAlign': 'center'},
    ]
    table = dash_table.DataTable(
        id='datatable-interact-location-{}'.format(countryName),
        # Only the first five columns are rendered; coordinates stay hidden
        columns=[{"name": i, "id": i} for i in dataframe.columns[0:5]],
        # Coordinates are still stored in the table data for interactivity
        data=dataframe.to_dict("rows"),
        # The Schengen tab aggregates countries, so row selection is disabled there
        row_selectable=False if countryName == 'Schengen' else "single",
        sort_action="native",
        style_as_list_view=True,
        style_cell={'font_family': 'Arial',
                    'font_size': '1.2rem',
                    'padding': '.1rem',
                    'backgroundColor': '#f4f4f2', },
        fixed_rows={'headers': True, 'data': 0},
        style_table={'minHeight': '800px',
                     'height': '800px',
                     'maxHeight': '800px'},
        style_header={'backgroundColor': '#f4f4f2',
                      'fontWeight': 'bold'},
        style_cell_conditional=conditional_styles,
    )
    return dcc.Tab(label=countryName,
                   value=countryName,
                   className='custom-tab',
                   selected_className='custom-tab--selected',
                   children=[table])
################################################################################
# Data processing
################################################################################
# Method #1
# Import csv file and store each csv in to a df list
# NOTE all following steps really rely on the correct order of these csv files in folder raw_data
filename = os.listdir('./raw_data/')
# Keep only the per-date snapshot csvs (names not containing 'data'), extension stripped
sheet_name = [i.replace('.csv', '')
              for i in filename if 'data' not in i and i.endswith('.csv')]
# Reverse sort so the most recent snapshot name comes first (used as sheet_name[0] below)
sheet_name.sort(reverse=True)
# dfs = {sheet_name: pd.read_csv('./raw_data/{}.csv'.format(sheet_name))
#        for sheet_name in sheet_name}
# Method #2
# Import xls file and store each sheet in to a df list
# xl_file = pd.ExcelFile('./data.xls')
# dfs = {sheet_name: xl_file.parse(sheet_name)
#          for sheet_name in xl_file.sheet_names}
# Data from each sheet can be accessed via key
# keyList = list(dfs.keys())
# Data cleansing
# for key, df in dfs.items():
#     dfs[key].loc[:,'Confirmed'].fillna(value=0, inplace=True)
#     dfs[key].loc[:,'Deaths'].fillna(value=0, inplace=True)
#     dfs[key].loc[:,'Recovered'].fillna(value=0, inplace=True)
#     dfs[key]=dfs[key].astype({'Confirmed':'int64', 'Deaths':'int64', 'Recovered':'int64'})
#     # Change as China for coordinate search
#     dfs[key]=dfs[key].replace({'Country/Region':'Mainland China'}, 'China')
#     # Add a zero to the date so can be convert by datetime.strptime as 0-padded date
#     dfs[key]['Last Update'] = '0' + dfs[key]['Last Update']
#     # Convert time as Australian eastern daylight time
#     dfs[key]['Date_last_updated_AEDT'] = [datetime.strptime(d, '%m/%d/%Y %H:%M') for d in dfs[key]['Last Update']]
#     dfs[key]['Date_last_updated_AEDT'] = dfs[key]['Date_last_updated_AEDT'] + timedelta(hours=16)
#     #dfs[key]['Remaining'] = dfs[key]['Confirmed'] - dfs[key]['Recovered'] - dfs[key]['Deaths']
# Add coordinates for each area in the list for the latest table sheet
# To save time, coordinates calling was done seperately
# Import the data with coordinates
df_latest = pd.read_csv('{}_data.csv'.format(sheet_name[0]))
df_latest = df_latest.astype({'Date_last_updated_AEDT': 'datetime64'})
# Save numbers into variables to use in the app
confirmedCases = df_latest['Confirmed'].sum()
deathsCases = df_latest['Deaths'].sum()
recoveredCases = df_latest['Recovered'].sum()
# Construct confirmed cases dataframe for line plot and 24-hour window case difference
# NOTE(review): row 0 of these lineplot csvs is assumed to hold the latest day — confirm upstream
df_confirmed = pd.read_csv('./lineplot_data/df_confirmed.csv')
df_confirmed = df_confirmed.astype({'Date': 'datetime64'})
plusConfirmedNum = df_confirmed['plusNum'][0]
plusPercentNum1 = df_confirmed['plusPercentNum'][0]
# Construct recovered cases dataframe for line plot and 24-hour window case difference
df_recovered = pd.read_csv('./lineplot_data/df_recovered.csv')
df_recovered = df_recovered.astype({'Date': 'datetime64'})
plusRecoveredNum = df_recovered['plusNum'][0]
plusPercentNum2 = df_recovered['plusPercentNum'][0]
# Construct death case dataframe for line plot and 24-hour window case difference
df_deaths = pd.read_csv('./lineplot_data/df_deaths.csv')
df_deaths = df_deaths.astype({'Date': 'datetime64'})
plusDeathNum = df_deaths['plusNum'][0]
plusPercentNum3 = df_deaths['plusPercentNum'][0]
# Construct remaining case dataframe for line plot and 24-hour window case difference
df_remaining = pd.read_csv('./lineplot_data/df_remaining.csv')
df_remaining = df_remaining.astype({'Date': 'datetime64'})
plusRemainNum = df_remaining['plusNum'][0]
plusRemainNum3 = df_remaining['plusPercentNum'][0]
# Create data table to show in app
# Generate sum values for Country/Region level
dfCase = df_latest.groupby(by='Country/Region', sort=False).sum().reset_index()
dfCase = dfCase.sort_values(
    by=['Confirmed'], ascending=False).reset_index(drop=True)
# As lat and lon also underwent sum(), which is not desired, remove from this table.
dfCase = dfCase.drop(columns=['lat', 'lon'])
# Grep lat and lon by the first instance to represent its Country/Region
dfGPS = df_latest.groupby(
    by='Country/Region', sort=False).first().reset_index()
dfGPS = dfGPS[['Country/Region', 'lat', 'lon']]
# Merge two dataframes
dfSum = pd.merge(dfCase, dfGPS, how='inner', on='Country/Region')
dfSum = dfSum.replace({'Country/Region': 'China'}, 'Mainland China')
dfSum['Remaining'] = dfSum['Confirmed'] - dfSum['Recovered'] - dfSum['Deaths']
# Rearrange columns to correspond to the number plate order
dfSum = dfSum[['Country/Region', 'Remaining',
               'Confirmed', 'Recovered', 'Deaths', 'lat', 'lon']]
# Sort value based on Remaining cases and then Confirmed cases
dfSum = dfSum.sort_values(
    by=['Remaining', 'Confirmed'], ascending=False).reset_index(drop=True)
# Set row ids pass to selected_row_ids
dfSum['id'] = dfSum['Country/Region']
dfSum.set_index('id', inplace=True, drop=False)
# Create tables for tabs
CNTable = make_country_table('China')
AUSTable = make_country_table('Australia')
USTable = make_country_table('US')
CANTable = make_country_table('Canada')
europe_list = ['Austria', 'Belgium', 'Czechia', 'Denmark', 'Estonia',
               'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Iceland',
               'Italy', 'Latvia', 'Liechtenstein', 'Lithuania', 'Luxembourg',
               'Malta', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Slovakia',
               'Slovenia', 'Spain', 'Sweden', 'Switzerland']
EuroTable = make_europe_table(europe_list)
# Remove dummy row of recovered case number in USTable
USTable = USTable.dropna(subset=['Province/State'])
# Remove dummy row of recovered case number in CANTable
CANTable = CANTable.dropna(subset=['Province/State'])
# Save numbers into variables to use in the app
latestDate = datetime.strftime(df_confirmed['Date'][0], '%b %d, %Y %H:%M AEDT')
secondLastDate = datetime.strftime(df_confirmed['Date'][1], '%b %d')
# Days elapsed since Dec 31, 2019, when the outbreak was first reported
daysOutbreak = (df_confirmed['Date'][0] - datetime.strptime('12/31/2019', '%m/%d/%Y')).days
# Read cumulative data of a given region from ./cumulative_data folder
dfs_curve = pd.read_csv('./lineplot_data/dfs_curve.csv')
# Pseduo data for logplot: reference exponential curves at fixed daily growth rates
pseduoDay = np.arange(1, daysOutbreak+1)
y1 = 100*(1.85)**(pseduoDay-1) # 85% growth rate
y2 = 100*(1.35)**(pseduoDay-1) # 35% growth rate
y3 = 100*(1.15)**(pseduoDay-1) # 15% growth rate
y4 = 100*(1.05)**(pseduoDay-1) # 5% growth rate
#############################################################################################
# Start to make plots
#############################################################################################
# Line plot for confirmed cases
# Set up tick scale based on confirmed case number
#tickList = np.arange(0, df_confirmed['Other locations'].max()+5000, 30000)
# Create empty figure canvas
fig_confirmed = go.Figure()
# Add trace to the figure: cumulative confirmed cases in Mainland China (dark red line)
# NOTE: the '{:,d}' hover formatting requires integer case counts in the csv
fig_confirmed.add_trace(go.Scatter(x=df_confirmed['Date'], y=df_confirmed['Mainland China'],
                                   mode='lines+markers',
                                   line_shape='spline',
                                   name='Mainland China',
                                   line=dict(color='#921113', width=4),
                                   marker=dict(size=4, color='#f4f4f2',
                                               line=dict(width=1, color='#921113')),
                                   text=[datetime.strftime(
                                       d, '%b %d %Y AEDT') for d in df_confirmed['Date']],
                                   hovertext=['Mainland China confirmed<br>{:,d} cases<br>'.format(
                                       i) for i in df_confirmed['Mainland China']],
                                   hovertemplate='<b>%{text}</b><br></br>' +
                                                 '%{hovertext}' +
                                                 '<extra></extra>'))
# Second trace: cumulative confirmed cases everywhere outside Mainland China (light red line)
fig_confirmed.add_trace(go.Scatter(x=df_confirmed['Date'], y=df_confirmed['Other locations'],
                                   mode='lines+markers',
                                   line_shape='spline',
                                   name='Other Region',
                                   line=dict(color='#eb5254', width=4),
                                   marker=dict(size=4, color='#f4f4f2',
                                               line=dict(width=1, color='#eb5254')),
                                   text=[datetime.strftime(
                                       d, '%b %d %Y AEDT') for d in df_confirmed['Date']],
                                   hovertext=['Other region confirmed<br>{:,d} cases<br>'.format(
                                       i) for i in df_confirmed['Other locations']],
                                   hovertemplate='<b>%{text}</b><br></br>' +
                                                 '%{hovertext}' +
                                                 '<extra></extra>'))
# Customise layout
fig_confirmed.update_layout(
# title=dict(
# text="<b>Confirmed Cases Timeline<b>",
# y=0.96, x=0.5, xanchor='center', yanchor='top',
# font=dict(size=20, color="#292929", family="Playfair Display")
# ),
margin=go.layout.Margin(
l=10,
r=10,
b=10,
t=5,
pad=0
),
yaxis=dict(
showline=False, linecolor='#272e3e',
zeroline=False,
# showgrid=False,
gridcolor='rgba(203, 210, 211,.3)',
gridwidth=.1,
#tickmode='array',
# Set tick range based on the maximum number
#tickvals=tickList,
# Set tick label accordingly
#ticktext=["{:.0f}k".format(i/1000) for i in tickList]
),
# yaxis_title="Total Confirmed Case Number",
xaxis=dict(
showline=False, linecolor='#272e3e',
showgrid=False,
gridcolor='rgba(203, 210, 211,.3)',
gridwidth=.1,
zeroline=False
),
xaxis_tickformat='%b %d',
hovermode='x',
legend_orientation="h",
# legend=dict(x=.02, y=.95, bgcolor="rgba(0,0,0,0)",),
plot_bgcolor='#f4f4f2',
paper_bgcolor='#cbd2d3',
font=dict(color='#292929', size=10)
)
# Line plot combining recovered / death / remaining case totals over time.
#tickList = np.arange(0, df_remaining['Total'].max()+10000, 30000)
fig_combine = go.Figure()
# One trace per series; each trace draws the 'Total' column of its own frame.
# BUGFIX: the remaining-cases trace previously built its hover timestamps from
# df_deaths['Date'] even though its x axis uses df_remaining['Date']; every
# trace now derives both x values and hover text from the same frame.
for frame, trace_name, colour, hover_label in (
        (df_recovered, 'Total Recovered Cases', '#168038', 'Total recovered'),
        (df_deaths, 'Total Death Cases', '#626262', 'Total death'),
        (df_remaining, 'Total Remaining Cases', '#e36209', 'Total remaining')):
    fig_combine.add_trace(go.Scatter(
        x=frame['Date'],
        y=frame['Total'],
        mode='lines+markers',
        line_shape='spline',
        name=trace_name,
        line=dict(color=colour, width=4),
        marker=dict(size=4, color='#f4f4f2',
                    line=dict(width=1, color=colour)),
        text=[datetime.strftime(d, '%b %d %Y AEDT') for d in frame['Date']],
        hovertext=['{}<br>{:,d} cases<br>'.format(hover_label, i)
                   for i in frame['Total']],
        hovertemplate='<b>%{text}</b><br></br>' +
                      '%{hovertext}' +
                      '<extra></extra>'))
# Customise layout: same compact, theme-matched styling as fig_confirmed.
fig_combine.update_layout(
    margin=go.layout.Margin(
        l=10,
        r=10,
        b=10,
        t=5,
        pad=0
    ),
    yaxis=dict(
        showline=False, linecolor='#272e3e',
        zeroline=False,
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
        #tickmode='array',
        #tickvals=tickList,
        #ticktext=["{:.0f}k".format(i/1000) for i in tickList]
    ),
    xaxis=dict(
        showline=False, linecolor='#272e3e',
        showgrid=False,
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
        zeroline=False
    ),
    xaxis_tickformat='%b %d',
    hovermode='x',
    legend_orientation="h",
    # legend=dict(x=.02, y=.95, bgcolor="rgba(0,0,0,0)",),
    plot_bgcolor='#f4f4f2',
    paper_bgcolor='#cbd2d3',
    font=dict(color='#292929', size=10)
)
# Line plot of the death rate (%) over time, per region.
# Tick scale is derived from the "Other locations" rate maximum.
tickList = np.arange(0, (df_deaths['Other locations']/df_confirmed['Other locations']*100).max()+0.5, 0.5)
fig_rate = go.Figure()
# Death rate (%) = cumulative deaths / cumulative confirmed * 100; the two
# traces differ only in source column, colour and labels, so loop over a spec.
for col, trace_name, colour, hover_label in (
        ('Mainland China', 'Mainland China', '#626262', 'Mainland China death rate'),
        ('Other locations', 'Other Region', '#a7a7a7', 'Other region death rate')):
    rate_series = df_deaths[col] / df_confirmed[col] * 100
    fig_rate.add_trace(go.Scatter(
        x=df_deaths['Date'],
        y=rate_series,
        mode='lines+markers',
        line_shape='spline',
        name=trace_name,
        line=dict(color=colour, width=4),
        marker=dict(size=4, color='#f4f4f2',
                    line=dict(width=1, color=colour)),
        text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_deaths['Date']],
        hovertext=['{}<br>{:.2f}%'.format(hover_label, i) for i in rate_series],
        hovertemplate='<b>%{text}</b><br></br>' +
                      '%{hovertext}' +
                      '<extra></extra>'))
# Customise layout; unlike the other timelines this one uses explicit ticks.
fig_rate.update_layout(
    margin=go.layout.Margin(
        l=10,
        r=10,
        b=10,
        t=5,
        pad=0
    ),
    yaxis=dict(
        showline=False, linecolor='#272e3e',
        zeroline=False,
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
        tickmode='array',
        # Set tick range based on the maximum number
        tickvals=tickList,
        # Set tick label accordingly
        ticktext=['{:.1f}'.format(i) for i in tickList]
    ),
    xaxis=dict(
        showline=False, linecolor='#272e3e',
        showgrid=False,
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
        zeroline=False
    ),
    xaxis_tickformat='%b %d',
    hovermode='x',
    legend_orientation="h",
    # legend=dict(x=.02, y=.95, bgcolor="rgba(0,0,0,0)",),
    plot_bgcolor='#f4f4f2',
    paper_bgcolor='#cbd2d3',
    font=dict(color='#292929', size=10)
)
# Default cumulative plot for tab.
# The canvas starts empty; the 'datatable-interact-lineplot' callback (defined
# below) fills it in when a table row is selected. The commented-out trace
# code is the previous static "The World" default, kept for reference.
# Default plot is an empty canvas
#df_region_tab = pd.read_csv('./cumulative_data/{}.csv'.format('The World'))
#df_region_tab = df_region_tab.astype({'Date_last_updated_AEDT': 'datetime64', 'date_day': 'datetime64'})
# Create empty figure canvas
fig_cumulative_tab = go.Figure()
# Add trace to the figure
#fig_cumulative_tab.add_trace(go.Scatter(x=df_region_tab['date_day'],
#                                        y=df_region_tab['Confirmed'],
#                                        mode='lines+markers',
#                                        # line_shape='spline',
#                                        name='Confirmed case',
#                                        line=dict(color='#d7191c', width=2),
#                                        # marker=dict(size=4, color='#f4f4f2',
#                                        #            line=dict(width=1,color='#921113')),
#                                        text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region_tab['date_day']],
#                                        hovertext=['{} Confirmed<br>{:,d} cases<br>'.format('The World', i) for i in df_region_tab['Confirmed']],
#                                        hovertemplate='<b>%{text}</b><br></br>' +
#                                                      '%{hovertext}' +
#                                                      '<extra></extra>'))
#fig_cumulative_tab.add_trace(go.Scatter(x=df_region_tab['date_day'],
#                                        y=df_region_tab['Recovered'],
#                                        mode='lines+markers',
#                                        # line_shape='spline',
#                                        name='Recovered case',
#                                        line=dict(color='#1a9622', width=2),
#                                        # marker=dict(size=4, color='#f4f4f2',
#                                        #            line=dict(width=1,color='#168038')),
#                                        text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region_tab['date_day']],
#                                        hovertext=['{} Recovered<br>{:,d} cases<br>'.format('The World', i) for i in df_region_tab['Recovered']],
#                                        hovertemplate='<b>%{text}</b><br></br>' +
#                                                      '%{hovertext}' +
#                                                      '<extra></extra>'))
#fig_cumulative_tab.add_trace(go.Scatter(x=df_region_tab['date_day'],
#                                        y=df_region_tab['Deaths'],
#                                        mode='lines+markers',
#                                        # line_shape='spline',
#                                        name='Death case',
#                                        line=dict(color='#626262', width=2),
#                                        # marker=dict(size=4, color='#f4f4f2',
#                                        #            line=dict(width=1,color='#626262')),
#                                        text=[datetime.strftime(d, '%b %d %Y AEDT') for d in df_region_tab['date_day']],
#                                        hovertext=['{} Deaths<br>{:,d} cases<br>'.format('The World', i) for i in df_region_tab['Deaths']],
#                                        hovertemplate='<b>%{text}</b><br></br>' +
#                                                      '%{hovertext}' +
#                                                      '<extra></extra>'))
# Customise layout (axis titles double as user instructions while empty)
fig_cumulative_tab.update_layout(
    margin=go.layout.Margin(
        l=10,
        r=10,
        b=10,
        t=5,
        pad=0
    ),
    # annotations=[
    #     dict(
    #         x=.5,
    #         y=.4,
    #         xref="paper",
    #         yref="paper",
    #         text='The World',
    #         opacity=0.5,
    #         font=dict(family='Arial, sans-serif',
    #                   size=60,
    #                   color="grey"),
    #     )
    # ],
    yaxis_title="Cumulative cases numbers",
    yaxis=dict(
        showline=False, linecolor='#272e3e',
        zeroline=False,
        # showgrid=False,
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
        tickmode='array',
        # Set tick range based on the maximum number
        # tickvals=tickList,
        # Set tick label accordingly
        # ticktext=["{:.0f}k".format(i/1000) for i in tickList]
    ),
    xaxis_title="Select A Location From Table",
    xaxis=dict(
        showline=False, linecolor='#272e3e',
        showgrid=False,
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
        zeroline=False
    ),
    xaxis_tickformat='%b %d',
    # transition = {'duration':500},
    hovermode='x',
    legend_orientation="h",
    legend=dict(x=.02, y=.95, bgcolor="rgba(0,0,0,0)",),
    plot_bgcolor='#f4f4f2',
    paper_bgcolor='#cbd2d3',
    font=dict(color='#292929', size=10)
)
# Default curve plot for tab: log-scale confirmed-case trajectories
# (days since the 100th case) with constant-growth reference lines.
fig_curve_tab = go.Figure()
# Dotted reference lines: 100 cases compounding at a fixed daily growth rate.
# y1..y4 are precomputed above over the pseduoDay axis.
for ref_curve, ref_label in ((y1, '85% growth rate'),
                             (y2, '35% growth rate'),
                             (y3, '15% growth rate'),
                             (y4, '5% growth rate')):
    fig_curve_tab.add_trace(go.Scatter(x=pseduoDay,
                                       y=ref_curve,
                                       line=dict(color='rgba(0, 0, 0, .3)', width=1, dash='dot'),
                                       text=[ref_label] * len(pseduoDay),
                                       hovertemplate='<b>%{text}</b><br>' +
                                                     '<extra></extra>'
                                       )
                            )
# For each highlighted region: a faint trajectory line plus a dot marking the
# first row of its curve data.
for region in ['The World', 'China', 'Japan', 'Italy', 'South Korea', 'US']:
    region_data = dfs_curve.loc[dfs_curve['Region'] == region]
    fig_curve_tab.add_trace(go.Scatter(x=region_data['DayElapsed'],
                                       y=region_data['Confirmed'],
                                       mode='lines',
                                       line_shape='spline',
                                       name=region,
                                       opacity=0.3,
                                       line=dict(color='#636363', width=1.5),
                                       text=list(region_data['Region']),
                                       hovertemplate='<b>%{text}</b><br>' +
                                                     '<br>%{x} days after 100 cases<br>' +
                                                     'with %{y:,d} cases<br>'
                                                     '<extra></extra>'
                                       )
                            )
    fig_curve_tab.add_trace(go.Scatter(x=[region_data['DayElapsed'].iloc[0]],
                                       y=[region_data['Confirmed'].iloc[0]],
                                       mode='markers',
                                       marker=dict(size=6, color='#636363',
                                                   line=dict(width=1, color='#636363')),
                                       opacity=0.5,
                                       text=[region],
                                       hovertemplate='<b>%{text}</b><br>' +
                                                     '<br>%{x} days after 100 cases<br>' +
                                                     'with %{y:,d} cases<br>'
                                                     '<extra></extra>'
                                       )
                            )
# Customise layout: clip the axes and use a log y scale.
fig_curve_tab.update_xaxes(range=[0, daysOutbreak-19])
fig_curve_tab.update_yaxes(range=[1.9, 6])
fig_curve_tab.update_layout(
    xaxis_title="Number of day since 100th confirmed cases",
    yaxis_title="Confirmed cases (Logarithmic)",
    margin=go.layout.Margin(
        l=10,
        r=10,
        b=10,
        t=5,
        pad=0
    ),
    #annotations=[dict(
    #    x=.5,
    #    y=.4,
    #    xref="paper",
    #    yref="paper",
    #    text=dfSum['Country/Region'][0] if dfSum['Country/Region'][0] in set(dfs_curve['Region']) else "Not over 100 cases",
    #    opacity=0.5,
    #    font=dict(family='Arial, sans-serif',
    #              size=60,
    #              color="grey"),
    #    )
    #],
    yaxis_type="log",
    yaxis=dict(
        showline=False,
        linecolor='#272e3e',
        zeroline=False,
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
    ),
    xaxis=dict(
        showline=False,
        linecolor='#272e3e',
        gridcolor='rgba(203, 210, 211,.3)',
        gridwidth=.1,
        zeroline=False
    ),
    showlegend=False,
    # hovermode = 'x',
    plot_bgcolor='#f4f4f2',
    paper_bgcolor='#cbd2d3',
    font=dict(color='#292929', size=10)
)
##################################################################################################
# Start dash app
##################################################################################################
app = dash.Dash(__name__,
assets_folder='./assets/',
meta_tags=[
{"name": "author", "content": "Jun Ye"},
{"name": "description", "content": "The coronavirus COVID-19 monitor/dashboard provides up-to-date data and map for the global spread of coronavirus. In the meanwile, please keep calm, stay home and wash your hand!"},
{"property": "og:title",
"content": "Coronavirus COVID-19 Outbreak Global Cases Monitor Dashboard"},
{"property": "og:type", "content": "website"},
{"property": "og:image", "content": "https://junye0798.com/post/build-a-dashboard-to-track-the-spread-of-coronavirus-using-dash/featured_hu031431b9019186307c923e911320563b_1304417_1200x0_resize_lanczos_2.png"},
{"property": "og:url",
"content": "https://dash-coronavirus-2020.herokuapp.com/"},
{"property": "og:description",
"content": "The coronavirus COVID-19 monitor/dashboard provides up-to-date data and map for the global spread of coronavirus. In the meanwile, please keep calm, stay home and wash your hand!"},
{"name": "twitter:card", "content": "summary_large_image"},
{"name": "twitter:site", "content": "@perishleaf"},
{"name": "twitter:title",
"content": "Coronavirus COVID-19 Outbreak Global Cases Monitor Dashboard"},
{"name": "twitter:description",
"content": "The coronavirus COVID-19 monitor/dashboard provides up-to-date data and map for the global spread of coronavirus. In the meanwile, please keep calm, stay home and wash your hand!"},
{"name": "twitter:image", "content": "https://junye0798.com/post/build-a-dashboard-to-track-the-spread-of-coronavirus-using-dash/featured_hu031431b9019186307c923e911320563b_1304417_1200x0_resize_lanczos_2.png"},
{"name": "viewport",
"content": "width=device-width, height=device-height, initial-scale=1.0"}
]
)
image_filename = './assets/TypeHuman.png'
encoded_image = base64.b64encode(open(image_filename, 'rb').read())
app.title = 'Coronavirus COVID-19 Global Monitor'
# Section for Google annlytic and donation #
app.index_string = """<!DOCTYPE html>
<html>
<head>
<script data-name="BMC-Widget" src="https://cdnjs.buymeacoffee.com/1.0.0/widget.prod.min.js" data-id="qPsBJAV" data-description="Support the app server for running!" data-message="Please support the app server for running!" data-color="#FF813F" data-position="right" data-x_margin="18" data-y_margin="25"></script>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-154901818-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-154901818-2');
</script>
{%metas%}
<title>{%title%}</title>
{%favicon%}
{%css%}
<script type='text/javascript' src='https://platform-api.sharethis.com/js/sharethis.js#property=5e5e32508d3a3d0019ee3ecb&product=sticky-share-buttons&cms=website' async='async'></script>
</head>
<body>
{%app_entry%}
<footer>
{%config%}
{%scripts%}
{%renderer%}
</footer>
</body>
</html>"""
server = app.server
app.config['suppress_callback_exceptions'] = True
app.layout = html.Div(style={'backgroundColor': '#f4f4f2'},
children=[
html.Div(
id="header",
children=[
html.H4(
children="Coronavirus (COVID-19) Outbreak Global Cases Monitor"),
html.P(
id="description",
children="On Dec 31, 2019, the World Health Organization (WHO) was informed of \
an outbreak of “pneumonia of unknown cause” detected in Wuhan City, Hubei Province, China – the \
seventh-largest city in China with 11 million residents. As of {}, there are over {:,d} cases \
of COVID-19 confirmed globally.\
This dash board is developed to visualise and track the recent reported \
cases on a hourly timescale.".format(latestDate, confirmedCases),
),
# html.P(
# id="note",
# children=['⚠️ Source from ',
# html.A('The National Health Commission of China', href='http://www.nhc.gov.cn/yjb/s7860/202002/553ff43ca29d4fe88f3837d49d6b6ef1.shtml'),
# ': in its February 14 official report, deducted \
# 108 previously reported deaths and 1,043 previously reported cases from the total in Hubei Province due to "repeated counting." \
# Data have been corrected for these changes.']
# ),
# html.P(
# id="note",
# children=['⚠️ Source from ',
# html.A('读卖新闻', href='https://www.yomiuri.co.jp/national/20200216-OYT1T50089/'),
# ': Diamond Princess cruise confirmed 70 new infections, bringing the total infected cases to 355.']
# ),
# html.P(
# id="note",
# children=['⚠️ Source from ',
# html.A('anews', href='http://www.anews.com.tr/world/2020/02/21/iran-says-two-more-deaths-among-13-new-coronavirus-cases'),
# ': Iran\'s health ministry Friday reported two more deaths among 13 new cases of coronavirus in the Islamic republic, bringing the total number of deaths to four and infections to 18.']
# ),
# html.P(
# id="note",
# children=['⚠️ Source from ',
# html.A('The New York Times', href='https://www.nytimes.com/2020/03/01/world/coronavirus-news.html'),
# ': New York State Reports First Case.']
# ),
html.P(
id='time-stamp',
# style={'fontWeight':'bold'},
children="🔴 Last update: {}".format(latestDate))
]
),
html.Div(
id="number-plate",
style={'marginLeft': '1.5%',
'marginRight': '1.5%', 'marginBottom': '.5%'},
children=[
#html.Hr(),
html.Div(
style={'width': '24.4%', 'backgroundColor': '#cbd2d3', 'display': 'inline-block',
'marginRight': '.8%', 'verticalAlign': 'top'},
children=[
html.H3(style={'textAlign': 'center',
'fontWeight': 'bold', 'color': '#2674f6'},
children=[
html.P(style={'color': '#cbd2d3', 'padding': '.5rem'},
children='xxxx xx xxx xxxx xxx xxxxx'),
'{}'.format(daysOutbreak),
]),
html.H5(style={'textAlign': 'center', 'color': '#2674f6', 'padding': '.1rem'},
children="Days Since Outbreak")
]),
html.Div(
style={'width': '24.4%', 'backgroundColor': '#cbd2d3', 'display': 'inline-block',
'marginRight': '.8%', 'verticalAlign': 'top'},
children=[
html.H3(style={'textAlign': 'center',
'fontWeight': 'bold', 'color': '#d7191c'},
children=[
html.P(style={'padding': '.5rem'},
children='+ {:,d} in the past 24h ({:.1%})'.format(plusConfirmedNum, plusPercentNum1)),
'{:,d}'.format(
confirmedCases)
]),
html.H5(style={'textAlign': 'center', 'color': '#d7191c', 'padding': '.1rem'},
children="Confirmed Cases")
]),
html.Div(
style={'width': '24.4%', 'backgroundColor': '#cbd2d3', 'display': 'inline-block',
'marginRight': '.8%', 'verticalAlign': 'top'},
children=[
html.H3(style={'textAlign': 'center',
'fontWeight': 'bold', 'color': '#1a9622'},
children=[
html.P(style={'padding': '.5rem'},
children='+ {:,d} in the past 24h ({:.1%})'.format(plusRecoveredNum, plusPercentNum2)),
'{:,d}'.format(
recoveredCases),
]),
html.H5(style={'textAlign': 'center', 'color': '#1a9622', 'padding': '.1rem'},
children="Recovered Cases")
]),
html.Div(
style={'width': '24.4%', 'backgroundColor': '#cbd2d3', 'display': 'inline-block',
'verticalAlign': 'top'},
children=[
html.H3(style={'textAlign': 'center',
'fontWeight': 'bold', 'color': '#6c6c6c'},
children=[
html.P(style={'padding': '.5rem'},
children='+ {:,d} in the past 24h ({:.1%})'.format(plusDeathNum, plusPercentNum3)),
'{:,d}'.format(deathsCases)
]),
html.H5(style={'textAlign': 'center', 'color': '#6c6c6c', 'padding': '.1rem'},
children="Death Cases")
])
]),
html.Div(
id='dcc-plot',
style={'marginLeft': '1.5%', 'marginRight': '1.5%',
'marginBottom': '.35%', 'marginTop': '.5%'},
children=[
html.Div(
style={'width': '32.79%', 'display': 'inline-block',
'marginRight': '.8%', 'verticalAlign': 'top'},
children=[
html.H5(style={'textAlign': 'center', 'backgroundColor': '#cbd2d3',
'color': '#292929', 'padding': '1rem', 'marginBottom': '0'},
children='Confirmed Case Timeline'),
dcc.Graph(style={'height': '300px'}, figure=fig_confirmed)]),
html.Div(
style={'width': '32.79%', 'display': 'inline-block',
'marginRight': '.8%', 'verticalAlign': 'top'},
children=[
html.H5(style={'textAlign': 'center', 'backgroundColor': '#cbd2d3',
'color': '#292929', 'padding': '1rem', 'marginBottom': '0'},
children='Remaining/Recovered/Death Case Timeline'),
dcc.Graph(style={'height': '300px'}, figure=fig_combine)]),
html.Div(
style={'width': '32.79%', 'display': 'inline-block',
'verticalAlign': 'top'},
children=[
html.H5(style={'textAlign': 'center', 'backgroundColor': '#cbd2d3',
'color': '#292929', 'padding': '1rem', 'marginBottom': '0'},
children='Death Rate (%) Timeline'),
dcc.Graph(style={'height': '300px'}, figure=fig_rate)])]),
html.Div(
id='dcc-map',
style={'marginLeft': '1.5%', 'marginRight': '1.5%', 'marginBottom': '.5%'},
children=[
html.Div(style={'width': '66.41%', 'marginRight': '.8%', 'display': 'inline-block', 'verticalAlign': 'top'},
children=[
html.H5(style={'textAlign': 'center', 'backgroundColor': '#cbd2d3',
'color': '#292929', 'padding': '1rem', 'marginBottom': '0'},
children='Latest Coronavirus Outbreak Map'),
dcc.Graph(
id='datatable-interact-map',
style={'height': '500px'},),
dcc.Tabs(
id="tabs-plots",
value='Cumulative Cases',
parent_className='custom-tabs',
className='custom-tabs-container',
children=[dcc.Tab(className='custom-tab',
selected_className='custom-tab--selected',
label='Cumulative Cases',
value='Cumulative Cases'),
dcc.Tab(className='custom-tab',
selected_className='custom-tab--selected',
label='Confirmed Case Trajectories',
value='Confirmed Case Trajectories'),
]
),
html.Div(id='tabs-content-plots'),
]),
html.Div(style={'width': '32.79%', 'display': 'inline-block', 'verticalAlign': 'top'},
children=[
html.H5(style={'textAlign': 'center', 'backgroundColor': '#cbd2d3',
'color': '#292929', 'padding': '1rem', 'marginBottom': '0'},
children='Cases by Country/Region'),
dcc.Tabs(
id="tabs-table",
value='The World',
parent_className='custom-tabs',
className='custom-tabs-container',
children=[
dcc.Tab(label='The World',
value='The World',
className='custom-tab',
selected_className='custom-tab--selected',
children=[
dash_table.DataTable(
id='datatable-interact-location',
# Don't show coordinates
columns=[{"name": i, "id": i}
for i in dfSum.columns[0:5]],
# But still store coordinates in the table for interactivity
data=dfSum.to_dict(
"rows"),
row_selectable="single",
sort_action="native",
style_as_list_view=True,
style_cell={'font_family': 'Arial',
'font_size': '1.2rem',
'padding': '.1rem',
'backgroundColor': '#f4f4f2', },
fixed_rows={
'headers': True, 'data': 0},
style_table={'minHeight': '800px',
'height': '800px',
'maxHeight': '800px'},
style_header={'backgroundColor': '#f4f4f2',
'fontWeight': 'bold'},
style_cell_conditional=[{'if': {'column_id': 'Country/Regions'}, 'width': '28%'},
{'if': {
'column_id': 'Remaining'}, 'width': '18%'},
{'if': {
'column_id': 'Confirmed'}, 'width': '18%'},
{'if': {
'column_id': 'Recovered'}, 'width': '18%'},
{'if': {
'column_id': 'Deaths'}, 'width': '18%'},
{'if': {
'column_id': 'Remaining'}, 'color':'#e36209'},
{'if': {
'column_id': 'Confirmed'}, 'color': '#d7191c'},
{'if': {
'column_id': 'Recovered'}, 'color': '#1a9622'},
{'if': {
'column_id': 'Deaths'}, 'color': '#6c6c6c'},
{'textAlign': 'center'}],
)
]),
make_dcc_country_tab(
'Australia', AUSTable),
make_dcc_country_tab(
'Canada', CANTable),
make_dcc_country_tab(
'Mainland China', CNTable),
make_dcc_country_tab(
'United States', USTable),
#make_dcc_country_tab(
# 'Schengen', EuroTable),
]
)
])
]),
html.Div(
id='my-footer',
style={'marginLeft': '1.5%', 'marginRight': '1.5%', 'marginBottom': '1%', 'marginTop': '.5%'},
children=[
html.Hr(style={'marginBottom': '.5%'},),
html.P(style={'textAlign': 'center', 'margin': 'auto'},
children=['Keep calm and stay home | ',
html.A('Developed by Jun with ❤️ in Sydney', href='https://junye0798.com/'), ' | ',
html.A('About this dashboard', href='https://github.com/Perishleaf/data-visualisation-scripts/tree/master/dash-2019-coronavirus',target='_blank'), " | ",
html.A('Report a bug', href='https://twitter.com/perishleaf', target='_blank')
]
),
html.P(style={'textAlign': 'center', 'margin': 'auto', 'marginTop': '.5%'},
children=['Proudly supported by']
),
html.P(style={'textAlign': 'center', 'margin': 'auto', 'marginTop': '.5%'},
children=[html.A(html.Img(style={'height' : '10%', 'width' : '10%',}, src=app.get_asset_url('TypeHuman.png')),
href='https://www.typehuman.com/', target='_blank')]
)
]
),
])
@app.callback(
    Output('datatable-interact-map', 'figure'),
    [Input('tabs-table', 'value'),
     Input('datatable-interact-location', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location', 'selected_row_ids'),
     Input('datatable-interact-location-Australia', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Australia', 'selected_row_ids'),
     Input('datatable-interact-location-Canada', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Canada', 'selected_row_ids'),
     Input('datatable-interact-location-Mainland China', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Mainland China', 'selected_row_ids'),
     Input('datatable-interact-location-United States', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-United States', 'selected_row_ids'),
     ]
)
def update_figures(value, derived_virtual_selected_rows, selected_row_ids,
                   Australia_derived_virtual_selected_rows, Australia_selected_row_ids,
                   Canada_derived_virtual_selected_rows, Canada_selected_row_ids,
                   CHN_derived_virtual_selected_rows, CHN_selected_row_ids,
                   US_derived_virtual_selected_rows, US_selected_row_ids
                   ):
    """Redraw the outbreak map when a table tab or table row selection changes.

    value: active tab of the 'tabs-table' component.
    *_derived_virtual_selected_rows / *_selected_row_ids: selection state of the
    DataTable in each tab. On first render these are None — Dash fires callbacks
    before the tables have supplied their properties — so None means "nothing
    selected" and we show the tab's default view.

    Returns the plotly figure for 'datatable-interact-map'.
    """
    # Per-tab settings: (table, default lat, default lon, default zoom,
    #                    zoom when a row is selected, selected rows, selected ids)
    tab_settings = {
        'The World': (dfSum, 14.056159, 6.395626, 1.02, 4,
                      derived_virtual_selected_rows, selected_row_ids),
        'Australia': (AUSTable, -25.931850, 134.024931, 3, 5,
                      Australia_derived_virtual_selected_rows, Australia_selected_row_ids),
        'Canada': (CANTable, 55.474012, -97.344913, 3, 5,
                   Canada_derived_virtual_selected_rows, Canada_selected_row_ids),
        'Mainland China': (CNTable, 33.471197, 106.206780, 2.5, 5,
                           CHN_derived_virtual_selected_rows, CHN_selected_row_ids),
        'United States': (USTable, 40.022092, -98.828101, 3, 5,
                          US_derived_virtual_selected_rows, US_selected_row_ids),
    }
    # BUGFIX: an unrecognised tab value previously left dff/latitude/longitude/
    # zoom unbound and raised NameError; fall back to the world view instead.
    dff, lat_default, lon_default, zoom_default, zoom_selected, sel_rows, sel_ids = \
        tab_settings.get(value, tab_settings['The World'])
    if not sel_rows:  # None (first render) or empty list -> no row selected
        latitude, longitude, zoom = lat_default, lon_default, zoom_default
    else:
        selected = dff.loc[sel_ids[0]]
        latitude, longitude, zoom = selected.lat, selected.lon, zoom_selected

    mapbox_access_token = "pk.eyJ1IjoicGxvdGx5bWFwYm94IiwiYSI6ImNqdnBvNDMyaTAxYzkzeW5ubWdpZ2VjbmMifQ.TXcBE-xg9BFdV2ocecc_7g"

    # Generate a list for hover text display: "<area>, <region>" for sub-national
    # points, except HK/Macau/Taiwan which are shown by area name only.
    textList = []
    for area, region in zip(df_latest['Province/State'], df_latest['Country/Region']):
        if type(area) is str:
            if region == "Hong Kong" or region == "Macau" or region == "Taiwan":
                textList.append(area)
            else:
                textList.append(area+', '+region)
        else:
            textList.append(region)

    # Remaining (active) cases drive the colour: red while positive, green once
    # every confirmed case has recovered or died.
    colorList = [confirmed - deaths - recovered
                 for confirmed, recovered, deaths in zip(df_latest['Confirmed'],
                                                         df_latest['Recovered'],
                                                         df_latest['Deaths'])]

    fig2 = go.Figure(go.Scattermapbox(
        lat=df_latest['lat'],
        lon=df_latest['lon'],
        mode='markers',
        marker=go.scattermapbox.Marker(
            color=['#d7191c' if i > 0 else '#1a9622' for i in colorList],
            # NOTE(review): marker sizes use a cube root while sizeref scales by
            # a square root — presumably tuned by eye; confirm before changing.
            size=[i**(1/3) for i in df_latest['Confirmed']],
            sizemin=1,
            sizemode='area',
            sizeref=2.*max([math.sqrt(i)
                            for i in df_latest['Confirmed']])/(100.**2),
        ),
        text=textList,
        hovertext=['Confirmed: {:,d}<br>Recovered: {:,d}<br>Death: {:,d}'.format(i, j, k) for i, j, k in zip(df_latest['Confirmed'],
                                                                                                             df_latest['Recovered'],
                                                                                                             df_latest['Deaths'])],
        hovertemplate="<b>%{text}</b><br><br>" +
                      "%{hovertext}<br>" +
                      "<extra></extra>")
    )
    fig2.update_layout(
        plot_bgcolor='#151920',
        paper_bgcolor='#cbd2d3',
        margin=go.layout.Margin(l=10, r=10, b=10, t=0, pad=40),
        hovermode='closest',
        transition={'duration': 50},
        annotations=[
            dict(
                x=.5,
                y=-.01,
                align='center',
                showarrow=False,
                text="Points are placed based on data geolocation levels.<br>Province/State level - Australia, China, Canada, and United States; Country level- other countries.",
                xref="paper",
                yref="paper",
                font=dict(size=10, color='#292929'),
            )],
        mapbox=go.layout.Mapbox(
            accesstoken=mapbox_access_token,
            style="light",
            # The direction you're facing, measured clockwise as an angle from true north on a compass
            bearing=0,
            center=go.layout.mapbox.Center(
                lat=latitude,
                lon=longitude
            ),
            pitch=0,
            zoom=zoom
        )
    )

    return fig2
@app.callback(Output('tabs-content-plots', 'children'),
              [Input('tabs-plots', 'value')])
def render_content(tab):
    """Swap the graph shown under the map when the plot tab changes.

    Returns the cumulative-cases or trajectory graph for the selected tab,
    or None (implicitly) for any other value.
    """
    if tab == 'Confirmed Case Trajectories':
        return dcc.Graph(id='datatable-interact-logplot',
                         style={'height': '300px'},
                         figure=fig_curve_tab,)
    if tab == 'Cumulative Cases':
        return dcc.Graph(id='datatable-interact-lineplot',
                         style={'height': '300px'},
                         figure=fig_cumulative_tab,)
@app.callback(
    Output('datatable-interact-lineplot', 'figure'),
    [Input('tabs-table', 'value'),
     Input('datatable-interact-location', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location', 'selected_row_ids'),
     Input('datatable-interact-location-Australia', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Australia', 'selected_row_ids'),
     Input('datatable-interact-location-Canada', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Canada', 'selected_row_ids'),
     Input('datatable-interact-location-Mainland China', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Mainland China', 'selected_row_ids'),
     Input('datatable-interact-location-United States', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-United States', 'selected_row_ids'),
     ]
)
def update_lineplot(value, derived_virtual_selected_rows, selected_row_ids,
                    Australia_derived_virtual_selected_rows, Australia_selected_row_ids,
                    Canada_derived_virtual_selected_rows, Canada_selected_row_ids,
                    CHN_derived_virtual_selected_rows, CHN_selected_row_ids,
                    US_derived_virtual_selected_rows, US_selected_row_ids
                    ):
    """Redraw the cumulative Confirmed/Recovered/Deaths line plot.

    The region to plot is resolved from the active table tab (``value``) and
    the row selected in that tab's table; with no selection, a sensible
    aggregate region is used ('The World', 'Australia', 'Canada', 'China',
    'US'). Cumulative numbers are read from ``./cumulative_data/<Region>.csv``.

    Bug fix: the 'Mainland China' and 'United States' branches previously
    reset ``Canada_derived_virtual_selected_rows`` (copy-paste error) instead
    of their own CHN_/US_ variables.
    """
    if value == 'The World':
        if derived_virtual_selected_rows is None:
            derived_virtual_selected_rows = []
        dff = dfSum
        if selected_row_ids:
            # Mainland China rows map onto the 'China' cumulative data file
            if dff.loc[selected_row_ids[0]]['Country/Region'] == 'Mainland China':
                Region = 'China'
            else:
                Region = dff.loc[selected_row_ids[0]]['Country/Region']
        else:
            Region = 'The World'  # Display the global total case number
    elif value == 'Australia':
        if Australia_derived_virtual_selected_rows is None:
            Australia_derived_virtual_selected_rows = []
        dff = AUSTable
        if Australia_selected_row_ids:
            Region = dff.loc[Australia_selected_row_ids[0]]['Province/State']
        else:
            Region = 'Australia'
    elif value == 'Canada':
        if Canada_derived_virtual_selected_rows is None:
            Canada_derived_virtual_selected_rows = []
        dff = CANTable
        if Canada_selected_row_ids:
            Region = dff.loc[Canada_selected_row_ids[0]]['Province/State']
        else:
            Region = 'Canada'
    elif value == 'Mainland China':
        # Fixed: reset this tab's own selection list, not Canada's.
        if CHN_derived_virtual_selected_rows is None:
            CHN_derived_virtual_selected_rows = []
        dff = CNTable
        if CHN_selected_row_ids:
            Region = dff.loc[CHN_selected_row_ids[0]]['Province/State']
        else:
            Region = 'China'
    elif value == 'United States':
        # Fixed: reset this tab's own selection list, not Canada's.
        if US_derived_virtual_selected_rows is None:
            US_derived_virtual_selected_rows = []
        dff = USTable
        if US_selected_row_ids:
            Region = dff.loc[US_selected_row_ids[0]]['Province/State']
        else:
            Region = 'US'
    # NOTE: any other tab value leaves Region unbound and raises below;
    # the 'tabs-table' component only emits the five values handled above.

    # Read cumulative data of a given region from ./cumulative_data folder
    df_region = pd.read_csv('./cumulative_data/{}.csv'.format(Region))
    df_region = df_region.astype(
        {'Date_last_updated_AEDT': 'datetime64', 'date_day': 'datetime64'})

    # Create empty figure canvas
    fig3 = go.Figure()
    # One trace per case category (confirmed / recovered / deaths)
    fig3.add_trace(go.Scatter(x=df_region['date_day'],
                              y=df_region['Confirmed'],
                              mode='lines+markers',
                              # line_shape='spline',
                              name='Confirmed case',
                              line=dict(color='#d7191c', width=2),
                              # marker=dict(size=4, color='#f4f4f2',
                              #             line=dict(width=1,color='#921113')),
                              text=[datetime.strftime(d, '%b %d %Y AEDT')
                                    for d in df_region['date_day']],
                              hovertext=['{} Confirmed<br>{:,d} cases<br>'.format(
                                  Region, i) for i in df_region['Confirmed']],
                              hovertemplate='<b>%{text}</b><br></br>' +
                                            '%{hovertext}' +
                                            '<extra></extra>'))
    fig3.add_trace(go.Scatter(x=df_region['date_day'],
                              y=df_region['Recovered'],
                              mode='lines+markers',
                              # line_shape='spline',
                              name='Recovered case',
                              line=dict(color='#1a9622', width=2),
                              # marker=dict(size=4, color='#f4f4f2',
                              #             line=dict(width=1,color='#168038')),
                              text=[datetime.strftime(d, '%b %d %Y AEDT')
                                    for d in df_region['date_day']],
                              hovertext=['{} Recovered<br>{:,d} cases<br>'.format(
                                  Region, i) for i in df_region['Recovered']],
                              hovertemplate='<b>%{text}</b><br></br>' +
                                            '%{hovertext}' +
                                            '<extra></extra>'))
    fig3.add_trace(go.Scatter(x=df_region['date_day'],
                              y=df_region['Deaths'],
                              mode='lines+markers',
                              # line_shape='spline',
                              name='Death case',
                              line=dict(color='#626262', width=2),
                              # marker=dict(size=4, color='#f4f4f2',
                              #             line=dict(width=1,color='#626262')),
                              text=[datetime.strftime(d, '%b %d %Y AEDT')
                                    for d in df_region['date_day']],
                              hovertext=['{} Deaths<br>{:,d} cases<br>'.format(
                                  Region, i) for i in df_region['Deaths']],
                              hovertemplate='<b>%{text}</b><br></br>' +
                                            '%{hovertext}' +
                                            '<extra></extra>'))
    # Customise layout
    fig3.update_layout(
        margin=go.layout.Margin(
            l=10,
            r=10,
            b=10,
            t=5,
            pad=0
        ),
        annotations=[
            dict(
                x=.5,
                y=.4,
                xref="paper",
                yref="paper",
                text=Region,
                opacity=0.5,
                font=dict(family='Arial, sans-serif',
                          size=60,
                          color="grey"),
            )
        ],
        yaxis_title="Cumulative cases numbers",
        yaxis=dict(
            showline=False, linecolor='#272e3e',
            zeroline=False,
            # showgrid=False,
            gridcolor='rgba(203, 210, 211,.3)',
            gridwidth=.1,
            tickmode='array',
            # Set tick range based on the maximum number
            # tickvals=tickList,
            # Set tick label accordingly
            # ticktext=["{:.0f}k".format(i/1000) for i in tickList]
        ),
        xaxis_title="Select A Location From Table",
        xaxis=dict(
            showline=False, linecolor='#272e3e',
            showgrid=False,
            gridcolor='rgba(203, 210, 211,.3)',
            gridwidth=.1,
            zeroline=False
        ),
        xaxis_tickformat='%b %d',
        # transition = {'duration':500},
        hovermode='x',
        legend_orientation="h",
        legend=dict(x=.02, y=.95, bgcolor="rgba(0,0,0,0)",),
        plot_bgcolor='#f4f4f2',
        paper_bgcolor='#cbd2d3',
        font=dict(color='#292929', size=10)
    )

    return fig3
@app.callback(
    Output('datatable-interact-logplot', 'figure'),
    [Input('tabs-table', 'value'),
     Input('datatable-interact-location', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location', 'selected_row_ids'),
     Input('datatable-interact-location-Australia', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Australia', 'selected_row_ids'),
     Input('datatable-interact-location-Canada', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Canada', 'selected_row_ids'),
     Input('datatable-interact-location-Mainland China', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-Mainland China', 'selected_row_ids'),
     Input('datatable-interact-location-United States', 'derived_virtual_selected_rows'),
     Input('datatable-interact-location-United States', 'selected_row_ids'),
     ]
)
def update_logplot(value, derived_virtual_selected_rows, selected_row_ids,
                   Australia_derived_virtual_selected_rows, Australia_selected_row_ids,
                   Canada_derived_virtual_selected_rows, Canada_selected_row_ids,
                   CHN_derived_virtual_selected_rows, CHN_selected_row_ids,
                   US_derived_virtual_selected_rows, US_selected_row_ids
                   ):
    """Redraw the log-scale confirmed-case trajectory plot.

    Resolves the region to highlight from the active table tab (``value``)
    and the selected row; with no selection an aggregate region is used.
    Plots four dotted growth-rate reference lines, grey comparison curves
    for six benchmark regions, and (when the region has passed 100 cases)
    the region's own trajectory in red.
    """
    # Resolve Region (and elapseDay, used for the x-axis range) per tab.
    if value == 'The World':
        if derived_virtual_selected_rows is None:
            derived_virtual_selected_rows = []
        dff = dfSum
        elapseDay = daysOutbreak
        if selected_row_ids:
            # Mainland China rows map onto the 'China' trajectory data.
            if dff.loc[selected_row_ids[0]]['Country/Region'] == 'Mainland China':
                Region = 'China'
            else:
                Region = dff.loc[selected_row_ids[0]]['Country/Region']
        else:
            Region = 'The World'
    elif value == 'Australia':
        if Australia_derived_virtual_selected_rows is None:
            Australia_derived_virtual_selected_rows = []
        dff = AUSTable
        elapseDay = daysOutbreak
        if Australia_selected_row_ids:
            Region = dff.loc[Australia_selected_row_ids[0]]['Province/State']
        else:
            Region = 'Australia'
    elif value == 'Canada':
        if Canada_derived_virtual_selected_rows is None:
            Canada_derived_virtual_selected_rows = []
        dff = CANTable
        elapseDay = daysOutbreak
        if Canada_selected_row_ids:
            Region = dff.loc[Canada_selected_row_ids[0]]['Province/State']
        else:
            Region = 'Canada'
    elif value == 'Mainland China':
        if CHN_derived_virtual_selected_rows is None:
            CHN_derived_virtual_selected_rows = []
        dff = CNTable
        elapseDay = daysOutbreak
        if CHN_selected_row_ids:
            Region = dff.loc[CHN_selected_row_ids[0]]['Province/State']
        else:
            Region = 'China'
    elif value == 'United States':
        if US_derived_virtual_selected_rows is None:
            US_derived_virtual_selected_rows = []
        dff = USTable
        elapseDay = daysOutbreak
        if US_selected_row_ids:
            Region = dff.loc[US_selected_row_ids[0]]['Province/State']
        else:
            Region = 'US'
    # Create empty figure canvas
    fig_curve = go.Figure()
    # Dotted reference lines for 85%/35%/15%/5% daily growth rates
    # (pseduoDay and y1..y4 are module-level precomputed series).
    fig_curve.add_trace(go.Scatter(x=pseduoDay,
                                   y=y1,
                                   line=dict(color='rgba(0, 0, 0, .3)', width=1, dash='dot'),
                                   text=[
                                       '85% growth rate' for i in pseduoDay],
                                   hovertemplate='<b>%{text}</b><br>' +
                                                 '<extra></extra>'
                                   )
                        )
    fig_curve.add_trace(go.Scatter(x=pseduoDay,
                                   y=y2,
                                   line=dict(color='rgba(0, 0, 0, .3)', width=1, dash='dot'),
                                   text=[
                                       '35% growth rate' for i in pseduoDay],
                                   hovertemplate='<b>%{text}</b><br>' +
                                                 '<extra></extra>'
                                   )
                        )
    fig_curve.add_trace(go.Scatter(x=pseduoDay,
                                   y=y3,
                                   line=dict(color='rgba(0, 0, 0, .3)', width=1, dash='dot'),
                                   text=[
                                       '15% growth rate' for i in pseduoDay],
                                   hovertemplate='<b>%{text}</b><br>' +
                                                 '<extra></extra>'
                                   )
                        )
    fig_curve.add_trace(go.Scatter(x=pseduoDay,
                                   y=y4,
                                   line=dict(color='rgba(0, 0, 0, .3)', width=1, dash='dot'),
                                   text=[
                                       '5% growth rate' for i in pseduoDay],
                                   hovertemplate='<b>%{text}</b><br>' +
                                                 '<extra></extra>'
                                   )
                        )
    # Add trace to the figure
    # Only regions present in dfs_curve have passed 100 cases and get
    # their own highlighted red curve; otherwise just plot the benchmarks.
    if Region in set(dfs_curve['Region']):
        # Latest point of the selected region (marker on top of its curve).
        dotx = [np.array(dfs_curve.loc[dfs_curve['Region'] == Region,'DayElapsed'])[0]]
        doty = [np.array(dfs_curve.loc[dfs_curve['Region'] == Region,'Confirmed'])[0]]
        # Grey benchmark curves plus their latest-point markers.
        for regionName in ['The World', 'China', 'Japan', 'Italy', 'South Korea', 'US']:
            dotgrayx = [np.array(dfs_curve.loc[dfs_curve['Region'] == regionName, 'DayElapsed'])[0]]
            dotgrayy = [np.array(dfs_curve.loc[dfs_curve['Region'] == regionName, 'Confirmed'])[0]]
            fig_curve.add_trace(go.Scatter(x=dfs_curve.loc[dfs_curve['Region'] == regionName]['DayElapsed'],
                                           y=dfs_curve.loc[dfs_curve['Region'] == regionName]['Confirmed'],
                                           mode='lines',
                                           line_shape='spline',
                                           name=regionName,
                                           opacity=0.3,
                                           line=dict(color='#636363', width=1.5),
                                           text=[
                                               i for i in dfs_curve.loc[dfs_curve['Region'] == regionName]['Region']],
                                           hovertemplate='<b>%{text}</b><br>' +
                                                         '<br>%{x} days after 100 cases<br>' +
                                                         'with %{y:,d} cases<br>'
                                                         '<extra></extra>'
                                           )
                                )
            fig_curve.add_trace(go.Scatter(x=dotgrayx,
                                           y=dotgrayy,
                                           mode='markers',
                                           marker=dict(size=6, color='#636363',
                                                       line=dict(width=1, color='#636363')),
                                           opacity=0.5,
                                           text=[regionName],
                                           hovertemplate='<b>%{text}</b><br>' +
                                                         '<br>%{x} days after 100 cases<br>' +
                                                         'with %{y:,d} cases<br>'
                                                         '<extra></extra>'
                                           )
                                )
        # Highlighted red curve and marker for the selected region.
        fig_curve.add_trace(go.Scatter(x=dfs_curve.loc[dfs_curve['Region'] == Region]['DayElapsed'],
                                       y=dfs_curve.loc[dfs_curve['Region'] == Region]['Confirmed'],
                                       mode='lines',
                                       line_shape='spline',
                                       name=Region,
                                       line=dict(color='#d7191c', width=3),
                                       text=[
                                           i for i in dfs_curve.loc[dfs_curve['Region'] == Region]['Region']],
                                       hovertemplate='<b>%{text}</b><br>' +
                                                     '<br>%{x} days after 100 cases<br>' +
                                                     'with %{y:,d} cases<br>'
                                                     '<extra></extra>'
                                       )
                            )
        fig_curve.add_trace(go.Scatter(x=dotx,
                                       y=doty,
                                       mode='markers',
                                       marker=dict(size=7, color='#d7191c',
                                                   line=dict(width=1, color='#d7191c')),
                                       text=[Region],
                                       hovertemplate='<b>%{text}</b><br>' +
                                                     '<br>%{x} days after 100 cases<br>' +
                                                     'with %{y:,d} cases<br>'
                                                     '<extra></extra>'
                                       )
                            )
    else:
        # Region has not reached 100 cases: show benchmark curves only.
        for regionName in ['The World','China', 'Japan', 'Italy', 'South Korea', 'US']:
            dotgrayx = [np.array(dfs_curve.loc[dfs_curve['Region'] == regionName, 'DayElapsed'])[0]]
            dotgrayy = [np.array(dfs_curve.loc[dfs_curve['Region'] == regionName, 'Confirmed'])[0]]
            fig_curve.add_trace(go.Scatter(x=dfs_curve.loc[dfs_curve['Region'] == regionName]['DayElapsed'],
                                           y=dfs_curve.loc[dfs_curve['Region'] == regionName]['Confirmed'],
                                           mode='lines',
                                           line_shape='spline',
                                           name=regionName,
                                           opacity=0.3,
                                           line=dict(color='#636363', width=1.5),
                                           text=[
                                               i for i in dfs_curve.loc[dfs_curve['Region'] == regionName]['Region']],
                                           hovertemplate='<b>%{text}</b><br>' +
                                                         '<br>%{x} days after 100 cases<br>' +
                                                         'with %{y:,d} cases<br>'
                                                         '<extra></extra>'
                                           )
                                )
            fig_curve.add_trace(go.Scatter(x=dotgrayx,
                                           y=dotgrayy,
                                           mode='markers',
                                           marker=dict(size=6, color='#636363',
                                                       line=dict(width=1, color='#636363')),
                                           opacity=0.5,
                                           text=[regionName],
                                           hovertemplate='<b>%{text}</b><br>' +
                                                         '<br>%{x} days after 100 cases<br>' +
                                                         'with %{y:,d} cases<br>'
                                                         '<extra></extra>'
                                           )
                                )
    # Customise layout
    fig_curve.update_xaxes(range=[0, elapseDay-19])
    fig_curve.update_yaxes(range=[1.9, 6])
    fig_curve.update_layout(
        xaxis_title="Number of day since 100th confirmed cases",
        yaxis_title="Confirmed cases (Logarithmic)",
        margin=go.layout.Margin(
            l=10,
            r=10,
            b=10,
            t=5,
            pad=0
        ),
        annotations=[dict(
            x=.5,
            y=.4,
            xref="paper",
            yref="paper",
            text=Region if Region in set(dfs_curve['Region']) else "Not over 100 cases",
            opacity=0.5,
            font=dict(family='Arial, sans-serif',
                      size=60,
                      color="grey"),
        )
        ],
        yaxis_type="log",
        yaxis=dict(
            showline=False,
            linecolor='#272e3e',
            zeroline=False,
            # showgrid=False,
            gridcolor='rgba(203, 210, 211,.3)',
            gridwidth = .1,
        ),
        xaxis=dict(
            showline=False,
            linecolor='#272e3e',
            # showgrid=False,
            gridcolor='rgba(203, 210, 211,.3)',
            gridwidth = .1,
            zeroline=False
        ),
        showlegend=False,
        # hovermode = 'x',
        plot_bgcolor='#f4f4f2',
        paper_bgcolor='#cbd2d3',
        font=dict(color='#292929', size=10)
    )
    return fig_curve
if __name__ == "__main__":
app.run_server(debug=True)
| [
"[email protected]"
] | |
dcf26e4fbc990deff46d1ca28274644d2c882a7c | 24dccc981b29ae7837b3db6b7ba85cc2534ab5e9 | /MIC Data Exploration Tools/MIC_Data_Exploration_Tools.py | 0284bbcb2f45ba453a969739b5ece3b41f518cb2 | [] | no_license | burtonbiomedical/vitek_data_project | 6bfcf921a7b7c6c1a4620a1940350ac56a166145 | 4435b6a67084089fb29f1999c28b9274789e71b3 | refs/heads/master | 2021-05-16T14:50:35.609674 | 2018-02-25T00:31:02 | 2018-02-25T00:31:02 | 118,652,673 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,393 | py | #Import Dependencies
import pymongo
import sys
import os
import pandas as pd
import re
import pickle
import matplotlib.pyplot as plt
import numbers
import numpy as np
import seaborn as sns
from datetime import datetime, date
class ExtractData:
    """Pulls MIC summaries for a bacterial species out of a MongoDB database."""

    def __init__(self, db_name, mongo_client):
        """Open the database and cache the organism index collection.

        args:-
            db_name: Name of the database to search (must be of expected format)
            mongo_client: pymongo client object
        """
        self.db = mongo_client[db_name]
        self.all_orgs = self.db.organism_index

    def get_mic_data(self, organism):
        """Return MIC data for every isolate of the specified species.

        Returns a list of dictionary objects, one per isolate.

        args:-
            organism: The organism to search for (regular expressions accepted)
        """
        matching_reports = self.get_reportIDs(organism)
        mic_per_report = [self.extract_report_mic_data(rid) for rid in matching_reports]
        return self.remove_irrelevant_isolates(organism, mic_per_report)

    def get_reportIDs(self, organism):
        """Return the IDs of all reports whose organism index matches.

        args:-
            organism: the bacterial organism of interest (accepts regular expressions)
        """
        ids = []
        for summary in self.all_orgs.find({'organism_name': {'$regex': organism}}):
            ids.extend(summary['reports'])
        return ids

    def extract_report_mic_data(self, reportID):
        """Return the organism_summary (MIC data) of a single report.

        args:-
            reportID: the report ID for the report to search for
        """
        matches = list(self.db.reports.find({'_id': reportID}))
        return matches[0]['organism_summary']

    def remove_irrelevant_isolates(self, intended_organism, total_mic_data):
        """Drop isolates whose organism name does not match the target species.

        A report may list several isolates; only those whose organism name
        matches intended_organism (case-insensitive regex search) are kept.
        Returns one flat list of matching isolate dictionaries.

        args:-
            intended_organism: the organism that we are collecting data for
            total_mic_data: MIC summaries for all isolates from all reports,
                as a list of lists of dictionaries
        """
        pattern = re.compile(intended_organism, re.IGNORECASE)
        return [org
                for report_isolates in total_mic_data
                for org in report_isolates
                if pattern.search(org['isolate_data']['organism_name'])]

    def to_pickle(self, mic_data, path, filename):
        """Serialise mic_data to <path><filename> with pickle.

        args:-
            mic_data: extracted mic data as list of python dictionaries
            path: directory (must end with a separator, it is concatenated directly)
            filename: file to save data to, with file extension 'pickle'
        """
        with open("{}{}".format(path, filename), 'wb') as out_file:
            pickle.dump(mic_data, out_file)
class ProcessData():
    """Class for creating pandas dataframe and data exploration. Class expects a single organism MIC data file,
    as serialised python dictionary (as produced by ExtractData.to_pickle)."""
    def __init__(self, organism_mic_data_path, start_date=None, end_date=None):
        """args:-
        organism_mic_data_path: string - organism mic data pickle file path
        start_date/end_date: optional 'YYYY-MM-DD' strings (or date objects) restricting the isolates loaded"""
        # NOTE(review): the file handle is never closed explicitly; relies on
        # CPython reference counting to close it.
        self.mic_data = pickle.load(open(organism_mic_data_path, 'rb'))
        self.mic_dataframe = self.build_dataframe(start_date, end_date)
    def build_dataframe(self, start_date, end_date):
        """Creates pandas dataframe using mic data from pickle file.
        Rows are isolates (indexed and sorted by isolate_date); columns are
        antibiotic results plus 'species'.
        Returns dataframe object
        args:-
        Specify date range using start and end dates (both must be given for the filter to apply):
        start_date: string of format YYYY-MM-DD or datetime object
        end_date: string of format YYYY-MM-DD or datetime object"""
        def get_drug_mic_data(drugMIC):
            """Creates dictionary object of format drugname:result from mic data dictionary values"""
            drugName = drugMIC['drug']
            #Antibiotic result can be of type MIC value, or an interpretation e.g. + or -
            if 'mic' in list(drugMIC.keys()):
                drugResult = drugMIC['mic']
            else:
                drugResult = drugMIC['interpretation']
            return {drugName: drugResult}
        def build_row_object(isolate):
            """Builds dictionary object representing a single row, that details a single isolate"""
            mic_data = isolate['isolate_data']['mic_data']
            drug_mic_data = list(map(lambda x: get_drug_mic_data(x), mic_data))
            row = {drug:result for drugResult in drug_mic_data for drug,result in drugResult.items()}
            row['isolate_date'] = isolate['isolate_date']
            row['species'] = isolate['isolate_data']['organism_name']
            return row
        df_rows = []
        for isolate in self.mic_data:
            if start_date != None and end_date != None:
                # NOTE(review): these two conversions are loop-invariant and could
                # be hoisted out of the loop; they keep working on later iterations
                # only because str(date) round-trips through '%Y-%m-%d'.
                start_date = datetime.strptime(str(start_date), '%Y-%m-%d').date()
                end_date = datetime.strptime(str(end_date), '%Y-%m-%d').date()
                isolate_date = datetime.date(isolate['isolate_date'])
                # Keep only isolates inside the inclusive [start_date, end_date] window.
                if (isolate_date >= start_date) and (isolate_date <= end_date):
                    df_rows.append(build_row_object(isolate))
            else:
                df_rows.append(build_row_object(isolate))
        df = pd.DataFrame.from_dict(df_rows)
        df.sort_values('isolate_date', inplace=True)
        df.set_index('isolate_date', inplace=True, drop=True)
        return df
    def get_dataframe(self):
        """Return a deep copy of the pandas dataframe object (caller mutations do not affect this instance)"""
        return self.mic_dataframe.copy(deep=True)
    def to_excel(self, path):
        """Save pandas dataframe object as excel file
        args:-
        path: string - path to save file to"""
        self.mic_dataframe.to_excel(path)
    def antibiotic_series(self, antibiotic, remove_outliers=False):
        """Generate a pandas series object for specified antibiotic.
        Non-numeric results (e.g. '+'/'-' interpretations) are coerced to NaN
        and then dropped, so the returned series is purely numeric MIC values.
        args:-
        antibiotic: string - antibiotic of interest
        remove_outliers: integer - standard deviations either side of the mean to remain included in series data.
        Default = False, will include all data."""
        antibiotic_data = self.mic_dataframe[antibiotic].copy()
        antibiotic_data = pd.to_numeric(antibiotic_data, errors='coerce')
        antibiotic_data.dropna(inplace=True)
        if remove_outliers != False:
            antibiotic_data = antibiotic_data[np.abs(antibiotic_data-antibiotic_data.mean())
                                              <=(remove_outliers*antibiotic_data.std())]
        return antibiotic_data
    def antibiotic_descriptives(self, antibiotic, remove_outliers=False, save_path="/"):
        """Compute descriptive statistics for requested antibiotic and pickle
        them to save_path (nothing is returned).
        args:-
        antibiotic: string - antibiotic of interest
        remove_outliers: integer - standard deviations either side of the mean to remain included in series data.
        Default = False, will include all data.
        save_path: string - full file path the statistics dict is pickled to
        (the default "/" is not writable; callers must supply a real path)."""
        antibiotic_data = self.antibiotic_series(antibiotic, remove_outliers=remove_outliers)
        stats = {'Oldest data point': str(antibiotic_data.index.values[0]),
                 'Newest data point': str(antibiotic_data.index.values[antibiotic_data.shape[0]-1]),
                 'Total data points': antibiotic_data.count(),
                 'Mean': antibiotic_data.mean(),
                 'Standard dev': antibiotic_data.std(),
                 'Min MIC': antibiotic_data.min(),
                 'Max MIC': antibiotic_data.max(),
                 'Median MIC': antibiotic_data.median(),
                 'Sample variance': antibiotic_data.var(),
                 'Skewness': antibiotic_data.skew(),
                 'Kurtosis': antibiotic_data.kurt()}
        with open(save_path, 'wb') as file:
            pickle.dump(stats, file)
    def antibiotic_distribution_curve(self, antibiotic, bins='auto', remove_outliers=False, save_path='/',
                                      fig_name="Distribution"):
        """Generate distribution curve for selected antibiotic and save it as
        <save_path>/<fig_name>.png (nothing is returned).
        args:-
        antibiotic: string - antibiotic of interest
        bins: integer/string - see numpy.histogram documentation for bins
        remove_outliers: integer - standard deviations either side of the mean to remain included in series data.
        Default = False, will include all data.
        save_path: string - directory the PNG is written to
        fig_name: string - file name of the PNG (without extension)"""
        antibiotic_data = self.antibiotic_series(antibiotic, remove_outliers=remove_outliers).values
        hist, bins = np.histogram(antibiotic_data, bins=bins)
        fig,ax = plt.subplots(figsize=(10,5))
        # Plot bin left edges against counts (a simple frequency polygon).
        ax.plot(bins[:-1], hist)
        plt.title('Distribution of MIC values for {}'.format(antibiotic))
        with open('{}/{}.png'.format(save_path, fig_name), 'wb') as file:
            plt.savefig(file)
    def antibiotic_timeseries(self, antibiotic, intervals='M', remove_outliers=False):
        """Generate timeseries with mean MIC value and standard deviations, using the time interval provided.
        Returns a DataFrame with columns 'Mean MIC' and 'SD', indexed by resampled period.
        args:-
        antibiotic: string - antibiotic of interest
        intervals: string - pandas resample rule, e.g. 'M' for month-end (see pandas documentation)
        remove_outliers: integer - standard deviations either side of the mean to remain included in series data.
        Default = False, will include all data."""
        antibiotic_data = self.antibiotic_series(antibiotic, remove_outliers=remove_outliers)
        means = antibiotic_data.resample(intervals).mean().rename('Mean MIC')
        std = antibiotic_data.resample(intervals).std().rename('SD')
        return pd.concat([means,std], axis=1)
    def antibiotic_trend_analysis(self, antibiotic, intervals='M',
                                  include_sd=True, remove_outliers=False, save_path='/',
                                  fig_name="Trend_Analysis"):
        """Generate trend line plot for mean MIC value over time, with line of best fit generated using
        first degree polynomial regression. The figure is saved as
        <save_path>/<fig_name>.png (nothing is returned).
        args:-
        antibiotic: string - antibiotic of interest
        intervals: string - pandas resample rule (see pandas documentation)
        include_sd: boolean - include +/- 1 standard deviation either side of mean
        remove_outliers: integer - standard deviations either side of the mean to remain included in series data.
        Default = False, will include all data.
        save_path: string - directory the PNG is written to
        fig_name: string - file name of the PNG (without extension)"""
        fig,ax = plt.subplots(figsize=(10,5))
        timeseries = self.antibiotic_timeseries(antibiotic, intervals, remove_outliers=remove_outliers)
        timeseries.dropna(inplace=True)
        coefficients, residuals, _, _, _ = np.polyfit(range(len(timeseries.index)),timeseries['Mean MIC'],1,full=True)
        # NOTE(review): residuals is an empty array when the fit is exact or the
        # series has fewer than 3 points, making residuals[0] raise IndexError -
        # confirm callers always provide enough data.
        mse = residuals[0]/(len(timeseries.index))
        # Normalised root-mean-square error, expressed relative to the data range.
        nrmse = np.sqrt(mse)/(timeseries['Mean MIC'].max() - timeseries['Mean MIC'].min())
        regression_analysis = 'First degree polynomial regression -- Slope: {0:2.6}, Fitting error: {1:2.6}%'.format(
            np.round(coefficients[0], decimals=4), np.round(nrmse*100, decimals=6))
        ax.plot(timeseries.index.values, timeseries['Mean MIC'], label="Mean MIC")
        # Fitted trend line y = m*x + c over the resampled period indices.
        ax.plot(timeseries.index.values, [coefficients[0]*x + coefficients[1] for x in range(len(timeseries))])
        if include_sd:
            timeseries['SD +1'] = timeseries['Mean MIC'] + timeseries['SD']
            timeseries['SD -1'] = timeseries['Mean MIC'] - timeseries['SD']
            ax.plot(timeseries.index.values, timeseries['SD +1'], label="+1 SD")
            ax.plot(timeseries.index.values, timeseries['SD -1'], label="-1 SD")
        ax.legend(loc='upper right', shadow=True, fontsize='small')
        plt.suptitle('{} Mean Inhibitory Concentration vs Time'.format(antibiotic), fontsize=16)
        plt.title(regression_analysis, fontsize=12)
        plt.xlabel('Time')
        plt.ylabel('MIC')
        with open('{}/{}.png'.format(save_path, fig_name), 'wb') as file:
            plt.savefig(file)
    def correlation_matrix(self, antibiotics='all', null_threshold=0.5, save_path='/', fig_name='Corr_Matrix'):
        """Generate correlation matrix of all drug MIC values, and save it as a
        heatmap PNG at <save_path>/<fig_name>.png (nothing is returned).
        args:-
        antibiotics: 'all' or list of strings - antibiotics to include in matrix
        null_threshold: float - the maximum percentage, taken as a percentage of dataset size, of null values
        a column can have without being excluded from correlation matrix
        save_path: string - directory the PNG is written to
        fig_name: string - file name of the PNG (without extension)"""
        if antibiotics == 'all':
            columns = self.mic_dataframe.columns.tolist()
        else:
            columns = antibiotics
        antibiotic_data = self.mic_dataframe[columns].copy()
        # Coerce everything numeric; non-numeric columns (e.g. 'species') become
        # NaN and are dropped by the null-threshold filter below.
        antibiotic_data = antibiotic_data.apply(lambda x: pd.to_numeric(x, errors='coerce'), axis=1)
        antibiotic_data = antibiotic_data.loc[:, (antibiotic_data.isnull().sum(axis=0)/
                                                  antibiotic_data.shape[0] < null_threshold)]
        corr_matrix = antibiotic_data.corr()
        plt.figure(figsize=(18,15))
        sns.set(font_scale=1.5)
        cmap = sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True)
        sns.heatmap(corr_matrix, annot=False, cmap=cmap)
        plt.title('Correlation matrix of MIC values')
        with open('{}/{}.png'.format(save_path, fig_name), 'wb') as file:
            plt.savefig(file)
def getopts(argv):
    """Collect command-line options in a dictionary.

    Each ``-name value`` pair becomes ``opts['name'] = value`` (the leading
    dash is stripped from the key). Tokens that are not flags are skipped,
    and a trailing flag with no following value is ignored instead of
    raising IndexError.

    args:-
        argv: list of command line arguments
    """
    opts = {}
    while argv:
        # Only consume a "-flag value" pair when a value actually follows;
        # startswith() also handles empty-string tokens safely (argv[0][0]
        # would raise IndexError on "").
        if argv[0].startswith('-') and len(argv) > 1:
            opts[argv[0][1:]] = argv[1]
        # Always advance, so a non-flag token can never loop forever.
        argv = argv[1:]
    return opts
def run_process_data(myargs, save_path, pickle_file, start_date, end_date):
    """Build a ProcessData instance and write the standard report artefacts.

    When an antibiotic was requested on the command line, its descriptive
    statistics, distribution curves and trend plots are written under
    save_path/<antibiotic>/; a correlation matrix heatmap is always produced.
    """
    processor = ProcessData(pickle_file, start_date=start_date, end_date=end_date)
    if 'antibiotic' in myargs:
        drug = myargs['antibiotic']
        figures_dir = "{}{}/figures/".format(save_path, drug)
        processor.antibiotic_descriptives(
            antibiotic=drug,
            save_path="{}{}/descriptive_stats.pickle".format(save_path, drug))
        # Distribution curves: full data, then with 3-sigma outliers removed.
        processor.antibiotic_distribution_curve(
            antibiotic=drug, bins=15, save_path=figures_dir,
            fig_name='distribution')
        processor.antibiotic_distribution_curve(
            antibiotic=drug, bins=15, save_path=figures_dir,
            fig_name='woOutliers_descriptives', remove_outliers=3)
        # Trend plots: raw, outlier-free, and outlier-free with +/-1 SD bands.
        processor.antibiotic_trend_analysis(
            antibiotic=drug, save_path=figures_dir, include_sd=False,
            fig_name='distribution_noSD')
        processor.antibiotic_trend_analysis(
            antibiotic=drug, save_path=figures_dir, remove_outliers=True,
            include_sd=False, fig_name='woOutliers_distribution_noSD')
        processor.antibiotic_trend_analysis(
            antibiotic=drug, save_path=figures_dir, remove_outliers=True,
            fig_name='woOutliers_distribution_plusSD')
    processor.correlation_matrix(save_path=save_path, fig_name='Correlation_Matrix')
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: parents included, no error if it exists.

    Bug fix: the previous version compared against ``errno.EEXIST`` but this
    module never imports ``errno``, so any OSError raised here escalated into
    a NameError. Re-checking with os.path.isdir removes that dependency while
    keeping the same semantics: an already-existing directory is fine, every
    other failure (permissions, bad path, ...) propagates.
    """
    try:
        os.makedirs(path, mode=0o777)
    except OSError:
        if not os.path.isdir(path):
            raise
if __name__ == '__main__':
    # Parse "-flag value" pairs from the command line.
    myargs = getopts(sys.argv)
    # Mandatory arguments: each missing one aborts with a usage hint.
    # (Previously -userID and -antibiotic were read unguarded and a missing
    # value crashed with a raw KeyError traceback, inconsistent with the
    # guarded handling of -dbname and -bug.)
    if 'dbname' in myargs.keys():
        dbname = myargs['dbname']
    else:
        sys.stdout.write("Please specify database name e.g '-dbname database1'")
        sys.exit()
    if 'bug' in myargs.keys():
        bug = myargs['bug']
    else:
        sys.stdout.write("Please specify target organism")
        sys.exit()
    if 'userID' in myargs.keys():
        user_id = myargs['userID']
    else:
        sys.stdout.write("Please specify a user ID e.g '-userID 42'")
        sys.exit()
    if 'antibiotic' in myargs.keys():
        drug = myargs['antibiotic']
    else:
        sys.stdout.write("Please specify an antibiotic e.g '-antibiotic Gentamicin'")
        sys.exit()
    # Optional date window: both bounds must be supplied for it to apply.
    if 'start_date' in myargs.keys() and 'end_date' in myargs.keys():
        start_date = myargs['start_date']
        end_date = myargs['end_date']
    else:
        start_date = None
        end_date = None
    # Date-bounded runs get their own directory so results are not mixed.
    if start_date is not None and end_date is not None:
        save_path = '/home/rossco/Documents/web_projects/microbiology_data_portal/user_data/{}/{}_{}_{}/'.format(user_id, bug, start_date, end_date)
    else:
        save_path = '/home/rossco/Documents/web_projects/microbiology_data_portal/user_data/{}/{}/'.format(user_id, bug)
    pickle_file = "{}{}.pickle".format(save_path, bug)
    if os.path.exists(save_path):
        # Organism data already extracted; bail out silently if this
        # antibiotic has been analysed before, otherwise analyse it.
        if os.path.exists('{}{}/'.format(save_path, drug)):
            sys.exit()
        else:
            mkdir_p("{}{}/".format(save_path, drug))
            mkdir_p("{}{}/figures/".format(save_path, drug))
            run_process_data(myargs, save_path, pickle_file, start_date, end_date)
    else:
        # First run for this organism: extract the MIC data from MongoDB,
        # pickle it, then run the analysis.
        mkdir_p(save_path)
        mkdir_p("{}{}/".format(save_path, drug))
        mkdir_p("{}{}/figures/".format(save_path, drug))
        client = pymongo.MongoClient()
        extract = ExtractData(db_name=dbname, mongo_client=client)
        bug_data = extract.get_mic_data(organism=bug)
        file_name = '{}.pickle'.format(bug)
        extract.to_pickle(mic_data=bug_data, path=save_path, filename=file_name)
        run_process_data(myargs, save_path, pickle_file, start_date, end_date)
    sys.exit()
| [
"[email protected]"
] | |
974ee489d6a9c2f187bc44cac0470cd633d1623c | dd673cc90672dc272e866c225a17442a0ac18123 | /Lesson02/easy.py | 259b74ac74cc0550bb5f95f3b64cf3ef91d66e33 | [] | no_license | sigma7i/PythonLessons | 6069019b97701ff3bcb3df23de607ea0a7a696fe | fe216ea9c5ea8cd998ee6b5a2b661eb385411d95 | refs/heads/master | 2021-01-24T12:12:06.780923 | 2018-03-21T10:47:43 | 2018-03-21T10:47:43 | 123,123,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | #задача 1
list_of_fruits = ["яблоко", "банан", "киви", "арбуз"]
counter = 1
for fruit in list_of_fruits:
print(f"{counter}.{fruit}")
counter +=1
#задача 2
list1 = ['a', 'b', 'c', 'd', 'e']
list2 = ['a', 'z', 'x', 'y', 'e']
difference = set(list1) - set(list2)
print(difference)
# задача 3
integer_list = range(1,11)
new_list = []
for i in integer_list:
if i % 2 == 0:
new_list.append(i / 4)
else:
new_list.append(i * 2)
print(new_list)
| [
"[email protected]"
] | |
7702bf4de1212f401127beb699b460c50a1bdb54 | 1f187ab1fea83acec04fed50aeb4e406670124cc | /django_events/events/views.py | fb8851436aa8716ec41cee1b84cb60f8ae118555 | [
"MIT"
] | permissive | chrisBrookes93/django-events-management | b231e4f7ff275a0910640e8138c06256b525f1fe | 93886448a7bb85c8758324977ff67bcacc80bbec | refs/heads/master | 2022-11-14T03:04:13.807371 | 2020-07-07T23:11:18 | 2020-07-07T23:11:18 | 275,236,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | import logging
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import View, TemplateView, UpdateView, CreateView
from django.http import HttpResponseForbidden
from .models import Event
from .forms import EventForm
logger = logging.getLogger(__name__)
class EventCreate(LoginRequiredMixin, CreateView):
    """
    View to create an event.

    Requires an authenticated user (LoginRequiredMixin) and renders a model
    form for the listed Event fields; the organiser is filled in server-side.
    """
    model = Event
    # Fields exposed on the creation form; 'organiser' is set in form_valid.
    fields = ['title', 'date_time', 'description']
    template_name = 'events/create_event.html'

    def form_valid(self, form):
        """
        Patch in the organiser to resolve the foreign key.

        The logged-in user is recorded as the event's organiser before the
        parent class saves the instance.

        :param form: form submitted
        """
        form.instance.organiser = self.request.user
        return super(EventCreate, self).form_valid(form)
class EventUpdate(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """
    View to edit an event.

    Only the event's organiser passes test_func; everyone else receives
    HTTP 403 via handle_no_permission.
    """
    template_name = 'events/edit_event.html'
    form_class = EventForm
    model = Event

    def test_func(self):
        """
        Permissions check to make sure that the current user is the event organiser
        :return: True if the user is the organiser, else False
        :rtype: bool
        """
        event = self.get_object()
        return self.request.user == event.organiser

    def handle_no_permission(self):
        """
        Return a forbidden HTTP error if the current user fails the permissions check.

        NOTE(review): this override is also used by LoginRequiredMixin, so
        unauthenticated users get a 403 here instead of a login redirect -
        confirm that is the intended behaviour.
        """
        return HttpResponseForbidden()
class EventView(LoginRequiredMixin, View):
    """Display a single event; the page itself fetches the event data."""

    def get(self, request, pk, *args, **kwargs):
        """
        Render the basic web page that displays a given event. The actual data is retrieved from the API via AJAX, so
        only the pk is passed into the template.

        :param pk: primary key of the event to display
        """
        return render(request,
                      'events/view_event.html',
                      {'pk': pk})
# Maps the 'filter' query parameter to the QuerySet builder implementing it:
#   'o' -> events organised by the user
#   'a' -> events attended by the user
#   'p' -> events in the past
# Unknown/missing values fall back to Event.objects.get_current_events
# (see EventList.get).
FILTER_FUNC_TABLE = {
    'o': Event.objects.get_events_organised_by_user,
    'a': Event.objects.get_events_attended_by_user,
    'p': Event.objects.get_events_in_past
}
class EventList(LoginRequiredMixin, TemplateView):
    """Paginated list of events, optionally narrowed by a 'filter' query arg."""

    def get(self, request, *args, **kwargs):
        """
        Renders a template to display the list of events (10 per page).
        """
        requested_filter = request.GET.get('filter')
        # Resolve the QuerySet builder; unknown filters fall back to the
        # current-events listing.
        build_queryset = FILTER_FUNC_TABLE.get(requested_filter,
                                               Event.objects.get_current_events)
        paginator = Paginator(build_queryset(request.user), 10)
        requested_page = request.GET.get('page', 1)
        try:
            page_of_events = paginator.page(requested_page)
        except PageNotAnInteger:
            # Non-numeric page arguments show the first page.
            page_of_events = paginator.page(1)
        except EmptyPage:
            # Out-of-range page arguments show the last page.
            page_of_events = paginator.page(paginator.num_pages)

        context = {
            'events': page_of_events,
            'query_filter': requested_filter,
        }
        return render(request, 'events/list_events.html', context)
| [
"[email protected]"
] | |
2d0a0c2e59cc654c8eaad2585ef80daaf1d740ff | 805d464569360ea6732febf6eb1e3be19f2b5f29 | /education_loan.py | e62d5cd5c3251b21778a60e715c22ab439dd4ae3 | [] | no_license | Mathan08/bank_loan | 334841a9a1a08f62eab1bc7fed5f1235ce96fd23 | b0dd9399da4439e21d56054f143affa63571795f | refs/heads/main | 2023-07-13T08:35:07.564352 | 2021-08-17T06:45:59 | 2021-08-17T06:45:59 | 397,138,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py | import mysql.connector
mydb=mysql.connector.connect(host="localhost",user="root",password="Janakiraman123!@#",database="education_loan")
mycursor=mydb.cursor()
class accountdetails:
    """Bank-account identity plus loan-eligibility helpers.

    NOTE(review): all methods use the module-level `mycursor`/`mydb`
    connection rather than instance state, and `user_details_checking`
    is declared without `self` even though it sits inside the class --
    it only works when called as a plain function. Confirm intended design.
    """

    def __init__(self, username, Account_number):
        # Account-holder name and account number as entered by the user.
        self.username = username
        self.Account_number = Account_number

    def user_details_checking(username, Account_number, bank_name):
        """Return the matching row id when name/account/bank all match.

        NOTE(review): missing `self` parameter; raises IndexError when
        the account number is not found (fetchall() returns an empty list).
        """
        mycursor.execute("select * from user_details where account_number like %s", (Account_number,))
        data = mycursor.fetchall()
        if username == data[0][1] and Account_number == data[0][2] and bank_name == data[0][3]:
            return data[0][0]

    def college_details(self):
        """Prompt for academic details, insert them into college_details, return 1."""
        global name  # `name` is reused later by loan_details
        name = input("ENTER YOUR NAME AS PER CERTIFICATE:")
        college_name = input('ENTER YOUR COLLEGE NAME:')
        degree = input("ENTER YOUR DEGREE :")
        CGPA = input("ENTER YOUR CGPA:")
        mycursor.execute("insert into college_details(name,college_name,degree,CGPA)values(%s,%s,%s,%s);", (name, college_name, degree, CGPA,))
        mydb.commit()
        return 1

    def loan_details(self):
        """Print every loan row whose CGPA threshold the student meets."""
        mycursor.execute("select * from college_details where name like %s", (name,))
        data = mycursor.fetchall()
        cgpa = data[0][4]
        cgpa = int(cgpa)
        mycursor.execute("select * from loan_details;")
        mydata = mycursor.fetchall()
        for i in mydata:
            # Column index 5 presumably holds the minimum CGPA -- TODO confirm schema.
            if cgpa >= int(i[5]):
                print(i)
# --- interactive entry point ---------------------------------------------
username = str(input("ENTER YOUR NAME :"))
Account_number = str(input("ENTER YOUR ACCOUNT NUMBER :"))
bank_name = input("ENTER YOUR BANK NAME :")
IFSC_CODE = input("ENTER IFSC CODE :")  # NOTE(review): collected but never used
password = input("ENTER YOUR PASSWORD :")  # NOTE(review): collected but never used
checked_data = accountdetails(username, Account_number)
# NOTE(review): missing call parentheses -- this binds the method object, so
# `data` is always truthy; presumably meant
# accountdetails.user_details_checking(username, Account_number, bank_name).
data = checked_data.user_details_checking
if data:
    checking = input('ENTER YES IF ALREADY APPLIED FOR LOAN ELSE NO :')
    checking = checking.lower()
    if checking == "yes":
        print("True")
    else:
        # NOTE(review): college_details/loan_details are methods of
        # accountdetails, not free functions -- this branch raises
        # NameError at runtime; verify intended call targets.
        if college_details(username):
            if loan_details(name):
                print(True)
else:
    print('ENTER VALID ACCOUNT NUMBER')
| [
"[email protected]"
] | |
bad121f3a1eeed6c74fe0f00ecc8248920f59431 | cc590780b4ca4edced303e0899c4d753340e22f6 | /login/bulk_data.py | 50558e71e694f78f9c6758c1a70263cbc2def1c6 | [] | no_license | guojiankaipython/- | b66c3f7293c04e67b8a4aee2e568046c6afe8a64 | 73a0ea7a402c9c7f5a1d7c040ee1f750b5d40801 | refs/heads/master | 2021-05-23T17:07:26.820290 | 2020-04-06T08:05:51 | 2020-04-06T08:05:51 | 253,394,467 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # import os
#
# if __name__ == '__main__':
#
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "login.settings")
# import django
# django.setup()
#
#
# from app01 import models
#
# c_list = []
#
# for i in range(521):
#
# obj = models.Customer(
# qq=f'{i+1}{i+2}{i+3}',
# name='刘旭%s'%i,
# course=['PythonFullStack',],
#
# )
# c_list.append(obj)
# models.Customer.objects.bulk_create(c_list) | [
"[email protected]"
] | |
07decb370299fda3f2ff899234b18278db953261 | 575959a55ce2412ac1ecec7d0a75b55a43044d43 | /opy/_regtest/src/core/process_test.py | b5dc459802d8d83829352f7df294d190a96880f1 | [
"Apache-2.0"
] | permissive | afunsten/oil | aa74a94db79ebe43c44d040149ea3b1bfb872bb7 | e52071e10a78157db1e4f0befc439a36ca1cbc01 | refs/heads/master | 2020-04-18T10:13:17.177302 | 2019-01-25T01:08:31 | 2019-01-25T01:08:31 | 167,460,658 | 1 | 0 | Apache-2.0 | 2019-01-25T00:48:20 | 2019-01-25T00:48:20 | null | UTF-8 | Python | false | false | 3,552 | py | #!/usr/bin/env python
"""
process_test.py: Tests for process.py
"""
import os
import unittest
from osh.meta import Id
from core import builtin
from osh.meta import ast
from core import process # module under test
from osh.meta import runtime
from core import util
from core.cmd_exec_test import InitExecutor # helper
Process = process.Process
ExternalThunk = process.ExternalThunk
log = util.log
def Banner(msg):
  """Print msg beneath a 60-dash separator line."""
  print('%s\n%s' % ('-' * 60, msg))
# Single Waiter shared by every test process in this module.
_WAITER = process.Waiter()


def _ExtProc(argv):
  # Build a Process that execs the external command `argv` with an empty env.
  return Process(ExternalThunk(argv, {}))
class ProcessTest(unittest.TestCase):
  """Integration-style tests for Process, Pipeline and FdState.

  These fork real child processes (date, ls, sort, ...), exercising actual
  file descriptors and wait() behaviour rather than mocks.
  """

  def testStdinRedirect(self):
    """Pop() must close the redirected stdin, so each Push rereads line 1."""
    waiter = process.Waiter()
    fd_state = process.FdState()
    PATH = '_tmp/one-two.txt'
    # Write two lines
    with open(PATH, 'w') as f:
      f.write('one\ntwo\n')
    # Should get the first line twice, because Pop() closes it!
    r = runtime.PathRedirect(Id.Redir_Less, 0, PATH)
    fd_state.Push([r], waiter)
    line1 = builtin.ReadLineFromStdin()
    fd_state.Pop()
    fd_state.Push([r], waiter)
    line2 = builtin.ReadLineFromStdin()
    fd_state.Pop()
    # sys.stdin.readline() would erroneously return 'two' because of buffering.
    self.assertEqual('one\n', line1)
    self.assertEqual('one\n', line2)

  def testProcess(self):
    """Run a real external command; check exit status and fd leakage."""
    # 3 fds. Does Python open it? Shell seems to have it too. Maybe it
    # inherits from the shell.
    print('FDS BEFORE', os.listdir('/dev/fd'))
    Banner('date')
    p = _ExtProc(['date'])
    status = p.Run(_WAITER)
    log('date returned %d', status)
    self.assertEqual(0, status)
    Banner('does-not-exist')
    p = _ExtProc(['does-not-exist'])
    print(p.Run(_WAITER))
    # 12 file descriptors open!
    print('FDS AFTER', os.listdir('/dev/fd'))

  def testPipeline(self):
    """Four-stage external pipeline; Run collects every stage's status."""
    print('BEFORE', os.listdir('/dev/fd'))
    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '2']))
    p.Add(_ExtProc(['sort']))
    p.Add(_ExtProc(['uniq', '-c']))
    pipe_status = p.Run(_WAITER)
    log('pipe_status: %s', pipe_status)
    print('AFTER', os.listdir('/dev/fd'))

  def testPipeline2(self):
    """Pipeline mixing external procs and SubProgramThunks built from ASTs."""
    Banner('ls | cut -d . -f 1 | head')
    p = process.Pipeline()
    p.Add(_ExtProc(['ls']))
    p.Add(_ExtProc(['cut', '-d', '.', '-f', '1']))
    p.Add(_ExtProc(['head']))
    print(p.Run(_WAITER))
    ex = InitExecutor()
    # Simulating subshell for each command
    # Hand-build word/command AST nodes for: 'ls', 'head', 'sort --reverse'.
    w1 = ast.CompoundWord()
    w1.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, 'ls')))
    node1 = ast.SimpleCommand()
    node1.words = [w1]
    w2 = ast.CompoundWord()
    w2.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, 'head')))
    node2 = ast.SimpleCommand()
    node2.words = [w2]
    w3 = ast.CompoundWord()
    w3.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, 'sort')))
    w4 = ast.CompoundWord()
    w4.parts.append(ast.LiteralPart(ast.token(Id.Lit_Chars, '--reverse')))
    node3 = ast.SimpleCommand()
    node3.words = [w3, w4]
    p = process.Pipeline()
    p.Add(Process(process.SubProgramThunk(ex, node1)))
    p.Add(Process(process.SubProgramThunk(ex, node2)))
    p.Add(Process(process.SubProgramThunk(ex, node3)))
    print(p.Run(_WAITER))
# TODO: Combine pipelines for other things:
# echo foo 1>&2 | tee stdout.txt
#
# foo=$(ls | head)
#
# foo=$(<<EOF ls | head)
# stdin
# EOF
#
# ls | head &
# Or technically we could fork the whole interpreter for foo|bar|baz and
# capture stdout of that interpreter.
if __name__ == '__main__':
  unittest.main()  # run all ProcessTest cases when executed directly
| [
"[email protected]"
] | |
62d283bb94abee18862867a046831544f6f502da | a8b89eadc948be2d4478702624752157c0657f0c | /keylog.py | 5a19e62ae25a23c2e74596b3249d33696126a8e0 | [] | no_license | ghdic/keylogger | 0820e117491db26d7d6c51bf4fe1706792d405fd | 45b3213a04710a0e2448662529319303f2e5d514 | refs/heads/master | 2023-03-09T05:44:59.986377 | 2021-03-01T09:58:47 | 2021-03-01T09:58:47 | 330,139,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | import time
from pynput.keyboard import Listener
import ctypes
import psutil
import pickle
class Keylogger:
    """Forward key presses and active-window info to `controller` as pickled dicts.

    Payloads with 'info': 1 carry a key press; 'info': 2 carries the
    foreground window's pid / process name / title. Windows-only
    (uses user32.dll through ctypes).
    """

    def __init__(self, controller):
        # `controller` must expose a send(bytes) method (e.g. a socket wrapper).
        self.controller = controller

    def on_press(self, key):
        """pynput callback: serialize and forward every key press."""
        data = pickle.dumps({'info': 1, 'key': key})
        self.controller.send(data)

    def get_title(self):
        """Fetch info about the currently active (foreground) window and send it.

        NOTE(review): this method *sends* the payload and returns None
        (returning early when pid == 0); win_title() below unpacks its
        return value into three variables, which would raise TypeError --
        confirm the intended contract.
        """
        user32 = ctypes.windll.LoadLibrary('user32.dll')
        handle = user32.GetForegroundWindow()  # handle of the active window
        buffer = ctypes.create_unicode_buffer(255)  # allocate the title buffer
        user32.GetWindowTextW(handle, buffer, ctypes.sizeof(buffer))  # copy the title into the buffer
        pid = ctypes.c_ulong()
        user32.GetWindowThreadProcessId(handle, ctypes.byref(pid))  # obtain the pid
        pid = pid.value
        if pid == 0: return
        p = psutil.Process(pid)
        data = pickle.dumps({'info': 2, 'pid': pid, 'pname': p.name(), 'title': buffer.value})
        self.controller.send(data)

    def win_title(self):
        """Poll for foreground-window changes every 0.1 s (never returns)."""
        pid, pname, title = 0, "", ""
        while True:
            time.sleep(0.1)
            new_pid, new_pname, new_title = self.get_title()
            if pid != new_pid:  # process changed
                print("pid change %s", new_pname)
            else:
                if title != new_title:  # title changed
                    print("title change %d [%s]" % (new_pid, new_title))
            pid, pname, title = new_pid, new_pname, new_title

    def listen(self):
        """Block, dispatching key presses to on_press until the listener stops."""
        with Listener(on_press=self.on_press) as listener:
            listener.join()
| [
"[email protected]"
] | |
9fc475508ae898d890fc912806d1b63456174705 | 3825940585ae0af37c5d1c621db65e027058b3ec | /python/python_fundamentals/Functions Intermediate I.py | bad95566007c5c36f4061bfc2a552db189fc1a5f | [] | no_license | HalaAlmulhim/Python- | 4a5fc2a98df3fe46441c5f71d824f9c159a8cac3 | ffffddbcbb21755bc0ccb54b653ccbc0e3bf4ed1 | refs/heads/main | 2023-06-22T19:07:11.512270 | 2021-07-12T13:53:37 | 2021-07-12T13:53:37 | 385,262,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | import random
def randInt(min=0, max=100):
    """Return a uniformly distributed random integer in [min, max].

    Bug fix: the original `round(random.random() * (max - min) + min)` gave
    the two endpoints only half the probability of interior values;
    random.randint draws uniformly over the inclusive range.
    """
    return random.randint(min, max)
print(random.random())  # prints a random float in [0.0, 1.0) -- not an integer
print(random.random() * 50)  # prints a random float in [0.0, 50.0)
print(randInt(min=50))  # should print a random integer between 50 and 100
print(randInt(min=50, max=500))  # should print a random integer between 50 and 500
| [
"[email protected]"
] | |
bda8a2cc40c609fd85cabbaae0e183194330c3e7 | 64794b57c199e0cb9bd7d15c1791d8698652aa81 | /venv/bin/naked | 6ccc40dc2a4708d8f2e6bef125031fda49d08377 | [] | no_license | rid3thespiral/pyethwallet | 7897905a5330936e50f14b4d1056b99ee7fb4877 | 81764b58758f9b7933165bf85036a83b8e226d9a | refs/heads/master | 2021-04-11T10:52:40.813837 | 2020-03-21T16:13:10 | 2020-03-21T16:13:10 | 249,013,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | #!/Users/davide/PycharmProjects/pyethwallet/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from Naked.app import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0]
    # before delegating to Naked's CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | ||
a0d65f2aa3c2bd0f3e9352e624e89f8a134efd89 | 5d39a4c647e09a7af941786d06ff40196afc6d16 | /binance-stream-receiver/DAL/Tick.py | 0b8f833ac94369973d22e33614c207d3e07d8e75 | [] | no_license | adriancr/btc-realtime-data | 5c98b82f45a35aa74ab1067941070ae4d5a10afd | ef2f514136a8ae67852350ff1af8647f13bf382c | refs/heads/main | 2023-06-06T21:18:51.110082 | 2021-07-07T01:22:51 | 2021-07-07T01:22:51 | 379,719,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,809 | py | # from pymongo import MongoClient
import pymongo
import os
class Tick:
    """Persists one OHLCV candle ("tick") plus derived metrics into MongoDB."""

    # Connection settings come from the container environment.
    DB_HOST = os.environ['MONGODB_HOSTNAME']
    DB_USER = os.environ['MONGODB_USERNAME']
    DB_PSW = os.environ['MONGODB_PASSWORD']
    DB_PORT = 27017
    DB_NAME = os.environ['MONGODB_DATABASE']
    DB_COLLECTION_NAME = 'Ticks'

    # Accepted input keys and how each value is coerced before storage.
    DB_COLS = {
        'open': "float",
        'close': "float",
        'high': "float",
        'low': "float",
        'volume': "float",
        'ts': "timestamp",
        'trades_count': "float",
        'asset_volume': "float",
        'quote_asset_volume': "float",
        'tb_base_asset_volumne': "float",
        'tb_quote_asset_volume': "float"
    }

    # Class-level placeholders, rebound per instance in __init__/initialize.
    MongoDBClient = False
    DBConn = False
    db = False
    dbData = False
    previousClose = 0

    def __init__(self):
        """Open the MongoDB connection and bind the Ticks collection."""
        # print(self.DB_HOST, self.DB_USER, self.DB_PSW, self.DB_PORT, self.DB_NAME)
        # Start Mongo client
        self.MongoDBClient = pymongo.MongoClient(self.DB_HOST, self.DB_PORT)
        # Connect to DB
        self.DBConn = self.MongoDBClient[self.DB_NAME]
        # Authenticate to DB
        # NOTE(review): Database.authenticate() was removed in pymongo 4.x;
        # this code requires pymongo < 4.
        self.DBConn.authenticate(self.DB_USER, self.DB_PSW)
        # Get collection (table) specific DB access object
        self.db = self.DBConn.Ticks
        print(self.DBConn, self.db)

    def initialize(self, item):
        """Copy whitelisted keys from `item` into dbData, coercing types."""
        self.dbData = {}
        for key in self.DB_COLS:
            if key in item:
                dataType = self.DB_COLS[key]
                if(dataType == 'float'):
                    self.dbData[key] = float(item[key])
                    pass
                elif(dataType == 'timestamp'):
                    self.dbData[key] = item[key]
                    pass

    def setPreviousClose(self, closePrice):
        """Remember the previous candle's close (used by extraCalcs)."""
        self.previousClose = float(closePrice)

    def preliminaryCalculations(self):
        """Derive spread/average metrics from the raw OHLC values.

        NOTE(review): divides by `high` and by `open` -- raises
        ZeroDivisionError if either value is 0.
        """
        # High - Low Amount
        self.dbData['hl_difference'] = self.dbData['high'] - self.dbData['low']
        # High - Low Percentage
        self.dbData['hl_difference_percentage'] = self.dbData['hl_difference'] * 100 / self.dbData['high']
        # Open - Close Amount
        self.dbData['oc_difference'] = self.dbData['open'] - self.dbData['close']
        # Open - Close Amount Percentage
        self.dbData['oc_difference_percentage'] = self.dbData['oc_difference'] * 100 / self.dbData['open']
        # Average open / close
        self.dbData['oc_average'] = (self.dbData['open'] + self.dbData['close']) / 2

    def extraCalcs(self):
        """Compute close-to-close change ("velocity") when history exists."""
        # Velocity
        if self.previousClose > 0:
            self.dbData['velocity'] = self.previousClose - self.dbData['close']
            print(f'V: ${self.dbData["velocity"]}')

    def save(self):
        """Insert the assembled document into the Ticks collection."""
        # print(self.dbData)
        # NOTE(review): Collection.insert() is deprecated; insert_one() is
        # the modern equivalent.
        self.db.insert(self.dbData)
"[email protected]"
] | |
019b37e958ac81bfffbd96be019f7ec28387189b | e5ae6dc424dc82455a8479b0cdb0a2031ec67fbe | /dashboard/migrations/0028_auto_20210627_1648.py | 67b49d3905acb2cfccf5c78251e833d08c29f212 | [] | no_license | NumanIbnMazid/auspicious | 93851c47e2bc5c48386d5058b9bac9016191c8c9 | 6c34c51acae0c72fcd55645ff428f00db4b5c467 | refs/heads/master | 2023-06-17T10:08:32.907112 | 2021-07-11T08:47:47 | 2021-07-11T08:47:47 | 361,688,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | # Generated by Django 3.2 on 2021-06-27 16:48
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Convert job.description and project.scope to CKEditor upload fields."""

    dependencies = [
        ('dashboard', '0027_alter_news_description'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='description',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='project',
            name='scope',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True),
        ),
    ]
| [
"[email protected]"
] | |
cc2797a54a4803d037a2651128ce94b755dbb47d | 1d71724e7d097ce1e82afd492e48af4a29925f6c | /HM_ip.py | 2ba600edaa7f06f56dd697a015adced2ecd6bb01 | [] | no_license | 13285707631/safety_automation | 9e93c05b089ec699a80318ecd08b717383227d49 | f0a9c96a69bbf9d5bcccfe866cf5e292ea8d6072 | refs/heads/main | 2023-04-22T02:37:20.595312 | 2021-05-12T03:44:16 | 2021-05-12T03:44:16 | 366,579,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import time
import xlrd
import re
import xlwt
from xlutils.copy import copy
from selenium import webdriver
# Load the IP/hash list from the second sheet of md5.xls and prepare a
# writable copy so verdicts can be written back into 'sheet2'.
data = xlrd.open_workbook('md5.xls')
sheet1 = data.sheet_by_index(1)
data1 = copy(data)
sheet2 = data1.get_sheet('sheet2')
md5_list = sheet1.col_values(0)
# Reuse an existing Chrome profile so the qihoo threat-intel site session
# (login/cookies) is already established.
option2 = webdriver.ChromeOptions()
option2.add_argument(r'user-data-dir=C:\Users\zouyingtong\AppData\Local\Google\Chrome\User Data1')
driver2 = webdriver.Chrome(options=option2)
count=0
for it in md5_list:
    count=count+1
    # Open the reputation page for this entry.
    driver2.get('https://sc.b.qihoo.net/ip/'+it)
    time.sleep(3)  # crude wait for the page (and verdict logo) to load
    div=driver2.find_element_by_class_name('logo-box')
    img=div.find_element_by_tag_name('img')
    text=img.get_attribute('src')
    #print(text)
    # The verdict is encoded by which logo image the page shows; map the
    # image URL to a verdict label written into column B (0-indexed col 1).
    if(text=='https://s7.qhres2.com/static/690caa1e021e7040.svg'):
        print("安全")
        # print("1")
        sheet2.write(count-1,1,"安全")
    elif(text=='https://s7.qhres2.com/static/f33b5998c9d32e14.svg'):
        print('危险')
        sheet2.write(count - 1, 1, "危险")
    elif (text == 'https://s7.qhres2.com/static/f2b955340cb1a4da.svg'):
        print('普通')
        sheet2.write(count - 1, 1, "普通")
    elif (text == 'https://s6.qhres2.com/static/bda7ac71808f75d7.svg'):
        print('未知')
        sheet2.write(count - 1, 1, "未知")
data1.save('D:/pachong/HM_ip.xls')
"[email protected]"
] | |
9972c126e935fc0202bd7b53c071493d7ca7de1f | f4c37b6eeaf85a995050b3a6ec83c04f774d53b8 | /Lesson_6/Task3.py | c799bf5906fc0682249e361299d5f2cc58a48363 | [] | no_license | nsimonova/GeekBrains | fee907d7d9ad1916f715721a60c16055853ab2c3 | 17a95cd4ce70bc33d7bfac7ea8edefd1f032fa1d | refs/heads/master | 2023-01-31T18:29:18.431511 | 2020-12-16T18:33:49 | 2020-12-16T18:33:49 | 314,333,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,931 | py | # Реализовать базовый класс Worker (работник), в котором определить атрибуты: name, surname, position (должность), income (доход).
# Последний атрибут должен быть защищенным и ссылаться на словарь, содержащий элементы: оклад и премия,
# например, {"wage": wage, "bonus": bonus}. Создать класс Position (должность) на базе класса Worker.
# В классе Position реализовать методы получения полного имени сотрудника (get_full_name) и дохода с учетом премии (get_total_income).
# Проверить работу примера на реальных данных (создать экземпляры класса Position, передать данные, проверить значения
# атрибутов, вызвать методы экземпляров).
class Worker:
    """Base employee record; income breakdown is kept protected."""

    def __init__(self, name, surname, position, wage, bonus):
        """Store identity, job title and the wage/bonus income dict."""
        self.name, self.surname, self.position = name, surname, position
        self._income = dict(wage=wage, bonus=bonus)
class Position(Worker):
    """Worker subclass exposing derived full-name and total-income helpers."""

    def get_full_name(self):
        """Return '<surname> <name>'."""
        return " ".join((self.surname, self.name))

    def get_total_income(self):
        """Return wage plus bonus."""
        return sum(self._income[part] for part in ("wage", "bonus"))
# Demo: create two Position instances and exercise attributes and methods.
manager = Position('Иван', 'Котов', 'менеджер', 40000, 10000)
print(manager.name)
print(manager.surname)
print(manager.position)
print(manager._income)
print(manager.get_full_name())
print(manager.get_total_income())

economist = Position('Инна', 'Носова', 'экономист', 30000, 5000)
print(economist.name)
print(economist.surname)
print(economist.position)
print(economist._income)
print(economist.get_full_name())
print(economist.get_total_income())
| [
"[email protected]"
] | |
670b78f9e4287cb9ad30cfc92198c09f4a3dfb34 | 3a3220cc2961f39fd0bdb86dc1c045446fef1122 | /apps/editors/tests/test_helpers.py | de57f41bf9803c6304d167d40d924e9b1496e28a | [] | no_license | zuzelvp/zamboni | 03a445c8c01a868d45e56015d1973822d807b873 | d900fde75ed69b55a19dbffea612d9ccae04bb82 | refs/heads/master | 2021-01-18T17:07:05.012750 | 2011-01-21T19:54:00 | 2011-01-21T23:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,024 | py | # -*- coding: utf8 -*-
import os
from mock import Mock
from nose.tools import eq_
from pyquery import PyQuery as pq
import test_utils
import amo
from amo.urlresolvers import reverse
from editors.helpers import ViewEditorQueueTable
class TestViewEditorQueueTable(test_utils.TestCase):
    """Unit tests for ViewEditorQueueTable's per-column render_* helpers.

    Each test builds a Mock row with only the attributes the renderer
    reads, then checks the generated HTML/text.
    """

    def setUp(self):
        super(TestViewEditorQueueTable, self).setUp()
        qs = Mock()
        self.table = ViewEditorQueueTable(qs)

    def test_addon_name(self):
        """Addon name renders as a review link carrying the page offset."""
        row = Mock()
        page = Mock()
        page.start_index = Mock()
        page.start_index.return_value = 1
        row.addon_name = 'フォクすけといっしょ 0.12'.decode('utf8')
        row.version_id = 1234
        self.table.set_page(page)
        a = pq(self.table.render_addon_name(row))
        eq_(a.attr('href'),
            reverse('editors.review', args=[row.version_id]) + '?num=1')
        eq_(a.text(), row.addon_name)

    def test_addon_type_id(self):
        """Type id is rendered as its human-readable name."""
        row = Mock()
        row.addon_type_id = amo.ADDON_THEME
        eq_(unicode(self.table.render_addon_type_id(row)), u'Theme')

    def test_additional_info_site_specific(self):
        """Site-specific add-ons are labelled as such."""
        row = Mock()
        row.is_site_specific = True
        eq_(self.table.render_additional_info(row), u'Site Specific')

    def test_additional_info_for_platform(self):
        """A single-platform add-on shows '<Platform> only'."""
        row = Mock()
        row.is_site_specific = False
        row.platform_id = amo.PLATFORM_LINUX.id
        eq_(self.table.render_additional_info(row), u'Linux only')

    def test_additional_info_for_all_platforms(self):
        """All-platform add-ons get no additional info."""
        row = Mock()
        row.is_site_specific = False
        row.platform_id = amo.PLATFORM_ALL.id
        eq_(self.table.render_additional_info(row), u'')

    def test_applications(self):
        """Each supported application renders as its sprite icon div."""
        row = Mock()
        row.applications = ','.join([str(amo.FIREFOX.id),
                                     str(amo.THUNDERBIRD.id)])
        doc = pq(self.table.render_applications(row))
        eq_(sorted(a.attrib['class'] for a in doc('div div')),
            ['app-icon ed-sprite-firefox', 'app-icon ed-sprite-thunderbird'])

    def test_waiting_time_in_days(self):
        """Multi-day waits are shown in days (plural)."""
        row = Mock()
        row.days_since_created = 10
        row.hours_since_created = 10 * 24
        eq_(self.table.render_days_since_created(row), u'10 days')

    def test_waiting_time_one_day(self):
        """A one-day wait uses the singular form."""
        row = Mock()
        row.days_since_created = 1
        row.hours_since_created = 24
        eq_(self.table.render_days_since_created(row), u'1 day')

    def test_waiting_time_in_hours(self):
        """Sub-day waits are shown in hours."""
        row = Mock()
        row.days_since_created = 0
        row.hours_since_created = 22
        eq_(self.table.render_days_since_created(row), u'22 hours')

    def test_flags_admin_review(self):
        """Admin-review flag renders the admin sprite icon."""
        row = Mock()
        row.admin_review = True
        doc = pq(self.table.render_flags(row))
        eq_(doc('div').attr('class'), 'app-icon ed-sprite-admin-review')

    def test_no_flags(self):
        """No flags -> empty string."""
        row = Mock()
        row.admin_review = False
        eq_(self.table.render_flags(row), '')
| [
"[email protected]"
] | |
59f07782e0d05489a26c8339eafa084c234b9684 | aec4c4d6a6dcbb53fad5ee6bd8d723ea46f50ed6 | /list_advanced_exercise/01_which_are_in_2.py | e1f787a1ef97a16c5f2898b0351a7b1fe961d6d3 | [] | no_license | Bukacha88/softuni_python_fundamentals | 46efaff70dbb141e9a3dff9cb0cb8fff7001ef59 | 6ef6389c957015cfa6277353710509bf7d8e4d39 | refs/heads/master | 2023-02-17T19:30:16.249434 | 2021-01-13T09:31:04 | 2021-01-13T09:31:04 | 298,317,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | substrings = input().split(", ")
strings = input()
# Keep only the substrings that occur somewhere in the second input string.
print([substring for substring in substrings if substring in strings])
| [
"[email protected]"
] | |
0021a0c6a9b202c958bfd57efd3630e61ab29301 | adc3abab7ce04ba10b7a0606475f7ce7e275e9eb | /leetcode.37. 解数独.py | 6eab14963905ba0deb1087cc612c3b91e0d7c641 | [] | no_license | NonCover/- | ca4e994938318d9fccea173e07679e6029b653ef | 0cc970aaa03aa9300319a1e39e052e4beeec6698 | refs/heads/master | 2020-12-12T06:26:00.019090 | 2020-06-20T04:40:56 | 2020-06-20T04:40:56 | 234,063,173 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | '''
author: noc
time: 2020年4月10日
url: https://leetcode-cn.com/problems/sudoku-solver/
'''
'''
递归每一种可能性,当前的值无法满足条件的时候就回溯,修改之前的值,直到每一个值符合条件的时候就返回True。
当我插入board[i][j]的值时,前提要求这个位置为空,我们依次插入["1","2","3","4","5","6","7","8","9"],如果某一个值成立的话,就填下一个空,
当下一个空,无论填哪一个值都无法满足条件,就说明我们即将回溯到之前的空修改,修改后依然无法满足,那就继续回溯,直到满足条件
由于数据量是固定的,所以计算机最对会计算 9!的9次方这么多次,因为上一个空填入的值会影响下一个。
'''
class Solution:
    """In-place Sudoku solver using depth-first backtracking."""

    def solveSudoku(self, board):
        """
        Do not return anything, modify board in-place instead.
        """
        self.recur(board)

    def recur(self, board):
        """Fill the first empty cell with each legal digit; backtrack on dead ends."""
        for r in range(9):
            for c in range(9):
                if board[r][c] != '.':
                    continue
                for candidate in "123456789":
                    if not self.row_col_constraint(r, c, board, candidate):
                        continue
                    board[r][c] = candidate
                    if self.recur(board):
                        return True
                    board[r][c] = '.'  # undo and try the next digit
                return False  # no digit fits this cell -> backtrack
        return True  # no empty cell left: solved

    def row_col_constraint(self, row, col, board, val):
        """Return True when `val` appears in neither the row, the column, nor the 3x3 box."""
        if val in board[row]:
            return False
        if any(board[i][col] == val for i in range(9)):
            return False
        top, left = row - row % 3, col - col % 3
        for i in range(top, top + 3):
            if val in board[i][left:left + 3]:
                return False
        return True
if __name__ == '__main__':
    # Example puzzle from the LeetCode problem; solve it and print the grid.
    board = [["5","3",".",".","7",".",".",".","."],
             ["6",".",".","1","9","5",".",".","."],
             [".","9","8",".",".",".",".","6","."],
             ["8",".",".",".","6",".",".",".","3"],
             ["4",".",".","8",".","3",".",".","1"],
             ["7",".",".",".","2",".",".",".","6"],
             [".","6",".",".",".",".","2","8","."],
             [".",".",".","4","1","9",".",".","5"],
             [".",".",".",".","8",".",".","7","9"]]
    Solution().solveSudoku(board)
    for b in board:
        print(b)
"[email protected]"
] | |
d4537bb78fde457f220b6842619b4619f7525dfc | cb0c6a71c47b78cf0511fafc8e31deafcd77dbf2 | /capitals.py | 28b1c55e4cf8b43fc0c956d4c46910cd4e274a7a | [] | no_license | kaismithereens/realpython | 123ef939c1822508980ba8a042d5d1ed442c2e75 | 7c57efbe12a3eaa4e8245b72486a77fc2cf78c74 | refs/heads/master | 2021-04-27T08:50:01.159261 | 2018-11-03T10:30:40 | 2018-11-03T10:30:40 | 122,499,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | capitals_dict = {
    # U.S. state -> capital city (partial list, states A-G).
    "Alabama": "Montgomery",
    "Alaska": "Juneau",
    "Arizona": "Phoenix",
    "Arkansas": "Little Rock",
    "California": "Sacramento",
    "Colorado": "Denver",
    "Connecticut": "Hartford",
    "Delaware": "Dover",
    "Florida": "Tallahassee",
    "Georgia": "Atlanta",
    }
| [
"[email protected]"
] | |
e219220037b9c26b07219a3a6cdc3dcfc8a7cf92 | 4128ff7064d0e5442d08e30e7878f249311bb3e3 | /jubakit/test/stub.py | 6c6d4673b64ae18513470a41f8d0c4e1bb693ea8 | [
"MIT"
] | permissive | vishalbelsare/jubakit | ce4d53ccd61b8569de61ba55f02bd451df3d4b73 | f6252ba627ce4e2e42eb9aafaaf05c882bc1c678 | refs/heads/master | 2021-06-10T01:21:27.557264 | 2019-05-16T05:19:40 | 2019-05-16T05:19:40 | 129,505,666 | 0 | 0 | MIT | 2021-04-04T15:07:16 | 2018-04-14T10:11:15 | Python | UTF-8 | Python | false | false | 1,295 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
import jubatus
from jubakit.base import BaseLoader, BaseSchema, BaseDataset, BaseService, BaseConfig, GenericConfig
class StubLoader(BaseLoader):
  """Loader yielding three fixed rows; also yields None to test filtering."""

  DATA = [1,2,3]
  def rows(self):
    for d in self.DATA:
      yield {'v': d}
    yield None # None should be ignored by Dataset
class StubInfiniteLoader(BaseLoader):
  """Loader that never terminates; used to test infinite-stream handling."""

  def is_infinite(self):
    return True
  def rows(self):
    # Yields {'v': 1}, {'v': 2}, ... forever.
    d = 1
    while True:
      yield {'v': d}
      d += 1
class StubService(BaseService):
  """Minimal service stub named '_stub' backed by the generic Jubatus client."""

  @classmethod
  def name(cls):
    return '_stub'
  @classmethod
  def _client_class(cls):
    return jubatus.common.ClientBase
class StubConfig(BaseConfig):
  """Config stub whose default configuration is just {'test': 1.0}."""

  @classmethod
  def _default(cls, cfg):
    cfg['test'] = 1.0
class StubGenericConfig(GenericConfig):
  """GenericConfig stub with two methods: 'test' (no params) and 'test2'."""

  @classmethod
  def methods(cls):
    return ['test', 'test2']
  @classmethod
  def _default_method(cls):
    return 'test'
  @classmethod
  def _default_parameter(cls, method):
    # 'test' takes no parameters; 'test2' has two defaulted ones.
    if method == 'test':
      return None
    elif method == 'test2':
      return {'param1': 1, 'param2': 2}
class StubGenericConfigWithoutConverter(StubGenericConfig):
  """Variant of StubGenericConfig with no default converter section."""

  @classmethod
  def _default_converter(cls):
    return None
| [
"[email protected]"
] | |
e9518d10a5e6eb07e2fad892a857807646a924e5 | 870f94dd167d5aa11f66979782af2f35f7c3ed34 | /codecademy/battleship6.py | d9ec03ad63a43d7fee8b171d9ee7d80f6cd94290 | [] | no_license | alblaszczyk/python | 52f993844e681fbb455b924cf733923f3b377177 | 02504c066d0f2d1b8cca08a17f25f383df6247ad | refs/heads/master | 2021-09-20T00:57:12.299353 | 2018-08-01T20:23:44 | 2018-08-01T20:23:44 | 108,529,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | board = []
# Build a 5x5 grid of 'O' markers.
for i in range(5):
    board.append(['O'] * 5)

def print_board(board_in):
    """Print each row of board_in space-separated.

    Bug fix: the original iterated the global `board` and ignored its
    `board_in` parameter; the parenthesized print works on both Python 2
    and 3 for a single argument.
    """
    for row in board_in:
        print(" ".join(row))

print_board(board)
| [
"[email protected]"
] | |
34024dbd22ffbb9e00c862b4b6b7328d4d2a250a | 49ab3747f5376c6346285179b03bffb4883b595d | /setup.py | 53b3b3a7d572827f946d45c32cb7c5f97cb73559 | [] | no_license | henryiii/blocklint | 529d1bc4d8b2b20a92944d30421248e6b5a718a9 | 9aedb4335fd7a9d1118c3848d0c9b4adb9ae7397 | refs/heads/master | 2022-11-29T11:25:55.305289 | 2020-08-07T18:19:12 | 2020-08-07T18:19:12 | 285,908,733 | 0 | 0 | null | 2020-08-07T19:46:09 | 2020-08-07T19:46:07 | null | UTF-8 | Python | false | false | 248 | py | from setuptools import setup
setup(
name='blocklint',
description='Lint for blocklisted words',
python_requires='>=2.7',
entry_points={
'console_scripts': [
'blocklint=blocklint.main:main',
],
},
)
| [
"[email protected]"
] | |
78b43379948036f344af666a9ad009ee67a3f4a7 | 7a4785af3fd65693e7816d40506f2e31440105b0 | /longest_substring_without_repeating_characters/solution.py | 2862a2d4a9d97077c3fbd35f540c8992e91d59a5 | [] | no_license | KevinBaileyCrum/leet_n_skeet | f595cd79a7e67beac14ec0420aa29d937cce18bc | bfdc43018a1da020aee2cc1629902f7d989ac196 | refs/heads/master | 2021-07-08T13:57:08.998841 | 2020-08-04T18:15:30 | 2020-08-04T18:15:30 | 172,585,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | # Kevin Crum
# finds the longest substring without repeating characters
# https://leetcode.com/problems/longest-substring-without-repeating-characters/
# input data from infile presented in a comma seperated list of form
# string, expected_integer_length_substring
def lengthOfLongestSubstring(s: 'str') -> 'int':
    """Length of the longest substring of `s` with no repeated characters.

    Classic O(len(s)) sliding window: `last_seen` remembers the most recent
    index of every character; when a repeat lands inside the current window,
    the window start jumps just past the previous occurrence.
    """
    last_seen = {}
    best = 0
    window_start = 0
    for idx, ch in enumerate(s):
        prev = last_seen.get(ch)
        if prev is not None and prev >= window_start:
            # `ch` repeats inside the window: shrink from the left.
            window_start = prev + 1
        last_seen[ch] = idx
        width = idx - window_start + 1
        if width > best:
            best = width
    return best
if __name__ == "__main__":
    # Each line of infile.in: "<string>,<expected_length>".
    with open ('infile.in') as fin:
        for line in fin:
            string, expected = line.split(',')
            expected = expected.strip()
            solution = lengthOfLongestSubstring(string)
            if solution == int(expected.strip()):
                print('success')
            else:
                print('failure\n %s'%(string))
                print('\texpected: %s\n\tsolution: %s'%(expected, solution))
| [
"[email protected]"
] | |
74999f5e0be6261cf5e43d3d3da7c5c4171b16e5 | 572cfd22ed2431912c902bd8faf6a3c0c4a511bb | /pig_latin.py | 1adfedd5a9eae674195676dee85b10ab2e986328 | [] | no_license | gambitvixi/Python | d0b3e8af64f5c7ae36c2ff432b519998a0ea75d6 | 5fbddb1e21d83ccdd6301e8d730d2cd87f363fcf | refs/heads/master | 2021-10-08T06:34:42.924194 | 2018-12-09T15:08:04 | 2018-12-09T15:08:04 | 160,976,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | word = str(input("Please input the word to be translated: ")).lower()
samoglasnici = ["a", "e", "u", "i", "o"]
pig_latin = ""
sufiks = ""
mickete = False
for slovo in word:
for glas in samoglasnici:
if slovo == glas:
broj = word.index(glas)
mickete = True
break
if mickete == True:
break
print(broj)
| [
"[email protected]"
] | |
98d87b958fdbce0ac2c183e181145b0b53a88560 | 8ab09de71584752ee3d9d16c48fd2e812477b924 | /items/migrations/0013_item_neues feld.py | 97ad730f24ac745a7ebbd48c2177fce1b81c5d6b | [] | no_license | KaiPonel/CatalogWS | c81a7e8e1f43074990bdc10eda4474793f027a49 | 63c744073b5131fe86bdc9a3789aab4b08f44dc4 | refs/heads/master | 2023-04-07T22:43:25.750236 | 2021-04-21T17:35:12 | 2021-04-21T17:35:12 | 360,243,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Generated by Django 2.1.5 on 2021-03-25 13:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Item.'Neues Feld' CharField.

    NOTE(review): the field name contains a space -- presumably intentional
    for this exercise, but unusual for a Django model field.
    """

    dependencies = [
        ('items', '0012_remove_item_neuesfeld'),
    ]

    operations = [
        migrations.AddField(
            model_name='item',
            name='Neues Feld',
            field=models.CharField(default='null', max_length=10),
        ),
    ]
| [
"[email protected]"
] | |
86676b1cd142281ea4f4a88b62f32a919fa49e1c | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_9_v3/I_w_Mgt_to_Wxyz_focus_to_Cxy_focus_series_Pyramid/pyramid_3side/bce_s001_tv_s0p1_L4/step10_b1_train.py | 484deffac40d6f97e12e293ee566e739a6a6ad9c | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,892 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                             ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                      ### split the path so we can find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2")                ### locate the kong_model2 level
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])    ### reconstruct the kong_model2 directory
import sys                                                             ### append kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print("      code_exe_path:", code_exe_path)
# print("      code_exe_path_element:", code_exe_path_element)
# print("      kong_layer:", kong_layer)
# print("      kong_model2_dir:", kong_model2_dir)
###############################################################################################################################################################################################################
# When launched with F5 from a different working directory, switch to the folder
# containing step10_b.py automatically so that step10_a.py can be imported!
code_exe_dir = os.path.dirname(code_exe_path)                          ### directory of the currently executing step10_b.py
if(os.getcwd() != code_exe_dir):                                       ### if the cwd is not step10_b.py's folder, switch there
    os.chdir(code_exe_dir)
# print("current_path:", os.getcwd())
###############################################################################################################################################################################################################
### All experiment commands are centralised in this module
from step10_c_exp_command import *
######################################################################################################################
import subprocess as sb
# sb.run(cmd_python_step10_a + [f"ch032_1side_1__2side_1__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_2__2side_1__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_2__2side_2__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_2__2side_2__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_1__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_2__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_2__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_3__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_3__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_3__2side_3__3side_3.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_1__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_2__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_2__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_3__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_3__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_3__3side_3.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_3.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_4__2side_4__3side_4.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_1__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_2__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_2__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_3__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_3__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_3__3side_3.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_3.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_4__3side_4.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_1.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_2.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_3.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_4.{run}"])
# sb.run(cmd_python_step10_a + [f"ch032_1side_5__2side_5__3side_5.{run}"])
| [
"[email protected]"
] | |
60593c1e4021b0b3d0a887f3e857b9f644dece6f | 6d11e5bf852ab78df53c9894b725e34edbf24c03 | /upgrade_tasks/Helpers/Test/cmd_test.py | 617aed546df7bcae4fac1acd38bca5187d3fa83c | [] | no_license | abodup/Avamar-Upgrade-Tasks | cc7996c12a8320b77cce3252fe72c08e28b007bc | 029b5242e9b3c72dfcc5eb190bd1c63919954505 | refs/heads/master | 2021-07-09T15:58:57.122194 | 2017-10-11T10:09:34 | 2017-10-11T10:09:34 | 104,316,622 | 1 | 2 | null | 2017-10-06T08:30:07 | 2017-09-21T07:36:59 | Python | UTF-8 | Python | false | false | 703 | py | #!/usr/bin/python
import os
from datetime import datetime
############### Start cmd() ###############
def cmd(command):
    """Log *command*, then execute it through the shell, tee-ing all output
    (stdout and stderr) into upgrade_tasks.log."""
    printLog("Command: %s" % command)
    shell_line = "%s 2>&1 | tee -a upgrade_tasks.log" % command
    os.system(shell_line)
############### End cmd() ####################
############## Start printLog() ##############
def printLog(message):
    """Append a timestamped *message* line to ./upgrade_tasks.log.

    Improvement: the original opened/closed the file manually, so the handle
    leaked if ``write`` raised; a ``with`` block guarantees closure.
    """
    with open("./upgrade_tasks.log", "a") as log:
        log.write("%s %s \n" %(localTime(), message))
############## Start printLog() ##############
############### Start localTime() ###############
def localTime():
    """Return the current local time in datetime's default string form."""
    now = datetime.now()
    return str(now)
############### End localTime() ###############
# Make the proactive check script executable (as the admin user) before the
# upgrade-task helpers invoke it.
cmd("sudo -u admin chmod a+x /home/admin/proactive_check/proactive_check.pl")
"[email protected]"
] | |
a96e31de7b7dd8c370234800c07105d2d148bc98 | df2da399fa30beef60714469293d0061670b893f | /flaskblog/tasks.py | 4b0d635d950b79efca54ff8995b43799d803db3d | [] | no_license | JoyceYang2018/project_flaskblog | 00a5a96bb5cf535bb2c0009319730462775f0156 | 8596c76bc3ce7748dcaba6d8d78e0e2e42f21c1f | refs/heads/master | 2022-12-15T19:03:46.168447 | 2018-06-02T08:14:24 | 2018-06-02T08:14:24 | 135,800,134 | 0 | 0 | null | 2022-11-22T02:32:28 | 2018-06-02T08:16:06 | JavaScript | UTF-8 | Python | false | false | 978 | py | #coding:utf-8
import smtplib
import datetime
from models import Reminder
from email.mime.text import MIMEText
from flask_mail import Message
from flaskblog.extensions import flask_celery,mail
@flask_celery.task(
    bind=True,
    ignore_result=True,
    default_retry_delay=300,
    max_retries=5
)
def remind(self,primary_key):
    """Celery task: e-mail the reminder identified by *primary_key*.

    On any failure the task is re-queued (up to 5 retries, 300 s apart).
    """
    # Send a reminder e-mail to the registered user (originally via flask-mail).
    reminder = Reminder.query.get(primary_key)
    msg = MIMEText(reminder.text)
    msg['Subject']='Welcome!'
    # NOTE(review): header key 'FROM' works (header names are case-insensitive)
    # but the conventional spelling is 'From'.
    msg['FROM']='[email protected]'
    msg['To']=reminder.email
    try:
        # NOTE(review): credentials are hard-coded with an empty password, and
        # the envelope sender below may differ from the 'FROM' header — confirm.
        smtp_server = smtplib.SMTP('localhost')
        smtp_server.starttls()
        smtp_server.login('384381523','')
        smtp_server.sendmail('[email protected]',[reminder.email],msg.as_string())
        smtp_server.close()
        return
    except Exception as err:
        # bind=True gives us `self`; ask Celery to re-schedule this task.
        self.retry(exc=err)
def on_reminder_save(mapper, connect, self):
    """SQLAlchemy mapper-event hook: schedule the reminder e-mail for a saved row.

    The (mapper, connection, target) signature follows the SQLAlchemy mapper
    event convention; here ``self`` is the Reminder instance that was saved.
    """
    # BUG FIX: the original passed args=(self.id) — parentheses without a
    # trailing comma do NOT make a tuple, so apply_async received a bare int
    # where a sequence of positional args is required.
    # NOTE(review): `eta=self.data` looks like it should be a datetime field
    # (possibly `self.date`) — confirm against the Reminder model.
    remind.apply_async(args=(self.id,), eta=self.data)
| [
"[email protected]"
] | |
7c4e06d38bcec0ab8cf210950af58101a7b2f52f | 1e987bd8b8be0dc1c139fa6bf92e8229eb51da27 | /util/photolib/photolib-csv.py | 5b03d345efb884102370e0bb6bee17c0c47ce10e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tszdanger/phd | c97091b4f1d7712a836f0c8e3c6f819d53bd0dd5 | aab7f16bd1f3546f81e349fc6e2325fb17beb851 | refs/heads/master | 2023-01-01T00:54:20.136122 | 2020-10-21T18:07:42 | 2020-10-21T18:09:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,152 | py | """A linter for ensuring that a Photo Library is organized correctly."""
import os
import pathlib
import sys
import typing
from labm8.py import app
from util.photolib import linters
from util.photolib import workspace
FLAGS = app.FLAGS
app.DEFINE_boolean("write_header", True, "Write the CSV header row.")
class CsvDump(linters.ToplevelLinter):
  """A linter which dumps the XMP metadata of files as a CSV."""

  # NOTE: the original defined an __init__ that only forwarded *args/**kwargs
  # to super(); it added nothing, so the inherited constructor is used directly.

  def __call__(self, directory: pathlib.Path):
    """Walk `directory` and dump a CSV row for each file via the file linter.

    When pointed at the workspace root, the walk is re-rooted at this linter's
    toplevel directory; directories outside that subtree are ignored.
    """
    if directory == self.workspace.workspace_root:
      directory = self.toplevel_dir
    directory_str = str(directory.absolute())
    toplevel_str = str(self.toplevel_dir.absolute())
    # Start at the top level.
    if not directory_str.startswith(toplevel_str):
      return
    for abspath, dirnames, filenames in os.walk(directory):
      relpath = self.workspace.GetRelpath(abspath)
      # \033[K clears the terminal line, \r rewinds, so each path overwrites
      # the previous progress message in place.
      print("\033[KScanning", relpath, end=" ...\r")
      sys.stdout.flush()
      self._LintThisDirectory(abspath, relpath, dirnames, filenames)
def DumpCsvForDirsOrDie(paths: typing.List[pathlib.Path]) -> None:
  """Dump CSV metadata rows for every photolib directory in `paths`.

  Dies (FatalWithoutStackTrace) if any given path does not exist. A file path
  is mapped to its parent directory, since linting is per-directory.
  """
  for path in paths:
    if not path.exists():
      app.FatalWithoutStackTrace(f"File or directory not found: '{path}'")
  # Linting is on a per-directory level, not per-file.
  directories_to_lint = {
    path if path.is_dir() else path.parent for path in paths
  }
  for i, directory in enumerate(sorted(directories_to_lint)):
    directory = directory.absolute()
    app.Log(2, "Dump directory `%s`", directory)
    workspace_ = workspace.Workspace.FindWorkspace(directory)
    linter = CsvDump(
      workspace_,
      "photos",
      linters.CsvDirLinter,
      linters.CsvFileLinter,
      linters.TIMERS,
    )
    # Emit the CSV header exactly once, before the first directory's rows.
    if FLAGS.write_header and not i:
      csv_dump = linters.CsvFileDumper(workspace_)
      csv_dump.csv_writer.writeheader()
    linter(directory)
def main(argv):
  """Main entry point: dump CSV rows for the directories named on the
  command line (defaults to the current working directory)."""
  targets = argv[1:] or [os.getcwd()]
  DumpCsvForDirsOrDie([pathlib.Path(target) for target in targets])
if __name__ == "__main__":
app.RunWithArgs(main)
| [
"[email protected]"
] | |
b6e2f2f1363bb8eb81ac7377d4925a5bd346d9e4 | e52dc36d3c4894cb04b7cd2159869592798ac5fa | /demo0/test3/booktest/models.py | c8557ba267620c94a1bfdb81841b4e4f9b51acf9 | [] | no_license | lck137/Py1904_lichaokang | 26184e778a598996c52a1d2dff03ed62e3fd9538 | e90048b4dbae42fa327bc8cc7937750bf2c3f052 | refs/heads/master | 2022-11-26T12:26:34.726156 | 2019-07-15T06:38:27 | 2019-07-15T06:38:27 | 194,625,303 | 1 | 0 | null | 2022-11-22T04:07:41 | 2019-07-01T07:50:55 | JavaScript | UTF-8 | Python | false | false | 773 | py | from django.db import models
# Create your models here.
class Bookinfo(models.Model):
    """A book; Heroinfo rows reference it via their `book` foreign key."""
    # Book title, also used as the object's string representation.
    title=models.CharField(max_length=20)
    def __str__(self):
        return self.title
class HeroManage(models.Manager):
    """Custom model manager exposing a convenience creator for Heroinfo rows."""

    def addhero(self, _name, _content, _gender, _book):
        """Build a Heroinfo from the given attributes and persist it."""
        new_hero = Heroinfo()
        new_hero.name = _name
        new_hero.content = _content
        new_hero.gender = _gender
        new_hero.book = _book
        new_hero.save()
class Heroinfo(models.Model):
    """A hero belonging to one Bookinfo; created via HeroManage.addhero."""
    # Hero's name, used as the string representation.
    name=models.CharField(max_length=20)
    # Short free-text description.
    content=models.CharField(max_length=20)
    # Stored values are 'man'/'women'; the second tuple element is the
    # human-readable (Chinese) display label.
    gender=models.CharField(max_length=5,choices=(('man','男'),('women','女')))
    book=models.ForeignKey(Bookinfo,on_delete=models.CASCADE)
    # Replace the default manager with the one providing addhero().
    objects=HeroManage()
    def __str__(self):
        return self.name
| [
"[email protected]"
] | |
34c4d9a681c48ff93096b0ff1409aea316d9f809 | d9b425f7dc5633dc76a5078be14519b5bf1de312 | /text-generation2.py | 14aa14dbaf7f747b8e4f9af35957bde16bb9896f | [] | no_license | joeyism/tf-text-generation | f90c7eb34b6be694b245bb051eb79c090a6b60b8 | f7221f91d218264454d1db3aa1d09da6f3ca66c6 | refs/heads/master | 2021-07-08T05:36:05.015663 | 2017-10-06T03:18:45 | 2017-10-06T03:18:45 | 103,059,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,203 | py | import numpy as np
import tensorflow as tf
print("Setup")
filename = './wonderland.txt'
data = open(filename, 'r').read()
unique_chars = list(set(data))
no_unique_chars = len(unique_chars)
num_to_char = {i:char for i, char in enumerate(unique_chars)}
char_to_num = {char:i for i, char in enumerate(unique_chars)}
no_of_features = no_unique_chars
length_of_sequence = 100
no_of_hidden = 700
no_of_layers = 10
generate_text_length = 100
print_per = 1
no_of_epochs = 5
pkeep=0.8
batch_size = 1
no_of_sequences = int(len(data)/length_of_sequence)
print("no of sequences: {}".format( no_of_sequences))
X = np.zeros((int(len(data)/length_of_sequence), length_of_sequence, no_of_features))
Y = np.zeros((int(len(data)/length_of_sequence), length_of_sequence, no_of_features))
# data generation
print("Data Generation")
for i in range(0, int(len(data)/length_of_sequence)):
X_sequence = data[i*length_of_sequence:(i+1)*length_of_sequence]
X_sequence_in_num = [char_to_num[char] for char in X_sequence]
input_sequence = np.zeros((length_of_sequence, no_of_features))
for j in range(length_of_sequence):
input_sequence[j][X_sequence_in_num[j]] = 1
X[i] = input_sequence
Y_sequence = data[i*length_of_sequence+1:(i+1)*length_of_sequence+1]
Y_sequence_in_num = [char_to_num[char] for char in Y_sequence]
output_sequence = np.zeros((length_of_sequence, no_of_features))
for j in range(length_of_sequence):
output_sequence[j][Y_sequence_in_num[j]] = 1
Y[i] = output_sequence
# create model
print("Creating Model")
# Placeholders for one batch of one-hot encoded input/target sequences.
batchX_placeholder = tf.placeholder(tf.int32, [batch_size, no_of_sequences, no_of_features], name='X')
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, no_of_sequences, no_of_features], name ='Y')
# Flattened initial hidden state for the whole LSTM stack.
Hin = tf.placeholder(tf.float32, [None, no_of_hidden*no_of_layers], name='Hin')
cells = [tf.nn.rnn_cell.BasicLSTMCell(int(no_of_hidden/2), state_is_tuple=False) for _ in range(no_of_layers)]
dropcells = [tf.nn.rnn_cell.DropoutWrapper(cell,input_keep_prob=pkeep) for cell in cells]
multicell = tf.nn.rnn_cell.MultiRNNCell(dropcells, state_is_tuple=False)
multicell = tf.nn.rnn_cell.DropoutWrapper(multicell, output_keep_prob=pkeep)
Yr, H = tf.nn.dynamic_rnn(multicell, tf.to_float(batchX_placeholder), dtype=tf.float32, initial_state=Hin)
Yflat = tf.reshape(Yr, [-1, int(no_of_hidden/2)])
Ylogits = tf.contrib.layers.linear(Yflat, no_of_features)
# https://github.com/martin-gorner/tensorflow-rnn-shakespeare/blob/master/rnn_train.py
# NOTE(review): the bare `break` below sits at MODULE level, outside any loop —
# that is a SyntaxError, so this file cannot run as written. It was presumably
# left as a crude "stop here" marker while the alternative model definition
# below (adapted from the rnn_train.py link above) was being integrated.
# TODO: decide which of the two model definitions is intended and remove the other.
break
W = tf.get_variable("W", [no_of_hidden, no_of_features])
b = tf.get_variable("b", [no_of_features])
embedding = tf.get_variable("embedding", [no_of_features, no_of_hidden])
inputs = tf.nn.embedding_lookup(embedding, batchX_placeholder)
init_state_placeholder = tf.nn.rnn_cell.BasicLSTMCell.zero_state(batch_size, tf.float32)
state_per_layer_list = tf.unstack(init_state_placeholder, axis=0)
rnn_tuple_state = tuple(
[tf.nn.rnn_cell.LSTMStateTuple(state_per_layer_list[idx][0], state_per_layer_list[idx][1])
for idx in range(no_of_layers)]
)
W2 = tf.Variable(np.random.rand(no_of_hidden, no_of_features), dtype=tf.float32)
b2 = tf.Variable(np.zeros((1, no_of_features)), dtype=tf.float32)
inputs_series = tf.split(batchX_placeholder, 1)
labels_series = tf.unstack(batchY_placeholder, axis=1) #tf.split(batchY_placeholder, 1)
cell = tf.nn.rnn_cell.BasicLSTMCell(no_of_hidden, state_is_tuple=True)
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[ tf.contrib.rnn.BasicLSTMCell(no_of_hidden) for _ in range(no_of_layers) ]
, state_is_tuple=True)
states_series, current_state = tf.contrib.rnn.static_rnn(stacked_lstm, inputs_series, rnn_tuple_state)
logits_series = [tf.matmul(state, W2) + b2 for state in states_series]
prediction_series = [tf.nn.softmax(logits) for logits in logits_series]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series, labels_series)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
def vector_to_letter(vector):
    """Decode a one-hot (or softmax) vector back to its character via argmax."""
    hottest_index = np.argmax(vector)
    return num_to_char[hottest_index]
def letter_to_vector(letter):
    """One-hot encode `letter` over the model's character vocabulary."""
    one_hot = np.zeros(no_of_features)
    one_hot[char_to_num[letter]] = 1
    return one_hot
def generate_text(sess, generate_text_length):
    """Sample `generate_text_length` characters from the trained RNN and print them.

    Seeds with a random character, then greedily feeds each predicted character
    back in, threading the recurrent state through `_current_state`.
    """
    generated_num = np.random.randint(no_unique_chars)
    generated_char = num_to_char[generated_num]
    # Zeroed initial state shaped (layers, c/h pair, batch=1, hidden units).
    _current_state = np.zeros((no_of_layers, 2, 1, no_of_hidden))
    input_vector = letter_to_vector(generated_char)
    for i in range(generate_text_length):
        print(vector_to_letter(input_vector), end="")
        _current_state, _prediction_series = sess.run(
            [current_state, prediction_series],
            feed_dict = {
                batchX_placeholder: [input_vector],
                init_state_placeholder: _current_state
            }
        )
        # Greedy decoding: take the argmax character and feed it back in.
        input_vector = letter_to_vector(vector_to_letter(_prediction_series[0]))
print("Run")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch_idx in range(no_of_epochs):
for sequence_idx in range(no_of_sequences):
_current_state = np.zeros((no_of_layers, 2, 1, no_of_hidden))
input = X[sequence_idx]
output = Y[sequence_idx]
loss_list = []
for char_idx in range(len(input)):
_total_loss, _train_step, _current_state, _prediction_series = sess.run(
[total_loss, train_step, current_state, prediction_series],
feed_dict = {
batchX_placeholder: [input[char_idx]],
batchY_placeholder: [output[char_idx]],
init_state_placeholder: _current_state
}
)
#print(vector_to_letter(_prediction_series[0]), end="")
#print("["+vector_to_letter(output[char_idx])+"]",end="")
loss_list.append(_total_loss)
if sequence_idx%print_per == 0:
print("Epoch {}, Sequence {}, Total Loss {}".format(epoch_idx, sequence_idx, np.mean(loss_list)))
saver = tf.train.Saver()
save_path = saver.save(sess, "./model.ckpt")
print("Model saved in %s" % save_path)
#sess.close()
| [
"[email protected]"
] | |
f6b4eed5733ebb43bff4396dafd7b8badc4164f9 | ae2308622f2801e78ee4348fba2c650dcec5f340 | /venv/lib/python3.6/site-packages/fixture/test/test_loadable/test_sqlalchemy_loadable.py | 0205ba55df9162e695e503099931a26498e91e3a | [] | no_license | formika92/PyQtApp | 425e49923312b35ef549ef58e8bf73361a54018d | 460c6ba336fdb964d208585b3afab0d3ad829ea0 | refs/heads/main | 2023-08-01T05:33:29.130245 | 2021-09-12T13:22:50 | 2021-09-12T13:22:50 | 403,931,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,698 | py | import unittest
from six import print_
from fixture.examples.db.sqlalchemy_examples import *
from fixture.loadable.sqlalchemy_loadable import *
from fixture.test import conf, env_supports
from fixture.test.test_loadable import *
def get_transactional_session():
    """Build a scoped, transaction-per-test session factory.

    The sessionmaker keyword spelling changed between SQLAlchemy 0.4
    (``transactional=True``) and 0.5 (``autocommit=False``), so the kwargs
    are chosen by the detected major version.
    """
    if sa_major < 0.5:
        maker_kwargs = dict(autoflush=False, transactional=True)
    else:
        maker_kwargs = dict(autoflush=True, autocommit=False)
    return scoped_session(
        sessionmaker(**maker_kwargs),
        scopefunc=lambda: __name__,
    )
def clear_session(session):
    """Expunge every object from *session* (method renamed in SQLAlchemy 0.5)."""
    expunger = session.clear if sa_major < 0.5 else session.expunge_all
    expunger()
def save_session(session, object):
    """Stage *object* for persistence (``save`` was renamed ``add`` in SA 0.5)."""
    # NOTE: the parameter name `object` shadows the builtin but is kept for
    # interface compatibility with existing callers.
    adder = session.save if sa_major < 0.5 else session.add
    adder(object)
def setup():
    """Module-level test setup: skip the whole module when SQLAlchemy is absent."""
    if not env_supports.sqlalchemy:
        raise SkipTest
def teardown():
    """Module-level test teardown: nothing to clean up."""
    pass
@raises(UninitializedError)
def test_cannot_teardown_unloaded_fixture():
class CategoryData(DataSet):
class cars:
name = 'cars'
engine = create_engine(conf.LITE_DSN)
metadata.bind = engine
db = SQLAlchemyFixture(
env=globals(),
engine=metadata.bind
)
data = db.data(CategoryData)
data.teardown()
@attr(unit=1)
def test_negotiated_medium():
class CategoryData(DataSet):
class cars:
name = 'cars'
engine = create_engine(conf.LITE_DSN)
metadata.bind = engine
metadata.create_all()
eq_(type(negotiated_medium(categories, CategoryData)), TableMedium)
eq_(is_table(categories), True)
clear_mappers()
mapper(Category, categories)
eq_(type(negotiated_medium(Category, CategoryData)), MappedClassMedium)
eq_(is_mapped_class(Category), True)
# hmmm
# eq_(is_assigned_mapper(Category), False)
clear_mappers()
ScopedSession = scoped_session(get_transactional_session())
ScopedSession.mapper(Category, categories)
eq_(type(negotiated_medium(Category, CategoryData)), MappedClassMedium)
eq_(is_mapped_class(Category), True)
eq_(is_assigned_mapper(Category), True)
@attr(unit=1)
def test_negotiated_medium_05():
if sa_major < 0.5:
raise SkipTest("Requires SQLAlchemy >= 0.5")
class FooData(DataSet):
class foo:
name = 'foozilator'
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
engine = create_engine(conf.LITE_DSN)
class DeclarativeFoo(Base):
__tablename__ = 'fixture_declarative_foo'
id = Column(Integer, primary_key=True)
name = Column(String)
DeclarativeFoo.metadata.bind = engine
DeclarativeFoo.__table__.create()
try:
eq_(type(negotiated_medium(DeclarativeFoo, FooData)), MappedClassMedium)
finally:
DeclarativeFoo.__table__.drop()
class TestSetupTeardown(unittest.TestCase):
    """fixture.data() round-trip against a mapped class on the LITE (SQLite) DSN."""

    class CategoryData(DataSet):
        class cars:
            name = 'cars'
        class free_stuff:
            name = 'get free stuff'

    def setUp(self):
        # Fresh schema, a transactional session, and a clean Category mapping
        # for every test.
        engine = create_engine(conf.LITE_DSN)
        metadata.bind = engine
        metadata.create_all()
        Session = get_transactional_session()
        self.session = Session()
        self.fixture = SQLAlchemyFixture(
            env={'CategoryData':Category},
            engine=metadata.bind
        )
        clear_mappers()
        mapper(Category, categories)

    def tearDown(self):
        metadata.drop_all()
        self.session.close()

    @attr(functional=1)
    def test_setup_then_teardown(self):
        # Table empty before setup, populated after, and empty again after teardown.
        eq_(self.session.query(Category).all(), [])
        data = self.fixture.data(self.CategoryData)
        data.setup()
        clear_session(self.session)
        cats = self.session.query(Category).order_by('name').all()
        eq_(cats[0].name, 'cars')
        eq_(cats[1].name, 'get free stuff')
        data.teardown()
        clear_session(self.session)
        eq_(list(self.session.query(Category)), [])
class TestImplicitSABinding(unittest.TestCase):
class CategoryData(DataSet):
class cars:
name = 'cars'
class free_stuff:
name = 'get free stuff'
def setUp(self):
engine = create_engine(conf.LITE_DSN)
metadata.bind = engine
# metadata.bind.echo = True
metadata.create_all()
Session = get_transactional_session()
self.session = Session()
# note the lack of explicit binding :
self.fixture = SQLAlchemyFixture(
env={'CategoryData':Category},
)
clear_mappers()
# since categories is assigned to metadata, SA should handle binding for us
mapper(Category, categories)
def tearDown(self):
# metadata.bind.echo = False
metadata.drop_all()
self.session.close()
@attr(functional=1)
def test_setup_then_teardown(self):
eq_(self.session.query(Category).all(), [])
data = self.fixture.data(self.CategoryData)
data.setup()
clear_session(self.session)
cats = self.session.query(Category).order_by('name').all()
eq_(cats[0].name, 'cars')
eq_(cats[1].name, 'get free stuff')
data.teardown()
clear_session(self.session)
eq_(list(self.session.query(Category)), [])
class CategoryData(DataSet):
    """Module-level fixture rows for the `categories` table."""
    class cars:
        name = 'cars'
    class free_stuff:
        name = 'get free stuff'
class ProductData(DataSet):
    """Fixture rows for `products`; `truck` references CategoryData.cars."""
    class truck:
        name = 'truck'
        category = CategoryData.cars
class OfferData(DataSet):
    """Fixture rows for `offers`, exercising DataSet row inheritance.

    `free_spaceship` inherits product/category from `free_truck` and pins
    id=99; `free_tv` inherits everything from `free_spaceship` except the name.
    """
    class free_truck:
        name = "it's a free truck"
        product = ProductData.truck
        category = CategoryData.free_stuff

    class free_spaceship(free_truck):
        id = 99
        name = "it's a free spaceship"

    class free_tv(free_spaceship):
        name = "it's a free TV"
class TestCascadingReferences(unittest.TestCase):
CategoryData = CategoryData
ProductData = ProductData
OfferData = OfferData
def setUp(self):
if not conf.HEAVY_DSN:
raise SkipTest("conf.HEAVY_DSN not defined")
engine = create_engine(conf.HEAVY_DSN)
metadata.bind = engine
metadata.create_all()
Session = get_transactional_session()
self.session = Session()
self.fixture = SQLAlchemyFixture(
env=globals(),
engine=metadata.bind,
style=NamedDataStyle(),
)
clear_mappers()
mapper(Category, categories)
mapper(Product, products, properties={
'category': relation(Category, backref='products')
})
mapper(Offer, offers, properties={
'product': relation(Product, backref='offers'),
'category': relation(Category, backref='offers')
})
def tearDown(self):
metadata.drop_all()
self.session.close()
clear_mappers()
# self.conn.close()
metadata.bind.dispose()
@attr(functional=1)
def test_setup_then_teardown(self):
eq_(self.session.query(Category).all(), [])
eq_(self.session.query(Product).all(), [])
eq_(self.session.query(Offer).all(), [])
data = self.fixture.data(self.OfferData)
data.setup()
clear_session(self.session)
cats = self.session.query(Category).order_by('name').all()
eq_(cats[0].name, 'cars')
eq_(cats[1].name, 'get free stuff')
prods = self.session.query(Product).order_by('name').all()
eq_(prods[0].name, 'truck')
eq_(prods[0].category, cats[0])
off = self.session.query(Offer).order_by('name').all()
eq_(off[0].name, "it's a free TV")
eq_(off[0].product, prods[0])
eq_(off[0].category, cats[1])
eq_(off[1].name, "it's a free spaceship")
eq_(off[1].product, prods[0])
eq_(off[1].category, cats[1])
eq_(off[2].name, "it's a free truck")
eq_(off[2].product, prods[0])
eq_(off[2].category, cats[1])
data.teardown()
clear_session(self.session)
eq_(self.session.query(Category).all(), [])
eq_(self.session.query(Product).all(), [])
eq_(self.session.query(Offer).all(), [])
class TestCollidingSessions(unittest.TestCase):
class CategoryData(DataSet):
class cars:
name = 'cars'
class free_stuff:
name = 'get free stuff'
def setUp(self):
self.engine = create_engine(conf.LITE_DSN)
# self.conn = self.engine.connect()
metadata.bind = self.engine
# metadata.bind.echo = True
metadata.create_all()
# metadata.bind.echo = False
self.ScopedSession = scoped_session(get_transactional_session())
self.session = self.ScopedSession()
self.fixture = SQLAlchemyFixture(
env={'CategoryData':Category},
engine=metadata.bind
)
clear_mappers()
mapper(Category, categories)
def tearDown(self):
metadata.drop_all()
self.session.close()
@attr(functional=1)
def test_setup_then_teardown(self):
eq_(self.session.query(Category).all(), [])
data = self.fixture.data(self.CategoryData)
data.setup()
clear_session(self.session)
cats = self.session.query(Category).order_by('name').all()
eq_(cats[0].name, 'cars')
eq_(cats[1].name, 'get free stuff')
# simulate the application running into some kind of error:
new_cat = Category()
new_cat.name = "doomed to non-existance"
save_session(self.session, new_cat)
self.session.rollback()
self.ScopedSession.remove()
data.teardown()
clear_session(self.session)
print_([(c.id, c.name) for c in self.session.query(Category).all()])
eq_(list(self.session.query(Category)), [])
class TestScopedSessions(unittest.TestCase):
class CategoryData(DataSet):
class cars:
name = 'cars'
class free_stuff:
name = 'get free stuff'
def setUp(self):
self.engine = create_engine(conf.LITE_DSN)
metadata.bind = self.engine
metadata.create_all()
ScopedSession = scoped_session(get_transactional_session())
self.session = ScopedSession()
self.fixture = SQLAlchemyFixture(
env={'CategoryData':Category},
engine=metadata.bind
)
clear_mappers()
mapper(Category, categories)
def tearDown(self):
metadata.drop_all()
self.session.close()
@attr(functional=1)
def test_setup_then_teardown(self):
eq_(self.session.query(Category).all(), [])
data = self.fixture.data(self.CategoryData)
data.setup()
clear_session(self.session)
cats = self.session.query(Category).order_by('name').all()
eq_(cats[0].name, 'cars')
eq_(cats[1].name, 'get free stuff')
data.teardown()
clear_session(self.session)
eq_(list(self.session.query(Category)), [])
class TestElixir(unittest.TestCase):
class CategoryData(DataSet):
class cars:
name = 'cars'
class free_stuff:
name = 'get free stuff'
def setUp(self):
if not env_supports.elixir:
raise SkipTest("elixir module not found")
import elixir
self.engine = create_engine(conf.LITE_DSN)
metadata.bind = self.engine
metadata.create_all()
class CategoryEntity(elixir.Entity):
elixir.using_options(tablename=str(categories))
# save_on_init IS VERY IMPORTANT
elixir.using_mapper_options(save_on_init=False)
self.CategoryEntity = CategoryEntity
self.fixture = SQLAlchemyFixture(
env={'CategoryData':CategoryEntity},
engine=metadata.bind
)
elixir.metadata.bind = self.engine
elixir.setup_all()
def tearDown(self):
metadata.drop_all()
@attr(functional=1)
def test_setup_then_teardown(self):
try:
from elixir import session as elixir_session
except ImportError:
from elixir import objectstore as elixir_session
eq_(len(elixir_session.query(self.CategoryEntity).all()), 0)
data = self.fixture.data(self.CategoryData)
data.setup()
eq_(len(elixir_session.query(self.CategoryEntity).all()), 2)
data.teardown()
eq_(elixir_session.query(self.CategoryEntity).all(), [])
class TestTableObjects(unittest.TestCase):
class CategoryData(DataSet):
class cars:
name = 'cars'
class free_stuff:
name = 'get free stuff'
def setUp(self):
self.engine = create_engine(conf.LITE_DSN)
metadata.bind = self.engine
metadata.create_all()
Session = get_transactional_session()
self.session = Session()
self.fixture = SQLAlchemyFixture(
# maps to a table object :
env={'CategoryData':categories},
engine=metadata.bind
)
clear_mappers()
mapper(Category, categories)
def tearDown(self):
metadata.drop_all()
self.session.close()
@attr(functional=1)
def test_setup_then_teardown(self):
eq_(self.session.query(Category).all(), [])
data = self.fixture.data(self.CategoryData)
data.setup()
clear_session(self.session)
cats = self.session.execute(categories.select()).fetchall()
eq_(cats[0].name, 'cars')
eq_(cats[1].name, 'get free stuff')
data.teardown()
clear_session(self.session)
eq_(self.session.execute(categories.select()).fetchall(), [])
class TestTableObjectsExplicitConn(object):
    """Checks that an explicitly passed engine overrides the default table bind.

    The tables' default bind is the HEAVY_DSN database, while the fixture is
    given a LITE_DSN engine: loaded rows must appear only in the LITE database.
    """

    class CategoryData(DataSet):
        class cars:
            name = 'cars'
        class free_stuff:
            name = 'get free stuff'

    def setUp(self):
        if not conf.HEAVY_DSN:
            raise SkipTest("conf.HEAVY_DSN not defined")
        self.litemeta = MetaData(bind=conf.LITE_DSN)
        LiteSession = sessionmaker(bind=self.litemeta.bind)
        self.litesession = LiteSession()
        heavymeta = MetaData(bind=create_engine(conf.HEAVY_DSN))
        HeavySession = sessionmaker(bind=heavymeta.bind)
        self.heavysession = HeavySession()

        # this creates the default bind:
        metadata.bind = heavymeta.bind
        metadata.create_all()

        # this creates the table in mem but does not bind
        # the connection to the table:
        categories.create(bind=self.litemeta.bind)

        clear_mappers()
        mapper(Category, categories)

    def tearDown(self):
        metadata.drop_all()

    def test_with_engine_connection(self):
        fixture = SQLAlchemyFixture(
            # maps to a table object :
            env={'CategoryData':categories},
            # this should overwrite the default bind:
            engine = self.litemeta.bind
        )
        # NOTE(review): this resolves to the MODULE-level CategoryData (names in
        # a class body are not visible inside its methods), not the nested class
        # above — both define identical rows, so behavior is unaffected, but
        # `self.CategoryData` was probably intended.
        data = fixture.data(CategoryData)
        data.setup()

        rs = self.heavysession.query(Category).all()
        assert rs==[], "unexpected records in HEAVY_DSN db: %s" % rs

        rs = self.litesession.query(Category).all()
        eq_(len(rs), 2)

        data.teardown()
        rs = self.litesession.query(Category).all()
        eq_(len(rs), 0)
def test_fixture_can_be_disposed():
if sa_major < 0.5:
from sqlalchemy.exceptions import InvalidRequestError
else:
from sqlalchemy.exc import InvalidRequestError
engine = create_engine(conf.LITE_DSN)
metadata.bind = engine
metadata.create_all()
Session = get_transactional_session()
session = Session()
fixture = SQLAlchemyFixture(
env={'CategoryData':Category},
engine=metadata.bind
)
class CategoryData(DataSet):
class cars:
name = 'cars'
class free_stuff:
name = 'get free stuff'
clear_mappers()
mapper(Category, categories)
data = fixture.data(CategoryData)
data.setup()
data.teardown()
fixture.dispose()
# cannot use fixture anymore :
try:
data.setup()
except InvalidRequestError:
pass
else:
assert False, "data.setup() did not raise InvalidRequestError after connection was disposed"
# a new instance of everything is needed :
metadata.create_all()
fixture = SQLAlchemyFixture(
env={'CategoryData':Category},
engine=metadata.bind
)
data = fixture.data(CategoryData)
data.setup()
data.teardown()
@attr(unit=True)
def test_SQLAlchemyFixture_configured_with_bound_session_and_conn():
class StubConnection:
def begin(self):
pass
stub_conn = StubConnection()
class StubTransaction:
def add(self, engine):
pass
fake_out_bind = 1
class StubSession:
bind_to = fake_out_bind
def create_transaction(self):
return StubTransaction()
stub_session = StubSession()
f = SQLAlchemyFixture(
session=stub_session, connection=stub_conn)
f.begin()
eq_(f.session, stub_session)
eq_(f.connection, stub_conn)
### was using this to work around postgres deadlocks...
# if dsn.startswith('postgres'):
# # postgres will put everything in a transaction, even after a commit,
# # and it seems that this makes it near impossible to drop tables after a test
# # (deadlock), so let's fix that...
# import psycopg2.extensions
# self.conn.connection.connection.set_isolation_level(
# psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
| [
"[email protected]"
] | |
fae323274d9df5df19c5909ba46c9c97b3b98cfc | afce5c1f6499ef73c91fcdffc8f75200bf3b71f7 | /microblog/migrations/versions/45e73e1d8309_users_table.py | 6e58a4e702813c1b8d29ed936149dbb7167180d6 | [] | no_license | michaelzh17/flaskmega | e15d69f2d3856c157bbeafd10a1cf533f2354cd1 | b2a3e6bd5bb7f24d93a79765904564f399166446 | refs/heads/master | 2022-12-09T20:42:21.377754 | 2018-11-05T16:10:17 | 2018-11-05T16:10:17 | 146,813,855 | 0 | 0 | null | 2022-12-08T02:59:04 | 2018-08-30T22:39:18 | Python | UTF-8 | Python | false | false | 1,131 | py | """users table
Revision ID: 45e73e1d8309
Revises:
Create Date: 2018-09-18 12:01:15.130771
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '45e73e1d8309'
down_revision = None  # first migration in the chain: no parent revision
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``user`` table.

    Columns: surrogate integer primary key plus nullable username (<=64
    chars), email (<=120 chars) and password_hash (<=128 chars).  Unique
    indexes on username and email enforce uniqueness and speed up lookups.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop both indexes, then the ``user`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_index(op.f('ix_user_email'), table_name='user')
    op.drop_table('user')
    # ### end Alembic commands ###
| [
"[email protected]"
] | |
d0639903d08987691bc44256257a865b8c550fa8 | c69764ddde1b5338101f2d10d6281eb513c0bc4e | /api_reader/asgi.py | 63f39b670f0e8a457620a9d26e416c9abfb87421 | [] | no_license | mentally-gamez-soft/trains-manager-app | b51d2caa98e58bb3e3e947cc7d7f1a378b554ec0 | 04e4e8dc7a5af96233ac4aa8568a77546d6e26d7 | refs/heads/master | 2023-03-21T02:19:04.779210 | 2021-03-15T20:12:35 | 2021-03-15T20:12:35 | 348,110,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for api_reader project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings module unless the caller set it already.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_reader.settings')
# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"[email protected]"
] | |
4ce9c6dba4806b29f781d19a3fd908153c5b1fbc | 83eabb30d0e3c826e02e9169c7bfa86458be5407 | /reports/Krechko/5/src/lab_5_Krechko.py | 9d6bc90e8f12180978814887429ff53fd06823e5 | [] | no_license | kroschenko/miapr_po4_2020 | 1fdac30f84a3110f70b45352208eb15eb1e3824f | 353314f2372735006e98720381e54a3fcffb54ae | refs/heads/master | 2023-02-07T14:27:40.295206 | 2020-12-24T16:27:10 | 2020-12-24T16:27:10 | 295,746,775 | 1 | 44 | null | 2020-12-27T12:15:17 | 2020-09-15T14:02:51 | Jupyter Notebook | UTF-8 | Python | false | false | 6,112 | py | import numpy as np
def sigmoid(x):
return np.tanh(x)
def dsigmoid(x):
    """Derivative of the tanh activation: 1 - tanh(x)**2."""
    t = sigmoid(x)
    return 1 - t * t
def error(y, Y):
    """Mean squared error between predictions ``y`` and targets ``Y``.

    Both arguments are flattened first, so any array shapes with the
    same total number of elements are accepted.

    Args:
        y: predicted values (array-like).
        Y: expected values (array-like, same number of elements as ``y``).

    Returns:
        Mean of the squared element-wise differences (numpy float).
    """
    # The original reshaped both flattened views to (1, n) before
    # subtracting; that is a no-op for the mean, so subtract directly.
    y_flat = np.ravel(y)
    Y_flat = np.ravel(Y)
    return np.mean((y_flat - Y_flat) ** 2)
def adaptive(errors, outputs, inputs):
    """Adaptive learning-rate estimate for a tanh layer.

    Computes
        sum(e^2 * (1 - o^2)) / ((1 + sum(x^2)) * sum(e^2 * (1 - o^2)^2))
    where ``e`` are the output errors, ``o`` the neuron outputs and
    ``x`` the input vector.
    """
    err_sq = np.square(errors)
    act_term = 1 - np.square(outputs)  # (1 - o^2), tanh-derivative factor
    numerator = np.sum(np.dot(err_sq, act_term))
    gain = 1 + np.sum(np.square(inputs))
    denominator = gain * np.sum(np.dot(err_sq, np.square(act_term)))
    return numerator / denominator
def training(inputs,predict,weights_hidden,weights_input,learning_rate):
    """One online backpropagation step for the 2-layer tanh network.

    Args:
        inputs: 1-D input vector (length n_input).
        predict: 1-D expected output vector (one-hot class code).
        weights_hidden: hidden-layer weight matrix (n_hidden x n_input);
            mutated in place.
        weights_input: output-layer weight matrix (n_output x n_hidden);
            mutated in place.
        learning_rate: gradient step size; returned unchanged.

    Returns:
        Tuple (weights_hidden, weights_input, learning_rate).
    """
    # Hidden-layer output
    inputs_hidden = np.dot(weights_hidden,inputs)
    outputs_hidden = sigmoid_mapper(inputs_hidden)
    # Output-layer output
    inputs_input = np.dot(weights_input,outputs_hidden)
    outputs_input = sigmoid_mapper(inputs_input)
    # Output-layer error (prediction minus target)
    error_input = np.subtract(outputs_input,predict)
    # Output-layer gradient
    # NOTE(review): dsigmoid is applied to the *activations*, so this is
    # 1 - tanh(tanh(z))**2 rather than the exact derivative 1 - tanh(z)**2.
    # Confirm this is the intended lab formulation.
    gradient_input = dsigmoid(outputs_input)
    delta_input = error_input * gradient_input
    for w,d in zip(weights_input,delta_input):
        # Adjust the output-layer weights, one row (output neuron) at a time.
        # reshape returns a view onto the row `w`, so the in-place `-=`
        # below mutates weights_input directly.
        ww, dd = [], []
        ww = w.reshape(1,len(w))
        dd.append(d)
        ww -= learning_rate * np.dot(dd,outputs_hidden.reshape(1,len(outputs_hidden)))
    for w,d in zip(weights_input,delta_input):
        # Hidden-layer error contributed by this single output neuron.
        ww, dd = [], []
        ww = w.reshape(1,len(w))
        dd.append(d)
        error_hidden = dd * ww
        # Adjust the hidden weights.
        # NOTE(review): hidden weights are updated once per output neuron
        # inside this loop (each pass uses only that neuron's contribution)
        # instead of once using the summed backpropagated error.
        gradient_hidden = dsigmoid(outputs_hidden)
        delta_hidden = error_hidden * gradient_hidden
        weights_hidden -= learning_rate * np.dot(inputs.reshape(len(inputs),1),delta_hidden).T
    return weights_hidden,weights_input,learning_rate
def prediction(inputs, weights_hidden, weights_input):
    """Forward pass through the two-layer tanh network.

    Args:
        inputs: input vector, or matrix whose columns are input vectors.
        weights_hidden: hidden-layer weight matrix.
        weights_input: output-layer weight matrix.

    Returns:
        Activations of the output layer.
    """
    hidden_out = sigmoid_mapper(np.dot(weights_hidden, inputs))
    return sigmoid_mapper(np.dot(weights_input, hidden_out))
# Element-wise tanh activation usable on whole arrays.
sigmoid_mapper = np.vectorize(sigmoid)
learning = []     # (input_vector, target_code) training pairs
predictions = []  # (input_vector, target_code) evaluation pairs
learning_rate = 0.5
epoch = 0
epoch_maximum = 15000  # hard cap on the number of training epochs
error_minimum = 1e-5 # minimum error: training stops below this MSE
n_input = 20 # number of inputs
n_hidden = 10 # number of hidden-layer units
n_output = 3 # number of outputs
# Random weight init: hidden layer with std 2**-0.5, output layer with std 1.
w_hidden = np.random.normal(0.0,2 ** -0.5,(n_hidden,n_input))
w_input = np.random.normal(0.0,1,(n_output,n_hidden))
# Three reference 20-bit patterns, one per class.
vectors = np.array([[1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1],
                    [1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0],
                    [1,1,1,0,0,0,1,1,1,0,0,0,1,1,1,0,0,0,1,1]])
# One-hot class labels, row-for-row with `vectors`.
codes = np.array([[1,0,0],[0,1,0],[0,0,1]])
# Pair each reference pattern with its one-hot label.
for vector, code in zip(vectors, codes):
    com = []
    com.append(vector)
    com.append(code)
    learning.append(tuple(com))
# Train until the MSE over the whole training set drops to the threshold
# or the epoch budget runs out.
while True:
    inputs, predicts = [], []
    for sample,predict in learning:
        # One online weight update per training pair.
        w_hidden,w_input,learning_rate = training(np.array(sample),np.array(predict),w_hidden,w_input,learning_rate)
        inputs.append(np.array(sample))
        predicts.append(np.array(predict))
    # Epoch evaluation: forward-pass all samples at once (as columns) and
    # compare against the expected codes.
    error_learning = error(prediction(np.array(inputs).T,w_hidden,w_input),np.array(predicts))
    epoch += 1
    if error_learning <= error_minimum or epoch > epoch_maximum:
        break
# Report how well the network reproduces its own training patterns.
print("\nРЕЗУЛЬТАТЫ ОБУЧЕНИЯ:")
for sample,predict in learning:
    output = prediction(sample,w_hidden,w_input)
    print("прогноз : {:<30}\nожидаемый: {:<30}\n".format(str(output),str(np.array(predict))))
# Five noisy variants of the class-1 pattern (one or two bits flipped).
vvectors = np.array([[1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1],
                     [1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,0],
                     [1,0,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,0,1,1],
                     [1,1,1,1,0,0,0,0,1,1,0,1,0,0,0,0,1,1,1,1],
                     [1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0,1,1,1,1]])
ccodes = np.array([[1,0,0],[1,0,0],[1,0,0],[1,0,0],[1,0,0]])
for vector, code in zip(vvectors, ccodes):
    com = []
    com.append(vector)
    com.append(code)
    predictions.append(tuple(com))
print("\nРЕЗУЛЬТАТЫ ПРОГНОЗИРОВАНИЯ:")
for sample,predict in predictions:
    output = prediction(sample,w_hidden,w_input)
    print("прогноз : {:<30}\nожидаемый: {:<30}\n".format(str(output),str(np.array(predict))))
predictions = []
# Five noisy variants of the class-2 pattern.
vvectors = np.array([[1,0,1,0,1,0,1,0,1,1,1,0,1,0,1,0,1,0,1,0],
                     [1,1,1,0,1,0,1,0,1,0,0,0,1,0,1,0,1,0,1,0],
                     [1,0,1,0,1,0,1,0,1,0,1,1,1,0,1,0,1,0,0,0],
                     [1,0,1,0,1,0,1,0,1,1,1,0,0,0,1,0,1,0,1,0],
                     [1,0,1,0,1,0,1,0,0,0,1,0,1,1,1,0,1,0,1,0]])
ccodes = np.array([[0,1,0],[0,1,0],[0,1,0],[0,1,0],[0,1,0]])
for vector, code in zip(vvectors, ccodes):
    com = []
    com.append(vector)
    com.append(code)
    predictions.append(tuple(com))
for sample,predict in predictions:
    output = prediction(sample,w_hidden,w_input)
    print("прогноз : {:<30}\nожидаемый: {:<30}\n".format(str(output),str(np.array(predict))))
predictions = []
# Five noisy variants of the class-3 pattern.
vvectors = np.array([[1,1,1,0,1,0,1,1,1,0,0,0,1,1,1,0,0,0,1,1],
                     [1,1,1,0,0,0,1,1,0,0,0,0,1,1,1,0,0,0,1,1],
                     [1,1,1,0,0,0,1,1,1,0,1,0,1,1,1,0,0,0,1,1],
                     [0,1,1,0,0,0,1,1,1,0,0,0,1,1,1,0,0,0,1,1],
                     [1,1,1,0,0,0,1,1,1,0,0,1,1,1,1,0,0,0,1,1]])
ccodes = np.array([[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1]])
for vector, code in zip(vvectors, ccodes):
    com = []
    com.append(vector)
    com.append(code)
    predictions.append(tuple(com))
for sample,predict in predictions:
    output = prediction(sample,w_hidden,w_input)
    print("прогноз : {:<30}\nожидаемый: {:<30}\n".format(str(output),str(np.array(predict))))
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.