Dataset schema (one row per source file):

| column | type | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 18.2k–668M, nullable |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |
33db11aceab4ec4e476e34730279b35203f78b6a
|
e0943ddbd3af5e5245001b2686b92d3c6dd7c410
|
/pages/cart_page.py
|
b7d852d7bf3f6c619524c642071c8398695255f8
|
[] |
no_license
|
lion7500000/TEST1
|
50e001c77e9733a2a11dc72b271a3c4173af4ef6
|
3d10809c2aef7b261a61512b13140216d11578d7
|
refs/heads/master
| 2020-12-28T00:35:55.214271 | 2020-02-21T17:41:10 | 2020-02-21T17:41:10 | 238,121,707 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,060 |
py
|
from pages.base_page import Page
from selenium.webdriver.common.by import By


class CartPage(Page):
    CART_BTN = (By.CSS_SELECTOR, "span.btn-primary")
    EMPTY_CART_BTN = (By.CSS_SELECTOR, "a.emptyCartButton")
    # CONFIRM_EMPTY_CART_BTN = (By.CSS_SELECTOR, "div.modal-footer button.btn-primary")
    CONFIRM_EMPTY_CART_BTN = (By.XPATH, "//button[@class='btn btn-primary' and starts-with(text(),'Empty Cart')]")
    EMPTY_CART_TEXT = (By.CSS_SELECTOR, "span.btn-glass-cart")  # "p.header-1"

    def open_cart_btn(self):
        # self.wait_for_element_to_be_clickable(*self.CART_BTN)
        # self.visibility_of_element(*self.CART_BTN)
        self.click(*self.CART_BTN)

    def click_empty_btn(self):
        self.click(*self.EMPTY_CART_BTN)
        # BTN = self.driver.find_elements(*self.CONFIRM_EMPTY_CART_BTN)[2].click()
        # self.wait_for_element_to_be_clickable(*BTN)

    def click_confirm_empty(self):
        self.click(*self.CONFIRM_EMPTY_CART_BTN)

    def verify_cart_empty(self, text):
        self.verify_text(text, *self.EMPTY_CART_TEXT)
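
# --- Editor's sketch (not part of the original file) ---
# CartPage relies on a Page base class from pages/base_page.py that is not
# included in this row. Below is a minimal sketch of the assumed interface,
# built on the standard selenium waits; the class name BasePageSketch is
# hypothetical, and the method names simply mirror the calls above:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


class BasePageSketch:
    def __init__(self, driver, timeout=10):
        self.driver = driver
        self.wait = WebDriverWait(driver, timeout)

    def wait_for_element_to_be_clickable(self, *locator):
        # locator is a (By, value) tuple, as in the class attributes above
        return self.wait.until(EC.element_to_be_clickable(locator))

    def click(self, *locator):
        self.wait.until(EC.element_to_be_clickable(locator)).click()

    def verify_text(self, expected_text, *locator):
        element = self.wait.until(EC.visibility_of_element_located(locator))
        assert expected_text in element.text, f"{expected_text!r} not in {element.text!r}"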
|
[
"[email protected]"
] | |
61fb77a28bf6cafa8abe605cd779efcf83e5e63c
|
cf96476243618af67234eafa72f73a48f5c0b00b
|
/cjb/activate/bin/pip3.7
|
063f1e9b2bc8118d396087e05d20e10b8e5fe610
|
[] |
no_license
|
zzazan96/diet
|
073bad54cb4238a2308265e691f5ecb6ebbfb155
|
824155f55aca520c5f1457c064327ae8fe74d33f
|
refs/heads/master
| 2023-07-12T12:25:34.896738 | 2021-08-16T12:43:13 | 2021-08-16T12:43:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 252 |
7
|
#!/Users/scarlett/diet/cjb/activate/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | |
f3310d715e12cd7c439a777de5477b3d3be9c62a
|
c61cf7fee2aa94b10becdfe19f87d1d0bb1e2c4c
|
/src/main.py
|
daaf0c8877f33a6549f2c7ac2495da3937f1d787
|
[] |
no_license
|
samalabhialsh/learning_git
|
30eae66779572f5ecac03d0dbe2dba547b071463
|
29c74512bf5e167e6b4f491898625761f16f3127
|
refs/heads/master
| 2020-05-16T05:54:30.067053 | 2019-04-20T17:45:22 | 2019-04-20T17:45:22 | 182,830,550 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 45 |
py
|
# todo : what a class
# ensure you have committed
|
[
"[email protected]"
] | |
34250fbbb60f8d96d2fde3d50ac954b2ae7d10ae
|
20a14e99a71bedc22742ddb64291d224d0c9e2fc
|
/Term5/DC/Lab2.4/client.py
|
7f0646cec08df6e099d70cfc6b17a35575448fe4
|
[] |
no_license
|
keyclicker/labs
|
f0cd8ed2261204ff7c42769a9fc0b0d7fa33d842
|
45692d9e2b17253aec061d386ad524f829cbd633
|
refs/heads/master
| 2023-05-11T16:18:36.693061 | 2023-05-05T08:51:12 | 2023-05-05T08:51:12 | 207,407,019 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 673 |
py
|
import Pyro4
ns = Pyro4.locateNS()
uri = ns.lookup("db")
o = Pyro4.Proxy(uri)
def _exec(pref, fun, *args):
    print(pref, args, ":", fun(*args))
_exec("add_airline", o.add_airline, 50, "Name", "City")
_exec("add_flight", o.add_flight, 98, 50, "From", "To")
print()
_exec("update_airline", o.update_airline, 50, "Edited Name", "City")
_exec("update_flight", o.update_flight, 98, "From", "To EDIT")
print()
_exec("get_airline", o.get_airline, 50)
_exec("get_flight", o.get_flight, 98)
print()
_exec("remove_airline", o.remove_airline, 50)
_exec("remove_flight", o.remove_flight, 98)
print()
_exec("get_airlines", o.get_airlines)
_exec("get_flights", o.get_flights)
|
[
"[email protected]"
] | |
f808f5f5161ea2f01de5a8408fa274423f9ebdaf
|
bc2c968c23f2ece433f1c47b6125567c33468943
|
/YOLOv3_from_Scratch/model.py
|
3f3e0ca4f365862864e8ec5983ac0d3b79a9e3e1
|
[] |
no_license
|
pykwok/Pytorch_CV_Models_from_Scratsh
|
b310bf3c2e502074fe559bc6bde32cf907c43ad8
|
748608ce812720b65083d830189785429dd233b3
|
refs/heads/master
| 2023-07-09T07:38:45.909052 | 2021-07-28T04:49:02 | 2021-07-28T04:49:02 | 390,217,725 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,051 |
py
|
import torch
import torch.nn as nn

# Config format:
# 1. Tuple: (out_channels, kernel_size, stride)
# 2. List:  ["B", n] -- "B" indicates a residual block (the block the authors
#           circle in the paper); n is the number of repeats
# 3. Str:   "S" is for the "scale prediction" block, where the YOLO loss is computed
#           "U" is for upsampling the feature map and concatenating with a previous layer
# 53 Darknet layers + 53 other layers = 106 layers
config = [
    (32, 3, 1),
    (64, 3, 2),
    ["B", 1],
    (128, 3, 2),
    ["B", 2],
    (256, 3, 2),
    ["B", 8],
    (512, 3, 2),
    ["B", 8],
    (1024, 3, 2),
    ["B", 4],  # everything up to this point is Darknet-53
    (512, 1, 1),  # 1x1 convolution
    (1024, 3, 1),
    "S",  # first output layer (layer 82); network stride: 32
    (256, 1, 1),  # 1x1 convolution
    "U",
    (256, 1, 1),  # 1x1 convolution
    (512, 3, 1),
    "S",  # second output layer (layer 94); network stride: 16
    (128, 1, 1),  # 1x1 convolution
    "U",
    (128, 1, 1),  # 1x1 convolution
    (256, 3, 1),
    "S",  # third output layer (layer 106); network stride: 8
]


class CNNBlock(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_act=True,  # the scale-prediction output layers do not use batch norm
                 **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(in_channels,
                              out_channels,
                              bias=not bn_act,  # with BN, a bias is an unnecessary parameter
                              **kwargs)  # kernel_size, stride, padding, etc.
        self.bn = nn.BatchNorm2d(out_channels)
        self.leaky = nn.LeakyReLU(0.1)
        self.use_bn_act = bn_act

    def forward(self, x):
        if self.use_bn_act:
            return self.leaky(self.bn(self.conv(x)))
        else:
            return self.conv(x)


# Residual blocks:
# ["B", 1], ..., ["B", 2], ..., ["B", 8], ..., ["B", 8], ..., ["B", 4]
class ResidualBlock(nn.Module):
    def __init__(self,
                 channels,
                 use_residual=True,
                 num_repeats=1):
        super().__init__()
        self.layers = nn.ModuleList()
        for repeat in range(num_repeats):
            self.layers += [
                nn.Sequential(
                    CNNBlock(channels,        # per the paper's table: Convolutional, 32 filters, size 1x1
                             channels // 2,   # per the paper's table: Convolutional, 64 filters, size 3x3
                             kernel_size=1),  # padding defaults to 0
                    CNNBlock(channels // 2,
                             channels,
                             kernel_size=3,
                             padding=1),
                )
            ]
        self.use_residual = use_residual
        self.num_repeats = num_repeats

    def forward(self, x):
        for layer in self.layers:
            # version 1:
            if self.use_residual:  # with the residual connection
                x = x + layer(x)
            else:
                x = layer(x)
            # version 2 (equivalent one-liner):
            # x = x + layer(x) if self.use_residual else layer(x)
        return x


# For each image in a batch, the network produces three output layers.
# Each output layer is produced by 1x1 convolutions with
# (4 + 1 + num_class) * num_anchor_box filters.
# For the COCO dataset: (4 + 1 + 80) * 3 = 255, i.e. each COCO output layer is
# produced by 255 1x1 convolutions, and the three output shapes are
# [Batch_size, 13, 13, 255], [Batch_size, 26, 26, 255] and [Batch_size, 52, 52, 255].
# ----------------------------------------------
# |  anchor_box1  |  anchor_box2  | anchor_box3 |
# ----------------------------------------------
# laid out as:
# [tx_anchor1, ty_anchor1, th_anchor1, tw_anchor1, Prob_object_anchor1, P_class1_anchor1, ..., P_NumOfClass_anchor1,
#  tx_anchor2, ty_anchor2, th_anchor2, tw_anchor2, Prob_object_anchor2, P_class1_anchor2, ..., P_NumOfClass_anchor2,
#  tx_anchor3, ty_anchor3, th_anchor3, tw_anchor3, Prob_object_anchor3, P_class1_anchor3, ..., P_NumOfClass_anchor3]
class ScalePrediction(nn.Module):
    def __init__(self,
                 in_channels,
                 num_classes):
        super().__init__()
        # 3 anchor boxes, each with [tx, ty, th, tw, Prob_object, P_class1, ..., P_NumOfClass]
        self.pred = nn.Sequential(
            CNNBlock(in_channels,
                     2 * in_channels,
                     kernel_size=3,
                     padding=1),
            CNNBlock(in_channels * 2,
                     3 * (4 + 1 + num_classes),  # 3 * (4 + 1 + 80) = 255
                     bn_act=False,
                     kernel_size=1)  # 1x1 convolution
        )
        self.num_classes = num_classes  # 80 for COCO

    def forward(self, x):
        # the scale cell count is one of 13, 26 or 52; the shapes below assume 13
        # after reshape(): [batch_size, 3, 85, 13, 13]
        # after permute(): [batch_size, 3, 13, 13, 85]
        return (self.pred(x)
                .reshape(x.shape[0], 3, 5 + self.num_classes, x.shape[2], x.shape[3])
                .permute(0, 1, 3, 4, 2))


class YOLOv3(nn.Module):
    def __init__(self,
                 in_channels=3,
                 num_classes=80):
        super().__init__()
        self.num_classes = num_classes  # 80
        self.in_channels = in_channels  # 3
        self.layers = self._create_conv_layers()

    def forward(self, x):
        # collect the outputs of the three scales
        outputs = []
        # route layers (the feature maps concatenated with the upsampled tensor
        # in the skip connections); these are the outputs of the
        # num_repeats == 8 residual blocks
        route_connections = []
        for layer in self.layers:
            # output layer
            if isinstance(layer, ScalePrediction):
                outputs.append(layer(x))
                # ScalePrediction is a side branch; x should keep flowing along
                # the main path (upsample, concatenate with the same-size route
                # feature map, convolve again), so we `continue` rather than
                # overwrite x with the prediction
                continue
            x = layer(x)
            # residual block: the outputs of the num_repeats == 8 blocks are the route layers
            if isinstance(layer, ResidualBlock) and layer.num_repeats == 8:
                route_connections.append(x)
            # upsampling (there are only two upsample layers);
            # each upsample is followed by a skip connection
            elif isinstance(layer, nn.Upsample):
                x = torch.cat([x, route_connections[-1]],
                              dim=1)  # concat along dim=1 for the channels
                # pop the used route layer
                route_connections.pop()
        return outputs

    def _create_conv_layers(self):
        layers = nn.ModuleList()
        in_channels = self.in_channels
        for module in config:
            # tuple == convolution
            if isinstance(module, tuple):
                # unpack the tuple
                out_channels, kernel_size, stride = module
                layers.append(
                    CNNBlock(
                        in_channels,
                        out_channels,
                        kernel_size=kernel_size,
                        stride=stride,
                        padding=1 if kernel_size == 3 else 0,  # padding = 0 when kernel_size == 1
                    )
                )
                in_channels = out_channels
            # list == residual block
            elif isinstance(module, list):
                num_repeats = module[1]
                layers.append(ResidualBlock(in_channels,
                                            num_repeats=num_repeats))
            # str == "S" output layer or "U" upsampling
            elif isinstance(module, str):
                if module == "S":
                    layers += [
                        ResidualBlock(in_channels,
                                      use_residual=False,
                                      num_repeats=1),
                        CNNBlock(in_channels,
                                 in_channels // 2,
                                 kernel_size=1),
                        ScalePrediction(in_channels // 2,
                                        num_classes=self.num_classes),
                    ]
                    in_channels = in_channels // 2
                elif module == "U":
                    layers.append(nn.Upsample(scale_factor=2))  # upsampling factor 2
                    # After upsampling, x is concatenated with an earlier route
                    # layer, so the channel count changes. There are two upsamples:
                    # first:  256 channels, concatenated with the 512-channel route layer: 256 + 512 = 768 = 256 * 3
                    # second: 128 channels, concatenated with the 256-channel route layer: 128 + 256 = 384 = 128 * 3
                    in_channels = in_channels * 3
        return layers


if __name__ == "__main__":
    num_classes = 80
    IMAGE_SIZE = 416
    model = YOLOv3(num_classes=num_classes)
    x = torch.randn((2, 3, IMAGE_SIZE, IMAGE_SIZE))
    out = model(x)
    print(model)
    assert model(x)[0].shape == (2, 3, IMAGE_SIZE // 32, IMAGE_SIZE // 32, num_classes + 5)
    assert model(x)[1].shape == (2, 3, IMAGE_SIZE // 16, IMAGE_SIZE // 16, num_classes + 5)
    assert model(x)[2].shape == (2, 3, IMAGE_SIZE // 8, IMAGE_SIZE // 8, num_classes + 5)
    print("success!")
|
[
"[email protected]"
] | |
8fcc453a6612ea1113a68a205f138930103fba04
|
353c0c43c2e0df39b7716818ff11905fc66be081
|
/virtual/bin/pip3.6
|
40e80a31e8eeb49f54c4285f10939f8da4086138
|
[
"MIT"
] |
permissive
|
huguette135/Neighborhood
|
773e48987800331395c7c1041c810632414eddd4
|
d26042eb466379d81fcb674f6eda08e9bab3b177
|
refs/heads/master
| 2023-03-04T17:42:33.798425 | 2021-02-09T14:46:59 | 2021-02-09T14:46:59 | 336,979,024 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 259 |
6
|
#!/home/huguette/Desktop/neighborhood/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"[email protected]"
] | |
c84ddf4da41f9c2db08ce89ab29e9bf4167f1205
|
11952e488001f2adf55fdf65cba35d3e74990bdd
|
/settings.py
|
ee42ac8a378aaa7ad885d1fb40b3bfbcf1b9e349
|
[] |
no_license
|
JustNikhill/Website-using-Django
|
4bd029132f1ac848b82a6ba731c6155e4641fc0e
|
a93b2bce26d5d39efd86bd96f8d3d57356b0638b
|
refs/heads/main
| 2023-04-23T22:05:28.244070 | 2021-05-02T16:04:52 | 2021-05-02T16:04:52 | 359,555,675 | 12 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,218 |
py
|
"""
Django settings for pyshop project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uyczef5p@!3z_w2=i0_xsdq)$-m+5pe#hk0u4e!3nt&1@%16g4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'products.apps.ProductsConfig'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'pyshop.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'pyshop.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
6e7234d4031ee4d6e9d5025b4d8a1f4a09dbbbaf
|
9f68585c2ad4acaa0635b590d7a9d024c0cc65c1
|
/CV_ROI/extract_ROI.py
|
cc584c15ba5030624c18ab91cb9af19993fb4af1
|
[] |
no_license
|
browserliu/RMB_TechDing
|
d74ccce9d52d592512939c90f11abb53b1b8d697
|
3f553a05aee273008ab91c1d6f8f5b1d9308e6c0
|
refs/heads/master
| 2022-04-14T15:06:43.748425 | 2020-04-07T09:06:36 | 2020-04-07T09:06:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,781 |
py
|
# -*- coding: UTF-8 -*-
"""
# version: python3.6
# author: TechDing
# email: [email protected]
# file: extract_ROI.py
# time: 2019/7/2 9:41
# doc: use OpenCV image processing to extract the serial-number region (ROI)
#      from RMB banknote images
"""
import os

MODEL_PATH = os.path.abspath('./CV_ROI/Models/Face_Model.h5')

import cv2
import numpy as np


def model_predict_img(model, img_arr):  # OK
    '''
    Use model to predict which class img_arr belongs to.
    :param model: a trained classification model
    :param img_arr: ndarray of the full image, 0-255, uint8
    :return: the predicted class of the image.
    '''
    def crop_img(img, size=(320, 320)):
        if min(img.shape[:2]) < min(size):  # resize if the image is too small
            img = cv2.resize(img, (max(img.shape[1], size[0]), max(img.shape[0], size[1])))
        H0 = int(img.shape[0] / 2)
        W0 = int(img.shape[1] / 2)  # center point
        half_H = int(size[0] / 2)
        half_W = int(size[1] / 2)
        return cv2.resize(img[H0 - half_H:H0 + half_H, W0 - half_W:W0 + half_W], (int(size[0] / 2), int(size[1] / 2)))

    small_img = np.array([crop_img(img_arr, size=(320, 320))])
    predy = model.predict(small_img.astype('float64') / 255)  # only one image is predicted here
    return np.argmax(predy, axis=1)[0]  # only one image, so take the first element


from keras.models import load_model
from glob import glob
import keras.backend as K


def get_save_roi(src_folder, save_folder):  # OK
    '''
    First use the model to predict the denomination class of each image in
    src_folder, then extract the ROI (the region containing the serial number)
    according to that denomination, and save the ROI image to save_folder.
    # :param model_path: path of the trained denomination classification model
    :param src_folder: folder containing the images to process
    :param save_folder: folder where the extracted ROIs are saved, under the source file names
    :return: None
    '''
    def func1_01(img):  # 0.1 yuan
        return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    def func2_01(img):  # 0.1 yuan
        return img[-80:, 260:500]

    def func1_02(img):  # 0.2 yuan
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        return cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=1)

    def func2_02(img):
        return img[-80:, 270:510]

    def func1_05(img):
        img1 = cv2.GaussianBlur(img, (5, 5), 0)
        return cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

    def func2_05(img):
        return img[-80:, 150:390]

    def func1_1(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=3)
        gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel, iterations=3)
        gray = cv2.equalizeHist(gray)
        return gray

    def func2_1(img):
        img = img[-100:, 450:690]
        return cv2.resize(img, (240, 80))

    def func1_2(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=4)
        gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel, iterations=4)
        return cv2.equalizeHist(gray)

    def func2_2(img):
        img = img[-100:, 430:690]
        return cv2.resize(img, (240, 80))

    def func1_5(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        return cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=2)

    def func2_5(img):
        return img[-95:-15, 460:700]

    def func1_10(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=5)
        gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel, iterations=5)
        return cv2.equalizeHist(gray)

    def func2_10(img):
        img = img[-100:, 460:700]
        return cv2.resize(img, (240, 80))

    def func1_50(img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=3)
        gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel, iterations=3)
        return cv2.equalizeHist(gray)

    def func2_50(img):
        return img[200:280, -240:]

    def func1_100(img):  # same processing as for the 50-yuan notes
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN, kernel, iterations=3)
        gray = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel, iterations=3)
        return cv2.equalizeHist(gray)

    def func2_100(img):
        return img[200:280, -240:]

    def func2_default(img):
        roi = img[-120:-40, 580:870]
        return cv2.resize(roi, (240, 80))

    def calc_roi(img_arr, func1, func2):
        '''
        Compute the ROI of an image; the ROI has shape (80, 240, 3).
        :param img_arr: BGR image as returned by cv2.imread
        :param func1: preprocessing helper
        :param func2: cropping helper
        :return: roi
        '''
        def getAffine(img_arr, src_points):
            dst_points = np.float32([[0, 400], [0, 0], [872, 0]])
            affineMatrix = cv2.getAffineTransform(src_points, dst_points)
            return cv2.warpAffine(img_arr, affineMatrix, (872, 400))

        gray = func1(img_arr)
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
        _, contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        found = False
        for contour in contours:
            rect = cv2.minAreaRect(contour)  # minimum-area bounding rectangle
            width, height = rect[1]  # side lengths
            short = min(width, height)
            long = max(width, height)
            if short < min(img_arr.shape[:2]) * 0.5 or long < max(img_arr.shape[:2]) * 0.5 or short >= min(
                    img_arr.shape[:2]) or long >= max(img_arr.shape[:2]):
                continue
            box = cv2.boxPoints(rect)  # the 4 corner points of the rectangle
            box = np.int0(box)
            # cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
            found = True
            src_points = np.float32(box[1:]) if rect[2] < -45 else np.float32(box[:3])
            changed = getAffine(img_arr, src_points)
            break  # stop after the first match
        return func2(changed) if found else func2_default(img_arr)

    model_path = MODEL_PATH
    all_imgs = glob(src_folder + '/*.jpg')  # the source images are all .jpg
    model = load_model(model_path)  # note: loading the model is very slow
    dict1 = dict(
        zip(np.arange(9), [func1_01, func1_02, func1_05, func1_1, func1_2, func1_5, func1_10, func1_50, func1_100]))
    dict2 = dict(
        zip(np.arange(9), [func2_01, func2_02, func2_05, func2_1, func2_2, func2_5, func2_10, func2_50, func2_100]))
    for idx, img_path in enumerate(all_imgs):
        img0 = cv2.imread(img_path)
        pred = model_predict_img(model, img0)  # predicted class, 0-8
        roi = calc_roi(img0, dict1[pred], dict2[pred])
        _, img_name = os.path.split(img_path)
        cv2.imwrite(os.path.join(save_folder, img_name), roi)
        print('\r{}/{} finished...'.format(idx + 1, len(all_imgs)), end=' ')
    print('all cv_rois are saved to {}'.format(save_folder))
    K.clear_session()
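
# --- Editor's note (not part of the original file) ---
# `_, contours, _ = cv2.findContours(...)` above matches the OpenCV 3.x API,
# which returns (image, contours, hierarchy); under OpenCV 4.x, findContours
# returns only (contours, hierarchy), so that line would need to become
# `contours, _ = cv2.findContours(...)`.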
|
[
"[email protected]"
] | |
6b6f961f2543b2498fce105a3dc9af2cc10ae65d
|
30e3624ca8dbded7a0d1f012aa5c6674d7b74ec6
|
/src/python/resources/grpc_server.py
|
980e6ece41386716a01ad1b38aa94b2311cc78c2
|
[
"BSD-3-Clause"
] |
permissive
|
liangzz1991/taranis
|
af30c91525be83c5aa853af112f0b1985df12363
|
466666dad03e3a2011d053206fb450abd4e01845
|
refs/heads/master
| 2023-05-13T03:08:47.153436 | 2020-09-02T09:46:42 | 2020-09-02T09:46:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,159 |
py
|
# Copyright (C) 2019 Pierre Letessier
# This source code is licensed under the BSD 3 license found in the
# LICENSE file in the root directory of this source tree.
"""
GRPC Server
"""
import logging
import time
from concurrent import futures
from threading import Thread
import grpc
from errors.taranis_error import TaranisNotFoundError, TaranisAlreadyExistsError, TaranisError
from models.taranis_pb2 import DatabaseNameModel, NewDatabaseModel, SearchRequestModel, VectorsQueryModel, \
NewVectorsModel, IndexQueryModel, NewIndexModel
from models.taranis_pb2_grpc import TaranisServicer, add_TaranisServicer_to_server
from services.taranis_service import TaranisService
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
LOGGER = logging.getLogger("Taranis")
class Taranis(TaranisServicer):
    def __init__(self, taranis_service: TaranisService):
        self.taranis_service = taranis_service

    def getDatabase(self, request: DatabaseNameModel, context):
        try:
            return self.taranis_service.get_database(request.name)
        except TaranisNotFoundError as ex:
            context.abort(grpc.StatusCode.NOT_FOUND, ex.message)

    def createDatabase(self, request: NewDatabaseModel, context):
        try:
            return self.taranis_service.create_database(request)
        except TaranisAlreadyExistsError as ex:
            context.abort(grpc.StatusCode.ALREADY_EXISTS, ex.message)

    def deleteDatabase(self, request: DatabaseNameModel, context):
        try:
            return self.taranis_service.delete_database(request.name)
        except TaranisNotFoundError as ex:
            context.abort(grpc.StatusCode.NOT_FOUND, ex.message)
        except TaranisError as ex:
            context.abort(grpc.StatusCode.INTERNAL, ex.message)

    def getIndex(self, request: IndexQueryModel, context):
        try:
            return self.taranis_service.get_index(request.db_name, request.index_name)
        except TaranisNotFoundError as ex:
            context.abort(grpc.StatusCode.NOT_FOUND, ex.message)
        except TaranisError as ex:
            context.abort(grpc.StatusCode.INTERNAL, ex.message)

    def deleteIndex(self, request: IndexQueryModel, context):
        try:
            return self.taranis_service.delete_index(request.db_name, request.index_name)
        except TaranisNotFoundError as ex:
            context.abort(grpc.StatusCode.NOT_FOUND, ex.message)
        except TaranisError as ex:
            context.abort(grpc.StatusCode.INTERNAL, ex.message)

    def createIndex(self, request: NewIndexModel, context):
        try:
            return self.taranis_service.create_index(request)
        except TaranisAlreadyExistsError as ex:
            context.abort(grpc.StatusCode.ALREADY_EXISTS, ex.message)

    def trainIndex(self, request: IndexQueryModel, context):
        try:
            return self.taranis_service.train_index(request.db_name, request.index_name)
        except TaranisError as ex:
            context.abort(grpc.StatusCode.INTERNAL, ex.message)

    def reindex(self, request: IndexQueryModel, context):
        try:
            return self.taranis_service.reindex(request.db_name, request.index_name)
        except TaranisError as ex:
            context.abort(grpc.StatusCode.INTERNAL, ex.message)

    def addVectors(self, request: NewVectorsModel, context):
        try:
            return self.taranis_service.put_vectors(request.db_name, request.vectors, index_name=request.index_name)
        except TaranisNotFoundError as ex:
            context.abort(grpc.StatusCode.NOT_FOUND, ex.message)
        except TaranisError as ex:
            context.abort(grpc.StatusCode.INTERNAL, ex.message)

    def getVectors(self, request: VectorsQueryModel, context):
        return self.taranis_service.get_vectors(request.db_name, request.ids)

    def searchVectors(self, request: SearchRequestModel, context):
        return self.taranis_service.search(request.db_name,
                                           list(request.vectors),
                                           index_name=request.index_name,
                                           k=request.k, n_probe=request.n_probe)


class GRPCServer(Thread):
    def __init__(self, taranis_service, listen_address='[::]', listen_port=50051, max_workers=10):
        Thread.__init__(self)
        self.listen_address = listen_address
        self.listen_port = listen_port
        self.max_workers = max_workers
        self.taranis_service = taranis_service

    def run(self):
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=self.max_workers))
        add_TaranisServicer_to_server(Taranis(self.taranis_service), server)
        server_listen = '{}:{}'.format(self.listen_address, self.listen_port)
        server.add_insecure_port(server_listen)
        LOGGER.info("Starting gRPC server on %s", server_listen)
        server.start()
        try:
            while True:
                time.sleep(_ONE_DAY_IN_SECONDS)
        except KeyboardInterrupt:
            logging.info("KeyboardInterrupt")
            server.stop(0)
# if __name__ == '__main__':
# logging.basicConfig()
# serve()
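
# --- Editor's sketch (not part of the original file) ---
# Typical use, assuming a constructed TaranisService instance named
# `taranis_service`:
#
# server = GRPCServer(taranis_service, listen_port=50051)
# server.start()  # Thread.start(); run() blocks its thread in the sleep loop
# server.join()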
|
[
"[email protected]"
] | |
243b00fb792df0d908725a77d369f7a886e958ca
|
7319bdc1aa1edd9e37424da47264882753dda919
|
/monitor_nomina.py
|
fde617e7fa6aa3fb079d6c0dc9c7e6ee000411ae
|
[] |
no_license
|
njmube/satconnect
|
4ff81ac132811d2784d82a872be34590f53021db
|
de421f546a6f7f4cc5f247d1b2ba91ac272bdcb9
|
refs/heads/master
| 2023-03-18T12:58:18.379008 | 2017-10-24T07:14:05 | 2017-10-24T07:14:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 263 |
py
|
# -*- coding: utf-8 -*-
from LibTools.filesystem import Carpeta
from slaves import SentinelNomina
import settings
if __name__ == '__main__':
    carpeta = Carpeta(settings.folder_nomina)
    sentinela = SentinelNomina(carpeta)
    sentinela.start_Monitoring()
|
[
"="
] |
=
|
cf868c8859e677ff8e62e32485a1042898d1de4c
|
1f8f3704e86cbda95c26057a947115ad32d643d7
|
/python/ex046.py
|
02824e97d4b663fce3d1060aedc6ab96c00d1be8
|
[
"MIT"
] |
permissive
|
lucasdiogomartins/curso-em-video
|
21ac8f9c092112e541b1f76a93743b9de2ecfcb8
|
9da92b6255a11021f719a9e0ce994db639e1ac38
|
refs/heads/main
| 2023-07-02T04:12:32.799219 | 2021-08-13T23:27:38 | 2021-08-13T23:27:38 | 389,199,712 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 90 |
py
|
from time import sleep
for c in range(10, 0, -1):
    print(c)
    sleep(1)
print('BUM!')
|
[
"[email protected]"
] | |
bb98f35adc8e0f2ec79f4ea7a0b2314a9ec8bec0
|
0a85e9ecb51c89110794aeb399fc3ccc0bff8c43
|
/InterviewCake/Practice Problems/reverse_string_inPlace.py
|
482b60e1d1415f53519182dd35b2f0e7cd6af001
|
[] |
no_license
|
jordan-carson/Data_Structures_Algos
|
6d246cd187e3c3e36763f1eedc535ae1b95c0b18
|
452bb766607963e5ab9e39a354a24ebb26ebaaf5
|
refs/heads/master
| 2020-12-02T23:19:11.315890 | 2020-09-15T01:23:29 | 2020-09-15T01:23:29 | 231,147,094 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 421 |
py
|
STRING = ['a', 'b', 'c', 'd']
def reverse_string(string_list):
    left_index = 0
    right_index = len(string_list) - 1
    while left_index < right_index:
        string_list[left_index], string_list[right_index] = \
            string_list[right_index], string_list[left_index]
        left_index += 1
        right_index -= 1
    return string_list


if __name__ == '__main__':
    print(reverse_string(STRING))
|
[
"[email protected]"
] | |
2d04eb4a6d7119cd114da0714ffeaa23551be0a1
|
ad5ad404d24f1ef195d069b2e9d36b1a22cfd25d
|
/kde/applications/kiten/kiten.py
|
68d4236f5c283e083b03af733ec7b7b92ed78a0d
|
[
"BSD-2-Clause"
] |
permissive
|
arruor/craft-blueprints-kde
|
6643941c87afd09f20dd54635022d8ceab95e317
|
e7e2bef76d8efbc9c4b84411aa1e1863ac8633c1
|
refs/heads/master
| 2020-03-22T17:54:38.445587 | 2018-07-10T11:47:21 | 2018-07-10T11:47:21 | 140,423,580 | 0 | 0 | null | 2018-07-10T11:43:08 | 2018-07-10T11:43:07 | null |
UTF-8
|
Python
| false | false | 1,228 |
py
|
import info
class subinfo(info.infoclass):
    def setTargets(self):
        self.versionInfo.setDefaultValues()
        self.description = "Kiten"

    def setDependencies(self):
        self.runtimeDependencies["virtual/base"] = "default"
        self.buildDependencies["kde/frameworks/extra-cmake-modules"] = "default"
        self.runtimeDependencies["libs/qt5/qtbase"] = "default"
        self.runtimeDependencies["kde/frameworks/tier1/karchive"] = "default"
        self.runtimeDependencies["kde/frameworks/tier2/kcompletion"] = "default"
        self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = "default"
        self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = "default"
        self.runtimeDependencies["kde/frameworks/tier2/kcrash"] = "default"
        self.runtimeDependencies["kde/frameworks/tier2/kdoctools"] = "default"
        self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = "default"
        self.runtimeDependencies["kde/frameworks/tier3/khtml"] = "default"
        self.runtimeDependencies["kde/frameworks/tier3/kxmlgui"] = "default"


from Package.CMakePackageBase import *


class Package(CMakePackageBase):
    def __init__(self):
        CMakePackageBase.__init__(self)
|
[
"[email protected]"
] | |
5867eadfdd7b174b5c35658d67afcf6e1dd59727
|
cb756c30bbb1f88d0168470460aafd9746361c72
|
/lesson_file_05.py
|
6b383326b9a8096a072430a2965944132128d960
|
[] |
no_license
|
Shinrei-Boku/kreis_academy_python
|
fc215bdfa6dc9903ec3fb345d75cbd007bf080ea
|
ced1c4effcf9ce89c5ca88545f0dcaee872abf47
|
refs/heads/main
| 2023-05-27T06:39:25.273270 | 2021-06-08T17:03:55 | 2021-06-08T17:03:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 214 |
py
|
# kreis academy: file operations
import string

s = """\
My name is $name.
I am $age years old.
"""
template = string.Template(s)
contents = template.substitute(name='近藤', age='20')
print(contents)
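
# --- Editor's note (not part of the original file) ---
# string.Template.substitute() raises KeyError if a placeholder such as $name
# is missing from the keyword arguments; template.safe_substitute() would
# instead leave unmatched placeholders in the output unchanged.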
|
[
"[email protected]"
] | |
1065b7ee6d71f8b412d54e715c0c2b3734c8efb1
|
d0e8e7896f6ac8c82e1d2158ca59d64fbca24b11
|
/scramble.py
|
b6df1726134eab2dd8f3363b93bf59dbc5fb9b5b
|
[
"MIT"
] |
permissive
|
mdhunter/random-python
|
e91f99b1261d71d6fb2e7456f7140ae2aeb9c922
|
81daa7077eb0fc8a417f006b6deb6a957108f266
|
refs/heads/master
| 2021-01-24T08:09:04.774024 | 2016-10-07T04:46:34 | 2016-10-07T04:46:34 | 70,199,591 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,157 |
py
|
#!/usr/bin/env python3
"""
Scrambles an image into blocks
Copyright 2016 Mathew Hunter
"""
import argparse
import random
from PIL import Image
class Scrambler():
    """
    A class that contains functionality to scramble an image using a simple
    block shuffling method
    """

    # The number of horizontal and vertical blocks to fit into the image
    hblocks = 5
    vblocks = 5

    # Initializer
    def __init__(self, hblocks, vblocks):
        """
        Creates a new instance.

        Args:
            hblocks: the number of blocks to fit horizontally
            vblocks: the number of blocks to fit vertically
        """
        self.hblocks = hblocks
        self.vblocks = vblocks

    def scramble_image(self, source_image):
        """
        Scrambles an image.

        Args:
            source_image: an Image reference

        Returns:
            A scrambled image
        """
        # Build the random list of block ids
        block_ids = [x for x in range(0, self.vblocks * self.hblocks)]
        random.shuffle(block_ids)

        # Create an empty destination image
        dest_image = Image.new(source_image.mode, source_image.size)

        # For each block, copy the referenced block from the source image to the next
        # block in the destination image
        current_block = 0
        for block_id in block_ids:
            self.__copy_block(source_image, block_id, dest_image, current_block)
            current_block += 1

        return dest_image

    # Calculates the pixel coordinates of the specified blocks
    def __calculate_block_coordinates(self, block_id, width, height):
        """
        Calculates pixel coordinates for a block.

        Args:
            block_id: the id of the block
            width: the width of a block
            height: the height of a block

        Returns:
            a tuple containing the upper left and lower right coordinates
        """
        # Calculate block row/column
        row = int(block_id / self.vblocks)
        column = int(block_id % self.vblocks)

        # Calculate the start and end coordinates
        start = (column * width, row * height)
        end = (start[0] + width, start[1] + height)

        return (start[0], start[1], end[0], end[1])

    # Copies a block in a source image to a destination image
    def __copy_block(self, source_image, source_block, dest_image, dest_block):
        """
        Copies a block from the source image into a block in the destination image.

        Args:
            source_image: the source image
            source_block: the id of the block in the source image to copy
            dest_image: the destination image
            dest_block: the id of the block into which to copy the source block
        """
        # Calculate block width/height
        image_size = source_image.size
        width = int(image_size[0] / self.hblocks)
        height = int(image_size[1] / self.vblocks)

        # Calculate the source and destination blocks
        source_coords = self.__calculate_block_coordinates(source_block, width, height)
        dest_coords = self.__calculate_block_coordinates(dest_block, width, height)

        # Crop a region from the source image and paste it into the destination
        region = source_image.crop(source_coords)
        dest_image.paste(region, dest_coords)


if __name__ == "__main__":
    # Parse the arguments
    parser = argparse.ArgumentParser(description="Scrambles an image into blocks")
    parser.add_argument("source_file", help="the file to scramble")
    parser.add_argument("--vblocks", nargs="?", type=int,
                        help="the number of blocks to fit vertically", default=5)
    parser.add_argument("--hblocks", nargs="?", type=int,
                        help="the number of blocks to fit horizontally", default=5)
    args = parser.parse_args()

    # Load the source image
    source_image = Image.open(args.source_file)

    # Create a Scrambler instance and scramble
    scrambler = Scrambler(args.hblocks, args.vblocks)
    dest_image = scrambler.scramble_image(source_image)
    dest_image.show()
|
[
"[email protected]"
] | |
7b35ac2384529e8bb902194f56b1d0d824520edc
|
016109b9f052ffd037e9b21fa386b36089b05813
|
/hashP4.py
|
559f024f058f63f9e587e9c5a8b7a38c51b5ec47
|
[] |
no_license
|
nsshayan/DataStructuresAndAlgorithms
|
9194508c5227c5c8c60b9950917a4ea8da8bbab2
|
2f7ee1bc8f4b53c35d1cce62e898a9695d99540a
|
refs/heads/master
| 2022-09-29T21:15:33.803558 | 2022-09-08T17:14:59 | 2022-09-08T17:14:59 | 73,257,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 689 |
py
|
A, k = map(int, raw_input().rstrip().split(" "))
nos = map(int, raw_input().rstrip().split(" "))

hashMap = [0 for y in range(1000002)]
for i in range(A):
    hashMap[nos[i]] += 1

left = 0
right = 1000001
flag = 0
while left < right:
    if hashMap[left] == 0 or hashMap[right] == 0:
        while hashMap[left] == 0:
            left += 1
        while hashMap[right] == 0:
            right -= 1
    if (left + right) == k and left != right:
        flag = 1
        break
    elif left + right > k:
        right -= 1
    elif left + right < k:
        left += 1

if left + right == k and left == right and hashMap[left] > 1:
    flag = 1

if flag == 1:
    print "YES"
else:
    print "NO"
|
[
"[email protected]"
] | |
38a986a16c81beefac8e3c9216f635f9c05fdd36
|
b0345b816826afa275dc328c8308234c9536b507
|
/chapter16/class_variable_part2.py
|
8b40f96a5af92435850b10f282996d5b641b2ccd
|
[] |
no_license
|
jaymin1570/PythonTutorial
|
1edf8436de41bdbecd378a6103b9172c60c8dd5a
|
f0bb9a76a0f6c5d1703bf105ff7c4107c6b78729
|
refs/heads/master
| 2020-11-29T06:01:53.274869 | 2019-12-25T04:13:59 | 2019-12-25T04:13:59 | 230,039,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 724 |
py
|
class Laptop:
    discount_percent = 10

    def __init__(self, brand, model_name, price):
        self.Laptop_brand = brand
        self.Laptop_model_name = model_name
        self.Laptop_price = price
        self.Laptop_name = brand + ' ' + model_name

    def apply_discount(self):
        discount = self.Laptop_price - ((self.Laptop_price * self.discount_percent) / 100)
        return f"after applying discount price:{discount}"


# Laptop.discount_percent = 100
l1 = Laptop('HP', 'HP 15-da007ttx', 48000)
l2 = Laptop('apple', 'macbook', 230000)
print(l1.Laptop_name)
print(l1.Laptop_price)
print(l1.apply_discount())
print(l2.Laptop_name)
print(l2.Laptop_price)
l2.discount_percent = 50
print(l2.__dict__)
print(l2.apply_discount())
print(l1.__dict__)
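
# --- Editor's note (not part of the original file) ---
# discount_percent is a class attribute shared by all Laptop instances.
# Assigning l2.discount_percent = 50 does not change the class attribute; it
# creates an instance attribute on l2 that shadows it. That is why
# l2.__dict__ contains 'discount_percent' while l1.__dict__ does not, and why
# l1.apply_discount() still uses the class-level value of 10.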
|
[
"[email protected]"
] | |
7091475a03d37a18e9d953f65307c93e950ce3ad
|
fee71dd79c16f8e4aa4be46aa25863a3e8539a51
|
/ear/core/bs2051.py
|
058eefc981611aa995294b0783b491c5ba08e367
|
[
"BSD-3-Clause-Clear"
] |
permissive
|
ebu/ebu_adm_renderer
|
d004ed857b3004c9de336426f402654779a0eaf8
|
ef2189021203101eab323e1eccdd2527b32a5024
|
refs/heads/master
| 2023-08-09T09:13:06.626698 | 2022-12-07T12:22:39 | 2022-12-07T12:22:39 | 123,921,945 | 61 | 13 |
BSD-3-Clause-Clear
| 2023-08-30T17:17:05 | 2018-03-05T13:15:36 |
Python
|
UTF-8
|
Python
| false | false | 1,791 |
py
|
import pkg_resources
from ..compatibility import load_yaml
from .geom import PolarPosition
from .layout import Channel, Layout
def _dict_to_channel(d):
    position = PolarPosition(azimuth=d["position"]["az"],
                             elevation=d["position"]["el"],
                             distance=1.0)
    return Channel(
        name=d["name"],
        is_lfe=d.get("is_lfe", False),
        polar_position=position,
        polar_nominal_position=position,
        az_range=tuple(d.get("az_range", (position.azimuth, position.azimuth))),
        el_range=tuple(d.get("el_range", (position.elevation, position.elevation))),
    )


def _dict_to_layout(d):
    return Layout(
        name=d["name"],
        channels=list(map(_dict_to_channel, d["channels"])),
    )


def _load_layouts():
    fname = "data/2051_layouts.yaml"
    with pkg_resources.resource_stream(__name__, fname) as layouts_file:
        layouts_data = load_yaml(layouts_file)

    layouts = list(map(_dict_to_layout, layouts_data))

    for layout in layouts:
        errors = []
        layout.check_positions(callback=errors.append)
        assert errors == []

    layout_names = [layout.name for layout in layouts]
    layouts_dict = {layout.name: layout for layout in layouts}
    return layout_names, layouts_dict


layout_names, layouts = _load_layouts()


def get_layout(name):
    """Get data for a layout specified in BS.2051.

    Parameters:
        name (str): Full layout name, e.g. "4+5+0"

    Returns:
        Layout: object representing the layout; real speaker positions are set
        to the nominal positions.
    """
    if name not in layout_names:
        raise KeyError("Unknown layout name '{name}'.".format(name=name))
    return layouts[name]
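
# --- Editor's sketch (not part of the original file) ---
# Example use, with the layout name taken from the docstring above:
#
# layout = get_layout("4+5+0")
# print([channel.name for channel in layout.channels])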
|
[
"[email protected]"
] | |
5777d717f17ec91cd38fe5807faba4b2da0b9bf7
|
3defb855022314f768af12beb417b364cbb58e4f
|
/blog/models.py
|
5a3e8d67edbc4137e061c9f8494ef0acccc8f7c8
|
[] |
no_license
|
CYetlanezi/my-first-blog
|
ec2fe3b2d18c2bf8063f5b8d36d1017e46a83275
|
099b1614af03c99de786cefb722b081b239e1d65
|
refs/heads/master
| 2021-01-10T18:12:08.756141 | 2016-02-27T23:25:38 | 2016-02-27T23:25:38 | 52,673,602 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 554 |
py
|
from django.db import models
from django.utils import timezone
#jwdkw
class Post(models.Model):
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(
        default=timezone.now)
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
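
# --- Editor's note (not part of the original file) ---
# models.ForeignKey('auth.User') with no on_delete argument only works on
# Django < 2.0; from Django 2.0 on, the argument is required, e.g.
# models.ForeignKey('auth.User', on_delete=models.CASCADE).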
|
[
"[email protected]"
] | |
1628b9d704c430771ffe07895f60f69d5d03c21c
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nntwelv.py
|
4e1af148e299d47bb87a0be2b829ebcc80cee86d
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 2,600 |
py
|
ii = [('CookGHP3.py', 8), ('LyelCPG2.py', 4), ('MarrFDI.py', 2), ('RogePAV2.py', 11), ('CoolWHM2.py', 20), ('KembFFF.py', 1), ('GodwWSL2.py', 22), ('RogePAV.py', 4), ('SadlMLP.py', 6), ('WilbRLW.py', 15), ('WilbRLW4.py', 9), ('RennJIT.py', 15), ('ProuWCM.py', 4), ('AubePRP2.py', 28), ('CookGHP.py', 6), ('ShawHDE.py', 6), ('MartHSI2.py', 11), ('LeakWTI2.py', 19), ('UnitAI.py', 9), ('KembFJ1.py', 20), ('WilkJMC3.py', 7), ('WilbRLW5.py', 7), ('LeakWTI3.py', 18), ('PettTHE.py', 14), ('MarrFDI3.py', 7), ('PeckJNG.py', 19), ('BailJD2.py', 5), ('AubePRP.py', 21), ('GellWPT.py', 10), ('AdamWEP.py', 7), ('FitzRNS3.py', 37), ('WilbRLW2.py', 10), ('ClarGE2.py', 54), ('GellWPT2.py', 7), ('WilkJMC2.py', 5), ('CarlTFR.py', 93), ('SeniNSP.py', 4), ('LyttELD.py', 1), ('CoopJBT2.py', 1), ('GrimSLE.py', 1), ('RoscTTI3.py', 2), ('AinsWRR3.py', 4), ('CookGHP2.py', 4), ('KiddJAE.py', 6), ('RoscTTI2.py', 2), ('CoolWHM.py', 27), ('MarrFDI2.py', 2), ('CrokTPS.py', 7), ('ClarGE.py', 47), ('LandWPA.py', 1), ('BuckWGM.py', 13), ('IrviWVD.py', 9), ('LyelCPG.py', 41), ('GilmCRS.py', 5), ('DaltJMA.py', 12), ('WestJIT2.py', 23), ('DibdTRL2.py', 17), ('AinsWRR.py', 2), ('CrocDNL.py', 9), ('MedwTAI.py', 18), ('WadeJEB.py', 38), ('FerrSDO2.py', 2), ('TalfTIT.py', 1), ('NewmJLP.py', 3), ('GodwWLN.py', 10), ('CoopJBT.py', 1), ('KirbWPW2.py', 6), ('SoutRD2.py', 4), ('BackGNE.py', 22), ('LeakWTI4.py', 29), ('LeakWTI.py', 26), ('MedwTAI2.py', 9), ('BachARE.py', 133), ('SoutRD.py', 6), ('DickCSG.py', 1), ('BuckWGM2.py', 2), ('WheeJPT.py', 27), ('MereHHB3.py', 37), ('HowiWRL2.py', 14), ('BailJD3.py', 1), ('MereHHB.py', 31), ('WilkJMC.py', 24), ('HogaGMM.py', 15), ('MartHRW.py', 9), ('MackCNH.py', 11), ('WestJIT.py', 16), ('BabbCEM.py', 25), ('FitzRNS4.py', 21), ('CoolWHM3.py', 14), ('DequTKM.py', 9), ('FitzRNS.py', 47), ('BentJRP.py', 3), ('LyttELD3.py', 2), ('RoscTTI.py', 11), ('ThomGLG.py', 11), ('StorJCC.py', 16), ('KembFJ2.py', 20), ('LewiMJW.py', 20), ('BabbCRD.py', 3), ('MackCNH2.py', 13), ('JacoWHI2.py', 34), ('SomeMMH.py', 8), ('HaliTBC.py', 1), ('WilbRLW3.py', 20), ('MereHHB2.py', 13), ('BrewDTO.py', 2), ('JacoWHI.py', 29), ('ClarGE3.py', 31), ('RogeSIP.py', 10), ('MartHRW2.py', 8), ('DibdTRL.py', 19), ('FitzRNS2.py', 43), ('HogaGMM2.py', 5), ('MartHSI.py', 10), ('EvarJSP.py', 7), ('DwigTHH.py', 6), ('NortSTC.py', 1), ('SadlMLP2.py', 4), ('BowrJMM2.py', 4), ('LyelCPG3.py', 11), ('BowrJMM3.py', 3), ('BeckWRE.py', 2), ('TaylIF.py', 5), ('WordWYR.py', 1), ('DibdTBR.py', 1), ('ThomWEC.py', 3), ('KeigTSS.py', 20), ('KirbWPW.py', 4), ('WaylFEP.py', 9), ('ClarGE4.py', 77), ('HowiWRL.py', 16)]
|
[
"[email protected]"
] | |
89cf7f4a3c783d5bfcc7ce09e34dc0ce72854b8d
|
a7e2993776df55ddc8f17c483524e627591741be
|
/Question3_1.py
|
ac3b2cc7183ce87fe7368e790571715c83d276ba
|
[] |
no_license
|
alu-rwa-dsa/week-1-list-complexity-fiona_wanji_dirac
|
02956ffafb732898e1089a90d1027c78485fbd74
|
dd5a6c2d6b012d552981674776744df6e3b756de
|
refs/heads/main
| 2023-03-01T12:51:52.661840 | 2021-02-02T17:56:32 | 2021-02-02T17:56:32 | 335,308,144 | 0 | 0 | null | 2021-02-02T14:03:45 | 2021-02-02T14:03:39 | null |
UTF-8
|
Python
| false | false | 153 |
py
|
# Question 3
# finding the maximum value in a list
listA = ["apple", "banana", "candles"]
print("The Maximum Value in this list is: ", str(max(listA)))
|
[
"[email protected]"
] | |
6a541d980a359c5dc3526b6cc3e3a9de9848afe5
|
d64db65e12c613518b4357178eebfb27b318f4c6
|
/song.py
|
a8efbb9437f5f99e8b3e468a2a7e6017df91e8e9
|
[
"MIT"
] |
permissive
|
mozanunal/harmonica-note-converter
|
a4794db16698b06b1d136edfd2fedfafe3bf253e
|
aba9e248d2562e90622e7da5350175cfb8ee88cf
|
refs/heads/master
| 2021-07-16T06:31:35.849291 | 2021-06-21T11:36:44 | 2021-06-21T11:36:44 | 151,971,490 | 6 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,975 |
py
|
from __future__ import print_function
from musthe import *
def convertNoteMap(noteMap):
    newNoteMap = {}
    for key, value in noteMap.items():
        newNoteMap[key] = Note(value).number
    return newNoteMap


class Song(object):
    def __init__(self, name):
        self.name = name
        self.noteList = []
        self.lineBreakList = []

    def shift(self, ratio):
        newNoteList = []
        for note in self.noteList:
            newNoteList.append(note + ratio)
        self.noteList = newNoteList

    def getFromTabs(self, noteMap, tabs):
        tabList = []
        # read file
        counter = 0
        for line in tabs.splitlines():
            for tab in line.split(" "):
                if tab != "":  # `is not ""` compared identity, not equality
                    tabList.append(tab)
                    counter += 1
            self.lineBreakList.append(counter)
        # convert generic notes
        for tab in tabList:
            self.noteList.append(
                Note(noteMap[tab]).number
            )

    def exportTabs(self, noteMap):
        convertedNoteMap = convertNoteMap(noteMap)
        invNoteMap = {v: k for k, v in convertedNoteMap.items()}
        tabs = []
        for note in self.noteList:
            try:
                tabs.append(invNoteMap[note])
            except KeyError:
                tabs.append("XX")
        self.printTabs(tabs)
        return tabs

    def printNotes(self):
        print("--------------")
        print(self.name)
        for id, note in enumerate(self.noteList):
            if id in self.lineBreakList:
                print("")
            print(str(note.letter) + str(note.accidental) + str(note.octave),)
        print("\n--------------")

    def printTabs(self, tabs):
        print("--------------")
        print(self.name)
        for idx, tab in enumerate(tabs):
            if idx in self.lineBreakList:
                print("")
            print(tab, end=" ")
        print("\n--------------")
|
[
"[email protected]"
] | |
bb86e6cf9d5e401f16ddddb0dac811d9f2c57d11
|
950bdea00a3ea4090f5f90716359d9c2668d1695
|
/google/cloud/bigquery/job/__init__.py
|
4c16d0e20219be2ab776a41c971451cebbbdc381
|
[
"Apache-2.0"
] |
permissive
|
akamil-etsy/python-bigquery
|
a01f19258e3522e459d8472315f9ea8b90dd8784
|
cf0b0d862e01e9309407b2ac1a48f0bfe23d520d
|
refs/heads/master
| 2023-07-03T22:15:17.427257 | 2021-08-05T14:59:15 | 2021-08-05T14:59:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,188 |
py
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Jobs."""
from google.cloud.bigquery.job.base import _AsyncJob
from google.cloud.bigquery.job.base import _error_result_to_exception
from google.cloud.bigquery.job.base import _DONE_STATE
from google.cloud.bigquery.job.base import _JobConfig
from google.cloud.bigquery.job.base import _JobReference
from google.cloud.bigquery.job.base import ReservationUsage
from google.cloud.bigquery.job.base import ScriptStatistics
from google.cloud.bigquery.job.base import ScriptStackFrame
from google.cloud.bigquery.job.base import UnknownJob
from google.cloud.bigquery.job.copy_ import CopyJob
from google.cloud.bigquery.job.copy_ import CopyJobConfig
from google.cloud.bigquery.job.copy_ import OperationType
from google.cloud.bigquery.job.extract import ExtractJob
from google.cloud.bigquery.job.extract import ExtractJobConfig
from google.cloud.bigquery.job.load import LoadJob
from google.cloud.bigquery.job.load import LoadJobConfig
from google.cloud.bigquery.job.query import _contains_order_by
from google.cloud.bigquery.job.query import DmlStats
from google.cloud.bigquery.job.query import QueryJob
from google.cloud.bigquery.job.query import QueryJobConfig
from google.cloud.bigquery.job.query import QueryPlanEntry
from google.cloud.bigquery.job.query import QueryPlanEntryStep
from google.cloud.bigquery.job.query import ScriptOptions
from google.cloud.bigquery.job.query import TimelineEntry
from google.cloud.bigquery.enums import Compression
from google.cloud.bigquery.enums import CreateDisposition
from google.cloud.bigquery.enums import DestinationFormat
from google.cloud.bigquery.enums import Encoding
from google.cloud.bigquery.enums import QueryPriority
from google.cloud.bigquery.enums import SchemaUpdateOption
from google.cloud.bigquery.enums import SourceFormat
from google.cloud.bigquery.enums import WriteDisposition
# Include classes previously in job.py for backwards compatibility.
__all__ = [
"_AsyncJob",
"_error_result_to_exception",
"_DONE_STATE",
"_JobConfig",
"_JobReference",
"ReservationUsage",
"ScriptStatistics",
"ScriptStackFrame",
"UnknownJob",
"CopyJob",
"CopyJobConfig",
"OperationType",
"ExtractJob",
"ExtractJobConfig",
"LoadJob",
"LoadJobConfig",
"_contains_order_by",
"DmlStats",
"QueryJob",
"QueryJobConfig",
"QueryPlanEntry",
"QueryPlanEntryStep",
"ScriptOptions",
"TimelineEntry",
"Compression",
"CreateDisposition",
"DestinationFormat",
"Encoding",
"QueryPriority",
"SchemaUpdateOption",
"SourceFormat",
"WriteDisposition",
]
|
[
"[email protected]"
] | |
a97f5084296f2d3ee5ab642a3f8b277181382173
|
8aa8d7f742eaba0f89d70ef42918e076be01bae6
|
/YR/week3/problem263.py
|
30a7b99b382dedbb7fbb099f89f217a9af37700a
|
[] |
no_license
|
robin9804/RoadToDeepLearningKing
|
d88a49995b836cb49ce680a0d385a1bb2ae87e99
|
fe8695b0d8d7beb5d64d450806e7866a3b103875
|
refs/heads/main
| 2023-03-19T03:10:16.223113 | 2021-03-11T07:31:09 | 2021-03-11T07:31:09 | 325,275,554 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 545 |
py
|
# problem 263
N, M = list(input("enter the ints : ").split())
N, M = int(N), int(M)
def sum_list(l1, l2):
    lists = []
    for i in l1:
        for j in l2:
            lists.append(i + j)
    return lists


def func(N, M):
    lists = []
    if N == 1:
        if M in range(1, 7):
            return [[M]]
        else:
            return False
    else:
        for i in range(1, 7):
            if func(N - 1, M - i) != False:
                lists += sum_list([[i]], func(N - 1, M - i))
        return lists


print(func(N, M))
print(len(func(N, M)))
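
# --- Editor's note (not part of the original file) ---
# func(N, M) enumerates every ordered sequence of N six-sided die rolls that
# sums to M, so the second print gives the number of such sequences.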
|
[
"[email protected]"
] | |
ec9c672ba561dc9a563055772efd8060d3f58b19
|
aaaf02ac5af827adfae7182f9e56648582ee65c7
|
/src/beam_search_best_para_old.py
|
b15fcdd8a04904b53d0bba279c1ae4b3b6d3d965
|
[] |
no_license
|
LifengFan/Triadic-Belief-Dynamics
|
542c84d5e0c1e76a3457a6fab19d70a8885961c3
|
18a5e2a4435a3f847c87a7782a774dfe1e94d485
|
refs/heads/main
| 2023-06-30T08:25:51.951416 | 2021-08-03T13:18:17 | 2021-08-03T13:18:17 | 352,404,707 | 23 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 38,421 |
py
|
import glob
import sys
from metadata import *
from utils import *
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering, DBSCAN
import argparse
import pickle
import numpy, scipy.io
import torch
import numpy as np
import joblib
import pywt
from overall_event_get_input import *
from Atomic_node_only_lstm import Atomic_node_only_lstm
import copy
import time
import threading
import os, os.path
from metric import *
# from BeamSearchClass import *
from multiprocessing import Process
#import torch.multiprocessing
from Atomic_node_only_lstm_517 import Atomic_node_only_lstm_first_view
# torch.multiprocessing.set_start_method('spawn', force='True')
from joblib import Parallel, delayed
from os import listdir
from para_bank import generate_para_bank
from itertools import product
class BeamSearch(object):
# ================
# init functions
# ================
def __init__(self, args, event_net, atomic_net):
self.args = args
self.epsilon = 0.000001
self.init_cps_all = pickle.load(open(args.init_cps, 'rb'), encoding='latin1')
self.event_net=event_net
self.atomic_net=atomic_net
self.event_trans_table = pickle.load(open(os.path.join(args.stat_path, 'event_trans_normalized.p'), 'rb'), encoding='latin1')
def init(self, clip):
self.clip = clip # with .p
self.clip_length = event_seg_tracker[clip.split('.')[0]][-1][1] + 1
# initial cps
self.init_cps_T = self.init_cps_all[0][self.clip]
self.init_cps_B = self.init_cps_all[1][self.clip]
# add the final frame
self.init_cps_T.append(self.clip_length)
self.init_cps_T=list(np.unique(self.init_cps_T))
self.init_cps_B.append(self.clip_length)
self.init_cps_B = list(np.unique(self.init_cps_B))
with open(self.args.tracker_bbox + clip, 'rb') as f:
self.person_tracker_bbox = pickle.load(f, encoding='latin1')
with open(self.args.battery_bbox + clip, 'rb') as f:
self.person_battery_bbox = pickle.load(f, encoding='latin1')
# attmat
with open(self.args.attmat_path + clip, 'rb') as f:
self.attmat_obj = pickle.load(f, encoding='latin1')
with open(self.args.cate_path + clip, 'rb') as f:
self.category = pickle.load(f, encoding='latin1')
# feature
with open(os.path.join(self.args.data_path2, 'feature_single', clip), 'rb') as f:
self.feature_single = pickle.load(f, encoding='latin1')
if tracker_skeID[clip.split('.')[0]] == 'skele1.p':
self.T_skeID = 1
self.B_skeID = 2
elif tracker_skeID[clip.split('.')[0]] == 'skele2.p':
self.T_skeID = 2
self.B_skeID = 1
# init root node and tree for each new clip
self.root_node = {'T': {'cp': [0], 'event': [], 'mind': [], 'event_vec':[]}, 'B': {'cp': [0], 'event': [], 'mind': [], 'event_vec':[]},
'score':{'prior_aggr':{'cnt_joint':0., 'cnt_single':0.},
'prior_event':{'e_T':0., 'cnt_T':0., 'e_S':0., 'cnt_S':0., 'T_last_type':None, 'B_last_type':None},
'prior_fluent':0.,
'like_P':{'T_D_i':0., 'T_CNT_i':0., 'B_D_i':0., 'B_CNT_i':0., 'T_D_t':0., 'T_CNT_t':0., 'B_D_t':0., 'B_CNT_t':0., 'last_T_hist':None, 'last_B_hist':None, 'D_s':0., 'CNT_s':0.},
'like_E':{'e_E':0., 'CNT':0.},
'like_M':0,
'T_update_flag':0,
'B_update_flag':0
}
} # flag indicates whether it's updated.
self.Tree = {'nodes': [self.root_node], 'level': 0}
self.check_event_flag=True
# ================
# utils functions
# ================
def seg2frame(self, node):
segs = node['cp']
events = node['event']
#assert (len(segs) - 1) == len(events)
frame_labels = np.zeros((segs[-1]))
for seg_id, seg in enumerate(segs[:-1]):
event = events[seg_id][0]
start = seg
end = segs[seg_id + 1]
frame_labels[start: end] = event
return frame_labels
def check_event(self, node):
# if not valid, return True
tracker_frames = self.seg2frame(node['T'])
battery_frames = self.seg2frame(node['B'])
overlap_id = min(tracker_frames.shape[0], battery_frames.shape[0])
compare=np.abs(tracker_frames[:overlap_id]-battery_frames[:overlap_id])
return np.sum(compare) > 0
def cps2segs(self, cps):
segs = []
if len(cps) >= 2:
cp_l = cps[0]
segs = []
for idx in range(1, len(cps)):
cp_r = cps[idx]
segs.append([cp_l, cp_r])
cp_l = cp_r
return segs
def combcps(self, cps1, cps2):
comb = []
comb.extend(cps1)
comb.extend(cps2)
comb = list(np.sort(np.unique(np.array(comb))))
return comb
def seg2cp(self, segs):
cp = []
for seg in segs:
cp.append(seg[0])
return cp
def ori_seg_id(self, ori_cps, seg):
if len(ori_cps) > 1:
for idx in range(1, len(ori_cps)):
if seg[0] >= ori_cps[idx - 1] and seg[1] <= ori_cps[idx]:
return idx - 1
return None
def freq2hist(self, freq_feature):
seg_features = np.empty((1, 0))
for dim_id in range(freq_feature.shape[1]):
video_vec = freq_feature[:, dim_id] / 4
hist, bin_edges = np.histogram(video_vec, bins=self.args.hist_bin, density=True)
seg_features = np.hstack([seg_features, hist.reshape((1, -1))])
return seg_features
def temporal2freq(self, feature):
coeffs = pywt.dwt(feature, 'sym2')
cA1, _ = coeffs
new_feature = self.freq2hist(cA1)
return new_feature
def find_sep_gt(self, tracker_gt_seg, segs_T):
curr_gt_seg = []
start_frame = segs_T[0][0]
end_frame = segs_T[-1][1]
for seg in tracker_gt_seg:
max_start = max(seg[0], start_frame)
min_end = min(seg[1], end_frame)
if max_start < min_end:
curr_gt_seg.append(seg)
return curr_gt_seg
# ================
# score functions
# ================
def prior_energy_aggr(self, node):
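        # Over-segmentation penalty: lambda_1 times the combined change-point
        # density (change points per frame) of the tracker and battery parses.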
N_p = len(node['T']['cp']) / float(node['T']['cp'][-1]) + len(node['B']['cp']) / float(node['B']['cp'][-1])
e_aggr = self.args.lambda_1 * N_p
return e_aggr, node
def prior_energy_event(self, node):
# event validness score (temporal transition and spatial concurrency, from dataset)
e_T = node['score']['prior_event']['e_T']
cnt_T = node['score']['prior_event']['cnt_T']
T_last_type=node['score']['prior_event']['T_last_type']
B_last_type=node['score']['prior_event']['B_last_type']
# temporal transition
T_new_type = node['T']['event'][-1][0]
if T_last_type is not None:
trans_key = (T_last_type, T_new_type)
e_T += self.event_trans_table[trans_key]
cnt_T += 1
T_last_type=T_new_type
B_new_type = node['B']['event'][-1][0]
if B_last_type is not None:
trans_key = (B_last_type, B_new_type)
e_T += self.event_trans_table[trans_key]
cnt_T += 1
B_last_type=B_new_type
node['score']['prior_event']['e_T']=e_T
node['score']['prior_event']['cnt_T']=cnt_T
node['score']['prior_event']['T_last_type']=T_last_type
node['score']['prior_event']['B_last_type']=B_last_type
e_T = e_T / (cnt_T + self.epsilon)
# spatial concurrency
e_S = node['score']['prior_event']['e_S']
cnt_S = node['score']['prior_event']['cnt_S']
segs_T = self.cps2segs(node['T']['cp'])
segs_B = self.cps2segs(node['B']['cp'])
if node['score']['T_update_flag']==1:
for idx2 in range(len(segs_B)):
if segs_T[-1][0] >= segs_B[idx2][1]:
continue
elif segs_T[-1][1] <= segs_B[idx2][0]:
break
else:
event_T = node['T']['event'][-1][0]
event_B = node['B']['event'][idx2][0]
if event_T == event_B:
e_S += 1
cnt_S += 1
else:
e_S += 0
cnt_S += 1
if node['score']['B_update_flag']==1:
for idx1 in range(len(segs_T)):
if segs_T[idx1][0] >= segs_B[-1][1]:
break
elif segs_T[idx1][1] <= segs_B[-1][0]:
continue
else:
event_T = node['T']['event'][idx1][0]
event_B = node['B']['event'][-1][0]
if event_T == event_B:
e_S += 1
cnt_S += 1
else:
e_S += 0
cnt_S += 1
if node['score']['T_update_flag']==1 and node['score']['B_update_flag']==1:
event_T = node['T']['event'][-1][0]
event_B = node['B']['event'][-1][0]
if event_T == event_B:
e_S -= 1
cnt_S -= 1
else:
e_S -= 0
cnt_S-= 1
node['score']['prior_event']['e_S']=e_S
node['score']['prior_event']['cnt_S']=cnt_S
e_S = e_S / (cnt_S + self.epsilon)
e_event = -self.args.lambda_4 * (e_T + self.epsilon) - self.args.lambda_5 * (e_S + self.epsilon)
return e_event, node
def likelihood_energy_P(self, node):
# tracker
# inner particle distance
T_D_i = node['score']['like_P']['T_D_i']
T_CNT_i = node['score']['like_P']['T_CNT_i']
seg = [node['T']['cp'][-2], node['T']['cp'][-1]]
feature = self.feature_single[self.T_skeID][seg[0]:seg[1], :]
sum_tmp = 0
for idx in range(1, feature.shape[0]):
sum_tmp += np.linalg.norm(feature[idx - 1] - feature[idx])
sum_tmp = sum_tmp / float(feature.shape[0])
T_D_i += sum_tmp
T_CNT_i += 1
node['score']['like_P']['T_D_i']= T_D_i
node['score']['like_P']['T_CNT_i']= T_CNT_i
# inter particle distance - T
T_D_t = node['score']['like_P']['T_D_t']
T_CNT_t = node['score']['like_P']['T_CNT_t']
last_T_hist= node['score']['like_P']['last_T_hist']
T_hist = self.temporal2freq(feature)
if last_T_hist is not None:
T_D_t += np.linalg.norm(last_T_hist - T_hist)/312.
T_CNT_t += 1
node['score']['like_P']['T_D_t']=T_D_t
node['score']['like_P']['T_CNT_t']=T_CNT_t
node['score']['like_P']['last_T_hist']=T_hist
# battery
# inner particle distance
B_D_i = node['score']['like_P']['B_D_i']
B_CNT_i = node['score']['like_P']['B_CNT_i']
seg = [node['B']['cp'][-2], node['B']['cp'][-1]]
feature = self.feature_single[self.B_skeID][seg[0]:seg[1], :]
sum_tmp = 0
for idx in range(1, feature.shape[0]):
sum_tmp += np.linalg.norm(feature[idx - 1] - feature[idx])
sum_tmp = sum_tmp / float(feature.shape[0])
B_D_i += sum_tmp
B_CNT_i += 1
node['score']['like_P']['B_D_i']= B_D_i
node['score']['like_P']['B_CNT_i']= B_CNT_i
# inter particle distance - T
B_D_t = node['score']['like_P']['B_D_t']
B_CNT_t = node['score']['like_P']['B_CNT_t']
last_B_hist= node['score']['like_P']['last_B_hist']
B_hist = self.temporal2freq(feature)
if last_B_hist is not None:
B_D_t += np.linalg.norm(last_B_hist - B_hist)/312.
B_CNT_t += 1
node['score']['like_P']['B_D_t']=B_D_t
node['score']['like_P']['B_CNT_t']=B_CNT_t
node['score']['like_P']['last_B_hist']=B_hist
e_P = self.args.beta_1 * (T_D_i / (T_CNT_i + self.epsilon) + B_D_i / (B_CNT_i + self.epsilon)) - self.args.beta_3 * (T_D_t / (T_CNT_t + self.epsilon) + B_D_t / (B_CNT_t + self.epsilon))
return e_P, node
def likelihood_energy_E(self, node): # todo: add the event validness check here
e_E = node['score']['like_E']['e_E']
CNT = node['score']['like_E']['CNT']
if node['score']['T_update_flag'] == 1:
e_E += node['T']['event'][-1][1]
CNT += 1
if node['score']['B_update_flag'] == 1:
e_E += node['B']['event'][-1][1]
CNT += 1
node['score']['like_E']['e_E'] = e_E
node['score']['like_E']['CNT'] = CNT
e_E = e_E / CNT
energy_E = -self.args.gamma_1 * e_E
return energy_E, node
# ================
# tree functions
# ================
def tree_prune(self, all_possible_path):
'''
input:
all possible paths in this level
:return:
top N paths
'''
score_all = []
node_ids = []
all_possible_nodes_new=[]
if self.check_event_flag:
for idx, node in enumerate(all_possible_path):
# calculate score for the current node/path
if not self.check_event(node): # note the if here
e_aggr, node= self.prior_energy_aggr(node)
e_event,node=self.prior_energy_event(node)
e_P, node=self.likelihood_energy_P(node)
e_E, node=self.likelihood_energy_E(node)
node_score = -e_aggr-e_event-e_P-e_E
score_all.append(node_score)
node_ids.append(idx)
all_possible_nodes_new.append(node)
else:
for idx, node in enumerate(all_possible_path):
# calculate score for the current node/path
e_aggr, node = self.prior_energy_aggr(node)
e_event, node = self.prior_energy_event(node)
e_P, node = self.likelihood_energy_P(node)
e_E, node = self.likelihood_energy_E(node)
node_score = -e_aggr - e_event - e_P - e_E
score_all.append(node_score)
node_ids.append(idx)
all_possible_nodes_new.append(node)
ordered_index = list(np.argsort(np.array(score_all))[::-1])
selected_index = ordered_index[:self.args.topN]
node_ids = np.array(node_ids)
top_node_ids = node_ids[selected_index]
# assert len(top_node_ids)>0, 'no valid top nodes!'
if len(top_node_ids)>0:
self.Tree['nodes'] = []
for node_id in top_node_ids:
node = all_possible_nodes_new[node_id]
self.Tree['nodes'].append(node)
return True
else:
self.check_event_flag=False
print('err! no valid top nodes! first time')
score_all = []
node_ids = []
all_possible_nodes_new = []
for idx, node in enumerate(all_possible_path):
# calculate score for the current node/path
e_aggr, node = self.prior_energy_aggr(node)
e_event, node = self.prior_energy_event(node)
e_P, node = self.likelihood_energy_P(node)
e_E, node = self.likelihood_energy_E(node)
node_score = -e_aggr - e_event - e_P - e_E
score_all.append(node_score)
node_ids.append(idx)
all_possible_nodes_new.append(node)
ordered_index = list(np.argsort(np.array(score_all))[::-1])
selected_index = ordered_index[:self.args.topN]
node_ids = np.array(node_ids)
top_node_ids = node_ids[selected_index]
if len(top_node_ids)>0:
self.Tree['nodes'] = []
for node_id in top_node_ids:
node = all_possible_nodes_new[node_id]
self.Tree['nodes'].append(node)
return True
else:
print('err! no valid top nodes! second time')
return False
def node_expand(self, parent_node):
'''
input:
parent checkpoint node
:return:
all possible children nodes
'''
t_node_id = parent_node['T']['cp'][-1]
possible_t_cp =[]
for i in range(len(self.init_cps_T)):
if self.init_cps_T[i] > t_node_id:
possible_t_cp.append(self.init_cps_T[i])
b_node_id = parent_node['B']['cp'][-1]
possible_b_cp = []
for j in range(len(self.init_cps_B)):
if self.init_cps_B[j] > b_node_id:
possible_b_cp.append(self.init_cps_B[j])
return possible_t_cp, possible_b_cp
def tree_grow(self,process_i):
#print("I am process {}-- into tree grow".format(process_i))
'''
input:
current top N possible path
:return:
new top N possible path
'''
all_possible_nodes = []
start_time = time.time()
for idx, parent_node in enumerate(self.Tree['nodes']):
# find possible child nodes of the current node
possible_t_cp, possible_b_cp = self.node_expand(parent_node)
search_N_cp_T = min(len(possible_t_cp), self.args.search_N_cp)
search_N_cp_B = min(len(possible_b_cp), self.args.search_N_cp)
# all possible paths
if len(possible_t_cp) >=1 and len(possible_b_cp) >=1:
for cp_t_id in possible_t_cp[:search_N_cp_T]:
for cp_b_id in possible_b_cp[:search_N_cp_B]:
combinations = list(product([0, 1, 2], repeat=2))
for combination in combinations:
new_node = copy.deepcopy(parent_node)
new_node['T']['cp'].append(cp_t_id)
new_node['B']['cp'].append(cp_b_id)
start = parent_node['T']['cp'][-1]
end = cp_t_id
with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'tracker',
'{}_{}.p'.format(start, end)), 'rb') as f:
outputs = pickle.load(f)
new_node['T']['event'].append([combination[0], outputs[0][combination[0]]])
new_node['T']['event_vec'].append(outputs[0])
start = parent_node['B']['cp'][-1]
end = cp_b_id
with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'battery',
'{}_{}.p'.format(start, end)), 'rb') as f:
outputs = pickle.load(f)
new_node['B']['event'].append([combination[1], outputs[0][combination[1]]])
new_node['B']['event_vec'].append(outputs[0])
new_node['score']['T_update_flag'] = 1
new_node['score']['B_update_flag'] = 1
all_possible_nodes.append(new_node)
elif len(possible_t_cp) == 0 and len(possible_b_cp) >=1:
for cp_b_id in possible_b_cp[:search_N_cp_B]:
new_node1, new_node2, new_node3 = copy.deepcopy(parent_node), copy.deepcopy(
parent_node), copy.deepcopy(parent_node)
# add new cp
new_node1['B']['cp'].append(cp_b_id)
new_node2['B']['cp'].append(cp_b_id)
new_node3['B']['cp'].append(cp_b_id)
# predict event for current seg
# battery
start = parent_node['B']['cp'][-1]
end = cp_b_id
with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'battery',
'{}_{}.p'.format(start, end)), 'rb') as f:
outputs = pickle.load(f)
new_node1['B']['event'].append([0, outputs[0][0]])
new_node1['B']['event_vec'].append(outputs[0])
new_node2['B']['event'].append([1, outputs[0][1]])
new_node2['B']['event_vec'].append(outputs[0])
new_node3['B']['event'].append([2, outputs[0][2]])
new_node3['B']['event_vec'].append(outputs[0])
new_node1['score']['T_update_flag'] = 0
new_node1['score']['B_update_flag'] = 1
new_node2['score']['T_update_flag'] = 0
new_node2['score']['B_update_flag'] = 1
new_node3['score']['T_update_flag'] = 0
new_node3['score']['B_update_flag'] = 1
all_possible_nodes.append(new_node1)
all_possible_nodes.append(new_node2)
all_possible_nodes.append(new_node3)
elif len(possible_t_cp) >= 1 and len(possible_b_cp) == 0:
for cp_t_id in possible_t_cp[:search_N_cp_T]:
new_node1, new_node2, new_node3 = copy.deepcopy(parent_node), copy.deepcopy(
parent_node), copy.deepcopy(parent_node)
# add new cp
new_node1['T']['cp'].append(cp_t_id)
new_node2['T']['cp'].append(cp_t_id)
new_node3['T']['cp'].append(cp_t_id)
# predict event for current seg
# battery
start = parent_node['T']['cp'][-1]
end = cp_t_id
with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'tracker',
'{}_{}.p'.format(start, end)), 'rb') as f:
outputs = pickle.load(f)
new_node1['T']['event'].append([0, outputs[0][0]])
new_node1['T']['event_vec'].append(outputs[0])
new_node2['T']['event'].append([1, outputs[0][1]])
new_node2['T']['event_vec'].append(outputs[0])
new_node3['T']['event'].append([2, outputs[0][2]])
new_node3['T']['event_vec'].append(outputs[0])
new_node1['score']['T_update_flag'] = 1
new_node1['score']['B_update_flag'] = 0
new_node2['score']['T_update_flag'] = 1
new_node2['score']['B_update_flag'] = 0
new_node3['score']['T_update_flag'] = 1
new_node3['score']['B_update_flag'] = 0
all_possible_nodes.append(new_node1)
all_possible_nodes.append(new_node2)
all_possible_nodes.append(new_node3)
if len(all_possible_nodes) == 0:
return self.Tree['nodes'][0]
else:
flag=self.tree_prune(all_possible_nodes)
self.Tree['level'] += 1
if flag==True:
return None
elif flag==False:
return self.Tree['nodes'][0]
def finetune_para(atomic_net, event_net, clip_list, para_idx, para_bank, args):
beam_search = BeamSearch(args, event_net, atomic_net)
start_time=time.time()
pid = threading.get_ident()
if args.resume:
try:
with open(op.join(args.save_path, 'resume_rec_para_{}.p'.format(para_idx)), 'rb') as f:
resume_rec=pickle.load(f, encoding='latin1')
para_idx_, i_clip_sp, para_, clip_, cnt_clip, err_seg, err_event=resume_rec
assert para_idx_== para_idx
i_para_sp = 0
ERR_PARA = []
        except Exception:
print("!"*10)
print("\033[31m ERROR: no correct resume_rec file! \033[0m")
i_para_sp = 0
i_clip_sp = 0
ERR_PARA=[]
cnt_clip=0
err_seg=0
err_event=0
else:
i_para_sp=0
i_clip_sp=0
ERR_PARA=[]
cnt_clip = 0
err_seg = 0
err_event = 0
for i_para in range(i_para_sp, len(para_bank)):
para=para_bank[i_para]
args.topN=para['topN']
args.lambda_1 = para['lambda_1']
args.lambda_4=para['lambda_4']
args.lambda_5=para['lambda_5']
args.beta_1=para['beta_1']
args.beta_3=para['beta_3']
args.gamma_1=para['gamma_1']
args.search_N_cp=para['search_N_cp']
finetune_save_path=args.save_path
if i_para>i_para_sp:
i_clip_sp=0
for i_clip in range(i_clip_sp, len(clip_list)):
if i_clip==0:
cnt_clip = 0
err_seg = 0
err_event = 0
clip=clip_list[i_clip]
with open(op.join(args.save_path, 'resume_rec_para_{}.p'.format(para_idx)), 'wb') as f:
pickle.dump([para_idx, i_clip, para, clip, cnt_clip, err_seg, err_event], f)
beam_search.init(clip)
print(" PID {}, para_idx {}, video {} ({}/{})".format(pid, para_idx, clip, i_clip, len(clip_list)-1))
print("="*74)
# beam search
while True:
Tree_best=beam_search.tree_grow(pid)
if Tree_best is not None:
break
# evaluation
cps_T=Tree_best['T']['cp']
cps_B=Tree_best['B']['cp']
event_T=Tree_best['T']['event']
event_B=Tree_best['B']['event']
# seg
segs_T=beam_search.cps2segs(cps_T)
segs_B=beam_search.cps2segs(cps_B)
tracker_gt_seg = event_seg_tracker[clip.split('.')[0]]
battery_gt_seg = event_seg_battery[clip.split('.')[0]]
err_seg += segment_error(segs_T, tracker_gt_seg, args.seg_alpha) + segment_error(segs_B, battery_gt_seg,args.seg_alpha)
# event
# tracker
event_gt_T = event_seg_tracker[clip.split('.')[0]]
len_T = event_gt_T[-1][1]
frame_events_T_gt = np.zeros((len_T))
for i, seg in enumerate(event_gt_T):
start = event_gt_T[i][0]
end = event_gt_T[i][1]
event = event_gt_T[i][2]
frame_events_T_gt[start:end] = event
frame_events_T = np.zeros((len_T))
for i, seg in enumerate(segs_T):
event = event_T[i][0]
start = seg[0]
end = seg[1]
frame_events_T[start:end] = event
# battery
event_gt_B = event_seg_battery[clip.split('.')[0]]
len_B = event_gt_B[-1][1]
frame_events_B_gt = np.zeros((len_B))
for i, seg in enumerate(event_gt_B):
start = event_gt_B[i][0]
end = event_gt_B[i][1]
event = event_gt_B[i][2]
frame_events_B_gt[start:end] = event
frame_events_B = np.zeros((len_B))
for i, seg in enumerate(segs_B):
event = event_B[i][0]
start = seg[0]
end = seg[1]
frame_events_B[start:end] = event
err_event += np.sum(frame_events_T != frame_events_T_gt) + np.sum(frame_events_B != frame_events_B_gt)
cnt_clip += 1
print("pid {} para_idx {} vid {} temp_err {} used time {}".format(pid, para_idx, i_clip, (err_seg + err_event) / float(cnt_clip), time.time() - start_time))
print("="*70)
# current para
assert(cnt_clip>0)
ERR_PARA.append((err_seg + err_event)/float(cnt_clip))
print("pid {} para_idx {} err {} used time {}".format(pid, para_idx, (err_seg + err_event)/float(cnt_clip), time.time()-start_time))
print("=" * 70)
with open(op.join(finetune_save_path, "para_err_{}.p".format(para_idx)), 'wb') as f:
pickle.dump([para_idx, para,(err_seg + err_event)/float(cnt_clip), Tree_best, clip], f)
def parse_arguments():
parser=argparse.ArgumentParser(description='')
# path
home_path='/home/lfan/Dropbox/Projects/NIPS20/'
home_path2='/media/lfan/HDD/NIPS20/'
parser.add_argument('--project-path',default = home_path)
parser.add_argument('--project-path2', default=home_path2)
parser.add_argument('--data-path', default=home_path+'data/')
parser.add_argument('--data-path2', default=home_path2 + 'data/')
parser.add_argument('--img-path', default=home_path+'annotations/')
parser.add_argument('--save-path', default='/media/lfan/HDD/NIPS20/Result2/BeamSearch_best_para_home_0531_1/')
parser.add_argument('--init-cps', default='/media/lfan/HDD/NIPS20/data/init_cps/CPS_NEW.p')
parser.add_argument('--stat-path', default=home_path+'data/stat/')
parser.add_argument('--attmat-path', default=home_path+'data/record_attention_matrix/')
parser.add_argument('--cate-path', default=home_path2+'data/track_cate/')
parser.add_argument('--tracker-bbox', default=home_path2+'data/tracker_record_bbox/')
parser.add_argument('--battery-bbox', default=home_path2+'data/record_bbox/')
parser.add_argument('--obj-bbox', default=home_path2+'data/post_neighbor_smooth_newseq/')
parser.add_argument('--ednet-path', default=home_path+'model/ednet_tuned_best.pth')
parser.add_argument('--atomic-path', default=home_path+'model/atomic_best.pth')
parser.add_argument('--seg-label', default=home_path + 'data/segment_labels/')
parser.add_argument('--feature-single', default=home_path2 + 'data/feature_single/')
parser.add_argument('--save-event-score', default='/media/lfan/HDD/NIPS20/Result/EVENT_SCORE/')
# parameter
parser.add_argument('--lambda-1', default=1)
parser.add_argument('--lambda-4', default=10)
parser.add_argument('--lambda-5', default=1)
parser.add_argument('--beta-1', default=1)
parser.add_argument('--beta-3', default=10)
parser.add_argument('--gamma-1', default=10)
parser.add_argument('--search-N-cp', default=5)
parser.add_argument('--topN', default=5)
parser.add_argument('--hist-bin', default=10)
parser.add_argument('--seg-alpha', default=50)
# others
parser.add_argument('--cuda', default=False)
parser.add_argument('--ip', default='192.168.1.17')
parser.add_argument('--port', default=1234)
parser.add_argument('--resume',default=False)
parser.add_argument('--test-func-batch-size', default=16)
return parser.parse_args()
def select_para():
# select the best para--round 1
folders = listdir('/media/lfan/HDD/NIPS20/Result2/')
ERR_ALL=[]
PARA_ALL=[]
for folder in folders:
#if folder=='BeamSearch_BEST_PARA_azure_0523_2' or folder=='BeamSearch_BEST_PARA_thor_0523_2' or folder=='BeamSearch_BEST_PARA_home_0523_2':
if folder=='BeamSearch_BEST_PARA_home_0525':
files=listdir(op.join('/media/lfan/HDD/NIPS20/Result2/', folder))
for file in files:
if file.startswith('para_err_'):
with open(op.join('/media/lfan/HDD/NIPS20/Result2/', folder, file), 'rb') as f:
#_, para,err=pickle.load(f)
                        para_idx, para, err, Tree_best, clip = pickle.load(f)
ERR_ALL.append(err)
PARA_ALL.append(para)
print(len(ERR_ALL))
res_i=np.argsort(np.array(ERR_ALL))
#print(res_i)
res_v=np.sort(ERR_ALL)
#print(res_v)
print(res_v[:])
for i in res_i[:]:
print(PARA_ALL[i])
# select the best para--round 1 --0523 18:10
# {'topN': 5, 'lambda_2': 0.1, 'lambda_3': 0.1, 'lambda_4': 10, 'lambda_5': 10, 'lambda_6': 10, 'beta_1': 0.1,
# 'beta_2': 10, 'beta_3': 0.1, 'hist_bin': 10, 'search_N_cp': 5}
# ERROR 1012.5
# round 2&3 --0523 22:36
# the same conclusion
# round 4 --0524 14:30
# the same conclusion
# round 5 -- 0524 18:17
##################################### after the bug ########################################
# [1836.36666667]
# {'topN': 3, 'lambda_2': 0.1, 'lambda_3': 0.1, 'lambda_4': 10, 'lambda_5': 1, 'beta_1': 0.1, 'beta_2': 1, 'beta_3': 0.1, 'hist_bin': 10, 'search_N_cp': 5}
# 1848.96666667 --topN=7
# 1834.3
# {'topN': 1, 'lambda_2': 0.1, 'lambda_3': 0.1, 'lambda_4': 10, 'lambda_5': 1, 'beta_1': 0.1, 'beta_2': 1, 'beta_3': 0.1, 'hist_bin': 10, 'search_N_cp': 5}
# clip[1]
# 1572. --{'topN': 3, 'lambda_2': 10, 'lambda_3': 1, 'lambda_4': 10, 'lambda_5': 1, 'beta_1': 0.1, 'beta_2': 1, 'beta_3': 0.1, 'hist_bin': 10, 'search_N_cp': 5}
# 1572 -- topN': 3, 'lambda_2': 15, 'lambda_3': 0.5, 'lambda_4': 5, 'lambda_5': 1, 'beta_1': 0.1, 'beta_2': 1, 'beta_3': 0.1, 'hist_bin': 10, 'search_N_cp': 5}
# 0525
# clip[1]
# [2433.]
# {'topN': 5, 'lambda_1': 1, 'lambda_2': 0.1, 'lambda_3': 0.1, 'lambda_4': 10, 'lambda_5': 1, 'beta_1': 1, 'beta_2': 0.1, 'beta_3': 0.1, 'gamma_1': 10, 'search_N_cp': 5}
def run():
print("=" * 74)
print('/' * 5)
print("/ \033[31m [Important!] Don't forget to change your project name accordingly! \033[0m ")
print('/' * 5)
print("=" * 74)
args = parse_arguments()
if not op.exists(args.save_path):
os.makedirs(args.save_path)
atomic_event_net = Atomic_node_only_lstm_first_view()
load_best_checkpoint(atomic_event_net, path=args.atomic_path)
if args.cuda and torch.cuda.is_available():
atomic_event_net.cuda()
atomic_event_net.eval()
event_net=EDNet()
event_net.load_state_dict(torch.load(args.ednet_path))
if args.cuda and torch.cuda.is_available():
event_net.cuda()
event_net.eval()
para_bank = generate_para_bank(args)
random.seed(0)
random.shuffle(clips_with_gt_event)
#print(clips_with_gt_event)
#Parallel(n_jobs=-1)(delayed(finetune_para)(atomic_event_net, event_net, clips_with_gt_event[20:], para_idx, [para], args) for para_idx, para in enumerate(para_bank[4000:]))
# Parallel(n_jobs=-1)(delayed(finetune_para)(atomic_event_net, event_net, clips_with_gt_event[:10], para_idx, [para], args) for para_idx, para in enumerate(para_bank))
# Parallel(n_jobs=-1)(delayed(finetune_para)(atomic_event_net, event_net, clips_with_gt_event, para_idx, [para], args) for para_idx, para in enumerate(para_bank[6000:]))
# 0524
#para_bank=[{'topN': 3, 'lambda_1':1,'lambda_2': 10, 'lambda_3': 1, 'lambda_4': 5, 'lambda_5': 1, 'beta_1': 0.1, 'beta_2': 1, 'beta_3': 0.001, 'gamma_1': 10, 'search_N_cp': 5}]
Parallel(n_jobs=1)(delayed(finetune_para)(atomic_event_net, event_net, [clips_with_gt_event[0]], para_idx, [para], args) for para_idx, para in enumerate(para_bank))
print([clips_with_gt_event[0]])
#finetune_para(atomic_event_net, event_net, [clips_with_gt_event[1]], 0, para_bank[:], args)
#'test_94342_21': [[0, 13, 0], [14, 66, 3], [67, 594, 2], [595, 1097, 2], [1098, 1133, 0]]
def select_para_new():
path='/media/lfan/HDD/NIPS20/'+'Result0530/v1/'
seg_rank=[]
event_rank=[]
all_rank=[]
with open(op.join(path, 'para_bank.p'), 'rb') as f:
para_bank=pickle.load(f)
for para_idx in range(len(para_bank)):
seg_err = []
event_err = []
all_err = []
files=glob.glob(op.join(path, 'RES_para_{}_*'.format(para_idx)))
for file in files:
with open(file, 'rb') as f:
para, clip, Tree_best, err_seg, err_event=pickle.load(f)
seg_err.append(err_seg)
event_err.append(err_event)
all_err.append(err_seg+err_event)
seg_mean=np.mean(np.array(seg_err))
event_mean=np.mean(np.array(event_err))
all_mean=np.mean(np.array(all_err))
seg_rank.append(seg_mean)
event_rank.append(event_mean)
all_rank.append(all_mean)
seg_rank_=list(np.sort(np.array(seg_rank)))
event_rank_=list(np.sort(np.array(event_rank)))
all_rank_=list(np.sort(np.array(all_rank)))
seg_idxrank=list(np.argsort(np.array(seg_rank)))
event_idxrank=list(np.argsort(np.array(event_rank)))
all_idxrank=list(np.argsort(np.array(all_rank)))
para_bank=np.array(para_bank)
N=3
print('seg')
print(seg_rank_[:N])
print(para_bank[seg_idxrank[:N]])
print('event')
print(event_rank_[:N])
print(para_bank[event_idxrank[:N]])
print('all')
print(all_rank_[:N])
print(para_bank[all_idxrank[:N]])
def run_finetune_para_new():
args = parse_arguments()
if not op.exists(args.save_path):
os.makedirs(args.save_path)
atomic_event_net = Atomic_node_only_lstm_first_view()
load_best_checkpoint(atomic_event_net, path=args.atomic_path)
if args.cuda and torch.cuda.is_available():
atomic_event_net.cuda()
atomic_event_net.eval()
event_net=EDNet()
event_net.load_state_dict(torch.load(args.ednet_path))
if args.cuda and torch.cuda.is_available():
event_net.cuda()
event_net.eval()
para_bank = generate_para_bank(args)
random.seed(0)
random.shuffle(clips_with_gt_event)
para_all=para_bank
clips_all=clips_with_gt_event
combinations=list(product(range(len(para_all)), range(len(clips_all))))
print('There are {} tasks in total. {} clips, {} paras'.format(len(combinations), len(clips_all), len(para_all)))
Parallel(n_jobs=-1)(delayed(finetune_para)(atomic_event_net, event_net, comb[1], clips_all[comb[1]], comb[0], para_all[comb[0]], args, len(clips_all), len(para_all)) for _, comb in enumerate(combinations[7500:]))
if __name__ == "__main__":
args=parse_arguments()
# select_para()
with open(op.join(args.save_path, "para_err_{}.p".format(0)), 'rb') as f:
para_idx, para, err, Tree_best, clip=pickle.load(f)
pass
# run()
|
[
"[email protected]"
] | |
977f42b6e45aed18b75481bcd0ff35b2b08c1db3
|
7c95ce7736d2beffef02260e414a5c01930bf0e8
|
/tests/tac/arithmetic_correct.prg.tac
|
afc1f6d1dc02668c555110117def156669bb58e6
|
[
"MIT"
] |
permissive
|
jhosoume/porygon_lang
|
27e6570e03481f4fe5c990a583a29812b5b67955
|
5fd1cc00f66d08f2301c978c9af716d83db1e3b9
|
refs/heads/master
| 2023-03-02T18:20:32.461494 | 2021-02-17T14:25:33 | 2021-02-17T14:25:33 | 292,926,255 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 925 |
tac
|
.table
.code
mov $0, 8
main:
add $1, $0, 3
mov $2, 40.000000
println $2
println '-'
div $3, $2, 10.000000
mov $2, $3
println $2
println '-'
mov $4, 1
add $6, $4, 4
mov $5, $6
add $7, $4, 7
mov $4, $7
add $8, $5, 42
add $9, $8, 8
mov $5, $9
println $5
println $4
println '-'
mov $10, 0
and $12, $10, 1
mov $11, $12
add $14, $4, $5
inttofl $14, $14
mov $13, $14
println $13
println '-'
println 1
println 2.000000
print 't'
print 'r'
print 'u'
println 'e'
println '-'
print 'E'
print 'n'
print 't'
print 'e'
print 'r'
print ' '
print 'i'
print 'n'
print 't'
print 'e'
print 'g'
print 'e'
print 'r'
print ':'
println
scani $15
println $15
print 'E'
print 'n'
print 't'
print 'e'
print 'r'
print ' '
print 'f'
print 'l'
print 'o'
print 'a'
print 't'
print ':'
println
scanf $16
println $16
print 'E'
print 'n'
print 't'
print 'e'
print 'r'
print ' '
print 'c'
print 'h'
print 'a'
print 'r'
print ':'
println
scanc $17
println $17
|
[
"[email protected]"
] | |
d54936556f9329e822f43ab88327ff24a0f715ff
|
54167cb4be1b9486b514b1b1ae6781ec6e6d388a
|
/build.py
|
865f4213ca09b550c06889dd98f43332545139d6
|
[
"MIT"
] |
permissive
|
Le0Developer/autoreport
|
d352bee46d800864de794208bc61caf0017c1230
|
ead5e3f0fbf7d0c534f6425fba6bd6794c0fbbb2
|
refs/heads/master
| 2022-11-10T09:16:58.580170 | 2020-06-25T17:07:28 | 2020-06-25T17:07:28 | 274,971,717 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,263 |
py
|
import subprocess, os, shutil, sys
name = 'autoreport'
kwargs = {}
if '--debug' not in sys.argv:
kwargs[ 'stdout' ] = subprocess.DEVNULL
kwargs[ 'stderr' ] = subprocess.PIPE
if shutil.which( 'moonc' ) is None:
print( 'Unable to find `moonc` on path. Is Moonscript properly installed?' )
exit( 1 )
# used for adding globals like `gui` to the whitelist
# TODO: find a fix for windows
if os.path.exists( 'lint_config.moon' ) and (not os.path.exists( 'lint_config.lua' ) or os.stat( 'lint_config.moon' ).st_mtime > os.stat( 'lint_config.lua' ).st_mtime):
print( 'Compiling `lint_config.moon`... ', end='', flush=True )
subprocess.run( [shutil.which( 'moonc' ), os.path.abspath( 'lint_config.moon' )], check = True, **kwargs )
print( 'DONE.' )
print( 'Checking moonscripts... ', end='', flush=True )
result = subprocess.run( [shutil.which( 'moonc' ), '-l', '.'], **kwargs )
if result.returncode:
print( 'FAILED.' )
if kwargs and result.stderr:
print( result.stderr.decode() )
else: print( 'OK.' )
print( f'Compiling `{name}.moon`... ', end='', flush=True )
result = subprocess.run( [shutil.which( 'moonc' ), os.path.abspath( f'{name}.moon' )], **kwargs )
if result.returncode:
print( 'FAILED.' )
if kwargs and result.stderr:
print( result.stderr.decode() )
exit( 1 )
else: print( 'OK.' )
print( 'Fixing lua...', end='', flush=True )
with open( f'{name}.lua', 'rb' ) as f:
lua = f.read()
lua = lua.replace( b'return "__REMOVE_ME__"', b'' )
with open( f'{name}.lua', 'wb' ) as f:
f.write( lua )
print( 'DONE.' )
luanames = [ 'lua5.1', 'lua' ]
for luapath in luanames:
if shutil.which( luapath ) is not None: break
else:
print( 'Unable to find lua executable.' )
exit( 1 )
print( 'Minifying lua...', end='', flush=True )
result = subprocess.run( [luapath, 'minifier.lua', f'{name}.lua', f'{name}_minified.lua'], **kwargs )
if result.returncode:
print( 'FAILED.' )
if kwargs and result.stderr:
print( result.stderr.decode() )
exit( 1 )
else:
print( 'OK.' )
    print( f'Shrunk by {1 - (os.stat( f"{name}_minified.lua" ).st_size / os.stat( f"{name}.lua" ).st_size):%} ({os.stat( f"{name}_minified.lua" ).st_size - os.stat( f"{name}.lua" ).st_size} bytes)' )
|
[
"[email protected]"
] | |
8b937f748ecd23a5a902c0f78026fc265309d665
|
6bbfd303dbacc21a2443e681aea5c1a1c21b872d
|
/pytorch/evaluation/evaluation_segm.py
|
e8c4b838a59872954ed5e09b055df73f0933ccfb
|
[] |
no_license
|
gregoryperkins/PC-Reg-RT
|
fbba9d4f9c55b7e2e9068e8f8a55fc1eba3c76a8
|
7d70ca97019cc7ddc374ffd962e0f63391ec181d
|
refs/heads/main
| 2023-09-04T02:58:42.843287 | 2021-10-16T07:19:25 | 2021-10-16T07:19:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,348 |
py
|
import numpy as np
import torch
import SimpleITK as sitk
def GetSD(predict,label):
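    # Standard deviation of per-voxel Hausdorff distances: one
    # HausdorffDistanceImageFilter is executed per predicted foreground voxel,
    # so this costs O(|predict|) SimpleITK calls and is slow on large masks.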
predict = predict.astype(np.uint8)
label = label.astype(np.uint8)
mask1 = sitk.GetImageFromArray(predict,isVector=False)
mask2 = sitk.GetImageFromArray(label,isVector=False)
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(mask1, mask2)
ave_distance = hausdorff_distance_filter.GetAverageHausdorffDistance()
idx_predict = np.where(predict!=0)
sum=0
print(np.size(idx_predict[0]))
for i in range(np.size(idx_predict[0])):
mask_temp = np.zeros_like(predict,dtype=np.uint8)
mask_temp[idx_predict[0][i]][idx_predict[1][i]][idx_predict[2][i]]=1
mask_temp = sitk.GetImageFromArray(mask_temp,isVector=False)
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(mask_temp, mask2)
distance_pixel = hausdorff_distance_filter.GetHausdorffDistance()
sum=sum+np.square(distance_pixel-ave_distance)
result=np.sqrt(sum/np.size(idx_predict[0]))
return result
def Getcontour(img):
image = sitk.GetImageFromArray(img.astype(np.uint8),isVector=False)
filter = sitk.SimpleContourExtractorImageFilter()
image = filter.Execute(image)
image = sitk.GetArrayFromImage(image)
return image.astype(np.uint8)
def GetMaxConponent(img, index=1):
if img.max() < index:
return np.zeros_like(img,dtype=np.uint8)
image = sitk.GetImageFromArray((img == index).astype(np.uint8),isVector=False)
filter = sitk.ConnectedComponentImageFilter()
image = filter.Execute(image)
image = sitk.GetArrayFromImage(image).astype(np.uint8)
maxindex = 0
max_sum = 0
for i in range(1, image.max()+1):
temp = (image == i).sum()
if temp > max_sum:
max_sum = temp
maxindex = i
if maxindex == 0:
return np.zeros_like(img, dtype=np.uint8)
else:
return (image == maxindex).astype(np.uint8) * index
def GrayMorphologicalClosingImage(img):
image = sitk.GetImageFromArray(img.astype(np.uint8),isVector=False)
filter = sitk.GrayscaleMorphologicalClosingImageFilter()
image = filter.Execute(image)
image = sitk.GetArrayFromImage(image)
return image.astype(np.uint8)
def HausdorffDistance(predict, label, index=1):
predict = (predict == index).astype(np.uint8)
label = (label == index).astype(np.uint8)
predict_sum = predict.sum()
label_sum = label.sum()
if predict_sum != 0 and label_sum != 0 :
mask1 = sitk.GetImageFromArray(predict,isVector=False)
mask2 = sitk.GetImageFromArray(label,isVector=False)
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
hausdorff_distance_filter.Execute(mask1, mask2)
result1 = hausdorff_distance_filter.GetHausdorffDistance()
result2 = hausdorff_distance_filter.GetAverageHausdorffDistance()
result = result1,result2
elif predict_sum != 0 and label_sum == 0:
result = 'FP','FP'
elif predict_sum == 0 and label_sum != 0:
result = 'FN','FN'
else:
result = 'TN','TN'
return result
def dice3D(eval_segm, gt_segm, index=1):
'''
    eval_segm: the matrix to evaluate
gt_segm: ground truth
'''
if type(eval_segm) == np.ndarray:
eval_segm = torch.from_numpy(eval_segm).byte()
if type(gt_segm) == np.ndarray:
gt_segm = torch.from_numpy(gt_segm).byte()
eps = 1e-6
#assert eval_segm.size == gt_segm.size
#gt_segm = gt_segm.byte()
eval_segm = (eval_segm == index)
sum_eval = eval_segm.sum().item()
gt_segm = (gt_segm == index)
sum_gt = gt_segm.sum().item()
if sum_eval != 0 and sum_gt != 0:
intersection = torch.sum(eval_segm * gt_segm).item()
union = torch.sum(eval_segm).item() + torch.sum(gt_segm).item() + eps
dice_ = 2.0 * intersection / union
elif sum_eval != 0 and sum_gt == 0:
dice_ = 'FP'
elif sum_eval == 0 and sum_gt != 0:
dice_ = 'FN'
else:
dice_ = 'TN'
return dice_
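# Minimal usage sketch (illustrative arrays, not taken from the test data):
# pred = np.zeros((4, 8, 8), dtype=np.uint8); pred[:, :4, :] = 1
# gt = np.zeros((4, 8, 8), dtype=np.uint8); gt[:, :, :4] = 1
# dice3D(pred, gt, index=1)  # -> ~0.5 for these half-overlapping masks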
def jaccard(eval_segm, gt_segm, index=1):
'''
    eval_segm: the matrix to evaluate
gt_segm: ground truth
'''
if type(eval_segm) == np.ndarray:
eval_segm = torch.from_numpy(eval_segm.copy()).byte()
if type(gt_segm) == np.ndarray:
gt_segm = torch.from_numpy(gt_segm.copy()).byte()
eps = 1e-6
#assert eval_segm.size == gt_segm.size
#gt_segm = gt_segm.byte()
eval_segm[eval_segm != index] = 0
eval_segm[eval_segm == index] = 1
sum_eval = eval_segm.sum().item()
gt_segm[gt_segm != index] = 0
gt_segm[gt_segm == index] = 1
sum_gt = gt_segm.sum().item()
if sum_eval != 0 and sum_gt != 0:
intersection = torch.sum(eval_segm * gt_segm).item()
union = torch.sum(eval_segm).item() + torch.sum(gt_segm).item() - intersection + eps
dice_ = intersection / union
elif sum_eval != 0 and sum_gt == 0:
dice_ = 'FP'
elif sum_eval == 0 and sum_gt != 0:
dice_ = 'FN'
else:
dice_ = 'TN'
return dice_
def pixel_accuracy_ex(eval_segm, gt_segm):
'''
    eval_segm and gt_segm should have shape (N_slice, height, width)
'''
assert (eval_segm.shape == gt_segm.shape)
num = eval_segm.shape[0]
result = np.zeros((num), np.float32)
for i in range(num):
result[i] = pixel_accuracy(eval_segm[i, ...], gt_segm[i, ...])
return result.mean()
def mean_accuracy_ex(eval_segm, gt_segm):
'''
    eval_segm and gt_segm should have shape (N_slice, height, width)
'''
assert(eval_segm.shape == gt_segm.shape)
num = eval_segm.shape[0]
result = np.zeros((num), np.float32)
for i in range(num):
result[i] = mean_accuracy(eval_segm[i, ...], gt_segm[i, ...])
return result.mean()
def mean_IU_ex(eval_segm, gt_segm):
'''
    eval_segm and gt_segm should have shape (N_slice, height, width)
'''
assert (eval_segm.shape == gt_segm.shape)
num = eval_segm.shape[0]
result = np.zeros((num), np.float32)
for i in range(num):
result[i] = mean_IU(eval_segm[i, ...], gt_segm[i, ...])
return result.mean()
def frequency_weighted_IU_ex(eval_segm, gt_segm):
'''
    eval_segm and gt_segm should have shape (N_slice, height, width)
'''
assert (eval_segm.shape == gt_segm.shape)
num = eval_segm.shape[0]
result = np.zeros((num), np.float32)
for i in range(num):
result[i] = frequency_weighted_IU(eval_segm[i, ...], gt_segm[i, ...])
return result.mean()
def mean_IU(eval_segm, gt_segm):
'''
(1/n_cl) * sum_i(n_ii / (t_i + sum_j(n_ji) - n_ii))
'''
check_size(eval_segm, gt_segm)
cl, n_cl = union_classes(eval_segm, gt_segm)
_, n_cl_gt = extract_classes(gt_segm)
eval_mask, gt_mask = extract_both_masks(eval_segm, gt_segm, cl, n_cl)
IU = list([0]) * n_cl
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, ...]
curr_gt_mask = gt_mask[i, ...]
if (np.sum(curr_eval_mask) == 0) or (np.sum(curr_gt_mask) == 0):
continue
n_ii = np.sum(np.logical_and(curr_eval_mask, curr_gt_mask))
t_i = np.sum(curr_gt_mask)
n_ij = np.sum(curr_eval_mask)
IU[i] = n_ii / (t_i + n_ij - n_ii)
mean_IU_ = np.sum(IU) / n_cl_gt
return mean_IU_
def extract_classes(segm):
cl = np.unique(segm)
n_cl = len(cl)
return cl, n_cl
def union_classes(eval_segm, gt_segm):
eval_cl, _ = extract_classes(eval_segm)
gt_cl, _ = extract_classes(gt_segm)
cl = np.union1d(eval_cl, gt_cl)
n_cl = len(cl)
return cl, n_cl
def check_size(eval_segm, gt_segm):
assert eval_segm.shape == gt_segm.shape
def extract_masks(segm, cl, n_cl):
slices, h, w = segm.shape
masks = np.zeros((n_cl, slices, h, w))
for i, c in enumerate(cl):
masks[i, ...] = segm == c
return masks
def extract_both_masks(eval_segm, gt_segm, cl, n_cl):
eval_mask = extract_masks(eval_segm, cl, n_cl)
gt_mask = extract_masks(gt_segm, cl, n_cl)
return eval_mask, gt_mask
'''
Exceptions
'''
class EvalSegErr(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
[
"[email protected]"
] | |
f8d5dce88b9247d9a90d492f283f79cf33da1598
|
0b0f22b490472e9e547c97780c90434256e894aa
|
/BinomDist.py
|
83439537ac77ccac9d1da0726e19dbbf7b85b873
|
[] |
no_license
|
adilsaju/hrank-py
|
fd679f7c9cb35c0734d56656cce61f6c282b525b
|
ae9b959fbffe7b1e2a4ccca01ae50107ab2f85bc
|
refs/heads/master
| 2020-08-13T18:24:27.423120 | 2019-10-14T14:32:25 | 2019-10-14T14:32:25 | 215,015,940 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
from math import factorial as fact
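# Presumably the HackerRank "Binomial Distribution I" task: with a boy:girl
# birth ratio of 1.09 : 1, p = 1.09 / 2.09 is P(boy), and the loop below sums
# P(X >= 3) for X ~ Binomial(n=6, p) using C(6, i) * p**i * (1-p)**(6-i).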
p=1.09/2.09
# print(p)
b=0
for i in range(3,7):
b+=fact(6)/fact(6-i)/fact(i)*p**i*(1-p)**(6-i)
print(b)
print(f'{b:.3f}')
|
[
"[email protected]"
] | |
f20a8203b58d8a3599ccb48ae2374dc36be9b17c
|
7a9c04075c8520901458ad3ed1c971032cad94ab
|
/pemfc/setup.py
|
24ead4b4356bf1ef3602ae1ca8faa76c566e1cda
|
[
"MIT"
] |
permissive
|
rupertpaulson/PEMFC-Model
|
0b05a41249c709606a81bcb00c00de452ab230fb
|
aec3d57a2d780dc95b2705c37644ec1e9d311d88
|
refs/heads/master
| 2023-04-24T12:32:00.017617 | 2021-04-28T18:30:01 | 2021-04-28T18:30:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,395 |
py
|
import os
import sys
from cx_Freeze import setup, Executable
import pkg_resources
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
# if sys.platform == "win32":
# base = "Win32GUI"
PYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))
os.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')
os.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')
icon_file = r'C:\Users\feierabend\PycharmProjects\PEMFCModel\pemfc\logo-zbt.ico'
include_files = \
[(os.path.join(PYTHON_INSTALL_DIR, 'DLLs', 'tcl86t.dll'),
os.path.join('lib', 'tcl86t.dll')),
(os.path.join(PYTHON_INSTALL_DIR, 'DLLs', 'tk86t.dll'),
os.path.join('lib', 'tk86t.dll')), icon_file]
packages = ["os", "numpy", "scipy", "matplotlib", "tkinter"]
options = {
'build_exe': {
'packages': packages,
'namespace_packages': ['mpl_toolkits'],
'includes': ['matplotlib.backends.backend_tkagg'],
'excludes': ['scipy.optimize', 'pandas', 'PyQt5', 'scipy.spatial',
'numba', 'numpy.fft', 'scipy.signal'],
'include_files': include_files
}
}
setup(name="pemfc",
version="0.1",
description="PEMFC Model",
options=options,
executables=[Executable("gui_app.py", base=base, icon=icon_file)])
|
[
"[email protected]"
] | |
7c5e77e8e8708914b94c95c7da9fc3574ad25c8c
|
a14795a79fd8f39cede7fa5eb86f9717b5c289c2
|
/backend/course/api/v1/serializers.py
|
977b3866deffb183b0133225485e9b022f8b7e3e
|
[] |
no_license
|
crowdbotics-apps/dearfuturescientist-21123
|
fcdbe95a9cd9e8713198b6accbeeb56aa5b0b2d4
|
5b282411ebaf39580b938f6678afc8a36e34aba4
|
refs/heads/master
| 2022-12-30T20:23:25.888830 | 2020-10-05T19:00:56 | 2020-10-05T19:00:56 | 301,510,630 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,622 |
py
|
from rest_framework import serializers
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
class LessonSerializer(serializers.ModelSerializer):
class Meta:
model = Lesson
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
class Meta:
model = SubscriptionType
fields = "__all__"
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
class Meta:
model = Event
fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
class Meta:
model = Recording
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
class Meta:
model = Subscription
fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
class Meta:
model = Enrollment
fields = "__all__"
class ModuleSerializer(serializers.ModelSerializer):
class Meta:
model = Module
fields = "__all__"
|
[
"[email protected]"
] | |
fcb2745a3b28acb9bdab55a49b61a805e5d2198f
|
55493112595d303d39b90ca9112e1d0a52f435e4
|
/WorkforceManagement/views/Computer_View.py
|
4fc447fa4d1e6adaa0a611f92c7069d1ab909d56
|
[] |
no_license
|
NSS-Spontaneous-Spoonbills/Sprint2
|
a06c2ea08dbe58289984591b5ef412242924f86f
|
7fd603ee531556b32b100c5a9f109b0e9207f369
|
refs/heads/master
| 2020-03-25T11:38:55.449223 | 2018-08-13T21:00:35 | 2018-08-13T21:00:35 | 143,741,505 | 0 | 1 | null | 2018-08-13T21:26:08 | 2018-08-06T14:38:30 |
Python
|
UTF-8
|
Python
| false | false | 1,944 |
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from WorkforceManagement.models import Computer
from WorkforceManagement.forms import *
def Computer_List_View(request):
"""Displays all computers in the database
Author: Erin Meaker
"""
computers = Computer.objects.all()
return render(request, 'WorkforceManagement/Computer_List.html', {'computers': computers})
def Computer_Detail_View(request, pk):
"""Displays details about a specific computer
Author: Erin Meaker
"""
computer = get_object_or_404(Computer, pk=pk)
return render(request, 'WorkforceManagement/Computer_Detail.html', {'computer': computer})
def Computer_New_View(request):
"""Displays form for adding new computer to the database
Author: Erin Meaker
"""
if request.method == "POST":
        form = Computer_New_Form(request.POST)
        if form.is_valid():  # validate before saving; saving an unvalidated ModelForm raises
            new_comp = form.save(commit=False)
            new_comp.save()
            return redirect('computer_detail', pk=new_comp.pk)
else:
form = Computer_New_Form()
return render(request, 'WorkforceManagement/Computer_Update.html', {'form': form})
def Computer_Update_View(request, pk):
"""Displays form for updating the computers
Author: Erin Meaker
"""
computer = get_object_or_404(Computer, pk=pk)
if request.method == "POST":
        form = Computer_Update_Form(request.POST, instance=computer)
        if form.is_valid():  # validate before saving
            computer = form.save(commit=False)
            computer.save()
            return redirect('computer_detail', pk=computer.pk)
else:
form = Computer_Update_Form(instance=computer)
return render(request, 'WorkforceManagement/Computer_Update.html', {'form': form})
def Computer_Delete_View(request, pk):
"""Displays template for deleting a computer
Author: Erin Meaker"""
computer = get_object_or_404(Computer, pk=pk)
computer.delete()
return redirect('computer_list')
|
[
"[email protected]"
] | |
008c40060f81f0ef48c112788bee069210b91464
|
ef906ddd1ed8d6a6c365428a6400ebb7bdafb9ab
|
/build/core_msgs/catkin_generated/pkg.installspace.context.pc.py
|
06b90a1e8dd92266ddf8fce8f5d53c2507cbc0a0
|
[] |
no_license
|
Eugene-Jeon/myslam_ver1
|
af68a9b73cee30950a315c5acab48f6610cd8521
|
9b382e6580e1b9a4f3b8b7634b85f37a8e96da02
|
refs/heads/master
| 2023-01-02T13:07:56.714315 | 2020-11-04T08:11:42 | 2020-11-04T08:11:42 | 309,932,122 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;geometry_msgs;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "core_msgs"
PROJECT_SPACE_DIR = "/home/jyj/myslam/install"
PROJECT_VERSION = "0.0.0"
|
[
"[email protected]"
] | |
c44442fa22b72533c3c5546504d0637dc5fa26f0
|
e2c37cd6a236ef51779e127c0ce09c4f7299a5b6
|
/AutoTest_Project_DRInland/multi_processframe/ProjectTools/common.py
|
995d0a01101237dc06f7f849d0d7b035b235321f
|
[] |
no_license
|
Sinxs/airtest
|
78e2ccf796eceaa154765c234b13062a12e4caee
|
786b32c5ae7d579f2419df2459b8a4d1700b974a
|
refs/heads/master
| 2020-06-17T12:23:49.891069 | 2019-11-30T03:40:38 | 2019-11-30T03:40:38 | 195,922,885 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 27,389 |
py
|
# -*- encoding=utf8 -*-
__author__ = "Lee.li"
# import xlwings as xw
import smtplib
import socket
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
import sys
import re
from airtest.core.api import *
import json
import numpy as np
import configparser  # config-file parser
import traceback
import os
import time
import inspect
from PIL import Image
from airtest.core.android.adb import ADB
from poco.drivers.unity3d import UnityPoco
from multi_processframe.ProjectTools import initial
import platform
excelpath = os.path.abspath(os.path.join(os.getcwd(), "../platform/static/Report/Excel"))
"""
Naming conventions:
1. Module names should be lowercase with a lowercase first letter; avoid underscores unless joining a small number of words
2. Class names use CamelCase with a capitalized first letter; private classes may start with a single underscore
3. Function names are all lowercase; separate multiple words with underscores, and prefix private functions with an underscore _
4. Variable names start with a lowercase letter; if there are multiple words, separate them with underscores
5. Constants are all uppercase, with multiple words separated by underscores
"""
index_print = print
def print(*args, **kwargs):
if get_system() == 'Windows':
index_print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), *args, **kwargs)
else:
index_print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + 28800)), *args, **kwargs)
config = configparser.ConfigParser()  # config parser (supports interpolation)
_parentPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))  # parent directory of this file
_rootPath = os.path.dirname(os.path.abspath(_parentPath))  # project root directory
config_Path = _rootPath + '/config.ini'  # path to config.ini
def get_system():
"""
    Get the OS platform name.
    :return: the platform string, e.g. 'Windows'
"""
system = platform.system()
return system
def set_config(case):
key = "progress"
temp = get_value(config_Path, key)
if case not in temp:
temp.append(case)
if "None" in temp:
temp.remove("None")
if '' in temp:
del(temp[0])
getdata = str(temp).replace(" ", "").replace("[", "").replace("]", "").replace("\'", "").replace("\"", "")
if getdata != "":
config.read(config_Path)
config.set("config", key, getdata)
config.write(open(config_Path, "w"))
def uwa_dot(dot):
config.read(config_Path)
config.set("config", "progressnum", str(dot))
config.write(open(config_Path, "w"))
def del_progress():
_parentPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # 获取当前文件上层路径
_rootPath = os.path.dirname(os.path.abspath(_parentPath)) # 获取当前目录根目录
config_Path = _rootPath + '/config.ini' # 获取config.ini的路径
key = "progress"
config.read(config_Path)
config.set("config", key, "")
config.write(open(config_Path, "w"))
def get_value(path, key):
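    # Reads a comma-separated value from the [config] section and returns it as
    # a list, e.g. "case_a,case_b" -> ['case_a', 'case_b'].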
config.read(path, encoding='utf-8-sig')
# config.read(path, encoding='GBK')
temp = config.get('config', key)
result_list = temp.split(',')
return result_list
def get_script_list(file_Path):
"""
    Collects the test-case module script names under the TestCase directory.
    :param file_Path: the directory to scan, i.e. the TestCase path
    :return: the list of test-case module names under TestCase
"""
    dir_List = os.listdir(file_Path)  # list of file names in the directory
    script_List = []  # empty list that will hold the script module names
    for i in range(len(dir_List)):
        mode_Name = dir_List[i].split(".")  # split the file name into ["name", "py"] and assign it to mode_Name
        if mode_Name[0] != "__init__" and mode_Name[0] != "__pycache__":  # drop the package constructor and cache entries
            if mode_Name[1].lower() == "py":  # keep only the .py module files
                script_List.append(mode_Name[0])  # record names that pass both checks
return script_List
def deviceconnect(devices):
"""
    Shared helper for instantiating a poco driver.
    :param devices: the target device serial
    :return: the UnityPoco instance
"""
dev = connect_device("android:///" + devices)
poco = UnityPoco(device=dev)
return poco
def goback(devices):
try:
poco = deviceconnect(devices)
for i in range(3):
if poco("Close").exists():
poco("Close").click()
else:
return None
except ConnectionAbortedError as e:
print(f"{e} 主机断开连接,杀掉游戏进程,脚本重新启动")
initial.restart_app(devices)
def printred(mes, end='\n'):
"""
    Prints red bold text so error messages stand out in the HTML report.
    :param mes: the message to print
    :return: the decorated print call
"""
if end == "," or end == ",":
return print(f"<font color=\"red\" ><b>ERROR:</b>{mes}</font>", end=",")
elif end == "":
return print(f"<font color=\"red\" ><b>ERROR:</b>{mes}</font>", end=" ")
else:
return print(f"<font color=\"red\" ><b>ERROR:</b>{mes}</font>")
def printgreen(mes, end='\n'):
"""
    Prints green text so success messages stand out in the HTML report.
    :param mes: the message to print
    :return: the decorated print call
"""
if end == "," or end == ",":
return print(f"<font color=\"green\" >{mes}</font>", end=",")
elif end == "":
return print(f"<font color=\"green\" >{mes}</font>", end=" ")
else:
return print(f"<font color=\"green\" >{mes}</font>")
def printcolor(mes, color, end='\n'):
"""
    Prints text in a custom color for the HTML report.
    :param mes: the message to print
    :param color: the font color, e.g. red or green
    :return: the decorated print call
"""
if end == "," or end == ",":
return print(f"<font color=\"{color}\" >{mes}</font>", end=",")
elif end == "":
return print(f"<font color=\"{color}\" >{mes}</font>", end=" ")
else:
return print(f"<font color=\"{color}\" >{mes}</font>")
def sendemail(report_Name, receivers, mailtype):
addr = socket.gethostbyname(socket.gethostname())
SENDER = '[email protected]'
PASSWORD = 'mjpuxtwhyatgbcga'
    # RECEIVERS = ['[email protected]']  # recipients; set to your QQ mailbox or any other mailbox
if mailtype == 1:
        mailtitle = 'UWA -- Dragon Nest (CN) performance test finished; fetch the report from UWA yourself'
else:
        mailtitle = 'BTV -- Dragon Nest (CN) automated test report'
report_Name = f'{report_Name}.html'
# htmlfile = report_Path + '\\' + report_Name # 获取报告路径
# accessory = MIMEApplication(open(htmlfile,'rb').read())
# accessory.add_header('Content-Disposition', 'attachment', filename=report_Name)
try:
message = MIMEMultipart()
message['From'] = Header("Lee.li", 'utf-8')
message['To'] = Header("123u.com", 'utf-8')
message['Subject'] = Header(mailtitle, 'utf-8')
# 邮件正文内容
            message.attach(MIMEText('''Dear all:
        The test report has been generated; for details see http://''' + str(addr) + ''':8000/report
        Press F5 on the page to refresh to the latest test report.
        The report name is: ''' + str(report_Name)))
server = smtplib.SMTP_SSL("smtp.qq.com", 465)
server.login(SENDER, PASSWORD)
server.sendmail(SENDER, receivers, message.as_string())
server.quit()
print("邮件发送成功...")
except smtplib.SMTPException as e:
        print('Failed to send mail...:', e)  # log the error
def _get_screen_size(devices):
    'Get the phone screen size'
size_str = os.popen(f'adb -s {devices} shell wm size').read()
if not size_str:
        print('Please install ADB and its driver, and configure the environment variables')
sys.exit()
m = re.search(r'(\d+)x(\d+)', size_str)
if m:
sizeheight = "{height}".format(height=m.group(1))
sizewidth = "{width}".format(width=m.group(2))
return int(sizeheight), int(sizewidth)
return "1920x1080"
def setswipe(type, x, y, devices):
if type == 1: # 1280 * 720
devicessizi = _get_screen_size(devices)
x0 = x[0] / 1280 * devicessizi[1]
y0 = x[1] / 720 * devicessizi[0]
x1 = y[0] / 1280 * devicessizi[1]
y1 = y[1] / 720 * devicessizi[0]
swipe((x0, y0), (x1, y1), 5)
def settouch(type, x, y, devices, times=1):
if type == 1: # 1280 * 720
devicessizi = _get_screen_size(devices)
x = x / 1280 * devicessizi[0]
y = y / 720 * devicessizi[1]
touch([x, y], times=times)
def create_log_json(start, nowtime, devices):
devices_name = os.popen(f"adb -s {devices} shell getprop ro.product.model").read().replace(' ', '')
nowstime = f'{time.strftime("%Y-%m-%d-%H-%M-%S", start)}'
if get_value(config_Path, 'uwatype')[0] == '1':
report_Name = devices_name.split()[0] + "-UWA_" + str(nowstime)
else:
report_Name = devices_name.split()[0] + "-BTV_" + str(nowstime)
# 获取测试报告路径
report_path = (os.path.abspath(os.path.join(os.getcwd(), f"../platform/static/Report/{report_Name}")))
datapath = report_path + '/data'
create_time = time.strftime("%m%d%H%M", nowtime)
jsonfile = datapath + f'/{create_time}_{report_Name}_log.json'
if os.path.exists(jsonfile):
raise Exception("FileHasExisted")
f = open(jsonfile, "w")
resultData = {
"Time_series": [],
"TotalMemory": [],
"AllocatedMemory": [],
"UsedMemory": [],
"FreeMemory": [],
"TotalCPU": [],
"AllocatedCPU": [],
"FPS": [],
"PNGAddress": [],
"data_count": [],
}
f.write(json.dumps(resultData))
f.close()
return jsonfile
def record_to_json(jsonfilepath, list):
for i in range(len(list)):
if list[i] == "N/a":
list[i] = "0"
list[1] = float(list[1])
list[2] = float(list[2])
list[3] = float(list[3])
list[4] = float(list[4])
    # TODO: TotalCPU was processed but AllocatedCPU was not given the same treatment, so the report shows the app's share above the total share
list[5] = float(list[5]) * 100
list[6] = float(list[6]) * 100
list[7] = float(list[7])
f = open(jsonfilepath, "r+")
strdata = f.read()
f.seek(0)
    dictdata = json.loads(strdata)
dictdata["Time_series"].append(list[0])
dictdata["TotalMemory"].append(list[1])
dictdata["AllocatedMemory"].append(list[2])
dictdata["UsedMemory"].append(list[3])
dictdata["FreeMemory"].append(list[4])
dictdata["TotalCPU"].append(list[5])
dictdata["AllocatedCPU"].append(list[6])
dictdata["FPS"].append(list[7])
dictdata["PNGAddress"].append(list[8])
strdata = json.dumps(dictdata)
f.write(strdata)
f.close()
def calculate_by_json(jsonfile):
f = open(jsonfile, "r+")
strdata = f.read()
f.seek(0)
dictdata = json.loads(strdata)
memorylist = list(dictdata["AllocatedMemory"])
cpulist = list(dictdata["AllocatedCPU"])
fpslist = list(dictdata["FPS"])
while 0 in memorylist:
memorylist.remove(0)
while 0 in cpulist:
cpulist.remove(0)
while 0 in fpslist:
fpslist.remove(0)
Max_AllocatedMemory = max(memorylist)
Min_AllocatedMemory = min(memorylist)
Avg_AllocatedMemory = format(np.average(memorylist), ".2f")
Max_AllocatedCPU = max(cpulist)
Min_AllocatedCPU = min(cpulist)
Avg_AllocatedCPU = format(np.average(cpulist), ".2f")
# Max_FPS = max(fpslist)
# Min_FPS = min(fpslist)
# Avg_FPS = format(np.average(fpslist), ".2f")
Max_FPS = Min_FPS = Avg_FPS = "N/a"
    # Guard against apps/devices where FPS cannot be sampled, which would make max() raise and abort the flow.
if len(fpslist) != 0:
Max_FPS = max(fpslist)
Min_FPS = min(fpslist)
Avg_FPS = format(np.average(fpslist), ".2f")
dictdata["data_count"].append({"Max_AllocatedMemory": [Max_AllocatedMemory],
"Min_AllocatedMemory": [Min_AllocatedMemory],
"Avg_AllocatedMemory": [Avg_AllocatedMemory],
"Max_AllocatedCPU": [str(Max_AllocatedCPU) + "%"],
"Min_AllocatedCPU": [str(Min_AllocatedCPU) + "%"],
"Avg_AllocatedCPU": [str(Avg_AllocatedCPU) + "%"],
"Max_FPS": [Max_FPS], "Min_FPS": [Min_FPS],
"Avg_FPS": [Avg_FPS]})
strdata = json.dumps(dictdata)
f.write(strdata)
f.close()
# if __name__=="__main__":
# jsonfile=r"D:\AirtestID\AutoTest_Project_DRInland\platform\static\Report\Excel\08051847_62001_log.json"
# calculate_by_json(jsonfile)
# # nowtime = time.localtime()
# # device = "123465"
# # create_log_json(nowtime,device)
# Create a log excel workbook to record performance data
# def create_log_excel(start, nowtime, devices):
# devices_name = os.popen(f"adb -s {devices} shell getprop ro.product.model").read().replace(' ', '')
# nowstime = f'{time.strftime("%Y-%m-%d-%H-%M-%S", start)}'
# report_Name = devices_name.split()[0] + "_" + str(nowstime)
# # 获取测试报告路径
# report_path = (os.path.abspath(os.path.join(os.getcwd(), f"../platform/static/Report/{report_Name}")))
# datapath = report_path + '/data'
# create_time = time.strftime('%m%d%H%M', nowtime)
# exclefile = datapath + f'/{create_time}_{devices_name}_log.xlsx'
# app = xw.App(visible=True, add_book=False)
# wb = app.books.add()
# sheet = wb.sheets("Sheet1")
# sheet.range('A1').value = ["Time", "TotalMemory(MB)", "AllocatedMemory(MB)", "UsedMemory(MB)", "FreeMemory(MB)",
# "TotalCPU", "AllocatedCPU", "FPS", "", "PNG", "PNGAddress"]
# sheet.range('A1:I1').color = 205, 197, 191
# print("创建Excel文件:{}".format(exclefile))
# return exclefile, sheet, wb
# Aggregate all data already present in a sheet and return the average, max and min of each column.
def calculate(sheet):
rng = sheet.range('A1').expand()
nrow = rng.last_cell.row
AllocatedMemory = sheet.range("C2:C{}".format(nrow)).value
sum_UsedMemory = sheet.range("D2:D{}".format(nrow)).value
sum_FreeMemory = sheet.range("E2:E{}".format(nrow)).value
TotalCPU = sheet.range("F2:F{}".format(nrow)).value
AllocatedCPU = sheet.range("G2:G{}".format(nrow)).value
FPS = sheet.range("H2:H{}".format(nrow)).value
JankCount = sheet.range("I2:I{}".format(nrow)).value
sum_TotalCPU = []
while "N/a" in AllocatedMemory:
AllocatedMemory.remove("N/a")
while "N/a" in AllocatedCPU:
AllocatedCPU.remove("N/a")
while "N/a" in FPS:
FPS.remove("N/a")
while "N/a" in JankCount:
JankCount.remove("N/a")
for i in range(len(TotalCPU)):
tmp = float(TotalCPU[i].split("%")[0])
sum_TotalCPU.append(tmp)
avg_am, max_am, min_am = getcount(AllocatedMemory)
avg_um, max_um, min_um = getcount(sum_UsedMemory)
avg_fm, max_fm, min_fm = getcount(sum_FreeMemory)
avg_tc, max_tc, min_tc = getcount(sum_TotalCPU)
avg_ac, max_ac, min_ac = getcount(AllocatedCPU)
avg_fps, max_fps, min_fps = getcount(FPS)
avg_jc, max_jc, min_jc = getcount(JankCount)
if avg_tc == "N/a":
pass
else:
avg_tc = str(format(avg_tc, ".2f")) + "%"
max_tc = str(format(max_tc, ".2f")) + "%"
min_tc = str(format(min_tc, ".2f")) + "%"
if avg_ac == "N/a":
pass
else:
avg_ac = str(format(avg_ac * 100, ".2f")) + "%"
max_ac = str(format(max_ac * 100, ".2f")) + "%"
min_ac = str(format(min_ac * 100, ".2f")) + "%"
avglist = ["平均值", "", avg_am, avg_um, avg_fm, avg_tc, avg_ac, avg_fps, avg_jc]
maxlist = ["最大值:", "", max_am, max_um, max_fm, max_tc, max_ac, max_fps, max_jc]
minlist = ["最小值:", "", min_am, min_um, min_fm, min_tc, min_ac, min_fps, min_jc]
return avglist, maxlist, minlist
# Compute the average, max, and min of a list of values
def getcount(values):
    total = avg = max_v = min_v = 0
    flag = 0
    try:
        for item in values:
            flag = flag + 1
            if flag == 1:
                total = float(item)
                max_v = float(item)
                min_v = float(item)
            else:
                total = total + float(item)
                if float(item) > max_v:
                    max_v = float(item)
                elif float(item) < min_v:
                    min_v = float(item)
    except Exception as e:
        print(e)
    if total == 0:
        avg = "N/a"
        max_v = "N/a"
        min_v = "N/a"
    else:
        avg = float(format(total / flag, ".2f"))
    return avg, max_v, min_v
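# Illustrative only (not part of the original script): getcount returns
# (avg, max, min) for any list of numeric strings or numbers, e.g.
#   getcount(["1", "2", "3"])  ->  (2.0, 3.0, 1.0)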
# Append the given row of values to the next free row of the given Excel sheet
def record_to_excel(sheet, row_values, **kwargs):
    rng = sheet.range('A1').expand()
    nrow = rng.last_cell.row
    currentcell = "A" + str(nrow + 1)
    currentcellpng = "J" + str(nrow + 1)
    currentcellpngvalue = "K" + str(nrow + 1)
    currentcellrange = currentcell + ":" + "H" + str(nrow + 1)
    sheet.range(currentcell).value = row_values
if nrow % 2 == 0:
sheet.range(currentcellrange).color = 173, 216, 230
else:
sheet.range(currentcellrange).color = 221, 245, 250
for key, value in kwargs.items():
if key == "color":
sheet.range(currentcellrange).color = value
if key == "png":
sheet.range(currentcellpng).add_hyperlink(value, "截图", "提示:点击打开截图")
sheet.range(currentcellpngvalue).value = value
sheet.autofit()
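# Illustrative call (all values and the path are hypothetical): append one
# sample row, highlight it red, and attach a screenshot hyperlink.
#   record_to_excel(sheet, ["18:00:01", 2048, 512, 1500, 548, "35%/800%", "0.12", 60],
#                   color=(255, 0, 0), png=r"D:\shots\case1.png")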
# Find the column whose header matches Key and return its values (excluding the last 3 summary rows) as a list
def get_series(sheet, Key):
rng = sheet.range('A1').expand()
nrow = rng.last_cell.row - 3
rng2 = sheet.range('A1:K1')
serieslist = []
for key in rng2:
if key.value == Key:
cum = key.address
cum = cum.split("$")[1]
tmp = cum + "2:" + cum + str(nrow)
serieslist = sheet.range(tmp).value
break
if Key == "TotalCPU":
for i in range(len(serieslist)):
serieslist[i] = float(
format(float(serieslist[i].split("%")[0]) / float(serieslist[i].split("%")[1].split("/")[1]) * 100,
"0.2f"))
if serieslist[i] == "N/a":
serieslist[i] = 0
if Key == "AllocatedCPU":
for i in range(len(serieslist)):
if serieslist[i] == "N/a":
serieslist[i] = 0
else:
serieslist[i] = float(format(float(serieslist[i]) * 100, "0.2f"))
return serieslist
# Look up the series for the given key and return it as JSON
def get_json(sheet, Key):
series = get_series(sheet, Key)
series_json = json.dumps({Key: series})
return series_json
adb = ADB().adb_path
# Initializes MiniCap on a device; see https://blog.csdn.net/saint_228/article/details/92142914 for details
def ini_MiniCap(devices):
try:
parent_path = os.path.abspath(os.path.dirname(inspect.getfile(inspect.currentframe())) + os.path.sep + ".")
root_path = os.path.abspath(os.path.dirname(parent_path) + os.path.sep + ".")
ABIcommand = adb + " -s {} shell getprop ro.product.cpu.abi".format(devices)
ABI = os.popen(ABIcommand).read().strip()
AndroidVersion = os.popen(adb + " -s {} shell getprop ro.build.version.sdk".format(devices)).read().strip()
airtest_minicap_path = os.path.abspath(
os.path.dirname(root_path) + os.path.sep + ".") + "\\airtest\\core\\android\\static\\stf_libs"
airtest_minicapso_path = os.path.abspath(os.path.dirname(
root_path) + os.path.sep + ".") + "\\airtest\\core\\android\\static\\stf_libs\\minicap-shared\\aosp\\libs\\" + "android-{}\\{}\\minicap.so".format(
AndroidVersion, ABI)
push_minicap = adb + " -s {} push {}/{}/minicap".format(devices, airtest_minicap_path,
ABI) + " /data/local/tmp/"
push_minicapso = adb + " -s {} push {}".format(devices, airtest_minicapso_path) + " /data/local/tmp/"
os.popen(push_minicap)
os.popen(push_minicapso)
chmod = adb + " -s {} shell chmod 777 /data/local/tmp/*".format(devices)
os.popen(chmod)
wm_size_command = adb + " -s {} shell wm size".format(devices)
vm_size = os.popen(wm_size_command).read()
vm_size = vm_size.split(":")[1].strip()
start_minicap = adb + " -s {} shell LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/minicap -P {}@{}/0 -t".format(
devices, vm_size, vm_size)
result = os.popen(start_minicap).read()
print(result)
print("设备{}上已经成功安装并开启了MiniCap。".format(devices))
except Exception as e:
print(e, traceback.format_exc())
def get_screen_shot(start, starttime, devices, action):
"""
实现手机截图功能
:param devices: 截图的设备
:param start: 截图发生的时间
:param action: 当时的操作描述,属于哪个测试用例下的
:return:
"""
devices_name = os.popen(f"adb -s {devices} shell getprop ro.product.model").read().replace(' ', '')
nowtime = f'{time.strftime("%Y-%m-%d-%H-%M-%S", start)}'
if get_value(config_Path, 'uwatype')[0] == '1':
report_Name = devices_name.split()[0] + "-UWA_" + str(nowtime)
else:
report_Name = devices_name.split()[0] + "-BTV_" + str(nowtime)
# 获取测试报告路径
report_path = (os.path.abspath(os.path.join(os.getcwd(), f"../platform/static/Report/{report_Name}")))
screenpath = report_path + '/Screenshot'
pngtime = time.strftime('%Y%m%d_%H%M%S', time.localtime(starttime))
picture_PNG = screenpath + "/" + pngtime + "_" + "_" + action + ".png"
packname = pngtime + "_" + "_" + action + ".png"
os.system("adb -s " + devices + " shell screencap -p /sdcard/screencap.png") # 调用adb命令实现截图
file_Path = open(picture_PNG, "a+", encoding="utf-8") # 打开文件启用添加模式
file_Path.close() # 关闭打开的文件路径
time.sleep(1)
os.system(f"adb -s {devices} pull /sdcard/screencap.png {picture_PNG}") # 把截图放到截图路径中去
time.sleep(1)
print(
"<img src='" + f"/static/Report/{report_Name}/Screenshot/" + packname + "' width=600 />") # 通过src路径获取图片,并显示出来
return picture_PNG
def GetScreen(start, starttime, devices, action):
ABIcommand = adb + " -s {} shell getprop ro.product.cpu.abi".format(devices)
ABI = os.popen(ABIcommand).read().strip()
if ABI == "x86":
png = GetScreenbyADBCap(start, starttime, devices, action)
else:
try:
png = GetScreenbyMiniCap(start, starttime, devices, action)
except:
print("MiniCap截图失败,换ADB截图")
png = GetScreenbyADBCap(start, starttime, devices, action)
return png
# Take a screenshot using plain adb screencap
def GetScreenbyADBCap(start, starttime, devices, action):
devices_name = os.popen(f"adb -s {devices} shell getprop ro.product.model").read().replace(' ', '')
nowtime = f'{time.strftime("%Y-%m-%d-%H-%M-%S", start)}'
if get_value(config_Path, 'uwatype')[0] == '1':
report_Name = devices_name.split()[0] + "-UWA_" + str(nowtime)
else:
report_Name = devices_name.split()[0] + "-BTV_" + str(nowtime)
# 获取测试报告路径
report_path = (os.path.abspath(os.path.join(os.getcwd(), f"../platform/static/Report/{report_Name}")))
screenpath = report_path + '/Screenshot'
    # Assign the nickname first to avoid malformed screenshot file names (remote device ids contain ":").
if ":" in devices:
nickname = devices.split(":")[1]
else:
nickname = devices
pngtime = time.strftime('%Y%m%d_%H%M%S', time.localtime(starttime))
png = screenpath + "/" + pngtime + nickname + "_" + action + ".png"
pngname = pngtime + nickname + "_" + action + ".png"
os.system(adb + " -s " + devices + " shell screencap -p /sdcard/screencap.png")
time.sleep(1)
fp = open(png, "a+", encoding="utf-8")
fp.close()
os.system(adb + " -s " + devices + " pull /sdcard/screencap.png " + png)
time.sleep(0.5)
    # adb screenshots are too large and need compressing; full screen, using compressImage's default ratio.
compressImage(png)
print("<img src='" + f"/static/Report/{report_Name}/Screenshot/" + pngname + "' width=600 />")
return png
# Take a screenshot via MiniCap. The phone must already have minicap and minicap.so installed; devices used with STF or AirtestIDE install them automatically, otherwise run Init_MiniCap.py to install them manually.
def GetScreenbyMiniCap(start, starttime, devices, action):
devices_name = os.popen(f"adb -s {devices} shell getprop ro.product.model").read().replace(' ', '')
nowtime = f'{time.strftime("%Y-%m-%d-%H-%M-%S", start)}'
if get_value(config_Path, 'uwatype')[0] == '1':
report_Name = devices_name.split()[0] + "-UWA_" + str(nowtime)
else:
report_Name = devices_name.split()[0] + "-BTV_" + str(nowtime)
# 获取测试报告路径
report_path = (os.path.abspath(os.path.join(os.getcwd(), f"../platform/static/Report/{report_Name}")))
screenpath = report_path + '/Screenshot'
    # Assign the nickname first to avoid malformed screenshot file names (remote device ids contain ":").
if ":" in devices:
nickname = devices.split(":")[1]
else:
nickname = devices
    # build the screenshot path
pngtime = time.strftime('%Y%m%d_%H%M%S', time.localtime(starttime))
png = screenpath + "/" + pngtime + nickname + "_" + action + ".png"
pngname = pngtime + nickname + "_" + action + ".png"
    # get the device resolution
wmsizecommand = f"adb -s {devices} shell wm size"
size = os.popen(wmsizecommand).read()
size = size.split(":")[1].strip()
slist = size.split("x")
size = slist[1] + "x" + slist[0]
    # fill the device id and resolution into the minicap command to capture the screen
screen = f"adb -s {devices} shell \"LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/minicap -P {size}@{size}/0 -s > /sdcard/screencap.png\""
os.system(screen)
time.sleep(0.5)
os.system(adb + " -s " + devices + " pull /sdcard/screencap.png " + png)
print("<img src='" + f"/static/Report/{report_Name}/Screenshot/" + pngname + "' width=600 />")
return png
# Batch image compression: cr is the compression ratio, the remaining parameters select the screen region to keep
def compressImage(path, cr=0.7, left=0, right=1, top=0, bottom=1):
    # open the original image
    sImg = Image.open(path)
    w, h = sImg.size  # absolute screen size
    box = (int(w * left), int(h * top), int(w * right), int(h * bottom))
    sImg = sImg.crop(box)
    time.sleep(0.1)
    # apply the target size and resampling option
    dImg = sImg.resize((int(w * cr), int(h * cr)), Image.ANTIALIAS)
    time.sleep(0.1)
    # overwrite the original file
    dImg.save(path)  # save() can also take encoder options such as JPEG quality
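# Illustrative call (the path is hypothetical): keep the middle 80% of the
# height and shrink the result to half size.
#   compressImage(r"D:\shots\case1.png", cr=0.5, top=0.1, bottom=0.9)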
|
[
"[email protected]"
] | |
183b2cca5672ea6af6d3cf9b56a760a15a8e0b67
|
7595c5c3d9b7ec68e87a388b4ba0a67fba65416b
|
/STEP1_feature_selection.py
|
2a6c1829a89cfc618b5202d493510b00fc2f406d
|
[] |
no_license
|
paul-baumann/SELECTOR
|
aa5769d54c67a168881ecdce8190f224d77a461d
|
0681665aaffe8fb194e715b241ba6df8e9fdd457
|
refs/heads/master
| 2020-09-19T21:29:28.641590 | 2017-03-17T20:11:38 | 2017-03-17T20:11:38 | 68,096,380 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,590 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#############################################
# This class is an entry point for SELECTOR.
# It allows specifying database address and
# the experiments that should be executed.
#
# copyright Paul Baumann
#############################################
import sys
import datetime
import threading
from time import time
import Database_Handler
import UserData
from EvaluationRun import EvaluationRun
import SFFS
# import ResultAnalysis
import UserDataAssemble
import NextPlaceOrSlotPredictionTask
import warnings
warnings.filterwarnings('ignore')
from pylab import *
DEBUG_LEVEL = 6
THREAD_LEVEL = 0
IS_PER_DAY_PERIOD = False
##
# This method adds a log entry to the database as soon as an experiments has been started.
# It therefore allows identifying unfinished or cancelled experiments in the database
# and thus to easily remove them.
##
def Save_Start_Evaluation_Run_To_DB(evaluation_run):
# store prediction run details
values = []
values.append(evaluation_run.userData.userId)
values.append(datetime.datetime.now().strftime('%d-%m-%Y-%H:%M:%S'))
values.append(evaluation_run.selected_algorithm)
values.append(evaluation_run.selected_metric)
values.append(evaluation_run.userData.optimization_set.ground_truth.shape[0])
values.append(evaluation_run.userData.training_set.ground_truth.shape[0])
values.append(evaluation_run.userData.test_set.ground_truth.shape[0])
values.append(evaluation_run.userData.optimization_set.ground_truth.shape[0] + evaluation_run.userData.training_set.ground_truth.shape[0] + evaluation_run.userData.test_set.ground_truth.shape[0])
values.append(', '.join(str(x) for x in list(evaluation_run.userData.optimization_set.rows_mask)))
values.append(', '.join(str(x) for x in list(evaluation_run.userData.training_set.rows_mask)))
values.append(', '.join(str(x) for x in list(evaluation_run.userData.test_set.rows_mask)))
values.append(evaluation_run.is_network)
values.append(evaluation_run.is_temporal)
values.append(evaluation_run.is_spatial)
values.append(evaluation_run.is_context)
values.append(evaluation_run.start_time)
values.append(evaluation_run.end_time)
dbHandler = Database_Handler.Get_DB_Handler()
run_fields = ['user_id', 'start_timestamp', 'selected_algorithm', 'selected_metric', 'number_of_optimization_data',
'number_of_training_data', 'number_of_test_data', 'number_of_total_data',
'optimization_array', 'training_array', 'test_array', 'is_network', 'is_temporal', 'is_spatial', 'is_context',
'start_time', 'end_time']
insert_id = dbHandler.insert("%s_Prediction_Run" % (evaluation_run.task), run_fields, values)
evaluation_run.run_id = insert_id
return evaluation_run
##
# This method logs the end of an experiment
##
def Save_End_Evaluation_Run_To_DB(evaluation_run):
# store prediction run details
timestamp = datetime.datetime.now().strftime('%d-%m-%Y-%H:%M:%S')
dbHandler = Database_Handler.Get_DB_Handler()
query = "UPDATE %s_Prediction_Run SET end_timestamp = '%s' WHERE id = %i" % (evaluation_run.task, timestamp, evaluation_run.run_id)
dbHandler.update(query)
##
# SELECTOR
##
def Run_Main_Loop():
start = time()
## PREPARE TO RUN THE LOOP
list_of_metrics = [EvaluationRun.metrics_next_place, EvaluationRun.metrics_next_place, EvaluationRun.metrics_next_place]
algorithms = [EvaluationRun.alg_knn_dyn, EvaluationRun.alg_perceptron, EvaluationRun.alg_decision_tree, EvaluationRun.alg_svm];
# Select a configuration depending on whether the mobility should be predicted for specific day periods of time or not
if IS_PER_DAY_PERIOD:
start_periods = [1, 49, 69];
end_periods = [48, 68, 96];
tasks = [EvaluationRun.task_next_place_daily]
task_objects = [NextPlaceOrSlotPredictionTask]
else:
start_periods = [1];
end_periods = [96];
tasks = [EvaluationRun.task_next_slot_place, EvaluationRun.task_next_slot_transition, EvaluationRun.task_next_place]
task_objects = [NextPlaceOrSlotPredictionTask, NextPlaceOrSlotPredictionTask, NextPlaceOrSlotPredictionTask]
# read user list
text_file = open("userids.txt", "r")
userids = text_file.read().split('\n')
text_file.close()
# input parameters allow specifying which tasks for which users should be executed
start_task = int(sys.argv[1]) - 1
end_task = int(sys.argv[2])
    start_user_id = int(sys.argv[3])
end_user_id = int(sys.argv[4])
# update arrays according to the user's parameters
list_of_metrics = list_of_metrics[start_task:end_task]
tasks = tasks[start_task:end_task]
task_objects = task_objects[start_task:end_task]
    userids = userids[start_user_id:end_user_id]
## RUN THE LOOP
for user in userids:
task_id = -1
threads = []
for current_task in tasks:
task_id = task_id + 1
# Execute for each day period of time
for time_index in range(len(start_periods)):
userData = UserData.UserData()
userData.userId = int(user)
evaluation_run = EvaluationRun()
evaluation_run.task = current_task
evaluation_run.task_object = task_objects[task_id]
# feature group selection
evaluation_run.is_network = True;
evaluation_run.is_temporal = True;
evaluation_run.is_spatial = True;
evaluation_run.is_context = True;
evaluation_run.start_time = start_periods[time_index]
evaluation_run.end_time = end_periods[time_index]
# get data
if DEBUG_LEVEL > 0:
print("Loading... USER: %s -- after: %s seconds" % (user, time() - start))
evaluation_run.userData = userData
user_data_assemble = UserDataAssemble.UserDataAssemble(evaluation_run)
evaluation_run = user_data_assemble.Get_User_Data()
if DEBUG_LEVEL > 0:
print("Loading DONE -- USER: %s -- after: %s seconds" % (user, time() - start))
# run threads
if THREAD_LEVEL > 2: # usually 2
task_thread = threading.Thread( target=Thread_Task, args=(task_id, evaluation_run, algorithms, list_of_metrics, userData, start,) )
threads.append(task_thread)
task_thread.start()
else:
Thread_Task(task_id, evaluation_run, algorithms, list_of_metrics, userData, start)
if THREAD_LEVEL > 2:
for thread in threads:
thread.join()
print ("FINISH after : %s seconds" % (time() - start))
def Thread_Task(task_id, evaluation_run, algorithms, list_of_metrics, userData, start):
threads = []
for current_algorithm in algorithms:
metrics = list_of_metrics[task_id]
current_evaluation_run = EvaluationRun()
current_evaluation_run.copy(evaluation_run)
current_evaluation_run.selected_algorithm = current_algorithm
if THREAD_LEVEL > 1:
algorithm_thread = threading.Thread( target=Thread_Algorithm, args=(current_evaluation_run, metrics, start,) )
threads.append(algorithm_thread)
algorithm_thread.start()
else:
Thread_Algorithm(current_evaluation_run, metrics, start)
if THREAD_LEVEL > 1:
for thread in threads:
thread.join()
current_task = evaluation_run.task
user = evaluation_run.userData.userId
if DEBUG_LEVEL > 1:
print("Done with TASK: %s, user: %s -- after: %s seconds" % (current_task, user, time() - start))
print("######################################################")
##
# Run SELECTOR in different parallel threads for each machine learning algorithm
##
def Thread_Algorithm(evaluation_run, metrics, start):
current_algorithm = evaluation_run.selected_algorithm
current_task = evaluation_run.task
user = evaluation_run.userData.userId
threads = []
for current_metric in metrics:
current_evaluation_run = EvaluationRun()
current_evaluation_run.copy(evaluation_run)
current_evaluation_run.selected_metric = current_metric
if THREAD_LEVEL > 0:
metric_thread = threading.Thread( target=Thread_Metric, args=(current_evaluation_run, start,) )
threads.append(metric_thread)
metric_thread.start()
else:
Thread_Metric(current_evaluation_run, start)
if THREAD_LEVEL > 0:
for thread in threads:
thread.join()
if DEBUG_LEVEL > 2:
print("Done with ALGORITHM: %s, task: %s, user: %s -- after: %s seconds" % (current_algorithm, current_task, user, time() - start))
print("######################################################")
##
# Run SELECTOR in different parallel threads for each metric
##
def Thread_Metric(evaluation_run, start):
current_metric = evaluation_run.selected_metric
current_algorithm = evaluation_run.selected_algorithm
current_task = evaluation_run.task
user = evaluation_run.userData.userId
if DEBUG_LEVEL > 4:
print("Starting with metric: %s, algorithm: %s, task: %s, user: %s" % (current_metric, current_algorithm, current_task, user))
##### save data to database
evaluation_run = Save_Start_Evaluation_Run_To_DB(evaluation_run)
# prepare data
evaluation_run.training_set = evaluation_run.userData.optimization_set
evaluation_run.test_set = evaluation_run.userData.training_set
# run SFFS
sffs = SFFS.SFFS(evaluation_run, 10, start)
sffs.Run_SFFS()
Save_End_Evaluation_Run_To_DB(evaluation_run)
if DEBUG_LEVEL > 4:
print("Done with METRIC: %s, algorithm: %s, task: %s, user: %s, run_id: %s, day period: %s-%s -- after: %s seconds" % (current_metric,
current_algorithm,
current_task,
user,
evaluation_run.run_id,
evaluation_run.start_time,
evaluation_run.end_time,
time() - start))
print("######################################################")
##
# Entry point of the script
##
if __name__ == "__main__":
Run_Main_Loop()
|
[
"[email protected]"
] | |
4eafbe41f6fee575aa359ac6a6e2c5ad90173a6b
|
6fe7b54962042f95526b5c8cff12b88394bc63cc
|
/Intro.py
|
8ad175ca0fa03d7ed063fae35be75944eabe76e2
|
[] |
no_license
|
elblogbruno/Warship.py
|
70aeb9c42a3296e60f65a74bdf6e3ae0ff09e114
|
e00d2ffb23971cfade2e6d17906ef0501d023b00
|
refs/heads/master
| 2022-06-14T07:35:45.740227 | 2019-03-29T18:27:01 | 2019-03-29T18:27:01 | 169,792,029 | 1 | 0 | null | 2019-03-29T18:26:51 | 2019-02-08T20:06:28 |
Python
|
UTF-8
|
Python
| false | false | 1,883 |
py
|
from __future__ import division
from pyfiglet import Figlet
from WarshipGame import *
import os
from asciimatics.effects import Scroll, Mirage, Wipe, Cycle, Matrix, \
BannerText, Stars, Print
from asciimatics.particles import DropScreen
from asciimatics.renderers import FigletText, SpeechBubble, Rainbow, Fire
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError
import sys
playIntro = True
def demo(screen):
scenes = []
screen.print_at('Press a to enter',0,0)
effects = [
Matrix(screen, stop_frame=100),
Mirage(
screen,
FigletText("Warship.py"),
screen.height // 2 - 3,
Screen.COLOUR_GREEN,
start_frame=100,
stop_frame=200),
Wipe(screen, start_frame=150),
Cycle(
screen,
FigletText("Warship.py"),
screen.height // 2 - 3,
start_frame=200),
]
scenes.append(Scene(effects, 250, clear=False))
effects = [
Mirage(
screen,
FigletText("Coded and"),
screen.height,
Screen.COLOUR_GREEN),
Mirage(
screen,
FigletText("designed by:"),
screen.height + 8,
Screen.COLOUR_GREEN),
Mirage(
screen,
FigletText("Bruno Moya"),
screen.height + 16,
Screen.COLOUR_RED),
Scroll(screen, 3),
]
#playIntro = False
scenes.append(Scene(effects, (screen.height + 24) * 3))
screen.play(scenes,stop_on_resize=True, repeat=False)
if __name__ == "__main__":
    while playIntro:
        try:
            Screen.wrapper(demo)
            playIntro = False
except ResizeScreenError:
pass
MainMenu()
|
[
"[email protected]"
] | |
0f20818aacacd277b492468e80b7128771cc7584
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_97/1704.py
|
2ef79a2cad74434c186149c67d373ceeab96e152
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 837 |
py
|
def areRecycled(number1, number2):
    # rotate a copy so the caller's list is not mutated
    numero1 = list(number1)
    for i in range(len(number2)):
        numero1.insert(0, numero1.pop())
        if numero1 == number2:
            return True
    return False
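# Illustrative only: two rotations of "12345" give "45123", so
#   areRecycled(list("12345"), list("45123"))  ->  True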
archi = open("C-small-attempt2.in","r")
cant = open("output.dat","w")
cases = int(archi.readline().split()[0])
for i in range(cases):
cont = 0
label = "Case #" + str(i+1) + ": "
numeros = archi.readline().replace('\n','').split(" ")
limInferior = int(numeros[0])
limSuperior = int(numeros[1])
j=limInferior
while j < limSuperior:
k=j+1;
while k<= limSuperior:
if areRecycled(list(str(k)),list(str(j))):
cont = cont + 1
k = k + 1
j = j + 1
label = label + str(cont) + '\n'
cant.writelines(label)
|
[
"[email protected]"
] | |
4d7ab7bfcefd8572eb06e3978ebf7097d6c4a4f4
|
232fc2c14942d3e7e28877b502841e6f88696c1a
|
/dizoo/multiagent_particle/config/cooperative_navigation_collaq_config.py
|
59f41109f0f514f61ca8866df2a01ca581003b23
|
[
"Apache-2.0"
] |
permissive
|
shengxuesun/DI-engine
|
ebf84221b115b38b4b3fdf3079c66fe81d42d0f7
|
eb483fa6e46602d58c8e7d2ca1e566adca28e703
|
refs/heads/main
| 2023-06-14T23:27:06.606334 | 2021-07-12T12:36:18 | 2021-07-12T12:36:18 | 385,454,483 | 1 | 0 |
Apache-2.0
| 2021-07-13T02:56:27 | 2021-07-13T02:56:27 | null |
UTF-8
|
Python
| false | false | 2,129 |
py
|
from easydict import EasyDict
n_agent = 5
num_landmarks = n_agent
collector_env_num = 4
evaluator_env_num = 2
cooperative_navigation_collaq_config = dict(
env=dict(
n_agent=n_agent,
num_landmarks=num_landmarks,
max_step=100,
collector_env_num=collector_env_num,
evaluator_env_num=evaluator_env_num,
manager=dict(shared_memory=False, ),
n_evaluator_episode=5,
stop_value=0,
),
policy=dict(
cuda=True,
on_policy=True,
model=dict(
agent_num=n_agent,
obs_shape=2 + 2 + (n_agent - 1) * 2 + num_landmarks * 2,
alone_obs_shape=2 + 2 + (num_landmarks) * 2,
global_obs_shape=n_agent * 2 + num_landmarks * 2 + n_agent * 2,
action_shape=5,
hidden_size_list=[128, 128, 64],
attention=True,
self_feature_range=[2, 4], # placeholder
ally_feature_range=[4, n_agent * 2 + 2], # placeholder
attention_size=32,
),
agent_num=n_agent,
learn=dict(
update_per_collect=100,
batch_size=32,
learning_rate=0.0001,
target_update_theta=0.001,
discount_factor=0.99,
),
collect=dict(
n_sample=600,
unroll_len=16,
env_num=collector_env_num,
),
eval=dict(env_num=evaluator_env_num, ),
other=dict(eps=dict(
type='exp',
start=1.0,
end=0.05,
decay=100000,
), ),
),
)
cooperative_navigation_collaq_config = EasyDict(cooperative_navigation_collaq_config)
main_config = cooperative_navigation_collaq_config
cooperative_navigation_collaq_create_config = dict(
env=dict(
import_names=['dizoo.multiagent_particle.envs.particle_env'],
type='cooperative_navigation',
),
env_manager=dict(type='subprocess'),
policy=dict(type='collaq'),
)
cooperative_navigation_collaq_create_config = EasyDict(cooperative_navigation_collaq_create_config)
create_config = cooperative_navigation_collaq_create_config
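# A minimal launch sketch (not part of the original file; the entry-point
# import path may differ between DI-engine versions):
#   from ding.entry import serial_pipeline
#   serial_pipeline((main_config, create_config), seed=0)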
|
[
"[email protected]"
] | |
584b5a6b71b6b60458b433164a9881caae0eed66
|
03d733b0c6ed233ecfd47d984acce8b779bf345b
|
/Exercícios/Mundo 2/ex059.py
|
273c3698e4713b179f047e49533434010e3a9efb
|
[
"MIT"
] |
permissive
|
gslmota/Programs-PYTHON
|
ccbc7aa758dfeb7ce0a011654fee62a8dd0f563d
|
cf6f98ded31e1bc32997ad6887d96e60975c3cad
|
refs/heads/master
| 2022-11-29T08:54:48.535215 | 2020-08-09T23:44:06 | 2020-08-09T23:44:06 | 274,794,244 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 832 |
py
|
# Even-or-odd game ("Jogo do Par ou Ímpar")
from random import randint
vit = 0
while True:
jpe = int(input('Digite um número: '))
npc = randint(1,11)
soma = (npc + jpe)
tipo = ' '
while tipo not in 'PI':
tipo = str(input('Par ou Ímpar? [P/I]')).strip().upper()[0]
print('Voce Jogou {} o computador jogou {} o total é {}'.format(jpe, npc, soma))
print('Deu PAR' if soma % 2 == 0 else 'Deu IMPAR')
if tipo == 'P':
if soma % 2 == 0:
print('Voce Venceu!')
vit += 1
else:
print('Voce Perdeu!')
break
elif tipo == 'I':
if soma % 2 == 1:
print('Voce Venceu!')
vit += 1
else:
print('Voce Perdeu!')
break
print('Vamos Jogar Novamente!')
print(f'Game Over! Voce venceu {vit} vezes!')
|
[
"[email protected]"
] | |
e28e54568e948672b8d93164c20139007a1b5bfe
|
093f50e4d8f619eeb2307c6d925d291830e8115d
|
/StacksAndQueues/stackofplates.py
|
f07d5bb5a5ecbc5123315536219cb1d36005b2ab
|
[] |
no_license
|
cseydlitz/practice
|
e021889e182fdfcdd6d806e3c3856a15a65ddb47
|
b7c76c621bf18a33a9fc57991ff763c937d394fd
|
refs/heads/master
| 2022-11-29T00:08:30.196098 | 2020-08-07T02:25:00 | 2020-08-07T02:25:00 | 266,435,572 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 705 |
py
|
class SetofStacks:
"""Prompt: when a stack exceeded a value, create a new stack."""
def __init__(self):
self.items_collection = []
self.items = []
def pop(self):
return self.items.pop(0)
    def push(self, item):
        if self.is_full():
            self.items = []
            return self.push(item)
        if self.is_empty():  # is_empty must be called, not just referenced
            self.items_collection.insert(0, self.items)
        self.items.insert(0, item)
def peek(self):
return self.items[0]
def is_empty(self):
return self.items == []
def is_full(self):
return len(self.items) == 10
def popAt(self, index):
return self.items_collection[index].pop(0)
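# Minimal usage sketch (not in the original file):
#   s = SetofStacks()
#   for n in range(25):
#       s.push(n)
#   len(s.items_collection)  # -> 3 stacks (two full, one with 5), newest first
#   s.popAt(0)               # pop from the newest stack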
|
[
"[email protected]"
] | |
94495ae9bda52bd44a846dc64ca184a3dab2436d
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/KISS/testcase/firstcases/testcase9_006.py
|
61760f5dab43c2ef13a77980e6ed785b691254ad
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,084 |
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'fr.neamar.kiss',
'appActivity' : 'fr.neamar.kiss.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'fr.neamar.kiss/fr.neamar.kiss.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase006
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"fr.neamar.kiss:id/menuButton\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Device settings\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"9_006\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'fr.neamar.kiss'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
|
[
"[email protected]"
] | |
3b1e139a64a671372f265f7eaae95372f0ad68f9
|
cef5197f152cc467635a6c8c2e7e0c6a2519b78c
|
/XALT2/xalt_file_to_db_mod.py
|
d742bf24e694b86cd5d15cf7a4537872f02545bf
|
[
"MIT"
] |
permissive
|
adityakavalur/slurm-docker-cluster
|
cc134bcec5247706d76dc9b75fddf2ca876f2167
|
d54703ddcab9d456be4743dae0f51daf3d549df5
|
refs/heads/master
| 2023-04-12T13:40:02.699694 | 2021-05-05T17:05:20 | 2021-05-05T17:05:20 | 299,394,194 | 0 | 0 | null | 2020-09-28T18:15:18 | 2020-09-28T18:15:17 | null |
UTF-8
|
Python
| false | false | 12,918 |
py
|
#!/bin/sh
# -*- python -*-
################################################################################
# This file is python 2/3 bilingual.
# The line """:" starts a comment in python and is a no-op in shell.
""":"
# Shell code to find and run a suitable python interpreter.
for cmd in python3 python python2; do
command -v > /dev/null $cmd && exec $cmd $0 "$@"
done
echo "Error: Could not find a valid python interpreter --> exiting!" >&2
exit 2
":""" # this line ends the python comment and is a no-op in shell.
################################################################################
# Git Version: xalt-2.9.8
#-----------------------------------------------------------------------
# XALT: A tool that tracks users jobs and environments on a cluster.
# Copyright (C) 2013-2014 University of Texas at Austin
# Copyright (C) 2013-2014 University of Tennessee
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
#-----------------------------------------------------------------------
# xalt_json_to_db takes the output found in the ~/.xalt.d/[link,run]*
# output files and puts it into the database
#
# optional input:
# XALT_USERS: colon separated list of users; only these users are
# considered instead of all
#
from __future__ import print_function
import os, sys, re, MySQLdb, json, time, argparse, time, traceback
dirNm, execName = os.path.split(os.path.realpath(sys.argv[0]))
sys.path.insert(1,os.path.realpath(os.path.join(dirNm, "../libexec")))
sys.path.insert(1,os.path.realpath(os.path.join(dirNm, "../site")))
from XALTdb import XALTdb
from XALTdb import TimeRecord
from xalt_util import *
from xalt_global import *
from progressBar import ProgressBar
from Rmap_XALT import Rmap
import warnings, getent
warnings.filterwarnings("ignore", "Unknown table.*")
logger = config_logger()
my_epoch = time.time()
import inspect
def __LINE__():
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back.f_lineno
def __FILE__():
return inspect.currentframe().f_code.co_filename
class CmdLineOptions(object):
""" Command line Options class """
def __init__(self):
""" Empty Ctor """
pass
def execute(self):
""" Specify command line arguments and parse the command line"""
parser = argparse.ArgumentParser()
parser.add_argument("--delete", dest='delete', action="store_true", help="delete files after reading")
parser.add_argument("--timer", dest='timer', action="store_true", help="Time runtime")
parser.add_argument("--report_file", dest='listFn', action="store_true", help="list file")
parser.add_argument("--reverseMapD", dest='rmapD', action="store", help="Path to the directory containing the json reverseMap")
parser.add_argument("--u2acct", dest='u2acct', action="store", help="Path to the json file containing default charge account strings for users")
parser.add_argument("--syshost", dest='syshost', action="store", default="*", help="name of the cluster")
parser.add_argument("--confFn", dest='confFn', action="store", default="xalt_db.conf", help="Name of the database")
args = parser.parse_args()
return args
def keep_or_delete(fn, deleteFlg):
delta = my_epoch - os.stat(fn).st_mtime
if (delta > 86400 and deleteFlg):
os.remove(fn)
def link_json_to_db(xalt, listFn, reverseMapT, deleteFlg, linkFnA, countT, active, pbar):
"""
Reads in each link file name and converts json to python table and sends it to be written to DB.
@param xalt: An XALTdb object.
@param listFn: A flag that causes the name of the file to be written to stderr.
@param reverseMapT: The Reverse Map Table.
@param deleteFlg: A flag that says to delete files after processing.
@param linkFnA: An array of link file names
"""
num = 0
query = ""
try:
for fn in linkFnA:
if (listFn):
sys.stderr.write(fn+"\n")
XALT_Stack.push("fn: "+fn) # push fn
try:
f = open(fn,"r")
except:
continue
try:
linkT = json.loads(f.read())
except:
f.close()
v = XALT_Stack.pop()
keep_or_delete(fn, deleteFlg)
continue
f.close()
xalt.link_to_db(reverseMapT, linkT)
num += 1
if (active):
countT['any'] += 1
pbar.update(countT['any'])
try:
if (deleteFlg):
os.remove(fn)
except:
pass
v = XALT_Stack.pop()
carp("fn",v)
except Exception as e:
print(XALT_Stack.contents())
print(query)
print ("link_json_to_db(): Error: ",e)
print(traceback.format_exc())
sys.exit (1)
return num
def pkg_json_to_db(xalt, listFn, syshost, deleteFlg, pkgFnA, countT, active, pbar):
"""
Reads in each link file name and converts json to python table and sends it to be written to DB.
@param xalt: An XALTdb object.
@param listFn: A flag that causes the name of the file to be written to stderr.
@param syshost: The name of the cluster being processed.
@param deleteFlg: A flag that says to delete files after processing.
@param pkgFnA: An array of link file names
"""
num = 0
query = ""
try:
for fn in pkgFnA:
if (listFn):
sys.stderr.write(fn+"\n")
XALT_Stack.push("fn: "+fn) # push fn
try:
f = open(fn,"r")
except:
continue
try:
pkgT = json.loads(f.read())
except:
f.close()
v = XALT_Stack.pop()
keep_or_delete(fn, deleteFlg)
continue
f.close()
xalt.pkg_to_db(syshost, pkgT)
num += 1
if (active):
countT['any'] += 1
pbar.update(countT['any'])
try:
if (deleteFlg):
os.remove(fn)
except:
pass
v = XALT_Stack.pop()
carp("fn",v)
except Exception as e:
print(XALT_Stack.contents())
print(query)
print ("pkg_json_to_db(): Error: ",e)
print(traceback.format_exc())
sys.exit (1)
return num
def run_json_to_db(xalt, listFn, reverseMapT, u2acctT, deleteFlg, runFnA, countT, active, pbar, timeRecord):
"""
Reads in each run file name and converts json to python table and sends it to be written to DB.
@param xalt: An XALTdb object.
@param listFn: A flag that causes the name of the file to be written to stderr.
@param reverseMapT: The Reverse Map Table.
@param u2acctT: The map for user to default account string
@param deleteFlg: A flag that says to delete files after processing.
@param runFnA: An array of run file names
"""
num = 0
query = ""
try:
for fn in runFnA:
if (listFn):
sys.stderr.write(fn+"\n")
XALT_Stack.push("fn: "+fn)
try:
f = open(fn,"r")
except:
continue
try:
runT = json.loads(f.read())
except:
f.close()
v = XALT_Stack.pop()
keep_or_delete(fn, deleteFlg)
continue
f.close()
stored = xalt.run_to_db(reverseMapT, u2acctT, runT, timeRecord)
try:
if (deleteFlg):
os.remove(fn)
except:
pass
if (active):
countT['any'] += 1
pbar.update(countT['any'])
if (stored):
num += 1
v = XALT_Stack.pop()
carp("fn",v)
except Exception as e:
print(XALT_Stack.contents())
print(query.encode("ascii","ignore"))
print ("run_json_to_db(): Error:",e)
print(traceback.format_exc())
sys.exit (1)
return num
def passwd_generator():
"""
This generator walks the /etc/passwd file and returns the next
user and home directory. If XALT_USERS is set then it used that
instead. It is a colon separated list.
Super hack: if the colon separated list has a ";" in it then the
first part is the user the second is the home directory. This is
use in testing.
"""
xaltUserA = os.environ.get("XALT_USERS")
if (xaltUserA):
for user in xaltUserA.split(":"):
idx = user.find(";")
if (idx != -1):
hdir = user[idx+1:]
user = user[:idx]
else:
hdir = os.path.expanduser("~" + user)
yield user, hdir
else:
for entry in getent.passwd():
yield entry.name, entry.dir
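# Illustrative (values are hypothetical): XALT_USERS="alice:bob;/tmp/bob"
# restricts the walk to two users, yielding ("alice", <alice's home>) and
# ("bob", "/tmp/bob") instead of iterating over all of /etc/passwd.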
def build_resultDir(hdir, transmission, kind):
tail = ""
if (transmission == "file_separate_dirs"):
tail = kind
prefix = os.environ.get("XALT_FILE_PREFIX","/data/xalt2_json_moved")
if (not prefix or prefix == "USE_HOME"):
return os.path.join(hdir,".xalt.d",tail)
return os.path.join(prefix,tail)
def store_json_files(homeDir, transmission, xalt, rmapT, u2acctT, args, countT, pbar, timeRecord):
active = True
if (homeDir):
countT['any'] += 1
pbar.update(countT['any'])
active = False
xaltDir = build_resultDir(homeDir, transmission, "link")
XALT_Stack.push("Directory: " + xaltDir)
if (os.path.isdir(xaltDir)):
XALT_Stack.push("link_json_to_db()")
linkFnA = files_in_tree(xaltDir, "*/link." + args.syshost + ".*.json")
linkFnA.sort()
countT['lnk'] += link_json_to_db(xalt, args.listFn, rmapT, args.delete, linkFnA, countT, active, pbar)
XALT_Stack.pop()
XALT_Stack.pop()
xaltDir = build_resultDir(homeDir, transmission, "run")
XALT_Stack.push("Directory: " + xaltDir)
if (os.path.isdir(xaltDir)):
XALT_Stack.push("run_json_to_db()")
runFnA = files_in_tree(xaltDir, "*/run." + args.syshost + ".*.json")
runFnA.sort();
countT['run'] += run_json_to_db(xalt, args.listFn, rmapT, u2acctT, args.delete, runFnA,
countT, active, pbar, timeRecord)
XALT_Stack.pop()
XALT_Stack.pop()
xaltDir = build_resultDir(homeDir, transmission, "pkg")
XALT_Stack.push("Directory: " + xaltDir)
if (os.path.isdir(xaltDir)):
XALT_Stack.push("pkg_json_to_db()")
pkgFnA = files_in_tree(xaltDir, "*/pkg." + args.syshost + ".*.json")
pkgFnA.sort()
countT['pkg'] += pkg_json_to_db(xalt, args.listFn, args.syshost, args.delete, pkgFnA,
countT, active, pbar)
XALT_Stack.pop()
XALT_Stack.pop()
def main():
"""
Walks the list of users via the passwd_generator and load the
link and run files.
"""
# Using home directories or a global location.
xalt_file_prefix = os.environ.get("XALT_FILE_PREFIX","/data/xalt2_json_moved")
# Find transmission style
transmission = os.environ.get("XALT_TRANSMISSION_STYLE")
if (not transmission):
transmission = "file"
transmission = transmission.lower()
timeRecord = TimeRecord()
# Push command line on to XALT_Stack
sA = []
sA.append("CommandLine:")
for v in sys.argv:
sA.append('"'+v+'"')
XALT_Stack.push(" ".join(sA))
args = CmdLineOptions().execute()
xalt = XALTdb(args.confFn)
if (xalt_file_prefix == "USE_HOME"):
num = int(capture("LD_PRELOAD= getent passwd | wc -l"))
pbar = ProgressBar(maxVal=num)
else:
xaltDir = build_resultDir("", transmission, "")
allFnA = files_in_tree(xaltDir, "*/*." + args.syshost + ".*.json")
pbar = ProgressBar(maxVal=len(allFnA))
icnt = 0
t1 = time.time()
rmapT = Rmap(args.rmapD).reverseMapT()
u2acctT = {}
if (args.u2acct):
fp = open(args.u2acct,"r")
u2acctT = json.loads(fp.read())
fp.close()
countT = {}
countT['lnk'] = 0
countT['run'] = 0
countT['pkg'] = 0
countT['any'] = 0
if (xalt_file_prefix == "USE_HOME"):
for user, homeDir in passwd_generator():
store_json_files(homeDir, transmission, xalt, rmapT, u2acctT, args, countT, pbar, timeRecord)
else:
store_json_files("", transmission, xalt, rmapT, u2acctT, args, countT, pbar, timeRecord)
xalt.connect().close()
pbar.fini()
t2 = time.time()
rt = t2 - t1
if (args.timer):
print("Time: ", time.strftime("%T", time.gmtime(rt)))
print("num links: ", countT['lnk'], ", num pkgs: ", countT['pkg'], ", num runs: ", countT['run'])
timeRecord.print()
if ( __name__ == '__main__'): main()
|
[
"[email protected]"
] | |
acf9152aed982fcac24aa9534713bc19bd3c9e07
|
2c7949e01a91506c23d380e7d79e4a89e047f80f
|
/server/multiupload.py
|
67410a01792463a77484aa30834a10f779d24425
|
[] |
no_license
|
a1xndr/ec504-project
|
acc3230c4b5b7085ea3ee4483ac7c42fca35f46f
|
414f28cdde597188b42054be89ca202178f093a7
|
refs/heads/master
| 2021-04-12T09:03:21.167275 | 2018-05-10T21:09:23 | 2018-05-10T21:09:23 | 126,352,096 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 545 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import argparse
from uploadr.app import app
parser = argparse.ArgumentParser(description="Uploadr")
parser.add_argument(
"--port", "-p",
type=int,
help="Port to listen on",
default=2006,
)
args = parser.parse_args()
if __name__ == '__main__':
flask_options = dict(
host='0.0.0.0',
debug=False,
port=args.port,
threaded=True,
)
app.secret_key = "secret"
app.run(**flask_options)
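# Usage sketch (the port number is arbitrary):
#   python multiupload.py --port 8080
# The Flask app from uploadr.app then listens on 0.0.0.0:8080.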
|
[
"[email protected]"
] | |
57121db32dc71bee051c58f2448ac93e1d65df5e
|
15226505515f59829203b19837d7ac963146c10e
|
/codeforces/problemset/1300A/soln.py
|
89955908056effb342c1c21e59485134655b861d
|
[] |
no_license
|
rgab1508/challenges
|
c26a6df44c715eeb506662cdeb69db6da7d10fb5
|
48eaadab9f20bc5779ff901dfd5e7dbaf90c66f7
|
refs/heads/master
| 2021-07-08T06:44:57.433927 | 2021-05-04T07:59:09 | 2021-05-04T07:59:09 | 242,348,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
t = int(input())

def mult(a: list):
    # product of all elements
    m = 1
    for x in a:
        m *= x
    return m
for it in range(t):
n = int(input())
a = list(map(int, input().split(" ")))
c = 0
for i in range(n):
if a[i] == 0:
a[i] += 1
c += 1
while sum(a) == 0 or mult(a) == 0:
a[0] += 1
c += 1
print(c)
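# Worked example (illustrative): a = [-1, 1] has sum 0, so a[0] is bumped to 0;
# the product is now 0, so a[0] is bumped again to 1 -> prints 2.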
|
[
"[email protected]"
] | |
e033711077ed0c62c253ab1bb6999b26c97df1f4
|
5d30a3b495c7c44e1328f7c23b7e37a43d35a789
|
/bot/bot_service.py
|
d83a385a2739a57dd1149881afa9abf95e37a1d7
|
[] |
no_license
|
seungwoonlee/clien_bot
|
ab5f4934e0fa8f9545ab7f5f9d2b2864a5cacc0d
|
1d7f6e379326cacf64684d9834f86a8014223283
|
refs/heads/master
| 2020-05-24T01:39:36.661568 | 2019-05-13T08:36:25 | 2019-05-13T08:36:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,175 |
py
|
import json
import logging
from functools import wraps
from threading import Thread
import pika
import telegram
from telegram.error import Unauthorized
from telegram.ext import Updater, CommandHandler
from bot.data_service import DataService
from bot.env import Environments
class Bot(object):
class Decorators(object):
@classmethod
def send_typing_action(cls, func):
@wraps(func)
def wrapper(*args, **kwargs):
instance, bot, update = args
bot.send_chat_action(chat_id=update.effective_message.chat_id,
action=telegram.ChatAction.TYPING)
return func(instance, bot, update, **kwargs)
return wrapper
def __init__(self, token, mongo_uri):
self.logger = logging.getLogger('bot')
self.__bot = telegram.Bot(token=token)
self.updater = Updater(bot=self.__bot)
self.dispatcher = self.updater.dispatcher
self.init_handlers()
self.data_service = DataService(mongo_uri)
        # only one board type is supported for now
self.board = 'allsell'
self.keyboard = [
['/register', '/list'],
['/clear', '/help']
]
self.env = Environments()
def init_consumer(self, mq_host, mq_port, queue):
self._connection = pika.BlockingConnection(
pika.ConnectionParameters(host=mq_host, port=mq_port)
)
channel = self._connection.channel()
channel.queue_declare(queue=queue)
channel.basic_consume(queue=queue, on_message_callback=self.consumer_cb)
channel.basic_qos(prefetch_count=1)
thread = Thread(target=channel.start_consuming)
self.logger.info('Waiting consuming...')
thread.start()
thread.join(0)
def consumer_cb(self, ch, method, properties, body):
received = json.loads(body)
chat_id = received['chat_id'] if 'chat_id' in received else None
message = received['message'] if 'message' in received else None
if not chat_id or not message:
self.logger.warning('chat_id or message is None. received: {}'.format(received))
return
self.logger.info('Received body chat_id: {} message: {}'.
format(received['chat_id'], received['message']))
try:
self.send_message(chat_id, message, telegram.ParseMode.MARKDOWN)
except Unauthorized as e:
self.logger.warning('[{}] Unauthoriezed exception. Details: {}'
.format(chat_id, str(e)))
ch.basic_ack(delivery_tag=method.delivery_tag)
def init_handlers(self):
self.add_handler('start', self.start_bot)
self.add_handler('register', self.register_keywords, has_args=True)
self.add_handler('list', self.show_registered_keywords)
self.add_handler('stop', self.stop_bot)
self.add_handler('clear', self.clear)
self.add_handler('help', self.help)
def add_handler(self, command, callback, has_args=False):
handler = CommandHandler(command, callback, pass_args=has_args)
self.dispatcher.add_handler(handler)
self.logger.info('Registered handler for command {}.'.format(command))
@Decorators.send_typing_action
def start_bot(self, bot, update):
chat_id = update.message.chat_id
        # persist chat_id to the DB
inserted = self.data_service.insert_new_chat_id(chat_id)
self.logger.info('[{}] Bot registered. inserted_id: {}'.format(chat_id, inserted))
welcome_lines = [
'클리앙 알리미 봇입니다.',
'현재는 사고팔고 게시판에 대해서만 서비스가 가능합니다.'
]
update.message.reply_text('\n'.join(welcome_lines))
# reply_markup = telegram.ReplyKeyboardMarkup(self.keyboard)
# self.send_message(chat_id, self._make_help_message(), telegram.ParseMode.MARKDOWN, reply_markup)
self.send_message(chat_id, self._make_help_message(), telegram.ParseMode.MARKDOWN)
@Decorators.send_typing_action
def register_keywords(self, bot, update, args):
chat_id = update.message.chat_id
if len(args) < 1:
update.message.reply_text('키워드가 입력되지 않았습니다.')
else:
str_args = ','.join(args)
self.logger.info('[{}] Input arguments: {}'.format(chat_id, str_args))
            # persist chat_id and keywords to the DB
updated = self.data_service.update_keywords(chat_id, self.board, args)
self.logger.info('[{}] Updated id: {}'.format(chat_id, updated))
self.logger.info('[{}] Registered keywords: {}'.format(chat_id, str_args))
messages = [
'키워드가 등록되었습니다.',
'등록된 키워드: _{}_'.format(str_args)
]
update.message.reply_text('\n'.join(messages), parse_mode=telegram.ParseMode.MARKDOWN)
@Decorators.send_typing_action
def clear(self, bot, update):
chat_id = update.message.chat_id
        # remove all keywords for this chat_id from the DB
updated = self.data_service.clear_keywords(chat_id, self.board)
self.logger.info('[{}] Updated id: {}'.format(chat_id, updated))
self.logger.info('[{}] Unregistered all keywords'.format(chat_id))
        # (optionally) fetch the keyword list from the DB
# registered = self.data_service.select_keywords(chat_id, self.board)
update.message.reply_text('키워드 리스트가 초기화 되었습니다.')
@Decorators.send_typing_action
def show_registered_keywords(self, bot, update):
chat_id = update.message.chat_id
        # fetch the keyword list registered for this chat_id from the DB
keywords = self.data_service.select_keywords(chat_id, self.board)
self.logger.info('[{}] Registered keywords: {}'.format(chat_id, keywords))
if len(keywords) > 0:
msg = '현재 등록되어있는 키워드: _{}_'.format(','.join(keywords))
else:
msg = '등록된 키워드가 없습니다.'
update.message.reply_text(msg, parse_mode=telegram.ParseMode.MARKDOWN)
@Decorators.send_typing_action
def help(self, bot, update):
chat_id = update.message.chat_id
self.logger.info('[{}] Help message requested.'.format(chat_id))
update.message.reply_text(self._make_help_message(), parse_mode=telegram.ParseMode.MARKDOWN)
def send_message(self, chat_id, msg, parse_mode=None, reply_markup=None):
self.__bot.send_message(chat_id=chat_id, text=msg, parse_mode=parse_mode, reply_markup=reply_markup)
self.logger.info('[{}] Sent message: {}'.format(chat_id, msg))
def stop_bot(self, bot, update):
chat_id = update.message.chat_id
        # remove the user from the DB
self.data_service.delete_chat_id(chat_id)
self.logger.info('[{}] Bot unregistered.'.format(chat_id))
def shutdown(self):
if not self._connection.is_closed:
self._connection.close()
self.updater.stop()
self.updater.idle = False
def run(self):
self.updater.start_polling()
self.logger.info('Start polling...')
self.init_consumer(self.env.config['MQ_HOST'], self.env.config['MQ_PORT'], self.board)
def _make_help_message(self):
help_lines = [
'<클리앙 알리미 도움말>',
'*/start* : 시작',
'*/register* : 키워드 등록',
' _/register 키워드1 키워드2 키워드3&키워드4..._',
' _참고1 : 키워드 여러개를 &와 붙여서 지정시 여러개 키워드가 동시에 존재하는 게시물을 찾습니다._',
' _참고2 : 이미 등록한 키워드 리스트가 있다면 일괄 교체합니다._',
'*/list* : 등록한 키워드 리스트 표시',
'*/clear* : 등록 키워드 전체 삭제',
'*/help* : 도움말 표시',
'문의사항이 있다면 *[email protected]*으로 연락주세요.'
]
return '\n'.join(help_lines)
|
[
"[email protected]"
] | |
9eddcc94dddbf71297ccedb6d4b86131d4417d32
|
bd73e20283ede1dfb0e52239ee762aaa80e11283
|
/lesson6_step10.py
|
e1a9160893032af8dac4439e22b0a5bf1e7cdbbb
|
[] |
no_license
|
HimariMinami/stepik---auto-tests-course---Selenium-and-Python
|
586e454e9e2accd76f70fa269a9ac2ff7d9811ac
|
f62c130818b4e2141a400ed8093f4cb14ec95a96
|
refs/heads/master
| 2022-02-24T04:54:02.757334 | 2019-09-12T12:53:41 | 2019-09-12T12:53:41 | 206,775,676 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,603 |
py
|
from selenium import webdriver
import time
try:
link = "http://suninjuly.github.io/registration1.html"
browser = webdriver.Chrome()
browser.get(link)
    # Your code that fills in the required fields
input1 = browser.find_element_by_class_name("first")
input1.send_keys("Ivan")
input2 = browser.find_element_by_class_name("second")
input2.send_keys("Petrov")
input3 = browser.find_element_by_class_name("third")
input3.send_keys("[email protected]")
    # Submit the completed form
button = browser.find_element_by_css_selector("button.btn")
button.click()
    # Check that registration succeeded
    # wait for the page to load
    time.sleep(1)
    # find the element containing the welcome text
    welcome_text_elt = browser.find_element_by_tag_name("h1")
    # store the element's text in welcome_text
    welcome_text = welcome_text_elt.text
    # assert that the expected text matches the text on the page
assert "Congratulations! You have successfully registered!" == welcome_text
finally:
    # pause so the result of the script can be inspected visually
time.sleep(10)
    # close the browser when everything is done
browser.quit()
|
[
"[email protected]"
] | |
b44993437a675fb60000cdcc2f6cce1d3fb234c0
|
4d5e2e158b88a80c33096834c1464f45eacf9b24
|
/homePage/views/tutorial.py
|
09c329b200652a6a2f5028fb52f1cb7bf32ac6eb
|
[
"Apache-2.0"
] |
permissive
|
bmackley/ancientassyrian
|
337286073d5e976c184aadf5b0c4fd0e88321ee4
|
baaf973a1162765d964f613e8bd839ef8cc3ea60
|
refs/heads/master
| 2021-03-27T14:33:21.686574 | 2016-03-25T22:48:15 | 2016-03-25T22:48:15 | 52,554,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,302 |
py
|
from django import forms
from django.conf import settings
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from homePage import models as m
from . import templater
def process_request(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/')
if request.urlparams[0] == "logout":
logout(request)
return HttpResponseRedirect('/')
form = LoginForm()
createForm = CreateUserForm()
if request.urlparams[0] == "1":
createForm = CreateUserForm(request.POST)
if createForm.is_valid():
newUser = m.User()
newUser.username = createForm.cleaned_data['username']
newUser.email = createForm.cleaned_data['email']
newUser.password = createForm.cleaned_data['password']
newUser.save()
return HttpResponseRedirect('/')
else:
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
userN = form.cleaned_data['username'].lower()
user = authenticate(username = userN, password = form.cleaned_data['password'])
                if user is not None:
                    login(request, user)
                    return HttpResponseRedirect('/')
                # raising ValidationError inside a view would return a 500; surface the error on the form instead
                form.add_error(None, "Invalid username or password.")
tvars = {
'form' : form,
'createForm' : createForm,
}
return templater.render_to_response(request, 'tutorial.html', tvars)
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class CreateUserForm(forms.Form):
username = forms.CharField(required=True, label='Username', widget=forms.TextInput(attrs={'class':'form-control'}))
email = forms.CharField(required=False, label='Email', widget=forms.TextInput(attrs={'class':'form-control'}))
password = forms.CharField(required=False, widget=forms.PasswordInput(attrs={'class':'form-control'}))
retypepassword = forms.CharField(required=False, label='Re-Enter Password', widget=forms.PasswordInput(attrs={'class':'form-control'}))
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(CreateUserForm, self).__init__(*args, **kwargs)
    def clean(self):
        if self.cleaned_data['username'] == "":
            raise forms.ValidationError("Please enter a username to sign up")
        # comparing a string to a queryset with == is always False; use .exists()
        if m.User.objects.filter(username=self.cleaned_data['username']).exists():
            raise forms.ValidationError("The username is taken.")
        if self.cleaned_data['email'] and m.User.objects.filter(email=self.cleaned_data['email']).exists():
            raise forms.ValidationError("The email is in use.")
        if self.cleaned_data['password'] == "":
            raise forms.ValidationError("You must enter a password.")
        if self.cleaned_data['password'] != self.cleaned_data['retypepassword']:
            raise forms.ValidationError("The passwords do not match.")
        return self.cleaned_data
|
[
"[email protected]"
] | |
29ef78376dd563998f38b02a60deaeb27c911a9b
|
ec84daf26e137b46fa77c18750c99e886ce8c6db
|
/upgradedDiskaun.py
|
6022d8d2051d8a0079196415a5c74c9c28432ee3
|
[] |
no_license
|
SharvahGobithasan/Kad-Diskaun-F2-ASK
|
8eeb272f6f457765c3da42a2293f47b6dc9a4ed1
|
8575769a413daaaea28b62874c0595f5aa31ff6a
|
refs/heads/master
| 2022-10-15T07:44:45.786304 | 2020-06-11T07:49:31 | 2020-06-11T07:49:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,002 |
py
|
'''
sharvah 11/6/2020
'''
from os import system
system('cls') # to clear the screen
kad = input("Masukkan jenis kad diskaun anda:")
x=kad.casefold()
#print(x)
try:
while True:
if x == "kad premium":
mata = int(input("Masukkan mata ganjaran kad anda:"))
if(mata >= 500):
print("Peratus diskaun ialah 50%")
elif(mata >= 400):
print("Peratus diskaun ialah 40%")
elif(mata >= 300):
print("Peratus diskaun ialah 30%")
elif(mata >= 200):
print("Peratus diskaun ialah 20%")
elif (mata >= 100):
print("Peratus diskaun ialah 10%")
else:
print("Maaf mata anda tidak mencukupi untuk mendapat diskaun")
else:
print("Maaf anda memerlukan kad premium untuk mendapat diskaun")
break
except KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:
system('cls')
print('Bye!')
|
[
"[email protected]"
] | |
83c63b60c22628725f344b1bf4635e30bbf5aae9
|
577fd6f5ce00ba4b530937e84f3b426b30cd9d08
|
/Checkiolearn/Polygon/sun_angle.py
|
ecd226f204d9bf718eb6cd5d5451c14c7f50b0f1
|
[] |
no_license
|
YxiangJ/Python
|
33e2d0d4c26ce35ccd3504b73de15e45adb6946c
|
bcb1a0ace39fbcbe868a341652085c0ddf307c17
|
refs/heads/master
| 2018-09-24T08:24:13.692535 | 2018-06-07T01:11:00 | 2018-06-07T01:11:00 | 126,120,268 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 563 |
py
|
def sun_angle(time):
    # the sun is up from 06:00 to 18:00 and travels 15 degrees per hour
    hours, minutes = (int(part) for part in time.split(':'))
    result = (hours - 6) * 15 + minutes / 4
    if result < 0 or result > 180:  # checking the hour alone missed e.g. 18:01
        return "I don't see the sun!"
    return result
if __name__ == '__main__':
print("Example:")
print(sun_angle("07:00"))
# These "asserts" using only for self-checking and not necessary for auto-testing
assert sun_angle("07:00") == 15
assert sun_angle("01:23") == "I don't see the sun!"
print("Coding complete? Click 'Check' to earn cool rewards!")
|
[
"[email protected]"
] | |
699b7062a1c9a0e705a481a5c8cf42e5a18dc7f6
|
ef20884169d10ec9ac4d1d3b77ee35245d248294
|
/practice/first_step_with_tensorflow/kmean_create_data.py
|
b95cc97c8c9d36f85fbdcbe9af721f29fd09ec7d
|
[] |
no_license
|
heaven324/Deeplearning
|
64016671879cdf1742eff6f374cfb640cfc708ae
|
a7a8d590fa13f53348f83f8c808538affbc7b3e8
|
refs/heads/master
| 2023-05-05T08:54:27.888155 | 2021-05-22T08:25:47 | 2021-05-22T08:25:47 | 188,010,607 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
import numpy as np
num_points = 2000
vectors_set = []
for i in range(num_points):
if np.random.random() > 0.5:
vectors_set.append([np.random.normal(0.0, 0.9), np.random.normal(0.0, 0.9)])
else:
vectors_set.append([np.random.normal(3.0, 0.5), np.random.normal(1.0, 0.5)])
# sanity-check the generated random points
#print(vectors_set)
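# A minimal clustering sketch over this data (assumes scikit-learn is
# installed; not part of the original script):
# from sklearn.cluster import KMeans
# km = KMeans(n_clusters=2, n_init=10).fit(np.array(vectors_set))
# print(km.cluster_centers_)  # expect centers near (0, 0) and (3, 1)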
|
[
"[email protected]"
] | |
523ec7af329f9d30feb8db299d7b11428588618e
|
f6337f3f1156a6bcea46201eca0dd4fc3b52bcdd
|
/dante/config.py
|
04021686c923dfe4465c694ef946a983639d01fc
|
[
"Apache-2.0"
] |
permissive
|
sbg/dante
|
c43cf8134f51cd7b8fe4ae9184b1376b359bfc6c
|
104543c3ccb5e762d3e9cd6e8fa04c5fa91e2227
|
refs/heads/master
| 2021-01-15T10:29:25.735400 | 2019-10-02T10:05:47 | 2019-10-02T10:05:47 | 99,581,458 | 9 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,384 |
py
|
import os
import json
from copy import deepcopy
from collections import OrderedDict
from configparser import ConfigParser
class Config:
PARENT_DIR = os.path.abspath(os.getcwd())
CONFIG_FILE = os.path.join(PARENT_DIR, 'setup.cfg')
SECTION = 'dante'
GRAPH_ATTRIBUTE_SECTION = 'dante:graph_attributes'
GRAPH_NODE_SECTION = 'dante:graph_node_attributes'
GRAPH_EDGE_SECTION = 'dante:graph_edge_attributes'
DEFAULT_PARSER = None
DEFAULT_ANY_VERSION = 'Any'
DEFAULT_CHECKS = ['conflicts', 'cyclic', 'missing', 'validate']
DEFAULT_IGNORE_LIST = [
'dante', 'pip', 'setuptools', 'wheel',
]
DEFAULT_ALLOW_NAMED_VERSIONS = False
DEFAULT_NAMED_VERSION_PATTERNS = []
DEFAULT_REQUIREMENTS_FILE_PATH = 'requirements.txt'
DEFAULT_LOCK_FILE_PATH = 'requirements.lock'
DEFAULT_REQUIREMENTS_FILES = ['requirements.txt']
DEFAULT_LOCK_FILES = ['requirements.lock']
GRAPH_DEFAULT_NAME = 'dante-graph'
GRAPH_DEFAULT_FILENAME = None
GRAPH_DEFAULT_FORMAT = 'pdf'
GRAPH_DEFAULT_ENGINE = 'dot'
GRAPH_DEFAULT_STRICT = True
GRAPH_DEFAULT_ATTRIBUTES = {}
GRAPH_DEFAULT_NODE_ATTRIBUTES = {
'shape': 'box3d',
}
GRAPH_DEFAULT_EDGE_ATTRIBUTES = {
'fontsize': '10',
}
parser = DEFAULT_PARSER
any_version = DEFAULT_ANY_VERSION
checks = DEFAULT_CHECKS
ignore_list = DEFAULT_IGNORE_LIST
allow_named_versions = DEFAULT_ALLOW_NAMED_VERSIONS
named_version_patterns = DEFAULT_NAMED_VERSION_PATTERNS
requirements_files = DEFAULT_REQUIREMENTS_FILES
lock_files = DEFAULT_LOCK_FILES
lock_file_path = DEFAULT_LOCK_FILE_PATH
graph_name = GRAPH_DEFAULT_NAME
graph_filename = GRAPH_DEFAULT_FILENAME
graph_format = GRAPH_DEFAULT_FORMAT
graph_engine = GRAPH_DEFAULT_ENGINE
graph_attributes = GRAPH_DEFAULT_ATTRIBUTES
graph_strict = GRAPH_DEFAULT_STRICT
graph_node_attributes = GRAPH_DEFAULT_NODE_ATTRIBUTES
graph_edge_attributes = GRAPH_DEFAULT_EDGE_ATTRIBUTES
    def __getattribute__(self, item):
        """Return a copy of the attribute if that attribute is a list or a dict

        :param item: Requested attribute name
        :return: Attribute or copy of that attribute
        """
        # Look the value up first; checking the *name* (a string) against
        # list/dict, as before, meant the method returned the name itself.
        value = super().__getattribute__(item)
        if isinstance(value, (list, dict)):
            return deepcopy(value)
        return value
@staticmethod
def absolute_path(file_path):
"""Get absolute path for a file
:param file_path: Relative filepath
:return: Absolute path string
"""
return os.path.join(Config.PARENT_DIR, file_path)
@classmethod
def read_from_file(cls):
"""Read and set configuration from file
:return: None
"""
if not os.path.exists(cls.CONFIG_FILE):
# If config file does not exist use default config
return
parser = ConfigParser()
parser.read(cls.CONFIG_FILE)
if parser.has_section(cls.SECTION):
cls.any_version = cls.get_option(
parser, cls.SECTION, 'any_version', cls.any_version
)
cls.checks = cls.get_list(
parser, cls.SECTION, 'checks', cls.checks
)
cls.ignore_list = cls.get_list(
parser, cls.SECTION, 'ignore_list', cls.ignore_list
)
allow_named_versions = cls.get_option(
parser,
cls.SECTION,
'allow_named_versions',
cls.allow_named_versions
)
cls.allow_named_versions = (
str(allow_named_versions).lower() == 'true'
)
cls.named_version_patterns = cls.get_list(
parser,
cls.SECTION,
'named_version_patterns',
cls.named_version_patterns
)
cls.lock_file_path = cls.get_option(
parser,
cls.SECTION,
'lock_file_path',
cls.lock_file_path
)
cls.requirements_files = [
cls.absolute_path(file_) for file_ in cls.get_list(
parser,
cls.SECTION,
'requirements_files',
cls.requirements_files
)
]
cls.lock_files = [
cls.absolute_path(file_) for file_ in cls.get_list(
parser,
cls.SECTION,
'lock_files',
cls.lock_files
)
]
cls.graph_name = cls.get_option(
parser, cls.SECTION, 'graph_name', cls.graph_name
)
cls.graph_filename = cls.get_option(
parser, cls.SECTION, 'graph_filename', cls.graph_filename
)
cls.graph_format = cls.get_option(
parser, cls.SECTION, 'graph_format', cls.graph_format
)
cls.graph_engine = cls.get_option(
parser, cls.SECTION, 'graph_engine', cls.graph_engine
)
graph_strict = cls.get_option(
parser, cls.SECTION, 'graph_strict', cls.graph_strict
)
cls.graph_strict = (
str(graph_strict).lower() == 'true'
)
if parser.has_section(cls.GRAPH_ATTRIBUTE_SECTION):
items = cls.GRAPH_DEFAULT_ATTRIBUTES.copy()
items.update(dict(parser.items(
section=cls.GRAPH_ATTRIBUTE_SECTION
)))
cls.graph_attributes = items
if parser.has_section(cls.GRAPH_NODE_SECTION):
items = cls.GRAPH_DEFAULT_NODE_ATTRIBUTES.copy()
items.update(dict(parser.items(section=cls.GRAPH_NODE_SECTION)))
cls.graph_node_attributes = items
if parser.has_section(cls.GRAPH_EDGE_SECTION):
items = cls.GRAPH_DEFAULT_EDGE_ATTRIBUTES.copy()
items.update(dict(parser.items(section=cls.GRAPH_EDGE_SECTION)))
cls.graph_edge_attributes = items
@staticmethod
def get_option(parser, section, option, default):
"""Get option from parser
:param parser: Option parser
:param section: Option section in config file
:param option: Option name
:param default: Default value for option
:return: Option value
"""
return (
parser.get(section, option)
if parser.has_option(section, option)
else default
)
@staticmethod
def get_list(parser, section, option, default):
"""Get option from parser in list format
:param parser: Option parser
:param section: Option section in config file
:param option: Option name
:param default: Default value for option
:return: Option value
"""
result = Config.get_option(parser, section, option, default)
try:
if isinstance(result, str):
return [item for item in result.split('\n') if item]
if isinstance(result, list):
return result
return default
except ValueError:
return default
@classmethod
def to_json(cls):
"""Return options in json format
:return: json string with options as keys
"""
return json.dumps(OrderedDict((
('dante', OrderedDict((
('any_version', cls.any_version),
('checks', cls.checks),
('ignore_list', cls.ignore_list),
('allow_named_versions', cls.allow_named_versions),
('named_version_patterns', cls.named_version_patterns),
('lock_file_path', cls.lock_file_path),
('requirements_files', cls.requirements_files),
('lock_files', cls.lock_files),
('graph_name', cls.graph_name),
('graph_filename', cls.graph_filename),
('graph_format', cls.graph_format),
('graph_engine', cls.graph_engine),
('graph_strict', cls.graph_strict)))),
('graph_attributes', cls.graph_attributes),
('graph_node_attributes', cls.graph_node_attributes),
('graph_edge_attributes', cls.graph_edge_attributes),
)), indent=4)
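# Illustrative setup.cfg layout this class reads (section names from the
# constants above; the values here are assumed):
#
# [dante]
# checks =
#     conflicts
#     missing
# graph_format = png
#
# [dante:graph_node_attributes]
# shape = ellipse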
|
[
"[email protected]"
] | |
8359e06a1051806061634ed990aabd4bfe599351
|
1a4722073be8c36562888dd9db694e30e2c4fa2f
|
/gacha/logging/log_base.py
|
92589da622ccee06393868946d2b4ed4df756844
|
[
"MIT"
] |
permissive
|
colorstheforce/gacha.py
|
9258b080647efe1b03145ab40438c0bbbf5b4801
|
946f31adb40f3184ce4ddd447439bbd5421d3506
|
refs/heads/main
| 2023-02-08T04:46:27.301147 | 2021-01-05T00:10:18 | 2021-01-05T00:10:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 629 |
py
|
from .log_level import LogLevel
class LogBase:
def __init__(self, log_level: LogLevel):
self.log_level = log_level
def write(self, level: LogLevel, message: str):
raise NotImplementedError
def debug(self, message: str):
self.write(LogLevel.DEBUG, message)
def info(self, message: str):
self.write(LogLevel.INFORMATION, message)
def warning(self, message: str):
self.write(LogLevel.WARNING, message)
def error(self, message: str):
self.write(LogLevel.ERROR, message)
def critical(self, message: str):
self.write(LogLevel.CRITICAL, message)
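# Usage sketch (hypothetical subclass, not part of the library; assumes
# LogLevel members carry comparable integer values):
class ConsoleLog(LogBase):
    def write(self, level: LogLevel, message: str):
        # Only emit records at or above the configured threshold.
        if level.value >= self.log_level.value:
            print('[{}] {}'.format(level.name, message))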
|
[
"[email protected]"
] | |
263ca80ed3ebdcc465692fef40cd71b494ac004c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03807/s835726532.py
|
c603899438bd501bb5871b424daa8724dfe35dfc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 164 |
py
|
N = int(input())
a = list(map(int,input().split()))
odd = 0
for i in range(N):
if a[i] % 2:
odd += 1
if odd % 2:
print('NO')
else:
print('YES')
|
[
"[email protected]"
] | |
7f4df28cde0459f92fa2c4450f65da050a157ffc
|
de4f45e415f3262fdb3a897d24882301a5305cbd
|
/tests/tkimgloader_tests/test_editor.py
|
36d34844ba256ca6d3b2fb2d8ff31ef24565a6cf
|
[
"MIT"
] |
permissive
|
ericziethen/tkimgloader
|
2bef695a634db51141c99accddd2e85c2704e212
|
66ae55b0c77c0d347a24bf78b92cdfc9d6907d01
|
refs/heads/master
| 2022-05-17T17:30:13.084383 | 2020-03-22T00:17:21 | 2020-03-22T00:17:21 | 239,040,650 | 0 | 0 |
MIT
| 2022-03-12T00:15:44 | 2020-02-07T23:29:49 |
Python
|
UTF-8
|
Python
| false | false | 1,464 |
py
|
import os
from tkinter import filedialog
import tkimgloader.scripts.editor as editor
SAMPLE_DIR = R'C:\Projects\This Project'
REL_FILE_PATH = R'SubDir\File.json'
SAMPLE_FILE = os.path.join(SAMPLE_DIR, REL_FILE_PATH)
def editor_init_mock_returns(monkeypatch):
def mockreturn(mockself):
return None
monkeypatch.setattr(editor.ImgEditor, '_init_canvas', mockreturn)
monkeypatch.setattr(editor.ImgEditor, '_draw_menu', mockreturn)
def test_ask_directory(monkeypatch):
def mockreturn(*, title, initialdir):
return SAMPLE_DIR
monkeypatch.setattr(filedialog, 'askdirectory', mockreturn)
assert editor.ask_directory('Title') == SAMPLE_DIR
def test_editor_init(monkeypatch):
editor_init_mock_returns(monkeypatch)
edit = editor.ImgEditor('fake_root', SAMPLE_DIR)
assert edit.root_window == 'fake_root'
assert edit.working_dir == SAMPLE_DIR
def test_ask_file(monkeypatch):
def mockreturn(**kwargs):
return SAMPLE_FILE
monkeypatch.setattr(filedialog, 'askopenfilename', mockreturn)
assert editor.ask_image_filepath('Title', 'Initial Dir') == SAMPLE_FILE
def test_rel_path(monkeypatch):
editor_init_mock_returns(monkeypatch)
def mockreturn_openfile(**kwargs):
return SAMPLE_FILE
monkeypatch.setattr(filedialog, 'askopenfilename', mockreturn_openfile)
edit = editor.ImgEditor('fake_root', SAMPLE_DIR)
assert edit._get_rel_path(SAMPLE_FILE) == REL_FILE_PATH
|
[
"[email protected]"
] | |
c86f70bdcf4470203ca1dc33afc5bd310c8f1884
|
f39d4d63cc588ebbf64fca114a9fdeec123572b3
|
/Experiments/baseline_unet_none_all.py
|
92a95f8627593ef23989edd338db4ee2e1574a9c
|
[] |
no_license
|
Ravimk07/Low_rank_attention_OCT
|
54aeb70d42e52b06929d0ce8b4ef1735f5998eae
|
1dbdca82615875159d368ba09b91be07d1c633a0
|
refs/heads/master
| 2023-02-26T03:13:21.389342 | 2021-02-01T09:39:54 | 2021-02-01T09:39:54 | 371,002,842 | 1 | 0 | null | 2021-05-26T11:02:47 | 2021-05-26T11:02:46 | null |
UTF-8
|
Python
| false | false | 835 |
py
|
import torch
import sys
sys.path.append("..")
# ===================
from OCT_train import trainModels
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if __name__ == '__main__':
#
trainModels(model='unet',
data_set='ours',
input_dim=1,
epochs=50,
width=16,
depth=4,
depth_limit=6,
repeat=3,
l_r=1e-3,
l_r_s=True,
train_batch=4,
shuffle=True,
loss='ce',
norm='bn',
log='MICCAI_Our_Data_Results',
class_no=2,
cluster=True,
data_augmentation_train='none',
data_augmentation_test='all')
print('Finished.')
|
[
"[email protected]"
] | |
eb18c07b3db4ac0a4b01868b44cf2c474b068369
|
dbbe32ca6dbf8b3990f7848ab1c0639b1325f16e
|
/scraping.py
|
c8b92c48206a5c03716094770029f75b7ef96e6b
|
[] |
no_license
|
aapatel96/nyt_scraper
|
ab346b1a85263d1c312871d72d7bdf62cf9605e5
|
1c869e356b5a23df96ce4121823b3315d2fa06d8
|
refs/heads/master
| 2021-05-05T18:16:19.547829 | 2018-09-19T02:02:31 | 2018-09-19T02:02:31 | 103,596,416 | 0 | 0 | null | 2017-09-15T00:46:16 | 2017-09-15T00:46:16 | null |
UTF-8
|
Python
| false | false | 980 |
py
|
import pymongo
from lxml import html
import requests
import re
import time
'''
timestamp = str(time.time())
filename = 'nytimes_data_' + timestamp + '.txt'
textfile = open(filename, 'w')
'''
def scrapeText(link):
    ## Retrieve the page with an HTTP GET request
page = requests.get(link)
## creates a tree structure
tree = html.fromstring(page.content)
## Parse content out based on the three common tags and classes used by NYT html formatting standards
content = tree.xpath('//div[@class="articleBody"]/text()')
content = content + tree.xpath('//p[@class="story-body-text story-content"]/text()')
content = content + tree.xpath('//p[@class="story-body-text"]/text()')
## Combine the list into a single string
text = ''.join(content)
return text
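# Usage sketch (illustrative URL):
# article_text = scrapeText('https://www.nytimes.com/2017/01/01/example.html')
# print(article_text[:200])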
'''
## writes to file...can change later.
textfile.write(text)
textfile.write('\n')
'''
|
[
"[email protected]"
] | |
984769b8bfd917b7f3a450664dda8ca833caabdc
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/components/safe_browsing/content/web_ui/DEPS
|
c4dfe28ac40a5b9fd60086f5f0bb2d45f1b6d99f
|
[
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 |
BSD-3-Clause
| 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null |
UTF-8
|
Python
| false | false | 409 |
include_rules = [
"+components/enterprise/common/proto/connectors.pb.h",
"+components/grit/components_resources.h",
"+components/password_manager/core/browser/hash_password_manager.h",
"+components/user_prefs",
"+components/safe_browsing/core/proto/csd.pb.h",
"+components/strings/grit/components_strings.h",
"+components/grit/components_scaled_resources.h",
"+components/safe_browsing_db",
]
|
[
"[email protected]"
] | ||
c4396f5c7741fd012d68623efe57df5b5b7e569b
|
52000f39a52ad81bd2c49a205b21418795cd6cd7
|
/Linear and Nonlinear SLAM/problem_set/nonlinear.py
|
3a36a38c244bbb6b30db7a55cd54ee3e7fd9a416
|
[] |
no_license
|
manikandtan-ck/Robot-Localization-and-Mapping
|
4fb3821fc57c86acc3b03b4b8ce26b899967adfe
|
2a6b005aa1565453f3212c94740744cf5bc6c074
|
refs/heads/main
| 2023-05-02T14:52:37.878760 | 2021-05-28T16:48:09 | 2021-05-28T16:48:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,700 |
py
|
'''
Initially written by Ming Hsiao in MATLAB
Rewritten in Python by Wei Dong ([email protected]), 2021
'''
import numpy as np
import scipy.linalg
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve
import argparse
import matplotlib.pyplot as plt
from solvers import *
from utils import *
def warp2pi(angle_rad):
"""
    Wraps an angle into [-pi, pi]. Used in the update step.
    \param angle_rad Input angle in radians
    \return angle_rad_warped Warped angle in [-\pi, \pi].
"""
angle_rad = angle_rad - 2 * np.pi * np.floor(
(angle_rad + np.pi) / (2 * np.pi))
return angle_rad
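# Worked examples (illustrative): warp2pi(3 * np.pi / 2) subtracts one full
# turn, since floor((3*pi/2 + pi) / (2*pi)) = 1, and returns -pi/2;
# warp2pi(np.pi) maps to -pi, so outputs lie in [-pi, pi).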
def init_states(odoms, observations, n_poses, n_landmarks):
'''
Initialize the state vector given odometry and observations.
'''
traj = np.zeros((n_poses, 2))
landmarks = np.zeros((n_landmarks, 2))
    landmarks_mask = np.zeros((n_landmarks), dtype=bool)  # np.bool is removed in newer NumPy
for i in range(len(odoms)):
traj[i + 1, :] = traj[i, :] + odoms[i, :]
for i in range(len(observations)):
pose_idx = int(observations[i, 0])
landmark_idx = int(observations[i, 1])
if not landmarks_mask[landmark_idx]:
landmarks_mask[landmark_idx] = True
pose = traj[pose_idx, :]
theta, d = observations[i, 2:]
landmarks[landmark_idx, 0] = pose[0] + d * np.cos(theta)
landmarks[landmark_idx, 1] = pose[1] + d * np.sin(theta)
return traj, landmarks
def odometry_estimation(x, i):
'''
\param x State vector containing both the pose and landmarks
\param i Index of the pose to start from (odometry between pose i and i+1)
\return odom Odometry (\Delta x, \Delta) in the shape (2, )
'''
# TODO: return odometry estimation
odom = np.zeros((2, ))
odom[0] = x[2*(i+1)]-x[2*i] # delta x
odom[1] = x[2*(i+1)+1]-x[2*i+1] # delta y
return odom
def bearing_range_estimation(x, i, j, n_poses):
'''
\param x State vector containing both the pose and landmarks
\param i Index of the pose to start from
\param j Index of the landmark to be measured
\param n_poses Number of poses
\return obs Observation from pose i to landmark j (theta, d) in the shape (2, )
'''
# TODO: return bearing range estimations
obs = np.zeros((2, ))
delta_x = x[n_poses*2+2*j]-x[2*i]
delta_y = x[n_poses*2+2*j+1]-x[2*i+1]
obs[0] = np.arctan2(delta_y,delta_x)
obs[1] = np.sqrt(delta_x**2+delta_y**2)
return obs
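# Derivation note for the Jacobian below (columns are [pose_x, pose_y,
# landmark_x, landmark_y]): with dx = l_x - p_x, dy = l_y - p_y,
# theta = atan2(dy, dx) and d = sqrt(dx^2 + dy^2),
#   d(theta)/d(p_x) =  dy/d^2    d(theta)/d(p_y) = -dx/d^2
#   d(theta)/d(l_x) = -dy/d^2    d(theta)/d(l_y) =  dx/d^2
#   d(d)/d(p_x)     = -dx/d      d(d)/d(p_y)     = -dy/d
#   d(d)/d(l_x)     =  dx/d      d(d)/d(l_y)     =  dy/d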
def compute_meas_obs_jacobian(x, i, j, n_poses):
'''
\param x State vector containing both the pose and landmarks
\param i Index of the pose to start from
\param j Index of the landmark to be measured
\param n_poses Number of poses
\return jacobian Derived Jacobian matrix in the shape (2, 4)
'''
# TODO: return jacobian matrix
jacobian = np.zeros((2, 4))
delta_x = x[n_poses*2+2*j]-x[2*i]
delta_y = x[n_poses*2+2*j+1]-x[2*i+1]
jacobian[0,0] = delta_y/(delta_x**2+delta_y**2)
jacobian[0,1] = -delta_x/(delta_x**2+delta_y**2)
jacobian[0,2] = -delta_y/(delta_x**2+delta_y**2)
jacobian[0,3] = delta_x/(delta_x**2+delta_y**2)
jacobian[1,0] = -delta_x/np.sqrt(delta_x**2+delta_y**2)
jacobian[1,1] = -delta_y/np.sqrt(delta_x**2+delta_y**2)
jacobian[1,2] = delta_x/np.sqrt(delta_x**2+delta_y**2)
jacobian[1,3] = delta_y/np.sqrt(delta_x**2+delta_y**2)
return jacobian
def create_linear_system(x, odoms, observations, sigma_odom, sigma_observation,
n_poses, n_landmarks):
'''
\param x State vector x at which we linearize the system.
\param odoms Odometry measurements between i and i+1 in the global coordinate system. Shape: (n_odom, 2).
\param observations Landmark measurements between pose i and landmark j in the global coordinate system. Shape: (n_obs, 4).
\param sigma_odom Shared covariance matrix of odometry measurements. Shape: (2, 2).
\param sigma_observation Shared covariance matrix of landmark measurements. Shape: (2, 2).
\return A (M, N) Jacobian matrix.
\return b (M, ) Residual vector.
where M = (n_odom + 1) * 2 + n_obs * 2, total rows of measurements.
N = n_poses * 2 + n_landmarks * 2, length of the state vector.
'''
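    # Worked size check (illustrative): with n_odom = 2 and n_obs = 3,
    # M = (2 + 1) * 2 + 3 * 2 = 12 measurement rows; with n_poses = 3 and
    # n_landmarks = 2, N = 3 * 2 + 2 * 2 = 10 state entries.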
n_odom = len(odoms)
n_obs = len(observations)
M = (n_odom + 1) * 2 + n_obs * 2
N = n_poses * 2 + n_landmarks * 2
A = np.zeros((M, N))
b = np.zeros((M, ))
sqrt_inv_odom = np.linalg.inv(scipy.linalg.sqrtm(sigma_odom))
sqrt_inv_obs = np.linalg.inv(scipy.linalg.sqrtm(sigma_observation))
# TODO: First fill in the prior to anchor the 1st pose at (0, 0)
A[0:2,0:2] = sqrt_inv_odom @ np.eye(2)
# TODO: Then fill in odometry measurements
for i in range(n_odom):
Ho = np.array([[-1, 0 ,1, 0],[0, -1, 0, 1]])
odm_temp = odoms[i,:]
oe = odometry_estimation(x,i)
A_temp = sqrt_inv_odom @ Ho
B_temp = sqrt_inv_odom @ (odm_temp-oe)
A[(i*2+2):(i*2+4),(i*2):(i*2+4)]= A_temp
b[(i*2+2):(i*2+4)] = B_temp
# TODO: Then fill in landmark measurements
obs_offset_idx = (n_odom+1)*2
l_offset_idx = n_poses*2
for j in range(n_obs):
pose_idx = int(observations[j,0])
landmark_idx = int(observations[j,1])
measure_temp = observations[j,2:4]
Hl = compute_meas_obs_jacobian(x, pose_idx, landmark_idx, n_poses)
be = bearing_range_estimation(x, pose_idx, landmark_idx, n_poses)
A_temp = sqrt_inv_obs @ Hl
rlow = j*2+obs_offset_idx
rhigh = j*2+2+obs_offset_idx
A[rlow:rhigh, pose_idx*2:pose_idx*2+2]= A_temp[0:2,0:2]
A[rlow:rhigh, l_offset_idx+landmark_idx*2:l_offset_idx+landmark_idx*2+2]= A_temp[0:2,2:4]
diff_measure = np.zeros((2, ))
diff_measure[0] = warp2pi(measure_temp[0]-be[0])
diff_measure[1] = measure_temp[1]-be[1]
b[rlow:rhigh] = sqrt_inv_obs @ diff_measure
return csr_matrix(A), b
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('data', default='../data/2d_nonlinear.npz')
parser.add_argument(
'--method',
nargs='+',
choices=['default', 'pinv', 'qr', 'lu', 'qr_colamd', 'lu_colamd'],
default=['default'],
help='method')
args = parser.parse_args()
data = np.load(args.data)
# Plot gt trajectory and landmarks for a sanity check.
gt_traj = data['gt_traj']
gt_landmarks = data['gt_landmarks']
plt.plot(gt_traj[:, 0], gt_traj[:, 1], 'b-')
plt.scatter(gt_landmarks[:, 0], gt_landmarks[:, 1], c='b', marker='+')
plt.show()
n_poses = len(gt_traj)
n_landmarks = len(gt_landmarks)
odom = data['odom']
observations = data['observations']
sigma_odom = data['sigma_odom']
sigma_landmark = data['sigma_landmark']
# Initialize: non-linear optimization requires a good init.
for method in args.method:
print(f'Applying {method}')
traj, landmarks = init_states(odom, observations, n_poses, n_landmarks)
print('Before optimization')
plot_traj_and_landmarks(traj, landmarks, gt_traj, gt_landmarks)
# Iterative optimization
x = vectorize_state(traj, landmarks)
for i in range(10):
A, b = create_linear_system(x, odom, observations, sigma_odom,
sigma_landmark, n_poses, n_landmarks)
dx, _ = solve(A, b, method)
x = x + dx
traj, landmarks = devectorize_state(x, n_poses)
print('After optimization')
plot_traj_and_landmarks(traj, landmarks, gt_traj, gt_landmarks)
|
[
"[email protected]"
] | |
c14d81b13ff0bfca027e09587f8f586914771894
|
8051c715e86095c1a0f2d6dcee78150417562d00
|
/app/api/response_api.py
|
8ea2f772957ae7aa5d8b6a8b84bed6bcac25e956
|
[
"BSD-3-Clause"
] |
permissive
|
minkione/Apfell
|
45bd47249afa59389ab8237558c52d3f083cae29
|
096b6524c44b0673f11d18bd2388193d074380d6
|
refs/heads/master
| 2020-03-28T12:22:37.741190 | 2018-09-10T02:42:06 | 2018-09-10T02:42:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,766 |
py
|
from app import apfell, db_objects
from sanic.response import json
from app.database_models.model import Task, Response
import base64
from sanic_jwt.decorators import protected, inject_user
from app.api.file_api import create_filemeta_in_database_func, download_file_to_database_func
import json as js
# This gets all responses in the database
@apfell.route(apfell.config['API_BASE'] + "/responses/", methods=['GET'])
@inject_user()
@protected()
async def get_all_responses(request, user):
try:
all_responses = await db_objects.execute(Response.select())
except Exception as e:
return json({'status': 'error',
'error': 'Cannot get responses'})
return json([c.to_json() for c in all_responses])
# Get a single response
@apfell.route(apfell.config['API_BASE'] + "/response/<rid:int>", methods=['GET'])
@inject_user()
@protected()
async def get_one_response(request, user, rid):
try:
resp = await db_objects.get(Response, id=rid)
except Exception as e:
return json({'status': 'error', 'error': 'Cannot get that response'})
return json(resp.to_json())
# implant calling back to update with base64 encoded response from executing a task
# We don't add @protected or @injected_user here because the callback needs to be able to post here for responses
@apfell.route(apfell.config['API_BASE'] + "/responses/<tid:int>", methods=['POST'])
async def update_task_for_callback(request, tid):
data = request.json
decoded = base64.b64decode(data['response']).decode("utf-8")
try:
task = await db_objects.get(Task, id=tid)
except Exception as e:
return json({'status': 'error',
'error': 'Task does not exist'})
try:
        if 'response' not in data:
            return json({'status': 'error', 'error': 'task response not in data'})
        # Decode only after the field is known to exist, so a malformed
        # request cannot raise a KeyError before validation.
        decoded = base64.b64decode(data['response']).decode("utf-8")
if task.command.cmd == "download":
try:
download_response = js.loads(decoded)
if 'total_chunks' in download_response:
return await create_filemeta_in_database_func(download_response)
elif 'chunk_data' in download_response:
return await download_file_to_database_func(download_response)
except Exception as e:
pass
resp = await db_objects.create(Response, task=task, response=decoded)
task.status = "processed"
await db_objects.update(task)
status = {'status': 'success'}
resp_json = resp.to_json()
return json({**status, **resp_json}, status=201)
except Exception as e:
print(e)
return json({'status': 'error',
'error': 'Failed to update task',
'msg': str(e)})
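# Illustrative callback request (shape inferred from the handler above):
#   POST {API_BASE}/responses/<task id>
#   {"response": "<base64-encoded task output>"}
# Download traffic additionally embeds "total_chunks" or "chunk_data"
# inside the decoded JSON.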
|
[
"[email protected]"
] | |
95ee687b93fed84377a920004fe7c683bc76f0b0
|
cd65b46dddbc0749ed0204031c8b92b65d8eddc4
|
/bad/image_preprocess.py
|
3529d3b7bc01def72780b9e395125a3b97f5b736
|
[] |
no_license
|
yuvrajdalia/posture-assistance
|
76d0b35bea6f1aee6da9249e9ffbd8c95319ee55
|
587ee0d1bee808c02200ca2e2f9dee451ea8b976
|
refs/heads/master
| 2022-11-12T21:54:56.394413 | 2020-06-28T16:40:56 | 2020-06-28T16:40:56 | 275,626,300 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,327 |
py
|
from matplotlib import pyplot as plt
import os
import pandas as pd
from gluoncv import model_zoo, data, utils
from gluoncv.data.transforms.pose import detector_to_simple_pose, heatmap_to_coord
detector = model_zoo.get_model('yolo3_mobilenet1.0_coco', pretrained=True)
pose_net = model_zoo.get_model('simple_pose_resnet18_v1b', pretrained=True)
# Note that we can reset the classes of the detector to only include
# human, so that the NMS process is faster.
detector.reset_class(["person"], reuse_weights=['person'])
df = pd.read_csv('pose.csv')
good_images=os.listdir('/home/yuvi/projects/minorproject/openpose/bad')
coords=[]
for good_image in good_images:
if (good_image != 'pose.csv' and good_image != 'image_preprocess.py'):
print(good_image)
x, img = data.transforms.presets.ssd.load_test(good_image, short=512)
print('Shape of pre-processed image:', x.shape)
class_IDs, scores, bounding_boxs = detector(x)
pose_input, upscale_bbox = detector_to_simple_pose(img, class_IDs, scores, bounding_boxs)
predicted_heatmap = pose_net(pose_input)
pred_coords, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)
arr1=pred_coords[0][5:7]
arr4=[]
for i in arr1:
arr4.append(i[0].asnumpy()[0])
arr4.append(i[1].asnumpy()[0])
print(arr4)
print(arr1)
arr2=pred_coords[0][11:17]
for i in arr2:
arr4.append(i[0].asnumpy()[0])
arr4.append(i[1].asnumpy()[0])
print(arr4)
df=df.append({'left_shoulder_x':arr4[0],'left_shoulder_y':arr4[1],'right_shoulder_x':arr4[2],'right_shoulder_y':arr4[3],
'lef_hip_x':arr4[4],'lef_hip_y':arr4[5],'right_hip_x':arr4[6],'right_hip_y':arr4[7],'left_knee_x':arr4[8],'left_knee_y':arr4[9],'right_knee_x':arr4[10],
'right_knee_y':arr4[11],'left_ankle_x':arr4[12],'left_ankle_y':arr4[13],'right_ankle_x':arr4[14],'right_ankle_y':arr4[15],'category':0},ignore_index=True)
#df.append(new_row, ignore_index=True)
print(arr2)
arr5=[1,1,1,1,1,1,1,1]
arr3=(arr1.squeeze().asnumpy().tolist()+arr2.squeeze().asnumpy().tolist())
coords.append(arr3)
#print(df.head)
'''ax = utils.viz.plot_keypoints(img,pred_coords, confidence,
class_IDs, bounding_boxs, scores,
box_thresh=0.5, keypoint_thresh=0.2)
plt.show()'''
print(df.head())
df.to_csv(r'./pose_final.csv', index = False)
|
[
"[email protected]"
] | |
ae6e03e53cdd7bd682948a5cc9423891a77a455a
|
74714db0d28d849a974838b18545914747d9b7b8
|
/PyTest/test_logging.py
|
fcd37e710f11e6023cb063fec0c40b752d2d3908
|
[] |
no_license
|
rahusingh1/AutomationRS
|
e93f070c8e6e03f6a6205730800fd754b2b24109
|
96b5bdd692364c69f53162ff002a9031faa5b25c
|
refs/heads/main
| 2023-08-05T02:17:33.026102 | 2021-09-19T09:46:37 | 2021-09-19T09:46:37 | 394,286,294 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,072 |
py
|
import logging
def test_loggingDemo():
logger = logging.getLogger(__name__) # create object to print the logs
filehandler = logging.FileHandler("logfile.log") # create object where to print
    # Defines the format of each log line. Every %(...)s placeholder is
    # resolved at run time: asctime becomes the timestamp, levelname the
    # severity (DEBUG, INFO, ...), name the logger's name, and message the
    # string passed to the logging call; the trailing "s" renders each
    # value as a string so the pieces join cleanly.
formatter = logging.Formatter("%(asctime)s : %(levelname)s : %(name)s : %(message)s")
# format of log that will be printed are as follows
# 2019-02-17 12:40:14,798 : CRITICAL : <test case name> : Fatal error in payment on step 4, cannot continue.
# make a connection between formater and filehander so that logger will get the knowledge and connection between them.
filehandler.setFormatter(formatter) # now connection established
logger.addHandler(filehandler) # create handler what to print
    # Set the threshold: records below this level are discarded, so with
    # ERROR only the error() and critical() calls below reach the file.
    logger.setLevel(logging.ERROR)
    # Severity order is debug < info < warning < error < critical; the
    # logger emits everything at the configured level and above.
logger.debug("A debug statement is executed")
logger.info("Information statement")
logger.warning("Something is in warning mode")
logger.error("A major error has happened")
logger.critical("Critical issue")
    # Q. Can we see only error logs?
    # A. Yes - set the level to ERROR and the logger emits ERROR and above.
    # In pytest, code must live inside a test function to be collected.
|
[
"[email protected]"
] | |
f14fb0928eff57f50aaf3c6b9771a0b547e9facd
|
691394498d5324eab8ed5b71682f75c9b4c3d758
|
/Problem46_ProjectEuler.py
|
dee02d2074ab971e1d3eb8a22110d59fd25c741e
|
[] |
no_license
|
pratyaydeep/Python-programs
|
67a1ac640f5a5d2b192011847f120e8c2137eeeb
|
99c0427fb3ab18030ee810dc61d9c51fc505da60
|
refs/heads/master
| 2020-04-13T06:08:29.808713 | 2018-12-08T15:02:07 | 2018-12-08T15:02:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 624 |
py
|
def prime(n):
    if n <= 1:
        return False
    # Trial division up to sqrt(n) covers both of the original branches.
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True
n = 35
while True:
if prime(n):
n += 2
else:
for i in range(1,int((n/2)**0.5)+1):
if prime(n - 2 * i**2):
break
else:
break
n += 2
print (n)
|
[
"[email protected]"
] | |
4ef0a26a6bf9821f4d1258569482c2b9781bc3ef
|
ba5377e5adf9f14944c0827166e3d17bb0aea64e
|
/26class_04.py
|
cfeb596a9a586b36d557816a855e3f06d1db0f54
|
[] |
no_license
|
ArhamChouradiya/Python-Course
|
34aaaa780cdb9beef2722d15c7e1c73dd2053323
|
503f3c3832617b818f061e3db4cd0f5e2ca24f52
|
refs/heads/master
| 2020-12-06T01:28:01.926712 | 2020-01-07T10:32:55 | 2020-01-07T10:32:55 | 232,300,438 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 318 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 13:15:19 2019
@author: arham
"""
class student:
major= "CSE"
def __init__(self,rollno,name):
self.rollno=rollno
self.name=name
s1=student(1,"JOhn")
s2=student(2,"jane")
print(s1.major)
print(student.major)
|
[
"[email protected]"
] | |
33ba309fa7b283c7bca90cf230299060040928be
|
1d0097e25c983c764be6871c4e9d19acd83c9a6d
|
/llvm-3.2.src/utils/lit/lit/Util.py
|
f29480900ce76ca519ff720add0e93b93a9bacd9
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
smowton/llpe
|
16a695782bebbeadfd1abed770d0928e464edb39
|
8905aeda642c5d7e5cd3fb757c3e9897b62d0028
|
refs/heads/master
| 2022-03-11T23:08:18.465994 | 2020-09-16T07:49:12 | 2020-09-16T07:49:12 | 1,102,256 | 50 | 10 |
NOASSERTION
| 2020-09-16T07:49:13 | 2010-11-22T12:52:25 |
C++
|
UTF-8
|
Python
| false | false | 4,325 |
py
|
import os, sys
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(capture(['sysctl', '-n', 'hw.ncpu']))
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 # Default
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
import errno
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError,e:
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
def capture(args, env=None):
import subprocess
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out,_ = p.communicate()
return out
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.isfile(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p):
return p
return None
def checkToolsPath(dir, tools):
    for tool in tools:
        if not os.path.exists(os.path.join(dir, tool)):
            return False
    return True
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def printHistogram(items, title = 'Items'):
import itertools, math
items.sort(key = lambda (_,v): v)
maxValue = max([v for _,v in items])
# Select first "nice" bar height that produces more than 10 bars.
power = int(math.ceil(math.log(maxValue, 10)))
for inc in itertools.cycle((5, 2, 2.5, 1)):
barH = inc * 10**power
N = int(math.ceil(maxValue / barH))
if N > 10:
break
elif inc == 1:
power -= 1
histo = [set() for i in range(N)]
for name,v in items:
bin = min(int(N * v/maxValue), N-1)
histo[bin].add(name)
barW = 40
hr = '-' * (barW + 34)
print '\nSlowest %s:' % title
print hr
for name,value in items[-20:]:
print '%.2fs: %s' % (value, name)
print '\n%s Times:' % title
print hr
pDigits = int(math.ceil(math.log(maxValue, 10)))
pfDigits = max(0, 3-pDigits)
if pfDigits:
pDigits += pfDigits + 1
cDigits = int(math.ceil(math.log(len(items), 10)))
print "[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
'Percentage'.center(barW),
'Count'.center(cDigits*2 + 1))
print hr
for i,row in enumerate(histo):
pct = float(len(row)) / len(items)
w = int(barW * pct)
print "[%*.*fs,%*.*fs)" % (pDigits, pfDigits, i*barH,
pDigits, pfDigits, (i+1)*barH),
print ":: [%s%s] :: [%*d/%*d]" % ('*'*w, ' '*(barW-w),
cDigits, len(row),
cDigits, len(items))
|
[
"[email protected]"
] | |
fa390b34a6534aa22cfd97538d43abc2b537133b
|
daa48566b53d0e4c968f96baf48cce6454690ac7
|
/Spider/test.py
|
b7c71758ea9f7b7bee3abf470ebe7c91a2d1a4fb
|
[] |
no_license
|
cjw876422081/web_spider
|
0ecc0a68aacf6f2236a641b786eb7d19f310e4cd
|
50e5997ee3a24e98349f018f1eb435bd1bea7893
|
refs/heads/master
| 2020-08-29T14:33:54.086597 | 2019-10-28T14:13:54 | 2019-10-28T14:13:54 | 218,062,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 184 |
py
|
#-*-coding:utf-8-*-
from urllib import request
import bs4
response = request.urlopen("http://www.xuexila.com/fwn/xindetihui/gongzuo/4510798.html")
html = response.read().decode('gbk')
|
[
"[email protected]"
] | |
0cca339b321d19c57c835673ae248d9498a2e659
|
85f84bec4de006b374270383c26b88d3e59858ea
|
/dataset/FontImgDatasetBase.py
|
b2529fc88d7745d6f3f4f76bd1e322e123fdd101
|
[] |
no_license
|
haznai/Font_Completion_Graph
|
cf76f8e5372769fd100b8e47090e72861f566bae
|
3bfbaaede14b5de207641a11e0c58baa52fc6952
|
refs/heads/master
| 2023-07-08T11:52:57.296032 | 2021-08-09T09:27:25 | 2021-08-09T09:27:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,262 |
py
|
import os
import pickle
import random
import collections
import numpy as np
import torch.utils.data as data
from PIL import Image
from dataset.norm_img import load_img_dict
from dataset.norm_seq import load_seq_dict, load_quant_dict
import torchvision.transforms as transforms
class FontDatasetIMG(data.Dataset):
def __init__(self, subset, config, label_idx, char_idx, data_transforms,
load_seq, nPoints, input_dim, padding, residual, quantize, pair, DA):
self.labels = [d.rstrip().split(' ') for d in open(config['sketch_list']).readlines()]
if subset in ['test', 'query']:
self.labels = sorted(self.labels)
self.fnames = [v[0] for v in self.labels]
if pair:
fonts = [k for k, v in collections.Counter([l[2] for l in self.labels]).items()]
labels_by_font = {f: [] for f in fonts}
for n, info in enumerate(self.labels):
# if info[2] in fonts:
labels_by_font[info[2]].append([n, info[0], info[2]])
self.fonts = labels_by_font
self.subset = subset
self.char_idx = char_idx
self.label_idx = label_idx
if label_idx == -1:
mapper = pickle.load(open(config['mapper'], "rb"))
keys = sorted(['%s-%s-%s' % (c, s, w)
for c in range(len(mapper['categories']))
for s in range(len(mapper['styles']))
for w in range(len(mapper['weights']))])
self.mapper = {k: n for n, k in enumerate(keys)}
self.img_dict = load_img_dict(config['img_path'], subset)
self.data_transforms = data_transforms
self.load_seq = load_seq
if load_seq:
self.data_dict = load_seq_dict(config['data_dict_file'])
if quantize:
self.quant_dict = load_quant_dict(config['data_dict_file'].replace('dict', 'kmeans'))
self.input_dim = input_dim
self.nPoints = nPoints
self.padding = padding
self.residual = residual
self.quantize = quantize
self.DA = DA
self.pair = pair
def __len__(self):
return len(self.labels)
def get_item(self, filename, item):
char = int(self.labels[item][self.char_idx].strip())
if self.label_idx == -1:
label = self.labels[item]
c, s, w = int(label[4].strip()), int(label[5].strip()), int(label[6].strip())
label = self.mapper['%s-%s-%s' % (c, s, w)]
else:
label = int(self.labels[item][self.label_idx].strip())
sketch = self.data_transforms(self.img_dict[filename])
data = {'filename': filename, 'sketch': sketch, 'label': label, 'char': char}
if self.load_seq:
coordinate, flag_bits, stroke_len, seqs, indices = self.data_dict[filename]
coordinate = coordinate.astype('float32')[:self.nPoints]
flag_bits = flag_bits.astype('int')[:self.nPoints]
indices = indices[:self.nPoints]
# assert self.nPoints >= stroke_len and self.nPoints == len(coordinate)
stroke_len = min(stroke_len, self.nPoints)
coordinate = np.array(coordinate)
if self.DA:
nums = coordinate[:, :2]
scale = random.random() + 0.5
if random.random() < 0.5:
nums[:, 0] *= scale
else:
nums[:, 1] *= scale
min_x, min_y = nums.min(axis=0)
max_x, max_y = nums.max(axis=0)
# mean_x, mean_y = nums.mean(axis=0)
rangs_x, range_y = max_x - min_x, max_y - min_y
coordinate[:, :2] = (nums[:, :2] - (min_x, min_y)) * 256. / max(rangs_x, range_y)
if self.residual:
residual = coordinate[1:stroke_len, :2] - coordinate[:stroke_len - 1, :2]
residual = np.row_stack((coordinate[:1, :2], residual, coordinate[stroke_len:, :2]))
residual = np.column_stack((residual, coordinate[:, 2:]))
coordinate[:, :2] -= coordinate[:1, :2]
else:
residual = np.zeros(coordinate.shape)
if self.padding == 'zero':
_mask = self.generate_padding_mask(stroke_len, nPoints=self.nPoints)
coordinate = coordinate * _mask
flag_bits = flag_bits * _mask
residual = residual * _mask
elif self.padding == 'repeat':
coordinate = np.concatenate([coordinate[:stroke_len]] * (self.nPoints // stroke_len + 1))
flag_bits = np.concatenate([flag_bits[:stroke_len]] * (self.nPoints // stroke_len + 1))
residual = np.concatenate([residual[:stroke_len]] * (self.nPoints // stroke_len + 1))
elif self.padding == 'rewind':
coordinate = np.concatenate([coordinate[:stroke_len], coordinate[:stroke_len][::-1]])
flag_bits = np.concatenate([flag_bits[:stroke_len], flag_bits[:stroke_len][::-1]])
residual = np.concatenate([residual[:stroke_len], residual[:stroke_len][::-1]])
coordinate = np.concatenate([coordinate] * (self.nPoints // (stroke_len * 2) + 1))
flag_bits = np.concatenate([flag_bits] * (self.nPoints // (stroke_len * 2) + 1))
residual = np.concatenate([residual] * (self.nPoints // (stroke_len * 2) + 1))
else:
assert self.padding == 'default'
coordinate = coordinate[:self.nPoints, :self.input_dim]
residual = residual[:self.nPoints, :self.input_dim]
flag_bits = flag_bits[:self.nPoints]
indices = indices[:self.nPoints]
indices[-1] = 0
attention_masks = self.generate_padding_mask(stroke_len, nPoints=self.nPoints)
# attention_masks = self.get_attention(flag_bits, stroke_len)
data.update({'coordinate': coordinate, 'flag_bits': flag_bits, 'residual': residual,
'stroke_len': stroke_len, 'indices': np.expand_dims(indices, 1),
'attention_masks': attention_masks})
if self.quantize:
quantize = self.quant_dict[filename].astype('long')
data['quantize'] = quantize
return data
def getitem_by_name(self, fname):
if '/' not in fname:
fname = fname.split('/')[-1]
if '.' in fname:
fname = fname.split('.')[0]
fname = '%s/%s.svg' % (fname[0], fname)
item = self.fnames.index(fname)
data = self.get_item(fname, item)
return data
def __getitem__(self, item):
info = self.labels[item]
data = self.get_item(info[0], item)
if not self.pair:
return data
else:
info2 = random.choice(self.fonts[info[2]])
data2 = self.get_item(info2[1], info2[0])
return (data, data2)
def generate_padding_mask(self, stroke_length, nPoints):
padding_mask = np.ones([nPoints, 1], int)
padding_mask[stroke_length:, :] = 0
return padding_mask
if __name__ == '__main__':
dataset_path = '/home/alternative/font_data/google_font_dataset/uppercase'
config = {'sketch_list': os.path.join(dataset_path, 'tiny_query_set.txt'),
'data_dict_file': os.path.join(dataset_path, 'tiny_query_dataset_dict.pickle'),
'img_path': dataset_path}
transform = transforms.Compose([
transforms.Grayscale(),
# transforms.Resize(299),
transforms.Pad(299),
transforms.CenterCrop(299),
transforms.ToTensor()
])
# subset, config, postfix, data_transforms, label_idx, char_idx
sampleDS = FontDatasetIMG('query', config, 1, 3, transform,
True, 150, 4, 'zero', False, False, False, False)
sketch = next(iter(sampleDS))['sketch']
print(sketch.shape, sketch.max())
data = np.array(sketch.permute(1, 2, 0).cpu().numpy() * 255, dtype=np.ubyte)
print(data.shape, data.max())
img = Image.fromarray(data.squeeze())
img.save(os.path.join('demo/sampleDS.png'))
|
[
"[email protected]"
] | |
06c70bfe440dff42352a060b456146f906d3767b
|
560822ea6b2de76b4344ffca2b0084a8da3b35a5
|
/test/test_utilities.py
|
4cad6da5cc77a7d570490a38a6dbeaf234165450
|
[] |
no_license
|
gkunapuli/mdml-py
|
cca0af65bbf8c1dab4f8300a962dc4b895861e9e
|
9517cc78413ab5b639b0e28cf3aab0b03b2a02f7
|
refs/heads/master
| 2020-05-30T00:03:18.368858 | 2019-05-30T17:29:40 | 2019-05-30T17:29:40 | 189,450,149 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,185 |
py
|
import unittest
import numpy as np
import algorithms.utilities as util
class RankOneUpdateTest(unittest.TestCase):
def test_asymmetric_triplet_generation(self):
n_source = 10
n_target = 15
xs, ys, xt, yt = self.generate_two_domain_data(n_source, n_target)
example_pairs, similarity_labels = util.generate_cross_domain_triples(xs, ys, xt, yt)
try:
# Ensure that the labels have been generated correctly
            test_labels = [2 * int(ys[pair[0]] == yt[pair[1]]) - 1 for pair in example_pairs]  # np.int is removed in newer NumPy
np.testing.assert_array_equal(test_labels, similarity_labels)
result = True
except AssertionError as err:
result = False
print(err)
self.assertTrue(result)
@staticmethod
def generate_two_domain_data(n_source, n_target):
xs = np.random.randn(n_source, 10)
ys = 2 * (np.random.rand(n_source) > 0.5).astype(int) - 1
xt = np.random.randn(n_target, 10)
yt = 2 * (np.random.rand(n_target) > 0.5).astype(int) - 1
return xs, ys, xt, yt
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
ba897465ddc7bea4ef33e45bb292ec6dcdea5381
|
392495a85f77e72e7c3562576aa362d7860c17ee
|
/backend/setup.py
|
244a73c8d40ebb0836da93cb7d08757fdc76199d
|
[] |
no_license
|
messa/aiohttp-nextjs-graphql-demo-forum
|
ef51c26720a6f67a36f08d5caeba4e2d9bef0332
|
38fb66d011faec881b184e132aa7347517ee99e6
|
refs/heads/master
| 2020-04-16T22:38:08.171305 | 2019-02-04T02:18:35 | 2019-02-04T02:18:35 | 165,976,811 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 386 |
py
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name='forum-backend',
version='0.0.1',
packages=find_packages(exclude=['doc', 'tests*']),
install_requires=[
'aiohttp',
'aiohttp-graphql',
'pyyaml',
],
entry_points={
'console_scripts': [
'forum-backend=forum_backend:main',
],
})
|
[
"[email protected]"
] | |
8e8516567da050393095124d42c7601023b8cc02
|
70eef679af91823579963dfb33eb94358353da87
|
/evennia/utils/inlinefunc.py
|
8cc14a95895b753b1f9faddf1c1a673ef8191dc1
|
[
"BSD-3-Clause"
] |
permissive
|
Pinacolada64/evennia
|
17e68f4f6b7ddcb4891256ceab2fbf02d185b9db
|
ed1b3ee8195cb93cd3382625d8d20d83d63c5322
|
refs/heads/master
| 2020-04-30T01:39:53.499431 | 2016-02-26T11:02:20 | 2016-02-26T11:02:20 | 52,920,172 | 1 | 0 | null | 2016-03-02T00:15:02 | 2016-03-02T00:15:02 | null |
UTF-8
|
Python
| false | false | 8,450 |
py
|
"""
Inlinefunc
**Note: This module is deprecated. Use evennia.utils.nested_inlinefuncs instead.**
This is a simple inline text language for use to custom-format text in
Evennia. It is applied BEFORE ANSI/MUX parsing is applied.
To activate Inlinefunc, settings.INLINEFUNC_ENABLED must be set.
The format is straightforward:
{funcname([arg1,arg2,...]) text {/funcname
Example:
"This is {pad(50,c,-) a center-padded text{/pad of width 50."
->
"This is -------------- a center-padded text--------------- of width 50."
This can be inserted in any text, operated on by the parse_inlinefunc
function. funcname() (no space is allowed between the name and the
argument tuple) is picked from a selection of valid functions from
settings.INLINEFUNC_MODULES.
Commands can be nested, and will applied inside-out. For correct
parsing their end-tags must match the starting tags in reverse order.
Example:
"The time is {pad(30){time(){/time{/padright now."
->
"The time is Oct 25, 11:09 right now."
An inline function should have the following call signature:
def funcname(text, *args, **kwargs)
where the text is always the part between {funcname(args) and
{/funcname and the *args are taken from the appropriate part of the
call. It is important that the inline function properly clean the
incoming args, checking their type and replacing them with sane
defaults if needed. If impossible to resolve, the unmodified text
should be returned. The inlinefunc should never cause a traceback.
"""
import re
from django.conf import settings
from evennia.utils import utils, logger
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
# inline functions
def pad(text, *args, **kwargs):
"""
Pad to width. pad(text, width=78, align='c', fillchar=' ')
"""
width = _DEFAULT_WIDTH
align = 'c'
fillchar = ' '
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.strip().isdigit() else width
elif iarg == 1:
align = arg if arg in ('c', 'l', 'r') else align
elif iarg == 2:
fillchar = arg[0]
else:
break
return utils.pad(text, width=width, align=align, fillchar=fillchar)
def crop(text, *args, **kwargs):
"""
Crop to width. crop(text, width=78, suffix='[...]')
"""
width = _DEFAULT_WIDTH
suffix = "[...]"
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.strip().isdigit() else width
elif iarg == 1:
suffix = arg
else:
break
return utils.crop(text, width=width, suffix=suffix)
def wrap(text, *args, **kwargs):
"""
    Wrap/fill text to width. wrap(text, width=78, indent=0)
"""
width = _DEFAULT_WIDTH
indent = 0
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.strip().isdigit() else width
elif iarg == 1:
indent = int(arg) if arg.isdigit() else indent
return utils.wrap(text, width=width, indent=indent)
def time(text, *args, **kwargs):
"""
Inserts current time.
"""
import time
strformat = "%h %d, %H:%M"
if args and args[0]:
strformat = str(args[0])
return time.strftime(strformat)
def you(text, *args, **kwargs):
"""
Inserts your name.
"""
name = "You"
sess = kwargs.get("session")
if sess and sess.puppet:
name = sess.puppet.key
return name
# load functions from module (including this one, if using default settings)
_INLINE_FUNCS = {}
for module in utils.make_iter(settings.INLINEFUNC_MODULES):
_INLINE_FUNCS.update(utils.all_from_module(module))
_INLINE_FUNCS.pop("inline_func_parse", None)
# dynamically build regexes for found functions
_RE_FUNCFULL = r"\{%s\((.*?)\)(.*?){/%s"
_RE_FUNCFULL_SINGLE = r"\{%s\((.*?)\)"
_RE_FUNCSTART = r"\{((?:%s))"
_RE_FUNCEND = r"\{/((?:%s))"
_RE_FUNCSPLIT = r"(\{/*(?:%s)(?:\(.*?\))*)"
_RE_FUNCCLEAN = r"\{%s\(.*?\)|\{/%s"
# Note: regex flags must be OR-ed together; "&" of DOTALL and MULTILINE is 0,
# which silently compiles with no flags at all.
_INLINE_FUNCS = dict((key, (func, re.compile(_RE_FUNCFULL % (key, key), re.DOTALL | re.MULTILINE),
                            re.compile(_RE_FUNCFULL_SINGLE % key, re.DOTALL | re.MULTILINE)))
                     for key, func in _INLINE_FUNCS.items() if callable(func))
_FUNCSPLIT_REGEX = re.compile(_RE_FUNCSPLIT % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
_FUNCSTART_REGEX = re.compile(_RE_FUNCSTART % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
_FUNCEND_REGEX = re.compile(_RE_FUNCEND % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
_FUNCCLEAN_REGEX = re.compile("|".join([_RE_FUNCCLEAN % (key, key) for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
# inline parser functions
def _execute_inline_function(funcname, text, session):
"""
Get the enclosed text between {funcname(...) and {/funcname
and execute the inline function to replace the whole block
with the result.
Args:
funcname (str): Inlinefunction identifier.
text (str): Text to process.
session (Session): Session object.
Notes:
This lookup is "dumb" - we just grab the first end tag we find. So
to work correctly this function must be called "inside out" on a
nested function tree, so each call only works on a "flat" tag.
"""
def subfunc(match):
"""
replace the entire block with the result of the function call
"""
args = [part.strip() for part in match.group(1).split(",")]
intext = match.group(2)
kwargs = {"session":session}
return _INLINE_FUNCS[funcname][0](intext, *args, **kwargs)
return _INLINE_FUNCS[funcname][1].sub(subfunc, text)
def _execute_inline_single_function(funcname, text, session):
"""
Get the arguments of a single function call (no matching end tag)
and execute it with an empty text input.
Args:
funcname (str): Function identifier.
text (str): String to process.
session (Session): Session id.
"""
def subfunc(match):
"replace the single call with the result of the function call"
args = [part.strip() for part in match.group(1).split(",")]
kwargs = {"session":session}
return _INLINE_FUNCS[funcname][0]("", *args, **kwargs)
return _INLINE_FUNCS[funcname][2].sub(subfunc, text)
def parse_inlinefunc(text, strip=False, session=None):
"""
Parse inline function-replacement.
Args:
text (str): Text to parse.
strip (bool, optional): Remove all supported inlinefuncs from text.
session (bool): Session calling for the parsing.
Returns:
text (str): Parsed text with processed results of
inlinefuncs.
"""
if strip:
# strip all functions
return _FUNCCLEAN_REGEX.sub("", text)
stack = []
for part in _FUNCSPLIT_REGEX.split(text):
endtag = _FUNCEND_REGEX.match(part)
if endtag:
# an end tag
endname = endtag.group(1)
while stack:
new_part = stack.pop()
part = new_part + part # add backwards -> fowards
starttag = _FUNCSTART_REGEX.match(new_part)
if starttag:
startname = starttag.group(1)
if startname == endname:
part = _execute_inline_function(startname, part, session)
break
stack.append(part)
# handle single functions without matching end tags; these are treated
# as being called with an empty string as text argument.
outstack = []
for part in _FUNCSPLIT_REGEX.split("".join(stack)):
starttag = _FUNCSTART_REGEX.match(part)
if starttag:
logger.log_dep("The {func()-style inlinefunc is deprecated. Use the $func{} form instead.")
startname = starttag.group(1)
part = _execute_inline_single_function(startname, part, session)
outstack.append(part)
return "".join(outstack)
def _test():
# this should all be handled
s = "This is a text with a{pad(78,c,-)text {pad(5)of{/pad {pad(30)nice{/pad size{/pad inside {pad(4,l)it{/pad."
s2 = "This is a text with a----------------text of nice size---------------- inside it ."
t = parse_inlinefunc(s)
assert(t == s2)
return t
|
[
"[email protected]"
] | |
b4789ea9ea16d8ca423e264f31a5abc00f5ed27f
|
fdde679c2252175d33bd82e87522d63c48b942b3
|
/iveBeenEverywhereMan.py
|
246c6db446b95debe1f92ffc79341ce0a2225463
|
[] |
no_license
|
ChaseMorgan2001/KattisProblems
|
85a3142889b2a0f885a71214e2631b595d1e3e89
|
9ea99edc69adb3dd5bc2beb678cebeb60c24de7e
|
refs/heads/main
| 2023-04-06T10:07:13.400975 | 2021-04-10T18:31:55 | 2021-04-10T18:31:55 | 306,489,023 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
cases = int(input())
for _ in range(cases):
    trips = int(input())
    cities = set()
    for _ in range(trips):
        cities.add(input())
    print(len(cities))
|
[
"[email protected]"
] | |
2f24322b0f1035af9561273dcb755c914216736b
|
d5973b68cabb0f6775cc6cbf257653ed9d860d85
|
/argonaut/model/post.py
|
422192d93555b9c6329da6b1a5614011b929db4d
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
mountainlandbot/argonaut
|
7c103ec8e9cfd702c5f00f448ff40a3aee459c90
|
c8978a2bf64f5ab181c029af2b0765a25cbbd690
|
refs/heads/master
| 2023-03-19T00:37:24.694420 | 2012-03-13T21:23:52 | 2012-03-13T21:23:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,378 |
py
|
"""The post model"""
from sqlalchemy import Column, or_
from sqlalchemy.sql import join
from sqlalchemy.types import Integer, Unicode, UnicodeText, Date
from argonaut.model.meta import Base, Session
class Post(Base):
__tablename__ = 'post'
id = Column(Integer, primary_key=True)
subject = Column(Unicode(300), nullable=False)
body = Column(UnicodeText, nullable=False)
posted = Column(Date, nullable=False)
author = Column(Integer, nullable=True)
viewed = Column(Integer, default=0)
status = Column(Integer, default=1)
def __init__(self, id=None, subject=None, body=None, posted=None, author=None, viewed=None, status=None):
self.id = id
self.subject = subject
self.body = body
self.posted = posted
self.author = author
self.viewed = viewed
self.status = status
def __unicode__(self):
return self.subject
def __repr__(self):
return "<Post('%s','%s', '%s', '%s', '%s', '%s', '%s')>" % (self.id,self.subject,self.body,self.posted,self.author,self.viewed,self.status)
__str__ = __unicode__
def get(id, active_only=True):
try:
if active_only:
return Session.query(Post).filter(Post.status == 1).filter(Post.id == id).first()
else:
return Session.query(Post).get(id)
except TypeError:
return None
def get_many(amount=10, order='asc', active_only=True, count_only=False, filter=None):
try:
query = Session.query(Post)
if active_only:
query = query.filter(Post.status == 1)
if filter:
query = query.filter(or_(Post.subject.like('%'+filter+'%'),Post.body.like('%'+filter+'%')))
if count_only:
return query.limit(amount).count()
else:
if order == 'desc':
return query.order_by(Post.id.desc()).limit(amount)
else:
return query.order_by(Post.id.asc()).limit(amount)
except Exception:
return None
def new():
return Post()
def save(post):
Session.add(post)
Session.commit()
def get_by_tag_name(name):
from argonaut.model.tag import Tag
from argonaut.model.tag_post import Tag_Post
return Session.query(Post).join(Tag_Post).join(Tag).filter(Post.status==1).filter(Tag.name==name).order_by(Post.id.desc())
|
[
"[email protected]"
] | |
33a9522157798c987f60feb1e75125aa8b7fc82a
|
e38a1e48dc0c08af64e473bc4f5f1ea268002407
|
/Preparacao_dos_dados/remove_outliers.py
|
d063c246edf5990ea6fa5151618cfbc308297884
|
[] |
no_license
|
guicornelli/pfc
|
6d277c92bd1b3487278eb0f6fca6a3199c081f1b
|
7502a1d288eb6392c8011fee0dbd90075796aa36
|
refs/heads/master
| 2020-04-04T21:57:50.993531 | 2018-11-06T01:09:08 | 2018-11-06T01:09:08 | 156,305,021 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,172 |
py
|
import numpy as np
def remove_outlier(values, times):  # filter a value series and its timestamps
    fator = 1.5  # 1.5 is the standard IQR multiplier
    q75, q25 = np.percentile(values, [75, 25])  # third and first quartiles
    iqr = q75 - q25  # interquartile range
    lowpass = q25 - (iqr * fator)  # lower cutoff for the filter
    highpass = q75 + (iqr * fator)  # upper cutoff for the filter
    outliers = np.argwhere(values < lowpass)  # indices of values below the lower cutoff
    values = np.delete(values, outliers)  # drop those values
    times = np.delete(times, outliers)
    outliers = np.argwhere(values > highpass)  # indices of values above the upper cutoff
    values = np.delete(values, outliers)  # drop those values
    times = np.delete(times, outliers)
    return values, times, q75, q25  # return the series without outliers
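# Illustrative run on synthetic data (not part of the original file): 1000.0 is
# an obvious outlier and should be dropped together with its timestamp.
if __name__ == "__main__":
    vals = np.array([10.0, 12.0, 11.0, 13.0, 1000.0, 12.5])
    times = np.arange(len(vals))
    clean_vals, clean_times, q75, q25 = remove_outlier(vals, times)
    print(clean_vals)  # [10.  12.  11.  13.  12.5]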
|
[
"[email protected]"
] | |
9b1774fd8119b1bbeca86bf668133761b869002f
|
51f34f3ae85f770d33f49962ad21d3334900e5d9
|
/ApiTest/cfg/prod_conf.py
|
825b6a1c4dc18ee57f7cf63c6f0f22a6c499d08c
|
[] |
no_license
|
FirelGef/mail_api_test
|
dec3da72d55b06c06f0c1aeb0442cb2de812bd3c
|
d86248a069de4b9cb8590389dee18f5291730483
|
refs/heads/master
| 2021-07-23T17:20:14.839618 | 2020-04-23T13:39:51 | 2020-04-23T13:39:51 | 150,568,492 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 65 |
py
|
protocol = 'https'
farm = 'e.mail.ru'
auth_farm = 'auth.mail.ru'
|
[
"[email protected]"
] | |
410aa5e90d452ce0c150cc25c78df4ee555a14c6
|
20c20938e201a0834ccf8b5f2eb5d570d407ad15
|
/abc094/arc095_a/7981214.py
|
7f6c3dcab7bd1fb3884adf64c039c5841bf608cf
|
[] |
no_license
|
kouhei-k/atcoder_submissions
|
8e1a1fb30c38e0d443b585a27c6d134bf1af610a
|
584b4fd842ccfabb16200998fe6652f018edbfc5
|
refs/heads/master
| 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 172 |
py
|
N = int(input())
a = list(map(int, input().split()))
b = sorted(a)
# N is even, so removing one element leaves an odd count whose median is either
# the upper middle b[N//2] (when a[i] lies in the lower half) or the lower
# middle b[N//2 - 1] (when a[i] lies in the upper half).
ans = [b[N // 2], b[N // 2 - 1]]
for i in range(N):
    if a[i] >= ans[0]:
        print(ans[1])
    else:
        print(ans[0])
|
[
"[email protected]"
] | |
50a42d0ddd0cfd3c7a8fed644973d44ce7ca568e
|
6d2a55c9358a735669f06066c1fc1294528a2dc5
|
/androcg.py
|
f88f5c9d47808e38884173a640cb166e6e59b06a
|
[
"Apache-2.0"
] |
permissive
|
dmuppiri/androguard
|
02548e96e05e462b8b437cf4c3714db1cfbc4710
|
b1ccfe11a4de596a114f6bc0f92a4c1d76cf297b
|
refs/heads/master
| 2020-03-12T17:42:48.828581 | 2018-04-21T20:11:57 | 2018-04-21T20:11:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,978 |
py
|
#!/usr/bin/env python3
from androguard.misc import AnalyzeAPK
from androguard.core.androconf import show_logging
from androguard.core.analysis.analysis import ExternalMethod
from androguard.core.bytecode import FormatClassToJava
import matplotlib.pyplot as plt
import networkx as nx
from argparse import ArgumentParser
import sys
import logging
log = logging.getLogger("androcfg")
def _add_node(G, method, entry_points):
"""
Wrapper to add methods to a graph
"""
if method not in G.node:
if isinstance(method, ExternalMethod):
is_external = True
else:
is_external = False
if method.get_class_name() in entry_points:
is_entry_point = True
else:
is_entry_point = False
G.add_node(method, external=is_external, entrypoint=is_entry_point)
def generate_graph(dx, classname=".*", methodname=".*", descriptor=".*",
accessflags=".*", no_isolated=False, entry_points=[]):
"""
Generate a directed graph based on the methods found by the filters applied.
    The filters are the same as in
    :meth:`~androguard.core.analysis.analysis.Analysis.find_methods`
A networkx.DiGraph is returned, containing all edges only once!
that means, if a method calls some method twice or more often, there will
only be a single connection.
:param dx: :class:`~androguard.core.analysis.analysis.Analysis`
:param entry_points: A list of classes that are marked as entry point
:rtype: DiGraph
"""
CG = nx.DiGraph()
# Note: If you create the CG from many classes at the same time, the drawing
# will be a total mess...
for m in dx.find_methods(classname=classname, methodname=methodname,
descriptor=descriptor, accessflags=accessflags):
orig_method = m.get_method()
log.info("Found Method --> {}".format(orig_method))
        if no_isolated and len(m.get_xref_to()) == 0:
            log.info("Skipped {}, because it has no xrefs".format(orig_method))
continue
_add_node(CG, orig_method, entry_points)
for other_class, callee, offset in m.get_xref_to():
_add_node(CG, callee, entry_points)
# As this is a DiGraph and we are not interested in duplicate edges,
# check if the edge is already in the edge set.
# If you need all calls, you probably want to check out MultiDiGraph
if not CG.has_edge(orig_method, callee):
CG.add_edge(orig_method, callee)
return CG
def plot(CG):
"""
Plot the call graph using matplotlib
For larger graphs, this should not be used!
"""
pos = nx.spring_layout(CG)
internal = []
external = []
for n in CG.node:
if isinstance(n, ExternalMethod):
external.append(n)
else:
internal.append(n)
nx.draw_networkx_nodes(CG, pos=pos, node_color='r', nodelist=internal)
nx.draw_networkx_nodes(CG, pos=pos, node_color='b', nodelist=external)
    nx.draw_networkx_edges(CG, pos, arrows=True)
    nx.draw_networkx_labels(CG, pos=pos, labels={x: "{} {}".format(x.get_class_name(), x.get_name()) for x in CG.node})
plt.draw()
plt.show()
def _write_gml(G, path):
"""
Wrapper around nx.write_gml
"""
return nx.write_gml(G, path, stringizer=str)
def main():
    parser = ArgumentParser(description="Create a call graph based on the data "
                                        "of Analysis and export it into a graph format.")
parser.add_argument("APK", nargs=1, help="The APK to analyze")
parser.add_argument("--output", "-o", default="callgraph.gml",
help="Filename of the output file, the extension is used to decide which format to use (default callgraph.gml)")
parser.add_argument("--show", "-s", action="store_true", default=False,
help="instead of saving the graph, print it with mathplotlib (you might not see anything!")
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="Print more output")
parser.add_argument("--classname", default=".*", help="Regex to filter by classname")
parser.add_argument("--methodname", default=".*", help="Regex to filter by methodname")
parser.add_argument("--descriptor", default=".*", help="Regex to filter by descriptor")
parser.add_argument("--accessflag", default=".*", help="Regex to filter by accessflags")
parser.add_argument("--no-isolated", default=False, action="store_true",
help="Do not store methods which has no xrefs")
args = parser.parse_args()
if args.verbose:
show_logging(logging.INFO)
a, d, dx = AnalyzeAPK(args.APK[0])
entry_points = map(FormatClassToJava, a.get_activities() + a.get_providers() + a.get_services() + a.get_receivers())
entry_points = list(entry_points)
log.info("Found The following entry points by search AndroidManifest.xml: {}".format(entry_points))
CG = generate_graph(dx,
args.classname,
args.methodname,
args.descriptor,
args.accessflag,
args.no_isolated,
entry_points,
)
write_methods = dict(gml=_write_gml,
gexf=nx.write_gexf,
gpickle=nx.write_gpickle,
graphml=nx.write_graphml,
yaml=nx.write_yaml,
net=nx.write_pajek,
)
if args.show:
plot(CG)
else:
writer = args.output.rsplit(".", 1)[1]
if writer in ["bz2", "gz"]:
writer = args.output.rsplit(".", 2)[1]
if writer not in write_methods:
print("Could not find a method to export files to {}!".format(writer))
sys.exit(1)
write_methods[writer](CG, args.output)
if __name__ == "__main__":
main()
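# Example invocation (hypothetical file names), exporting a GraphML call graph
# restricted to a single class:
#   python androcg.py --classname ".*MainActivity.*" -o callgraph.graphml app.apk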
|
[
"[email protected]"
] | |
3bea51c8b7af4f467a716a04d5aeb0d0cbec113c
|
3211227ed613bef10063e9ada4780bda40dc6d74
|
/src/reproducible/cache.py
|
2f62cbf9c4d4a935c2f461cd842a2ff99fafa3a3
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
LachlanGunn/reproducible
|
24128a7552a3441014a924a0ce9113bf7c2c0724
|
0988ced254f52f99ed51dc63f3b5ac6d75efdff8
|
refs/heads/master
| 2021-01-21T12:35:19.005622 | 2017-09-13T06:24:37 | 2017-09-13T06:24:37 | 102,085,916 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,438 |
py
|
#!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os.path
import pickle
import reproducible
import reproducible.data
class Cache(object):
pass
class MemoryCache(Cache):
"""Memory-backed cache.
MemoryCache provides a key-value object store in memory, stored in
a standard dictionary.
"""
def __init__(self):
super(MemoryCache, self).__init__()
self.cache = {}
def set(self, key, value):
# type: (str, reproducible.data.Data) -> None
self.cache[key] = value
def get(self, key):
# type: (str) -> object
return self.cache.get(key)
def is_cached(self, key):
# type: (str) -> bool
return key in self.cache.keys()
class FileCache(Cache):
"""Disk-backed cache.
FileCache provides a key-value store on disk. Each item in the cache
has a directory, named for its key. The directory contains two files:
- /type: The pickled object type.
- /data: The serialised data itself.
As the objects in question are of type reproducible.Data, we can use
the unpickled type object to get access to the appropriate
.load() class method.
"""
@classmethod
def __check_directory__(cls, root):
# type: (str) -> bool
return os.path.isdir(root)
def __init__(self, root, debug=None):
"""
Args:
root (str): The root directory of the cache.
debug (file): A file-like object to which to log cache accesses.
"""
        # type: (str, object) -> None
super(FileCache, self).__init__()
if not self.__check_directory__(root):
os.mkdir(root)
self.root = root
self.debug = debug
def is_cached(self, key):
# type: (str) -> bool
return self.__check_directory__(os.path.join(self.root, key))
def get(self, key):
# type: (str) -> object
if self.debug:
print("GET %s\n -> " % (key, ), file=self.debug, end="")
base_path = os.path.join(self.root, key)
with open(os.path.join(base_path, 'data'), 'rb') as fh, \
open(os.path.join(base_path, 'type'), 'rb') as fh_type:
data_type = pickle.load(fh_type)
if self.debug:
data = fh.read()
hash_context = reproducible.hash_family()
hash_context.update(data)
print(hash_context.hexdigest(), file=self.debug)
return data_type.loads(data)
return data_type.load(fh)
def set(self, key, value):
# type: (str, reproducible.data.Data) -> None
if self.debug:
print('SET %s' % key, file=self.debug)
base_path = os.path.join(self.root, key)
if not self.__check_directory__(base_path):
os.mkdir(base_path)
with open(os.path.join(base_path, 'data'), 'wb') as fh, \
open(os.path.join(base_path, 'type'), 'wb') as fh_type:
value.dump(fh)
pickle.dump(type(value), fh_type)
def set_cache(cache):
"""Set the global cache.
Args:
cache (reproducible.Cache): The new cache object.
Return:
Nothing.
"""
# type: (Cache) -> None
global _cache
_cache = cache
def get_cache():
# type: () -> Cache
global _cache
return _cache
_cache = MemoryCache()
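# Illustrative swap of the default in-memory cache for a disk-backed one;
# '/tmp/repro-cache' is a hypothetical path.
if __name__ == "__main__":
    set_cache(FileCache('/tmp/repro-cache'))
    print(get_cache().is_cached('some-key'))  # False until something is stored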
|
[
"[email protected]"
] | |
272d69ada04ca5d2d1fc84952f10733e50ccda18
|
aaf0ae622738df951c7eeafbf20b8b6de36622ae
|
/node_modules/canvas/build/config.gypi
|
4403b58241bcac0ca22c7cfedb017630dea67b76
|
[
"MIT"
] |
permissive
|
ganezasan/lambda-svg-to-png-fabric
|
9edf2cdc5b07e3ab8b53b3fe1e0a6a500586cef9
|
1de464f6270ed7b57939ff051a4b400183b5fceb
|
refs/heads/master
| 2020-05-29T08:42:58.608420 | 2016-10-02T14:39:48 | 2016-10-02T14:39:48 | 69,797,917 | 5 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,744 |
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt56l.dat",
"icu_data_in": "../../deps/icu/source/data/in/icudt56l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "./deps/icu",
"icu_small": "true",
"icu_ver_major": "56",
"node_byteorder": "little",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/ec2-user/.node-gyp/4.5.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/home/ec2-user/.npm-init.js",
"userconfig": "/home/ec2-user/.npmrc",
"node_version": "4.5.0",
"user": "500",
"save": "true",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "500",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/home/ec2-user/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.15.9 node/v4.5.0 linux x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0002",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/tmp",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"[email protected]"
] | |
0e310d88fcfceac2312a159d2be0be14c3aeb4b7
|
a344184e98417473750d637906c717d72291d77a
|
/Simulation/forest_fire.py
|
2f2db83022d81f686a6a34865c1f4697289eb864
|
[
"MIT"
] |
permissive
|
dashdeckers/Wildfire-Control-Python
|
c79af370ef1e6065cef22b92313f77ce4a216f64
|
8ace34e389b4d177b84efe964e8d327a82eb3ed6
|
refs/heads/master
| 2023-04-03T13:39:09.776862 | 2020-10-28T12:31:31 | 2020-10-28T12:31:31 | 174,835,169 | 2 | 0 |
MIT
| 2023-03-24T23:47:02 | 2019-03-10T14:31:27 |
Python
|
UTF-8
|
Python
| false | false | 3,721 |
py
|
from .constants import (
METADATA,
)
from .utility import (
grass,
dirt,
layer,
get_name,
color2ascii,
)
from .environment import (
World,
Agent,
)
class ForestFire:
def __init__(self):
self.W = World()
self.METADATA = METADATA
self.DEBUG = METADATA['debug']
self.layer = layer
self.get_name = get_name
self.width = METADATA['width']
self.height = METADATA['height']
self.n_actions = METADATA['n_actions']
# Execute an action in the environment
def step(self, action):
# Handle basic movement actions
if action in ["N", "S", "E", "W"] or action in range(4):
self.W.agents[0].move(action)
# Handle the dig action
if METADATA['allow_dig_toggle'] and action in ["D", 4]:
self.W.agents[0].toggle_digging()
# If the action is not handled, the agent does nothing
# Update environment only every AGENT_SPEED steps
METADATA['a_speed_iter'] -= 1
if METADATA['a_speed_iter'] == 0:
self.update()
METADATA['a_speed_iter'] = METADATA['a_speed']
# Return the state, reward and whether the simulation is done
return [self.W.get_state(),
self.W.get_reward(),
not self.W.RUNNING,
{}]
# Reset the simulation to its initial state
def reset(self):
self.W.reset()
return self.W.get_state()
# Print an ascii rendering of the simulation
def render(self):
# Print index markers along the top
print(" ", end="")
for x in range(self.width):
print(x % 10, end="")
print("")
return_map = "\n"
for y in range(self.height):
# Print index markers along the left side
print(y % 10, end="")
for x in range(self.width):
# If the agent is at this location, print A
if self.W.agents and (self.W.agents[0].x, self.W.agents[0].y) == (x, y):
return_map += 'A'
print("A", end="")
# Otherwise use the ascii mapping to print the correct symbol
else:
symbol = color2ascii[self.W.env[x, y, layer['gray']]]
return_map += symbol
print(symbol, end="")
return_map += '\n'
print("")
print("")
# Return a string representation of the map incase we want to save it
return return_map
# Updates the simulations internal state
def update(self):
# Remove dead agents
self.W.agents = [a for a in self.W.agents if not a.is_dead()]
# Iterate over a copy of the set, to avoid ConcurrentModificationException
burning = list(self.W.burning_cells)
# For each burning cell
for cell in burning:
            # Reduce its fuel. If it has not burnt out, continue
# Burnt out cells are removed automatically by this function
if self.W.reduce_fuel(cell):
# For each neighbour of the (still) burning cell
for n_cell in self.W.get_neighbours(cell):
# If that neighbour is burnable
if self.W.is_burnable(n_cell):
# Apply heat to it from the burning cell
# This function adds the n_cell to burning cells if it ignited
self.W.apply_heat_from_to(cell, n_cell)
# Simulation is terminated when there are no more burning cells or agents
if not self.W.agents or not self.W.burning_cells:
self.W.RUNNING = False
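# Minimal driving loop (illustrative; this module uses relative imports, so in
# practice ForestFire would be imported from its package rather than run directly):
if __name__ == "__main__":
    import random
    sim = ForestFire()
    state = sim.reset()
    done = False
    while not done:
        state, reward, done, _ = sim.step(random.randrange(sim.n_actions))
    sim.render()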
|
[
"[email protected]"
] | |
3df0428323b57cc88de0ee7226ebd300382dd1ef
|
7a968bade9602cb9ba2f4e994b0e8df65685651f
|
/prac8/silver_service_taxi_test.py
|
ef3730f5b48dfe46793da137518f192955859214
|
[] |
no_license
|
vthang4799/CP1804practicals
|
4eeef6a9caf6513fe1ead666cc3224739ded5967
|
e4b8d4c3aeb2a6e52256eca34a58e3604ab1a4ac
|
refs/heads/master
| 2022-12-24T10:26:18.335443 | 2020-09-29T07:46:11 | 2020-09-29T07:46:11 | 283,714,883 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 277 |
py
|
from prac8.silver_service_taxi import SilverServiceTaxi
def main():
silver_taxi = SilverServiceTaxi("Fast Taxi", 100, 2)
silver_taxi.drive(18)
print(silver_taxi)
print("Current fare: ${}".format(silver_taxi.get_fare()))
if __name__ == '__main__':
main()
|
[
"vthang4799"
] |
vthang4799
|
4c00485efe1951610673676ed0c3fe161d042837
|
c6d94644c10f2614b9242e62bfbaff2848f1a7db
|
/Bot.py
|
296db20cb1823227e728dbdea823d217a185711e
|
[] |
no_license
|
FX196/JSTrading
|
ee308ddd3efbd28f3dbcd6175aff5a1f32e6103d
|
a81a67d051d401db026cda3d31306ef04f76c35b
|
refs/heads/master
| 2020-05-03T06:59:53.711763 | 2019-07-13T16:38:24 | 2019-07-13T16:38:24 | 178,486,613 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 977 |
py
|
import time
def import_module(module_name):
"""Helper function to import module"""
import sys
import os.path as osp
import importlib
sys.path.append(osp.join(osp.dirname(__file__), 'strategies'))
return importlib.import_module(module_name)
class Bot:
def __init__(self, exchange, strategies):
self.exchange = exchange
self.strategies = [import_module(strategy) for strategy in strategies]
        self.filename = str(exchange) + "_" + time.strftime("%H:%M:%S", time.gmtime())
self.file_obj = open(self.filename, "w")
def run(self):
"""
infinite loop while connected
:return:
"""
data = self.exchange.read()
while data:
self.file_obj.write(data)
trades = []
for strategy in self.strategies:
trades.extend(strategy.trade(self.exchange))
self.exchange.trade_batch(trades)
data = self.exchange.read()
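# Illustrative wiring (Exchange and the strategy module names are hypothetical;
# each strategy module must expose a trade(exchange) function, as used in run()):
#   bot = Bot(exchange=MyExchange(), strategies=["momentum", "mean_reversion"])
#   bot.run()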
|
[
"[email protected]"
] | |
28bdf11e78383b1bcbc1a3bb33cf6a446abf3117
|
b7cd5628c9559732b4426c94bd4a4eda65d5e992
|
/model/quadrature_rules.py
|
f4a6b7d5cfc262c1796da81e7d07903ecafde2e8
|
[] |
no_license
|
o-smith/SphericalNeuralField
|
b7fe57e814723ed260a63bd80c372c9e0a89c5c7
|
b21d0ac6401ba38a69a2c412c017e90149040e3e
|
refs/heads/master
| 2022-01-15T14:27:53.383300 | 2019-05-09T10:10:42 | 2019-05-09T10:10:42 | 125,877,841 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,680 |
py
|
#!/usr/bin/env python
"""\
C version: Dmitri Laikov
F77 version: Christoph van Wuellen, http://www.ccl.net
Python version: Richard P. Muller, 2002.
This subroutine is part of a set of subroutines that generate
Lebedev grids [1-6] for integration on a sphere. The original
C-code [1] was kindly provided by Dr. Dmitri N. Laikov and
translated into fortran by Dr. Christoph van Wuellen.
This subroutine was translated from C to fortran77 by hand.
Users of this code are asked to include reference [1] in their
publications, and in the user- and programmers-manuals
describing their codes.
[1] V.I. Lebedev, and D.N. Laikov
'A quadrature formula for the sphere of the 131st
algebraic order of accuracy'
Doklady Mathematics, Vol. 59, No. 3, 1999, pp. 477-481.
[2] V.I. Lebedev
'A quadrature formula for the sphere of 59th algebraic
order of accuracy'
Russian Acad. Sci. Dokl. Math., Vol. 50, 1995, pp. 283-286.
[3] V.I. Lebedev, and A.L. Skorokhodov
'Quadrature formulas of orders 41, 47, and 53 for the sphere'
Russian Acad. Sci. Dokl. Math., Vol. 45, 1992, pp. 587-592.
[4] V.I. Lebedev
'Spherical quadrature formulas exact to orders 25-29'
Siberian Mathematical Journal, Vol. 18, 1977, pp. 99-107.
[5] V.I. Lebedev
'Quadratures on a sphere'
Computational Mathematics and Mathematical Physics, Vol. 16,
1976, pp. 10-24.
[6] V.I. Lebedev
'Values of the nodes and weights of ninth to seventeenth
order Gauss-Markov quadrature formulae invariant under the
octahedron group with inversion'
Computational Mathematics and Mathematical Physics, Vol. 15,
1975, pp. 44-51.
"""
from math import sqrt, isnan
import numpy as np
def genOh_a00(v):
"(0,0,a) etc. (6 points)"
a=1.0
return [(a,0,0,v),(-a,0,0,v),(0,a,0,v),(0,-a,0,v),(0,0,a,v),(0,0,-a,v)]
def genOh_aa0(v):
"(0,a,a) etc, a=1/sqrt(2) (12 points)"
a=sqrt(0.5)
return [(0,a,a,v),(0,-a,a,v),(0,a,-a,v),(0,-a,-a,v),
(a,0,a,v),(-a,0,a,v),(a,0,-a,v),(-a,0,-a,v),
(a,a,0,v),(-a,a,0,v),(a,-a,0,v),(-a,-a,0,v)]
def genOh_aaa(v):
"(a,a,a) etc, a=1/sqrt(3) (8 points)"
a = sqrt(1./3.)
return [(a,a,a,v),(-a,a,a,v),(a,-a,a,v),(-a,-a,a,v),
(a,a,-a,v),(-a,a,-a,v),(a,-a,-a,v),(-a,-a,-a,v)]
def genOh_aab(v,a):
"(a,a,b) etc, b=sqrt(1-2 a^2), a input (24 points)"
b = sqrt(1.0 - 2.0*a*a)
return [(a,a,b,v),(-a,a,b,v),(a,-a,b,v),(-a,-a,b,v),
(a,a,-b,v),(-a,a,-b,v),(a,-a,-b,v),(-a,-a,-b,v),
(a,b,a,v),(-a,b,a,v),(a,-b,a,v),(-a,-b,a,v),
(a,b,-a,v),(-a,b,-a,v),(a,-b,-a,v),(-a,-b,-a,v),
(b,a,a,v),(-b,a,a,v),(b,-a,a,v),(-b,-a,a,v),
(b,a,-a,v),(-b,a,-a,v),(b,-a,-a,v),(-b,-a,-a,v)]
def genOh_ab0(v,a):
"(a,b,0) etc, b=sqrt(1-a^2), a input (24 points)"
b=sqrt(1.0-a*a)
return [(a,b,0,v),(-a,b,0,v),(a,-b,0,v),(-a,-b,0,v),
(b,a,0,v),(-b,a,0,v),(b,-a,0,v),(-b,-a,0,v),
(a,0,b,v),(-a,0,b,v),(a,0,-b,v),(-a,0,-b,v),
(b,0,a,v),(-b,0,a,v),(b,0,-a,v),(-b,0,-a,v),
(0,a,b,v),(0,-a,b,v),(0,a,-b,v),(0,-a,-b,v),
(0,b,a,v),(0,-b,a,v),(0,b,-a,v),(0,-b,-a,v)]
def genOh_abc(v,a,b):
"(a,b,c) etc, c=sqrt(1-a^2-b^2), a,b input (48 points)"
c=sqrt(1.0 - a*a - b*b)
return [(a,b,c,v),(-a,b,c,v),(a,-b,c,v),(-a,-b,c,v),
(a,b,-c,v),(-a,b,-c,v),(a,-b,-c,v),(-a,-b,-c,v),
(a,c,b,v),(-a,c,b,v),(a,-c,b,v),(-a,-c,b,v),
(a,c,-b,v),(-a,c,-b,v),(a,-c,-b,v),(-a,-c,-b,v),
(b,a,c,v),(-b,a,c,v),(b,-a,c,v),(-b,-a,c,v),
(b,a,-c,v),(-b,a,-c,v),(b,-a,-c,v),(-b,-a,-c,v),
(b,c,a,v),(-b,c,a,v),(b,-c,a,v),(-b,-c,a,v),
(b,c,-a,v),(-b,c,-a,v),(b,-c,-a,v),(-b,-c,-a,v),
(c,a,b,v),(-c,a,b,v),(c,-a,b,v),(-c,-a,b,v),
(c,a,-b,v),(-c,a,-b,v),(c,-a,-b,v),(-c,-a,-b,v),
(c,b,a,v),(-c,b,a,v),(c,-b,a,v),(-c,-b,a,v),
(c,b,-a,v),(-c,b,-a,v),(c,-b,-a,v),(-c,-b,-a,v)]
def leb6():
return genOh_a00(0.1666666666666667)
def leb14():
return genOh_a00(0.06666666666666667)\
+ genOh_aaa(0.07500000000000000)
def leb26():
return genOh_a00(0.04761904761904762)\
+ genOh_aa0(0.03809523809523810) \
+ genOh_aaa(0.03214285714285714)
def leb38():
return genOh_a00(0.009523809523809524)\
+ genOh_aaa(0.3214285714285714E-1) \
+ genOh_ab0(0.2857142857142857E-1,0.4597008433809831E+0)
def leb50():
return genOh_a00(0.1269841269841270E-1)\
+ genOh_aa0(0.2257495590828924E-1) \
+ genOh_aaa(0.2109375000000000E-1) \
+ genOh_aab(0.2017333553791887E-1,0.3015113445777636E+0)
def leb74():
return genOh_a00(0.5130671797338464E-3)\
+ genOh_aa0(0.1660406956574204E-1) \
+ genOh_aaa(-0.2958603896103896E-1) \
+ genOh_aab(0.2657620708215946E-1,0.4803844614152614E+0) \
+ genOh_ab0(0.1652217099371571E-1,0.3207726489807764E+0)
def leb86():
return genOh_a00(0.1154401154401154E-1) \
+ genOh_aaa(0.1194390908585628E-1) \
+ genOh_aab(0.1111055571060340E-1,0.3696028464541502E+0) \
+ genOh_aab(0.1187650129453714E-1,0.6943540066026664E+0) \
+ genOh_ab0(0.1181230374690448E-1,0.3742430390903412E+0)
def leb110():
return genOh_a00(0.3828270494937162E-2) \
+ genOh_aaa(0.9793737512487512E-2) \
+ genOh_aab(0.8211737283191111E-2,0.1851156353447362E+0) \
+ genOh_aab(0.9942814891178103E-2,0.6904210483822922E+0) \
+ genOh_aab(0.9595471336070963E-2,0.3956894730559419E+0) \
+ genOh_ab0(0.9694996361663028E-2,0.4783690288121502E+0)
def leb146():
return genOh_a00(0.5996313688621381E-3) \
+ genOh_aa0(0.7372999718620756E-2) \
+ genOh_aaa(0.7210515360144488E-2) \
+ genOh_aab(0.7116355493117555E-2,0.6764410400114264E+0) \
+ genOh_aab(0.6753829486314477E-2,0.4174961227965453E+0) \
+ genOh_aab(0.7574394159054034E-2,0.1574676672039082E+0) \
+ genOh_abc(0.6991087353303262E-2,0.1403553811713183E+0,
0.4493328323269557E+0)
def leb170():
return genOh_a00(0.5544842902037365E-2) \
+ genOh_aa0(0.6071332770670752E-2) \
+ genOh_aaa(0.6383674773515093E-2) \
+ genOh_aab(0.5183387587747790E-2,0.2551252621114134E+0) \
+ genOh_aab(0.6317929009813725E-2,0.6743601460362766E+0) \
+ genOh_aab(0.6201670006589077E-2,0.4318910696719410E+0) \
+ genOh_ab0(0.5477143385137348E-2,0.2613931360335988E+0) \
+ genOh_abc(0.5968383987681156E-2,0.4990453161796037E+0,
0.1446630744325115E+0)
def leb194():
return genOh_a00(0.1782340447244611E-2) \
+ genOh_aa0(0.5716905949977102E-2) \
+ genOh_aaa(0.5573383178848738E-2) \
+ genOh_aab(0.5608704082587997E-2,0.6712973442695226E+0) \
+ genOh_aab(0.5158237711805383E-2,0.2892465627575439E+0) \
+ genOh_aab(0.5518771467273614E-2,0.4446933178717437E+0) \
+ genOh_aab(0.4106777028169394E-2,0.1299335447650067E+0) \
+ genOh_ab0(0.5051846064614808E-2,0.3457702197611283E+0) \
+ genOh_abc(0.5530248916233094E-2,0.1590417105383530E+0,
0.8360360154824589E+0)
# Equivalence table:
# 00 = 1
# a0 = 2
# aa = 3
# ab = 4
# b0 = 5
# bc = 6
def leb974():
return genOh_a00(0.1438294190527431E-03) \
+ genOh_aaa(0.1125772288287004E-02) \
+ genOh_aab(0.4948029341949241E-03,0.4292963545341347E-01) \
+ genOh_aab(0.7357990109125470E-03,0.1051426854086404E+00) \
+ genOh_aab(0.8889132771304384E-03,0.1750024867623087E+00) \
+ genOh_aab(0.9888347838921435E-03,0.2477653379650257E+00) \
+ genOh_aab(0.1053299681709471E-02,0.3206567123955957E+00) \
+ genOh_aab(0.1092778807014578E-02,0.3916520749849983E+00) \
+ genOh_aab(0.1114389394063227E-02,0.4590825874187624E+00) \
+ genOh_aab(0.1123724788051555E-02,0.5214563888415861E+00) \
+ genOh_aab(0.1125239325243814E-02,0.6253170244654199E+00) \
+ genOh_aab(0.1126153271815905E-02,0.6637926744523170E+00) \
+ genOh_aab(0.1130286931123841E-02,0.6910410398498301E+00) \
+ genOh_aab(0.1134986534363955E-02,0.7052907007457760E+00) \
+ genOh_ab0(0.6823367927109931E-03,0.1236686762657990E+00) \
+ genOh_ab0(0.9454158160447096E-03,0.2940777114468387E+00) \
+ genOh_ab0(0.1074429975385679E-02,0.4697753849207649E+00) \
+ genOh_ab0(0.1129300086569132E-02,0.6334563241139567E+00) \
+ genOh_abc(0.8436884500901954E-03,0.5974048614181342E-01,0.2029128752777523E+00) \
+ genOh_abc(0.1075255720448885E-02,0.1375760408473636E+00,0.4602621942484054E+00) \
+ genOh_abc(0.1108577236864462E-02,0.3391016526336286E+00,0.5030673999662036E+00) \
+ genOh_abc(0.9566475323783357E-03,0.1271675191439820E+00,0.2817606422442134E+00) \
+ genOh_abc(0.1080663250717391E-02,0.2693120740413512E+00,0.4331561291720157E+00) \
+ genOh_abc(0.1126797131196295E-02,0.1419786452601918E+00,0.6256167358580814E+00) \
+ genOh_abc(0.1022568715358061E-02,0.6709284600738255E-01,0.3798395216859157E+00) \
+ genOh_abc(0.1108960267713108E-02,0.7057738183256172E-01,0.5517505421423520E+00) \
+ genOh_abc(0.1122790653435766E-02,0.2783888477882155E+00,0.6029619156159187E+00) \
+ genOh_abc(0.1032401847117460E-02,0.1979578938917407E+00,0.3589606329589096E+00) \
+ genOh_abc(0.1107249382283854E-02,0.2087307061103274E+00,0.5348666438135476E+00) \
+ genOh_abc(0.1121780048519972E-02,0.4055122137872836E+00,0.5674997546074373E+00)
def xyz_to_tp(x, y, z):
p = np.arccos(z)
fact = np.sqrt(x*x + y*y)
n = len(x)
t = np.zeros(n)
for j in range(len(x)):
if (fact[j] > 0.0):
t[j] = np.arccos(x[j]/fact[j])
else:
t[j] = np.arccos(x[j])
if (y[j] < 0.0):
t[j] = -t[j]
return t, p
def gen_grid():
#Generate the Lebedev points
xyzw = leb974()
xyzw = np.array(xyzw)
#Unpack the points
x, y, z, w = xyzw[:,0], xyzw[:,1], xyzw[:,2], xyzw[:,3]
#Convert the points to polars
t, p= xyz_to_tp(x,y,z)
return t, p, w
def generate_iso_grid(filename):
#Load in the quadrature points
xyzw = np.genfromtxt(filename)
#Unpack the points
x, y, z, w = xyzw[:,0], xyzw[:,1], xyzw[:,2], xyzw[:,3]
#Convert the points to polars
t, p = xyz_to_tp(x, y, z)
return t, p, w
def get_quadrature_dimension(filename):
#Load in the quadrature points
xyzw = np.genfromtxt(filename)
#Find the length
return len(xyzw)
LebFunc = {
6: leb6,
14: leb14,
26: leb26,
38: leb38,
50: leb50,
74: leb74,
86: leb86,
110: leb110,
146: leb146,
170: leb170,
194: leb194,
974: leb974
}
def Lebedev(n):
try:
return LebFunc[n]()
    except KeyError:
        raise ValueError("No grid available for %d" % n)
return None
if __name__ == '__main__':
print "#!/usr/bin/env python"
print "Lebedev = {"
for i in [6,14,26,38,50,74,86,110,146,170,194]:
print " %d : [" % i
lf = LebFunc[i]()
for xyzw in lf:
print " (%16.12f,%16.12f,%16.12f,%16.12f)," % xyzw
print " ],"
print "}"
|
[
"[email protected]"
] | |
640eaca5a9846336b9ed25da7da3de7dbf3bbf11
|
ce886756510fb064b6d3c0559e547542e3de8891
|
/l21/zombie.py
|
3af6b6da9fb862d18801463f7d7f71147a592ce6
|
[] |
no_license
|
Nekot94/sprog
|
348b2cce012230cf59e85a7af5d8db8c4eecd7c0
|
a79f8c5e7cf0237b768f3a46732a72897fc50742
|
refs/heads/master
| 2020-05-21T02:01:37.608220 | 2017-05-05T12:01:34 | 2017-05-05T12:01:34 | 84,555,459 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,622 |
py
|
class Weapon:
def __init__(self,name,damage,count):
self.name = name
self.damage = damage
self.count = count
class Character:
def __init__(self,name,hp, damage):
self.name = name
self.hp = hp
self.damage = damage
def attack(self, victim):
victim.hp -= self.damage
print("{0.name} нанес {1.name} {0.damage} урона".format(
self,victim))
class Zombie(Character):
pass
class Player(Character):
def __init__(self,name,hp, damage, weapon):
super().__init__(name,hp, damage)
self.weapon = weapon
def attack(self, victim):
damage = self.damage + self.weapon.damage
victim.hp -= damage
print("{0.name} нанес {1.name} {2} урона".format(
self,victim,damage))
class Game:
def start(self):
weapon = Weapon("Томар", 100, 3)
player = Player("Омар",1000, 5, weapon)
zombie = Zombie("Арсн",300, 50)
count = 0
while True:
player.attack(zombie)
zombie.attack(player)
print("Жизни игрока:",player.hp)
print("Жизни зомби:",zombie.hp)
input()
if player.hp <= 0:
print(player.name,"умер но убил ",count,"зомби")
break
if zombie.hp <= 0:
count += 1
zombie = Zombie("Арсн",300, 50)
print(player.name,"убил зомби")
game = Game()
game.start()
|
[
"[email protected]"
] | |
e3e25ce23370e068912110921559d559bca593e6
|
1a5c27bc6e2d39a258dd517d2dc3570c13e42a70
|
/flaskext/utils.py
|
ff2d1dcf02a01b52fcfe2121292f09a4dde4989a
|
[
"MIT"
] |
permissive
|
fumingshih/flask-peewee
|
0f8e169ca7ab2d7ab437a5620a2ff2f082d668dd
|
4f44ec5583abba5099880a2a2af76404223a594b
|
refs/heads/master
| 2021-01-18T11:00:19.120283 | 2011-11-09T14:36:02 | 2011-11-09T14:36:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,083 |
py
|
import math
import random
import re
import sys
from hashlib import sha1
from flask import abort, request, render_template
from peewee import Model, DoesNotExist, SelectQuery
def get_object_or_404(query_or_model, **query):
try:
return query_or_model.get(**query)
except DoesNotExist:
abort(404)
def object_list(template_name, qr, var_name='object_list', **kwargs):
pq = PaginatedQuery(qr, kwargs.pop('paginate_by', 20))
kwargs[var_name] = pq.get_list()
return render_template(template_name, pagination=pq, page=pq.get_page(), **kwargs)
class PaginatedQuery(object):
page_var = 'page'
def __init__(self, query_or_model, paginate_by):
self.paginate_by = paginate_by
if isinstance(query_or_model, SelectQuery):
self.query = query_or_model
self.model = self.query.model
else:
self.model = query_or_model
self.query = self.model.select()
def get_page(self):
return int(request.args.get(self.page_var) or 1)
def get_pages(self):
return math.ceil(float(self.query.count()) / self.paginate_by)
def get_list(self):
return self.query.paginate(self.get_page(), self.paginate_by)
def get_next():
if not request.query_string:
return request.path
return '%s?%s' % (request.path, request.query_string)
def slugify(s):
return re.sub('[^a-z0-9_\-]+', '-', s.lower())
def load_class(s):
path, klass = s.rsplit('.', 1)
__import__(path)
mod = sys.modules[path]
return getattr(mod, klass)
# borrowing these methods, slightly modified, from django.contrib.auth
def get_hexdigest(salt, raw_password):
return sha1(salt + raw_password).hexdigest()
def make_password(raw_password):
salt = get_hexdigest(str(random.random()), str(random.random()))[:5]
hsh = get_hexdigest(salt, raw_password)
return '%s$%s' % (salt, hsh)
def check_password(raw_password, enc_password):
salt, hsh = enc_password.split('$', 1)
return hsh == get_hexdigest(salt, raw_password)
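# Illustrative round trips (Python 2 era code: sha1 here expects byte strings):
if __name__ == '__main__':
    print(slugify('Hello, World!'))       # -> hello-world-
    enc = make_password('s3cret')
    print(check_password('s3cret', enc))  # -> True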
|
[
"[email protected]"
] | |
074f65b007b18514c4cebbd0794439daf535247b
|
fb3c49761c4771de6016f1fa54a940968ab06cb1
|
/home work 3.py
|
a3b872d8b9ff804da814d4e1eac6addf8658409c
|
[] |
no_license
|
Brayson0215/nwe
|
3d78c1c5b5e890aea98ee3a44d989051ebb3fad7
|
16a813be35667fb86b39edd54476789235cd98e5
|
refs/heads/master
| 2020-04-19T02:05:59.499257 | 2019-01-29T03:47:57 | 2019-01-29T03:47:57 | 167,892,119 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 281 |
py
|
# Validate user input: a name should be alphabetic, a phone number numeric.
name = input("name")
if name.isalpha():
    print("valid")
else:
    print("invalid")
contact = input("enter the phone number")
if contact.isdigit():
    print("valid")
else:
    print("invalid")
|
[
"[email protected]"
] | |
bc0756bb7498f6d8d340d9b366dacbe3fe73d3be
|
1043dc391dcf64fa0af7cc559a8cbd93faa9d2b3
|
/models/schedule.py
|
294b753b5539578ba40f5701798b5e7a8f3a0c8b
|
[] |
no_license
|
Zelwak/dip_school
|
a08c77a91f1a354898ff6e6463d7d7705a4bfcd9
|
4b2580ed8aa474fbb61226518ed9ada76a476570
|
refs/heads/master
| 2020-03-10T00:30:43.660883 | 2018-04-11T11:33:46 | 2018-04-11T11:33:46 | 129,083,427 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,714 |
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Gestion de lycée
# Copyright (C) 2018 Gestion de lycée.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, fields, api, _
# from odoo.exceptions import Warning
#----------------------------------------------------------
# Agenda
#----------------------------------------------------------
class school_schedule(models.Model):
"""Model pour les Agenda de school."""
_name = "school.schedule"
_description = u"Agenda school"
_order = "date_start"
name = fields.Char(string="Nom", required=True)
date_start = fields.Datetime(string="Horaire début")
date_stop = fields.Datetime(string="Horaire fin")
room = fields.Char(string="Salle de classe")
classroom_id = fields.Many2one('school.classroom', string="Classe")
course_id = fields.Many2one('school.course', string="Cours")
|
[
"[email protected]"
] | |
bf8ae2d6cb7e2b76c836b4f5da0d012a0906d5b1
|
ebd8a1d0295b6c3569ca9ba63931be5300944023
|
/53-Maximum Subarray.py
|
be67fceae2077a76e6877d915269e58dd0d6b4d5
|
[] |
no_license
|
WinterDing/leetcode
|
5428100adeb3687f16590ae6a07a4c2452cbaf7f
|
ae7d18964ec94658a76867eb78e4e39691cdf9fe
|
refs/heads/master
| 2021-09-04T06:02:15.276521 | 2018-01-16T14:40:26 | 2018-01-16T14:40:26 | 115,075,821 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 571 |
py
|
"""
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
For example, given the array [-2,1,-3,4,-1,2,1,-5,4],
the contiguous subarray [4,-1,2,1] has the largest sum = 6.
"""
class Solution:
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
maxCurr, maxSofar = 0, 0
for num in nums:
maxCurr = max(0, maxCurr+num)
maxSofar = max(maxSofar, maxCurr)
return maxSofar if maxSofar>0 else max(nums)
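# Quick check with the example from the docstring (expected output: 6):
if __name__ == "__main__":
    print(Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))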
|
[
"[email protected]"
] | |
f987feb18613c6233b691f17ba56786226d2de05
|
0acba64de767f0c8c67932335f577218e4a3b12f
|
/src/facial_landmarks_detection.py
|
c7cfd59950462eccede311e443b0548aa8d0bd47
|
[] |
no_license
|
gaethanL/Computer-Pointer-Controller
|
29e4cb493d79f32c62c4ba2d2031087d31b32be3
|
bff45869a67991a2c448b962ecd77284ad4871cf
|
refs/heads/main
| 2023-01-02T16:57:44.729873 | 2020-10-26T20:52:19 | 2020-10-26T20:52:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,005 |
py
|
import os
import sys
import logging as log
from openvino.inference_engine import IENetwork, IECore
import cv2
class Model_Facial_Land:
def __init__(self, model_name, device, extensions=None):
self.model_weights=model_name+'.bin'
self.model_structure=model_name+'.xml'
self.device=device
self.extensions=extensions
try:
self.core=IECore()
self.network=IENetwork(model=self.model_structure, weights=self.model_weights)
            network_layers=self.network.layers.keys()
            supported_layers=self.core.query_network(network=self.network,device_name=self.device).keys()
            # Default to False so the check below cannot raise a NameError
            # when every layer is supported.
            ext_required=False
            for layer in network_layers:
                if layer not in supported_layers:
                    ext_required=True
                    break
            if self.extensions!=None and "CPU" in self.device and ext_required:
self.core.add_extension(self.extensions, self.device)
for layer in network_layers:
if layer in supported_layers:
pass
else:
msg="Layer extension doesn't support all layers"
log.error(msg)
raise Exception(msg)
except Exception as e:
raise ValueError("Could not Initialise the network. Have you enterred the correct model path?")
self.input_name=next(iter(self.network.inputs))
self.input_shape=self.network.inputs[self.input_name].shape
self.output_name=next(iter(self.network.outputs))
self.output_shape=self.network.outputs[self.output_name].shape
def load_model(self):
self.exec_network=self.core.load_network(self.network, self.device)
return
def predict(self, image):
input_img=self.preprocess_input(image)
input_dict={self.input_name:input_img}
outputs=self.exec_network.infer(input_dict)[self.output_name]
coords=self.preprocess_outputs(outputs,(image.shape[1],image.shape[0]))
return self.draw_outputs(coords,image)
def draw_outputs(self, coords, image):
left_eye_min=(coords[0]-15,coords[1]-15)
left_eye_max=(coords[0]+15,coords[1]+15)
right_eye_min=(coords[2]-15,coords[3]-15)
right_eye_max=(coords[2]+15,coords[3]+15)
left_eye=image[left_eye_min[1]:left_eye_max[1],left_eye_min[0]:left_eye_max[0]]
right_eye=image[right_eye_min[1]:right_eye_max[1],right_eye_min[0]:right_eye_max[0]]
cv2.rectangle(image,left_eye_min,left_eye_max,(200, 150, 18),2)
cv2.rectangle(image,right_eye_min,right_eye_max,(200, 150, 18),2)
        eye_coords=[[left_eye_min[0],left_eye_min[1],left_eye_max[0],left_eye_max[1]],
                [right_eye_min[0],right_eye_min[1],right_eye_max[0],right_eye_max[1]]]
return eye_coords,left_eye,right_eye,image
def preprocess_input(self, image):
preprocessed_frame=cv2.resize(image,(self.input_shape[3],self.input_shape[2]))
preprocessed_frame=preprocessed_frame.transpose((2,0,1))
return preprocessed_frame.reshape(1,*preprocessed_frame.shape)
def preprocess_outputs(self,outputs,dim):
left_eye_x=int(outputs[0][0]*dim[0])
left_eye_y=int(outputs[0][1]*dim[1])
right_eye_x=int(outputs[0][2]*dim[0])
right_eye_y=int(outputs[0][3]*dim[1])
return (left_eye_x,left_eye_y,right_eye_x,right_eye_y)
|
[
"[email protected]"
] | |
989b0dc1105771ebbafb8e5abc5c3aab056a2f37
|
f6f28c7a87d8632596e364e0822e88fdaeab1774
|
/app/urls.py
|
3346da098eba312cccb0ea904d0385bbffdb8ffb
|
[] |
no_license
|
Jagdeep02/Flixer
|
3b9ec9071cee11925c5431a4db808aedfbeb955e
|
6744bb9390d1b883e72baa8fcc04f2e5fdec4828
|
refs/heads/main
| 2023-08-16T03:03:38.478261 | 2021-10-21T12:19:46 | 2021-10-21T12:19:46 | 419,700,466 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 641 |
py
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.home, name="home"),
path('video/<slug>/',views.view_video, name="video"),
path('become_pro/',views.become_pro, name="become_pro"),
path('charge/',views.charge, name="charge"),
path('login/',views.login_attempt, name="login_attempt"),
path('register/',views.sign, name="register_attempt"),
path('logout/',views.logout_attempt, name="logout_attempt"),
path('about/', views.about, name="about"),
path('contact', views.contact, name="contact"),
path('detail/', views.detail, name="detail"),
path('edit/', views.edit, name="edit"),
]
|
[
"[email protected]"
] | |
63bd83adcb7f9700378098678b26a5b39b3d7a86
|
719853613b5b96f02072be1fde736d883e799f02
|
/server/intrinsic/management/commands/intrinsic_import_ec2.py
|
a6bd9aeef70b6ccd8ad1fe6dbb896cfbc53d5e39
|
[
"MIT",
"CC-BY-2.0"
] |
permissive
|
anmolkabra/opensurfaces
|
5ba442123586533a93eb29890fa1694e3efdbfe8
|
a42420083a777d7e1906506cc218f681c5cd145b
|
refs/heads/master
| 2020-03-20T01:11:05.182880 | 2018-06-13T14:55:45 | 2018-06-13T14:55:45 | 137,068,945 | 0 | 0 |
MIT
| 2018-06-12T12:32:53 | 2018-06-12T12:32:52 | null |
UTF-8
|
Python
| false | false | 1,615 |
py
|
import glob
import time
import timeit
from django.core.management.base import BaseCommand
from intrinsic.tasks import import_ec2_task
class Command(BaseCommand):
args = ''
help = 'Import image algorithms run on ec2'
def handle(self, *args, **options):
indir = '/vol/completed-tasks'
scheduled_fnames = {}
sleep_time = 2
total_count = None
start_time = None
first = True
while True:
files = glob.glob("%s/*.pickle" % indir)
c = 0
for fname in files:
if fname in scheduled_fnames:
scheduled_fnames[fname] -= sleep_time
else:
scheduled_fnames[fname] = 0
if scheduled_fnames[fname] <= 0:
import_ec2_task.delay(fname)
scheduled_fnames[fname] = 3600
c += 1
# ignore the first time
if first:
total_count = 0
start_time = timeit.default_timer()
rate = "N/A"
first = False
else:
total_count += c
time_elapsed = max(timeit.default_timer() - start_time, 1e-3)
rate = "%.3f" % (float(total_count) / time_elapsed)
if c > 0:
sleep_time = max(sleep_time // 2, 2)
else:
sleep_time = min(sleep_time * 2, 3600)
time.sleep(sleep_time)
print "%s new files (average %s files/s); sleep for %s seconds..." % (
c, rate, sleep_time)
|
[
"[email protected]"
] | |
5ae7c68e192a1862b1a8cf601434c9bfe89785ab
|
e1c2f85eeba924c976825005031cf8223d6b5de2
|
/Quiz/QuizProblem8.py
|
08bf8e53d89b65f82bc6f1c0e26b4996e2dc4ac3
|
[
"MIT"
] |
permissive
|
roshan2M/edX--mitX--introduction-to-computer-science-and-programming-with-python
|
887b49590d5fba06dfc266b4ad768279362bb817
|
81a7247e8442feddd624b5dbcd70cde1b58d2965
|
refs/heads/master
| 2021-01-20T09:14:23.798211 | 2017-08-27T23:37:04 | 2017-08-27T23:37:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 671 |
py
|
# Quiz, Problem 8
def satisfiesF(L):
"""
Assumes L is a list of strings
Assume function f is already defined for you and it maps a string to a Boolean
Mutates L such that it contains all of the strings, s, originally in L such
that f(s) returns True, and no other elements. Remaining elements in L
should be in the same order.
Returns the length of L after mutation
"""
newList = []
for elt in range(len(L)):
word = L[elt]
if f(word) == True:
newList.append(word)
L[:] = newList
return len(L)
# run_satisfiesF(L, satisfiesF)  # invoked by the grader, which supplies L and run_satisfiesF
def f(s):
return 'a' in s
|
[
"[email protected]"
] | |
86e8f4f54ed4fb50a015d53414330f61c6dcb94a
|
2b87db0ada3c2d016df891761855e9a9dc5b81fe
|
/Sorting/heapSort.py
|
18d406d72437c4f2b43ee938933219edc34e6afc
|
[] |
no_license
|
drdcs/Algorithms-and-System-Design
|
9b201ba47bda14ca8fcd9aeddcfee760b3194f2d
|
656fafbd758c30f5bd7a73a7d677562d5ae1f39f
|
refs/heads/main
| 2023-04-11T10:25:02.992297 | 2021-04-22T05:57:10 | 2021-04-22T05:57:10 | 329,364,127 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 705 |
py
|
def heapify(arr, n, i):
    largest = i
    l = 2 * i + 1
    r = 2 * i + 2
    # check if the left child of the root exists and is
    # greater than the root
    if l < n and arr[largest] < arr[l]:
        largest = l
    # check if the right child exists and is greater than
    # the largest found so far
    if r < n and arr[largest] < arr[r]:
        largest = r
    if largest != i:
        arr[i], arr[largest] = arr[largest], arr[i]
        # keep sifting down: the swap may break the heap property below
        heapify(arr, n, largest)
def heapSort(arr):
n = len(arr)
for i in range(n//2-1, -1, -1):
heapify(arr, n, i)
for i in range(n-1, 0, -1):
arr[i], arr[0] = arr[0], arr[i]
heapify(arr, i, 0)
arr = [12, 11, 13, 5, 6, 7]
heapSort(arr)
print(arr)
|
[
"[email protected]"
] | |
413f76a927f5fa192a64286142e225ee997cda97
|
01a758b24ef4787f0d6eca545da9c89b03a8e905
|
/Python/Math/PolarCoordinates.py
|
d06897444c448bb0f337235124c61baf87015145
|
[] |
no_license
|
Alladasaisandhya/Hackerrank-Solutions
|
a369c67050106edf64bb281754b3e729a6777347
|
a83b3989745559cb464853df370b2cbf62be90f8
|
refs/heads/master
| 2021-05-21T16:05:49.158817 | 2019-08-23T05:30:51 | 2019-08-23T05:30:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 264 |
py
|
'''
Title : Polar Coordinates
Subdomain : Math
Domain : Python
Author : Darpan Zope
Created :
Problem : https://www.hackerrank.com/challenges/polar-coordinates/problem
'''
import cmath
z = complex(input())
p = cmath.polar(z)
print(p[0])
print(p[1])
|
[
"[email protected]"
] | |
08d112384c3e3747a8ab175efc365d4512faab60
|
a058124907db4ae8a21e3b4ba31b3fdd36359869
|
/22-maps.py
|
8c6c74d93f64e02d52e85036487a5220bea72427
|
[] |
no_license
|
serenelc/python-tutorials
|
44a12f0379cfb089ef938f6a7bb5de4f5befe2c1
|
673c2fba59b4e2f53e41daa32a71ad16c6637770
|
refs/heads/master
| 2020-07-08T23:41:35.407117 | 2019-09-02T10:29:06 | 2019-09-02T10:29:06 | 203,813,558 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 302 |
py
|
from random import shuffle
desserts = ['brownies', 'cookies', 'sticky toffee pudding', 'fudge']
def jumble(word):
anagram = list(word)
shuffle(anagram)
return '-'.join(anagram)
print(jumble("serene"))
[print(x) for x in map(jumble, desserts)]
print([jumble(word) for word in desserts])
|
[
"[email protected]"
] | |
6b0eb808039ef82503efbdeb23c34da08d129912
|
c0edcec7a7ce57be81a0fd41b2d79566d1b273bd
|
/accounts/views.py
|
71f861ab48b4823ef62c5840a30b0e98f0cff5a6
|
[] |
no_license
|
churles/djangoblog
|
c06fa0d28b308584d4834e9f89843b44d6647787
|
582bc10d5666b7bc4445d0eb31edb63bbc411807
|
refs/heads/master
| 2023-08-26T23:07:36.567815 | 2021-10-30T14:36:50 | 2021-10-30T14:36:50 | 418,908,434 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout
# Create your views here.
def signup_view(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
#login user
login(request, user)
return redirect('articles:list')
else:
form = UserCreationForm()
return render(request,'accounts/signup.html',{
'form':form
})
def login_view(request):
if request.method == 'POST':
form = AuthenticationForm(data=request.POST)
if form.is_valid():
#login user
user = form.get_user()
login(request, user)
if 'next' in request.POST:
return redirect(request.POST.get('next'))
else:
return redirect('articles:list')
else:
form = AuthenticationForm()
return render(request,'accounts/login.html',{
'form':form
})
def logout_view(request):
if request.method == 'POST':
logout(request)
return redirect('articles:list')
|
[
"[email protected]"
] | |
8ba756d5821de4f45cb0caa15f230ce83762ded8
|
2d248650ba1155c098b8b5203a87a97a88605a0f
|
/puthon_files/lowest-common-ancest.py
|
b816292d1f5e07060d12d0dd324342dffe963485
|
[] |
no_license
|
aashishravindran/interview_prep
|
1cc4a065b75c7d14af87e94fc90cc1dbdc1d7bce
|
fea3d75e99433dc67a7b4e303e25ed0ad9343f5f
|
refs/heads/master
| 2020-09-29T07:46:17.675544 | 2019-12-09T23:59:55 | 2019-12-09T23:59:55 | 226,991,084 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 802 |
py
|
# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-search-tree/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Lowest Common Ancestor of a Binary Search Tree
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
p=p.val
q=q.val
queue=root
while queue:
parent=queue.val
if p>parent and q>parent:
queue=queue.right
elif p<parent and q<parent:
queue=queue.left
else:
return queue
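# Illustrative check (TreeNode mirrors the commented definition above):
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root, p, q = TreeNode(6), TreeNode(2), TreeNode(8)
    root.left, root.right = p, q
    print(Solution().lowestCommonAncestor(root, p, q).val)  # -> 6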
|
[
"[email protected]"
] | |
229b55da64ecc77c40932990c5a8570ef38675ed
|
7424688cfe38acc35f9da038809184e980b1fb10
|
/app.py
|
052ec9ab91b3c1e80393ac71ad5b0d9558082334
|
[] |
no_license
|
ractf/challenge-server
|
a298b72f7c2f49ab666f2ba6887d0087fc37bc6a
|
b3ae61e52d234f7c8f1e6e28cc8bac4a58d42d13
|
refs/heads/master
| 2022-11-06T04:25:34.620606 | 2020-06-22T20:20:19 | 2020-06-22T20:20:19 | 274,226,382 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,024 |
py
|
import os
import docker
from flask import Flask, request, abort
from redis import Redis
import settings
import routes
from challenges import build_image
app = Flask(__name__)
@app.before_request
def check_auth():
if 'Authorization' not in request.headers or request.headers['Authorization'] != settings.API_KEY:
abort(403)
@app.cli.command('prestart')
def prestart():
for challenge in os.listdir('challenges'):
build_image(challenge)
@app.cli.command('reset')
def reset():
redis = Redis(host=settings.REDIS['ip'], port=settings.REDIS['port'], password=settings.REDIS['password'],
db=settings.REDIS['db'], charset='utf-8', decode_responses=True)
redis.flushdb()
client = docker.from_env()
for container in client.containers.list():
if container.name != "cadvisor":
print(f'Stopping {container.id}')
container.stop(timeout=5)
app.register_blueprint(routes.blueprint, url_prefix='/')
if __name__ == '__main__':
app.run()
|
[
"[email protected]"
] | |
dcc849540ef37d3c2d9dcc18f26ce2d3aa9cdf30
|
2ac87412ae229582cb53ea114b3bce1d37990b2d
|
/motif_sentences.py
|
243913eca6863cd1fc737ff745e28a4ab67a0fc7
|
[
"MIT"
] |
permissive
|
kaclark/DHS_intergenic_analysis
|
60fa278d60d02b241d84c2c1f4230170f155cdb5
|
5ae1dc1c257ae9dc0e001e07402bebd8e31f0f60
|
refs/heads/master
| 2022-12-02T11:42:22.804514 | 2020-08-05T19:45:00 | 2020-08-05T19:45:00 | 274,950,385 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
from Bio import motifs
import pandas as pd
motif_sentences = []
fh = open("data/tf_data/jaspar.txt")
for m in motifs.parse(fh, "jaspar"):
motif_sentences.append([m.name, m.consensus])
df = pd.DataFrame(motif_sentences)
df.to_csv("data/tf_data/motif_sentences.csv", index=False, header=False)
|
[
"[email protected]"
] | |
7367fe81d42fb80be0b61758180d011e05a6ffe1
|
bb082ce7880b931064af61e26ee325796ebb905a
|
/pages/settingspage.py
|
b8bdf8d7b76e95a7689d337e95b2c35a7dfe14b0
|
[
"MIT"
] |
permissive
|
juraisa/bird-bot
|
36fe8f467e6729399758a7392bb89de0c54decb5
|
f868596a78690e55e6b7e7b8d4bf58d7f5b492cf
|
refs/heads/master
| 2021-05-23T01:16:47.906006 | 2020-04-14T04:49:31 | 2020-04-14T04:49:31 | 253,168,887 | 0 | 0 |
MIT
| 2020-04-05T06:18:15 | 2020-04-05T06:18:14 | null |
UTF-8
|
Python
| false | false | 6,751 |
py
|
from PyQt5 import QtCore, QtGui, QtWidgets
from utils import return_data, write_data
import sys, platform, settings


def no_abort(a, b, c):
    # Forward uncaught exceptions to the default hook so PyQt5 does not
    # swallow tracebacks and abort silently.
    sys.__excepthook__(a, b, c)


sys.excepthook = no_abort


class SettingsPage(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(SettingsPage, self).__init__(parent)
        self.setupUi(self)

    def setupUi(self, settingspage):
        self.settingspage = settingspage
        self.settingspage.setAttribute(QtCore.Qt.WA_StyledBackground, True)
        self.settingspage.setGeometry(QtCore.QRect(60, 0, 1041, 601))
        self.settingspage.setStyleSheet("QComboBox::drop-down { border: 0px;}QComboBox::down-arrow { image: url(:/images/down_icon.png); width: 14px; height: 14px;}QComboBox{ padding: 1px 0px 1px 3px;}QLineEdit:focus { border: none; outline: none;}")

        # Card that holds every settings control. Point sizes are scaled to
        # 75% on non-macOS platforms to keep the layout consistent.
        self.settings_card = QtWidgets.QWidget(self.settingspage)
        self.settings_card.setGeometry(QtCore.QRect(30, 70, 471, 501))
        font = QtGui.QFont()
        font.setPointSize(13) if platform.system() == "Darwin" else font.setPointSize(13*.75)
        font.setFamily("Arial")
        self.settings_card.setFont(font)
        self.settings_card.setStyleSheet("background-color: #232323;border-radius: 20px;border: 1px solid #2e2d2d;")

        self.webhook_edit = QtWidgets.QLineEdit(self.settings_card)
        self.webhook_edit.setGeometry(QtCore.QRect(30, 50, 411, 21))
        self.webhook_edit.setFont(font)
        self.webhook_edit.setStyleSheet("outline: 0;border: 1px solid #5D43FB;border-width: 0 0 2px;color: rgb(234, 239, 239);")
        self.webhook_edit.setPlaceholderText("Webhook Link")
        self.webhook_edit.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)

        self.webhook_header = QtWidgets.QLabel(self.settings_card)
        self.webhook_header.setGeometry(QtCore.QRect(20, 10, 101, 31))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(18) if platform.system() == "Darwin" else font.setPointSize(18*.75)
        font.setWeight(50)
        self.webhook_header.setFont(font)
        self.webhook_header.setStyleSheet("color: rgb(212, 214, 214);border: none;")
        self.webhook_header.setText("Webhook")

        self.savesettings_btn = QtWidgets.QPushButton(self.settings_card)
        self.savesettings_btn.setGeometry(QtCore.QRect(190, 450, 86, 32))
        font = QtGui.QFont()
        font.setPointSize(13) if platform.system() == "Darwin" else font.setPointSize(13*.75)
        font.setFamily("Arial")
        self.savesettings_btn.setFont(font)
        self.savesettings_btn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.savesettings_btn.setStyleSheet("color: #FFFFFF;background-color: #5D43FB;border-radius: 10px;border: 1px solid #2e2d2d;")
        self.savesettings_btn.setText("Save")
        self.savesettings_btn.clicked.connect(self.save_settings)

        # Webhook notification toggles.
        self.browser_checkbox = QtWidgets.QCheckBox(self.settings_card)
        self.browser_checkbox.setGeometry(QtCore.QRect(30, 90, 111, 20))
        self.browser_checkbox.setStyleSheet("color: #FFFFFF;border: none;")
        self.browser_checkbox.setText("Browser Opened")

        self.order_checkbox = QtWidgets.QCheckBox(self.settings_card)
        self.order_checkbox.setGeometry(QtCore.QRect(30, 120, 221, 20))
        self.order_checkbox.setStyleSheet("color: #FFFFFF;border: none;")
        self.order_checkbox.setText("Order Placed")

        self.paymentfailed_checkbox = QtWidgets.QCheckBox(self.settings_card)
        self.paymentfailed_checkbox.setGeometry(QtCore.QRect(30, 150, 121, 20))
        self.paymentfailed_checkbox.setStyleSheet("color: #FFFFFF;border: none;")
        self.paymentfailed_checkbox.setText("Payment Failed")

        self.general_header = QtWidgets.QLabel(self.settings_card)
        self.general_header.setGeometry(QtCore.QRect(20, 180, 101, 31))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(18) if platform.system() == "Darwin" else font.setPointSize(18*.75)
        font.setWeight(50)
        self.general_header.setFont(font)
        self.general_header.setStyleSheet("color: rgb(212, 214, 214);border: none;")
        self.general_header.setText("General")

        self.onfailed_checkbox = QtWidgets.QCheckBox(self.settings_card)
        self.onfailed_checkbox.setGeometry(QtCore.QRect(30, 220, 221, 20))
        self.onfailed_checkbox.setStyleSheet("color: #FFFFFF;border: none;")
        self.onfailed_checkbox.setText("Open browser on payment failed")

        self.proxies_header = QtWidgets.QLabel(self.settingspage)
        self.proxies_header.setGeometry(QtCore.QRect(30, 10, 81, 31))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(22) if platform.system() == "Darwin" else font.setPointSize(22*.75)
        font.setWeight(50)
        self.proxies_header.setFont(font)
        self.proxies_header.setStyleSheet("color: rgb(234, 239, 239);")
        self.proxies_header.setText("Settings")

        self.set_data()
        QtCore.QMetaObject.connectSlotsByName(settingspage)

    def set_data(self):
        # Load persisted settings and mirror them into the UI controls.
        # (The local `settings` dict shadows the imported settings module
        # within this method only.)
        settings = return_data("./data/settings.json")
        self.webhook_edit.setText(settings["webhook"])
        if settings["webhookonbrowser"]:
            self.browser_checkbox.setChecked(True)
        if settings["webhookonorder"]:
            self.order_checkbox.setChecked(True)
        if settings["webhookonfailed"]:
            self.paymentfailed_checkbox.setChecked(True)
        if settings["browseronfailed"]:
            self.onfailed_checkbox.setChecked(True)
        self.update_settings(settings)

    def save_settings(self):
        settings = {"webhook": self.webhook_edit.text(),
                    "webhookonbrowser": self.browser_checkbox.isChecked(),
                    "webhookonorder": self.order_checkbox.isChecked(),
                    "webhookonfailed": self.paymentfailed_checkbox.isChecked(),
                    "browseronfailed": self.onfailed_checkbox.isChecked()}
        write_data("./data/settings.json", settings)
        self.update_settings(settings)
        QtWidgets.QMessageBox.information(self, "Bird Bot", "Saved Settings")

    def update_settings(self, settings_data):
        # Push the saved values onto the shared `settings` module so the rest
        # of the application reads the current configuration.
        settings.webhook, settings.webhook_on_browser, settings.webhook_on_order, settings.webhook_on_failed, settings.browser_on_failed = settings_data["webhook"], settings_data["webhookonbrowser"], settings_data["webhookonorder"], settings_data["webhookonfailed"], settings_data["browseronfailed"]
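

# --- Illustrative usage sketch (editor's addition, not part of the original
# file): SettingsPage is a plain QWidget, so it can be previewed standalone.
# This assumes ./data/settings.json exists, since setupUi() loads it via
# set_data().
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    page = SettingsPage()
    page.show()
    sys.exit(app.exec_())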
|
[
"[email protected]"
] | |
152e6de373d3950907e1041d754d5e444fc78569
|
c71e5115b895065d2abe4120799ffc28fa729086
|
/procon-archive/atcoder.jp/abc129/abc129_c/Main.py
|
7e58a42e6fbe088cfc45aa4987d551c677b95895
|
[] |
no_license
|
ken0105/competitive-programming
|
eb82f92a7b7ad0db601ea341c1441de6c6165064
|
f918f85a0ea6dfbe9cac3ef835f80503bb16a75d
|
refs/heads/master
| 2023-06-05T09:55:25.264731 | 2021-06-29T14:38:20 | 2021-06-29T14:38:20 | 328,328,825 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 413 |
py
|
# AtCoder ABC129 C: count the ways to climb n steps taking 1 or 2 at a time
# while never landing on one of the m broken steps, modulo 1e9+7.
# (The original file imported bisect helpers that were never used.)
if __name__ == "__main__":
    n, m = map(int, input().split())
    a = set()
    for i in range(m):
        a.add(int(input()))  # indices of broken steps
    dp = [0] * (n + 1)
    dp[0] = 1  # one way to stand at the bottom
    for i in range(1, n + 1):
        # A broken step keeps dp[i] = 0, cutting off every path through it.
        if i not in a and i >= 2:
            dp[i] = (dp[i-1] + dp[i-2])
        elif i not in a and i == 1:
            dp[i] = dp[i-1]
    print(dp[n] % 1000000007)
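
# Worked example (editor's note): for n=6, m=1 with step 3 broken,
# dp = [1, 1, 2, 0, 2, 2, 4], so the program prints 4 — every valid climb
# must land on step 2 and jump over the broken step to step 4.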
|
[
"[email protected]"
] | |
7c0088fc02afdb9058cbb4fdf743efb97e73fad2
|
f76f83dcdfdbfe254ab67e26b244475d2e810819
|
/conttudoweb/inventory/migrations/0016_auto_20200723_1607.py
|
3116c549689509a9211c9601d3096006c7d686c2
|
[] |
no_license
|
ConTTudOweb/ConTTudOwebProject
|
fda13ece406e1904d6efe4c3ceebd30e3d168eae
|
18c3b8da1f65714eb01a420a0dbfb5305b9461f3
|
refs/heads/master
| 2022-12-14T22:05:00.243429 | 2021-03-15T23:32:41 | 2021-03-15T23:32:41 | 138,349,067 | 1 | 3 | null | 2022-12-08T07:49:21 | 2018-06-22T21:19:03 |
Python
|
UTF-8
|
Python
| false | false | 436 |
py
|
# Generated by Django 3.0.8 on 2020-07-23 19:07
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('inventory', '0015_auto_20200723_1600'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.CharField(max_length=120, unique=True, verbose_name='descrição'),
        ),
    ]
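
# Editor's note: this migration makes Product.description unique at the
# database level; the resulting model field is equivalent to
#   description = models.CharField(verbose_name='descrição', max_length=120, unique=True)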
|
[
"[email protected]"
] |