Dataset schema (one row per source file; ⌀ marks columns that contain null values):

column               | dtype           | range / classes
blob_id              | stringlengths   | min 40, max 40
directory_id         | stringlengths   | min 40, max 40
path                 | stringlengths   | min 3, max 616
content_id           | stringlengths   | min 40, max 40
detected_licenses    | sequencelengths | min 0, max 112
license_type         | stringclasses   | 2 values
repo_name            | stringlengths   | min 5, max 115
snapshot_id          | stringlengths   | min 40, max 40
revision_id          | stringlengths   | min 40, max 40
branch_name          | stringclasses   | 777 values
visit_date           | timestamp[us]   | 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date        | timestamp[us]   | 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date       | timestamp[us]   | 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id            | int64           | 4.92k to 681M, ⌀
star_events_count    | int64           | 0 to 209k
fork_events_count    | int64           | 0 to 110k
gha_license_id       | stringclasses   | 22 values
gha_event_created_at | timestamp[us]   | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀
gha_created_at       | timestamp[us]   | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀
gha_language         | stringclasses   | 149 values
src_encoding         | stringclasses   | 26 values
language             | stringclasses   | 1 value
is_vendor            | bool            | 2 classes
is_generated         | bool            | 2 classes
length_bytes         | int64           | min 3, max 10.2M
extension            | stringclasses   | 188 values
content              | stringlengths   | min 3, max 10.2M
authors              | sequencelengths | min 1, max 1
author_id            | stringlengths   | min 1, max 132
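A minimal sketch of inspecting rows with this schema; it assumes the table has been exported to a local Parquet file, and the file name "code_files.parquet" is a placeholder rather than anything named by the dataset itself.

import pandas as pd
# Load the exported rows (hypothetical file name) and peek at the metadata columns.
df = pd.read_parquet("code_files.parquet")
print(df[["repo_name", "path", "language", "length_bytes"]].head())
# The `content` column holds the raw source text of each file.
print(df.loc[0, "content"][:200])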
9471cea9b5d59083fe068b87504590f4027f45eb | ad8b30544480ba1e5f5b1cb2dec2aa77a644e8d2 | /BOJ/1238_파티.py | 47e4a42bd4b524d433bb52d123cba305548dc8c0 | [] | no_license | hyunwoojeong123/Algorithm | 79abc82d944ca60342a7f8b6fc44fac20ac55123 | 0baaf3222fbbec699ffbec5d4cc680067cf293fb | refs/heads/master | 2023-07-10T18:28:51.934005 | 2021-08-18T01:51:23 | 2021-08-18T01:51:23 | 284,403,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py |
import sys, heapq
INF = sys.maxsize
input = sys.stdin.readline
def dij(x):
    # fill the d array entirely with INF
    d = [INF]*n
    # push [0, start] onto the heap
    heapq.heappush(heap, [0,x])
    # d[start] = 0
    d[x] = 0
    # repeat until the heap is empty
    while heap:
        # w, x: the distance travelled so far, and the current node
        w,x = heapq.heappop(heap)
        # nw, nx: the weight of the edge x -> nx, and a node connected to x
        for nw,nx in a[x]:
            # add w to nw: the distance from the start to nx
            nw += w
            # if this is smaller than the value recorded so far
            if nw < d[nx]:
                # update the distance and push the pair onto the heap
                d[nx] = nw
                heapq.heappush(heap,[nw,nx])
    return d
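# A common refinement (a sketch, not part of the original solution): a node
# can be pushed several times, so stale heap entries may be popped. Skipping
# them early keeps the loop tight:
#
#     w, x = heapq.heappop(heap)
#     if w > d[x]:
#         continue  # an older, worse entry; x was already finalized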
n,m,t = map(int, input().split())
a = [[] for _ in range(n)]   # adjacency list: a[x] holds [weight, neighbor] pairs
heap = []
for i in range(m):
x,y,w = map(int, input().split())
a[x-1].append([w,y-1])
ans = [0]*n
back = dij(t-1)   # distances from the party town t back to every home
for i in range(n):
    d = dij(i)
    # total travel time: home -> party town, plus party town -> home
    ans[i] = d[t-1] + back[i]
print(max(ans))
| [
"[email protected]"
] | |
461aefbbf762874b01a14c2240b32b4c3530d3e3 | 2dad8b725583afd64e2f381acb6a299350a069c4 | /daftar/migrations/0012_auto_20200204_1527.py | 939f809acd5ce9faedf92e3b7bb279d387c8aa89 | [] | no_license | s4-hub/winback | 39b0b354690201a7906ce77f46c1172ddcb21110 | abfb22b6ed5d523b93ea5cdb982ac3066a63ab7c | refs/heads/master | 2020-12-22T12:27:54.416189 | 2020-02-11T10:50:30 | 2020-02-11T10:50:30 | 233,515,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,233 | py |
# Generated by Django 2.2.7 on 2020-02-04 08:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('daftar', '0011_auto_20200204_1445'),
]
operations = [
migrations.DeleteModel(
name='Pekerjaan',
),
migrations.AddField(
model_name='daftar',
name='lokasi',
field=models.CharField(choices=[('1101', 'ACEH SELATAN'), ('1102', 'ACEH TENGGARA'), ('1103', 'ACEH TIMUR'), ('1104', 'ACEH TENGAH'), ('1105', 'ACEH BARAT'), ('1106', 'ACEH BESAR'), ('1107', 'PIDIE'), ('1108', 'ACEH UTARA'), ('1109', 'SIMEULUE'), ('1110', 'ACEH SINGKIL'), ('1111', 'BIREUEN'), ('1112', 'ACEH BARAT DAYA'), ('1113', 'GAYO LUES'), ('1114', 'ACEH JAYA'), ('1115', 'NAGAN RAYA'), ('1116', 'ACEH TAMIANG'), ('1117', 'BENER MERIAH'), ('1118', 'PIDIE JAYA'), ('1171', 'KOTA BANDA ACEH'), ('1172', 'KOTA SABANG'), ('1173', 'KOTA LHOKSEUMAWE'), ('1174', 'KOTA LANGSA'), ('1175', 'KOTA SUBULUSSALAM')], default=1, max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='daftar',
name='pekerjaan1',
field=models.CharField(choices=[('P001', 'PETANI/PEKEBUN'), ('P002', 'PETERNAK'), ('P003', 'NELAYAN/PERIKANAN'), ('P004', 'TRANSPORTASI'), ('P005', 'BURUH HARIAN LEPAS'), ('P006', 'BURUH TANI/PERKEBUNAN'), ('P007', 'BURUH NELAYAN/PERIKANAN'), ('P008', 'BURUH PETERNAKAN'), ('P009', 'PEMBANTU RUMAH TANGGA'), ('P010', 'TUKANG CUKUR'), ('P011', 'TUKANG LISTRIK'), ('P012', 'TUKANG BATU'), ('P013', 'TUKANG KAYU'), ('P014', 'TUKANG SOL SEPATU'), ('P015', 'TUKANG LAS/PANDAI BESI'), ('P016', 'TUKANG JAHIT'), ('P017', 'TUKANG GIGI'), ('P018', 'PENATA RIAS'), ('P019', 'PENATA BUSANA'), ('P020', 'PENATA RAMBUT'), ('P021', 'MEKANIK'), ('P022', 'SENIMAN'), ('P023', 'TABIB'), ('P024', 'PARAJI'), ('P025', 'PERANCANG BUSANA'), ('P026', 'PENTERJEMAH'), ('P027', 'IMAM MESJID'), ('P028', 'PENDETA'), ('P029', 'PASTOR'), ('P030', 'WARTAWAN'), ('P031', 'USTADZ/MUBALIGH'), ('P032', 'JURU MASAK'), ('P033', 'PROMOTOR ACARA'), ('P034', 'DOSEN'), ('P035', 'GURU'), ('P036', 'PENGACARA'), ('P037', 'NOTARIS'), ('P038', 'ARSITEK'), ('P039', 'KONSULTAN'), ('P040', 'DOKTER'), ('P041', 'BIDAN'), ('P042', 'APOTEKER'), ('P043', 'PSIKIATER/PSIKOLOG'), ('P044', 'PENYIAR RADIO'), ('P045', 'PELAUT'), ('P046', 'PENELITI'), ('P047', 'SOPIR'), ('P048', 'PIALANG'), ('P049', 'PARANORMAL'), ('P050', 'PEDAGANG'), ('P051', 'BIARAWATI'), ('P052', 'WIRASWASTA'), ('P053', 'MITRA GOJEK'), ('P054', 'MITRA GRAB'), ('P055', 'MITRA UBER'), ('P056', 'PEKERJA MAGANG'), ('P057', 'SISWA KERJA PRAKTEK'), ('P058', 'TENAGA HONORER (SELAIN PENYELENGGARA NEGARA)'), ('P059', 'NARAPIDANA DALAM PROSES ASIMILASI'), ('P060', 'ATLET'), ('P061', 'ARTIS'), ('P062', 'JURU PARKIR'), ('P063', 'TUKANG PIJAT'), ('P064', 'PEMANDU LAGU'), ('P065', 'PENDAMPING DESA'), ('P066', 'BURUH BONGKAR MUAT/BAGASI'), ('P067', 'RELAWAN TAGANA/RELAWAN BENCANA'), ('P068', 'TUKANG SAMPAH'), ('P069', 'PEMULUNG'), ('P070', 'MARBOT MESJID'), ('P071', 'MITRA GOJEK-GO LIFE')], default=1, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='daftar',
name='pekerjaan2',
field=models.CharField(blank=True, choices=[('P001', 'PETANI/PEKEBUN'), ('P002', 'PETERNAK'), ('P003', 'NELAYAN/PERIKANAN'), ('P004', 'TRANSPORTASI'), ('P005', 'BURUH HARIAN LEPAS'), ('P006', 'BURUH TANI/PERKEBUNAN'), ('P007', 'BURUH NELAYAN/PERIKANAN'), ('P008', 'BURUH PETERNAKAN'), ('P009', 'PEMBANTU RUMAH TANGGA'), ('P010', 'TUKANG CUKUR'), ('P011', 'TUKANG LISTRIK'), ('P012', 'TUKANG BATU'), ('P013', 'TUKANG KAYU'), ('P014', 'TUKANG SOL SEPATU'), ('P015', 'TUKANG LAS/PANDAI BESI'), ('P016', 'TUKANG JAHIT'), ('P017', 'TUKANG GIGI'), ('P018', 'PENATA RIAS'), ('P019', 'PENATA BUSANA'), ('P020', 'PENATA RAMBUT'), ('P021', 'MEKANIK'), ('P022', 'SENIMAN'), ('P023', 'TABIB'), ('P024', 'PARAJI'), ('P025', 'PERANCANG BUSANA'), ('P026', 'PENTERJEMAH'), ('P027', 'IMAM MESJID'), ('P028', 'PENDETA'), ('P029', 'PASTOR'), ('P030', 'WARTAWAN'), ('P031', 'USTADZ/MUBALIGH'), ('P032', 'JURU MASAK'), ('P033', 'PROMOTOR ACARA'), ('P034', 'DOSEN'), ('P035', 'GURU'), ('P036', 'PENGACARA'), ('P037', 'NOTARIS'), ('P038', 'ARSITEK'), ('P039', 'KONSULTAN'), ('P040', 'DOKTER'), ('P041', 'BIDAN'), ('P042', 'APOTEKER'), ('P043', 'PSIKIATER/PSIKOLOG'), ('P044', 'PENYIAR RADIO'), ('P045', 'PELAUT'), ('P046', 'PENELITI'), ('P047', 'SOPIR'), ('P048', 'PIALANG'), ('P049', 'PARANORMAL'), ('P050', 'PEDAGANG'), ('P051', 'BIARAWATI'), ('P052', 'WIRASWASTA'), ('P053', 'MITRA GOJEK'), ('P054', 'MITRA GRAB'), ('P055', 'MITRA UBER'), ('P056', 'PEKERJA MAGANG'), ('P057', 'SISWA KERJA PRAKTEK'), ('P058', 'TENAGA HONORER (SELAIN PENYELENGGARA NEGARA)'), ('P059', 'NARAPIDANA DALAM PROSES ASIMILASI'), ('P060', 'ATLET'), ('P061', 'ARTIS'), ('P062', 'JURU PARKIR'), ('P063', 'TUKANG PIJAT'), ('P064', 'PEMANDU LAGU'), ('P065', 'PENDAMPING DESA'), ('P066', 'BURUH BONGKAR MUAT/BAGASI'), ('P067', 'RELAWAN TAGANA/RELAWAN BENCANA'), ('P068', 'TUKANG SAMPAH'), ('P069', 'PEMULUNG'), ('P070', 'MARBOT MESJID'), ('P071', 'MITRA GOJEK-GO LIFE')], max_length=50),
),
]
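# Applied with the standard Django workflow (assuming the usual manage.py
# layout of this project):
#     python manage.py migrate daftar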
| [
"[email protected]"
] | |
1edc3c43ccedae5b0f41c6a0f086d3ab554dd904 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_w_M_to_W_focus_Zok_div/ch016/wiColorJ/Add2Loss/Sob_k09_s001_Mae_s001_good/pyr_Tcrop255_p20_j15/pyr_1s/L7/step09_1side_L7.py | c64e2e1afb0db5021bb85517a443b2008fa6e4b9 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,124 | py |
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                         ### path of the currently running step10_b.py
code_exe_path_element = code_exe_path.split("\\")                  ### split the path to find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2")            ### the level at which kong_model2 sits
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### resolve the kong_model2 dir
import sys                                                         ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_w_M_to_Wx_Wy_Wz_combine import I_w_M_to_W
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
color_jit = Color_jit(do_ratio=0.6)
use_what_gen_op = I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) )
use_what_train_step = Train_step_I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit )
use_hid_ch = 16
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
pyramid_1side_1 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
pyramid_1side_2 = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
pyramid_1side_3 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]
pyramid_1side_4 = [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]
pyramid_1side_5 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
pyramid_1side_6 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1]
pyramid_1side_7 = [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1]
pyramid_1side_8 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
#########################################################################################
ch032_pyramid_1side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_7, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=7, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_8, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
#########################################################################################
###############################################################################################################################################################################################
if __name__ == "__main__":
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_1side_4
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
| [
"[email protected]"
] | |
ecb797705d4380b014ac224de86a2b3ca7fbe0de | 029b18378b54856f6982cf3a73982b5285c2ff57 | /assignment1/cs231n/classifiers/linear_classifier.py | 22b624caa7e1dbd171409817f28da4d614335f49 | [] | no_license | Allensmile/cs231n_Convolutional-Neural-Networks-for-Visual-Recognition | 15f07693757a439776e7da22f2ac4e2cf6f78611 | bbae799b71c533ffb52ff9248ce9c92cfa76be6e | refs/heads/cs231n-0821 | 2021-01-01T19:05:11.608175 | 2016-08-22T04:39:20 | 2016-08-22T04:39:20 | 98,504,340 | 1 | 0 | null | 2017-07-27T07:01:01 | 2017-07-27T07:01:01 | null | UTF-8 | Python | false | false | 6,325 | py |
import numpy as np
from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
class LinearClassifier(object):
def __init__(self):
self.W = None
  def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100, batch_size=200, verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
num_train, dim = X.shape
    num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
    # always (re)initialize W, rather than lazily initializing it
    self.W = 0.001 * np.random.randn(dim, num_classes) * np.sqrt(dim / 2.0)
# Run stochastic gradient descent to optimize W
loss_history = []
#Try using momentum update
v=0 #init to zero
mu=0.5 #int to 0.5, and increase it later.
for it in xrange(num_iters):
      if it % 100 == 0:
          mu += 0.05
      #if it >= 1500:
      #    learning_rate *= 0.7
      if mu >= 0.99:
          mu = 0.99
X_batch = None
y_batch = None
#########################################################################
# TODO: #
# Sample batch_size elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in #
      # y_batch; after sampling X_batch should have shape (batch_size, dim)   #
# and y_batch should have shape (batch_size,) #
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
#pass
#1. get the batch for this iteration.
batch_indices=np.random.choice(num_train,batch_size,replace=True)
X_batch=X[batch_indices] #('X_batch.shape:', (200L, 3073L))
#print("X_batch.shape:",X_batch.shape)
y_batch=y[batch_indices] #('y_batch.shape:', 200)
#print("y_batch.shape:",len(y_batch))
#loss_vectorized, grad_vectorized = svm_loss_vectorized(self.W, X_batch, y_batch, reg)
#self.W+=-learning_rate*grad_vectorized
#########################################################################
# END OF YOUR CODE #
#########################################################################
#2. evaluate loss and gradient
loss, grad = self.loss(X_batch, y_batch, reg)
loss_history.append(loss)
#3. perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
#pass
#self.W-=learning_rate*grad
v=mu*v-learning_rate*grad
self.W+=v
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print 'iteration %d / %d: loss %f' % (it, num_iters, loss)
return loss_history
def predict(self, X):
"""
Use the trained weights of this linear classifier to predict labels for
data points.
Inputs:
    - X: N x D array of training data. Each row is a D-dimensional point.
Returns:
- y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
"""
    y_pred = np.zeros(X.shape[0])
###########################################################################
# TODO: #
# Implement this method. Store the predicted labels in y_pred. #
###########################################################################
#pass
scores=X.dot(self.W) #1.get scores
y_pred=np.argmax(scores,axis=1) #2.find the index for highest value in the row
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
def loss(self, X_batch, y_batch, reg):
"""
Compute the loss function and its derivative.
Subclasses will override this.
Inputs:
- X_batch: A numpy array of shape (N, D) containing a minibatch of N
data points; each point has dimension D.
- y_batch: A numpy array of shape (N,) containing labels for the minibatch.
- reg: (float) regularization strength.
Returns: A tuple containing:
- loss as a single float
- gradient with respect to self.W; an array of the same shape as W
"""
pass
class LinearSVM(LinearClassifier):
""" A subclass that uses the Multiclass SVM loss function """
def loss(self, X_batch, y_batch, reg):
return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
class Softmax(LinearClassifier):
""" A subclass that uses the Softmax + Cross-entropy loss function """
def loss(self, X_batch, y_batch, reg):
return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
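if __name__ == "__main__":
  # Minimal smoke test (not part of the original assignment code): train a
  # LinearSVM on random data. Assumes the cs231n package is importable so
  # that svm_loss_vectorized is defined; hyperparameters are illustrative only.
  X = np.random.randn(500, 3073)
  y = np.random.randint(10, size=500)
  svm = LinearSVM()
  losses = svm.train(X, y, learning_rate=1e-7, reg=2.5e4, num_iters=200, verbose=True)
  print 'final loss: %f' % losses[-1]
  print 'train accuracy: %f' % np.mean(svm.predict(X) == y)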
| [
"[email protected]"
] | |
50587e5954677e11ceae53851f78af9e5bcfa727 | 458ff3c3611bb969f96ff3d3e15108fa9ec88316 | /quiz/migrations/0004_auto_20201209_2057.py | 9ddaf47129300087df96cc291ab7fda68b428ff2 | [] | no_license | mayank5044/Navigus | 8164809d87c5f3112565549229327ea20d090898 | aa03a99583efe4b7e9e7d1cb4a450e559f36d475 | refs/heads/master | 2023-08-11T01:30:21.115338 | 2021-10-09T06:47:24 | 2021-10-09T06:47:24 | 414,919,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('student', '0002_remove_student_status'),
('quiz', '0003_result'),
]
operations = [
migrations.AlterField(
model_name='result',
name='exam',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.Course'),
),
migrations.AlterField(
model_name='result',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.Student'),
),
]
| [
"[email protected]"
] | |
baed34dff5b6291a245a5b0525a858aeba9dc2b8 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/FwupdPlugin/FirmwareClass.py | 41cfcb07d1b2a17378eab46a1dbbc60611507f68 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,303 | py |
# encoding: utf-8
# module gi.repository.FwupdPlugin
# from /usr/lib64/girepository-1.0/FwupdPlugin-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Fwupd as __gi_repository_Fwupd
import gobject as __gobject
class FirmwareClass(__gi.Struct):
"""
:Constructors:
::
FirmwareClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parse = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
tokenize = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
to_string = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
write = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(FirmwareClass), '__module__': 'gi.repository.FwupdPlugin', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'FirmwareClass' objects>, '__weakref__': <attribute '__weakref__' of 'FirmwareClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7feb1afdfd60>, 'parse': <property object at 0x7feb1afdfe50>, 'write': <property object at 0x7feb1afdff40>, 'to_string': <property object at 0x7feb1afe2090>, 'tokenize': <property object at 0x7feb1afe2180>, 'padding': <property object at 0x7feb1afe2270>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(FirmwareClass)
| [
"[email protected]"
] | |
944140c7bba8ea526c0edc0595e380ce65ebcc98 | 690e8f0a853c1f27bae688f021e8c27e62ca9613 | /auth/auth/settings.py | 9d8a145494269d4ab9cebf00c17bb3722ad9be69 | [] | no_license | MaksimLion/django-rest-authentication | d77c8b59e89c80a9f8c98fb7b038bebb431ffc0e | 8445354f761d0624a97faa490d8872be5994da5e | refs/heads/master | 2020-05-04T15:16:51.320819 | 2019-04-03T08:22:16 | 2019-04-03T08:22:16 | 179,233,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | """
Django settings for auth project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g(e@i27l0_x85jylbz*$s8ld&+!+td179gwggfrvwope#(dpj9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'authentication'
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',  # default all API endpoints to token auth
],
}
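# With TokenAuthentication as the default, clients need an endpoint that
# issues tokens. A sketch of the usual wiring (it belongs in urls.py, so it
# is shown here only as a comment):
#
#     from django.urls import path
#     from rest_framework.authtoken import views
#
#     urlpatterns += [path('api-token-auth/', views.obtain_auth_token)]
#
# Requests then authenticate with the header:  Authorization: Token <key>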
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'auth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'auth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
d1bdc816ef14dbb9698a37af082dbc2f665ef045 | 434b6556038ad326ffaa8584a8a91edf8ad5c037 | /BST-1/CheckBST-1.py | 6677cdfe3c533653ccfc336a478ee2090bd1405b | [] | no_license | Pranav016/DS-Algo-in-Python | 60702460ad6639dd3e8a1fdc3caf0821b8e0b4c2 | 5557e371ccdf801d78ba123ca83c0dd47b3bdb3b | refs/heads/master | 2023-01-23T08:29:32.186861 | 2020-11-01T17:14:12 | 2020-11-01T17:14:12 | 284,651,382 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | import queue
class BinaryTreeNode:
def __init__(self,data):
self.data=data
self.left=None
self.right=None
def minimumNode(root):
if root is None:
return 1000000
leftMin=minimumNode(root.left)
rightMin=minimumNode(root.right)
return min(root.data,leftMin, rightMin)
def maximumNode(root):
if root is None:
return -1000000
leftMax=maximumNode(root.left)
rightMax=maximumNode(root.right)
return max(root.data,leftMax,rightMax)
def isBST(root):
if root is None:
return True
leftMax=maximumNode(root.left)
rightMin=minimumNode(root.right)
if root.data<=leftMax or root.data>rightMin:
return False
leftBST=isBST(root.left)
rightBST=isBST(root.right)
return leftBST and rightBST
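# An O(n) alternative (a sketch, not part of the original solution): pass the
# allowed value range down the tree instead of rescanning each subtree with
# minimumNode/maximumNode. Duplicates go to the right, matching isBST above.
def isBSTRange(root, low=-float('inf'), high=float('inf')):
    if root is None:
        return True
    if not (low <= root.data < high):
        return False
    return (isBSTRange(root.left, low, root.data) and
            isBSTRange(root.right, root.data, high))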
def buildLevelTree(levelorder):
index = 0
length = len(levelorder)
if length<=0 or levelorder[0]==-1:
return None
root = BinaryTreeNode(levelorder[index])
index += 1
q = queue.Queue()
q.put(root)
while not q.empty():
currentNode = q.get()
leftChild = levelorder[index]
index += 1
if leftChild != -1:
leftNode = BinaryTreeNode(leftChild)
currentNode.left =leftNode
q.put(leftNode)
rightChild = levelorder[index]
index += 1
if rightChild != -1:
rightNode = BinaryTreeNode(rightChild)
currentNode.right =rightNode
q.put(rightNode)
return root
# Main
levelOrder = [int(i) for i in input().strip().split()]
root = buildLevelTree(levelOrder)
print(isBST(root)) | [
"[email protected]"
] | |
7b52bfbc8d308b9c74054bab3dae598640de80b8 | cd486d096d2c92751557f4a97a4ba81a9e6efebd | /16/addons/plugin.video.moviedb/resources/modules/sgate.py | 005822886551a515de00ccbea6b9c9440d95515e | [] | no_license | bopopescu/firestick-loader-kodi-data | 2f8cb72b9da67854b64aa76f720bdad6d4112926 | e4d7931d8f62c94f586786cd8580108b68d3aa40 | refs/heads/master | 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,146 | py |
# -*- coding: utf-8 -*-
# moviedb Series Gate TV SHOW Module by: Blazetamer
import urllib,urllib2,re,xbmcplugin,xbmcgui,sys,urlresolver,xbmc,os,xbmcaddon,main
from metahandler import metahandlers
try:
from addon.common.addon import Addon
except:
from t0mm0.common.addon import Addon
addon_id = 'plugin.video.moviedb'
#addon = Addon(addon_id, sys.argv)
addon = main.addon
try:
from addon.common.net import Net
except:
from t0mm0.common.net import Net
net = Net()
try:
import StorageServer
except:
import storageserverdummy as StorageServer
# Cache
cache = StorageServer.StorageServer("MovieDB", 0)
mode = addon.queries['mode']
url = addon.queries.get('url', '')
name = addon.queries.get('name', '')
thumb = addon.queries.get('thumb', '')
ext = addon.queries.get('ext', '')
console = addon.queries.get('console', '')
dlfoldername = addon.queries.get('dlfoldername', '')
favtype = addon.queries.get('favtype', '')
mainimg = addon.queries.get('mainimg', '')
season = addon.queries.get('season', '')
episode = addon.queries.get('episode', '')
show = addon.queries.get('show', '')
# Global Stuff
cookiejar = addon.get_profile()
cookiejar = os.path.join(cookiejar,'cookies.lwp')
settings = xbmcaddon.Addon(id=addon_id)
artwork = xbmc.translatePath(os.path.join('http://cliqaddon.com/support/commoncore/tvaddons/moviedb/showgunart/images/', ''))
fanart = xbmc.translatePath(os.path.join('http://cliqaddon.com/support/commoncore/tvaddons/moviedb/showgunart/images/fanart/fanart.jpg', ''))
grab=metahandlers.MetaData()
net = Net()
basetv_url ='http://seriesgate.me/'
def LogNotify(title,message,times,icon):
xbmc.executebuiltin("XBMC.Notification("+title+","+message+","+times+","+icon+")")
def SGCATS():
addDir('All Series Gate TV Shows','http://seriesgate.me/tvshows/','sgindex',artwork + 'all.jpg','','dir')
addDir('[COLOR gold]Search TV Shows[/COLOR]','http://seriesgate.me/search/indv_episodes/','searchsgtv',artwork + 'search.jpg','','dir')
main.AUTO_VIEW('')
def SGINDEX (url):
link = net.http_GET(url).content
match=re.compile('<a href = "(.+?)"><img src = "(.+?)" height=".+?/><div class = "_tvshow_title">(.+?)</div>').findall(link)
if len(match) > 0:
for url,sitethumb,name in match:
inc = 0
#movie_name = fullyear[:-6]
#year = fullyear[-6:]
#movie_name = movie_name.decode('UTF-8','ignore')
data = main.GRABTVMETA(name,'')
thumb = data['cover_url']
yeargrab = data['year']
year = str(yeargrab)
dlfoldername = name
favtype = 'tvshow'
#main.addDir(name,url,'sgepisodelist',thumb,data,favtype)
addDir(name,basetv_url + url,'sgepisodelist',thumb,data,favtype)
#main.addSDir(movie_name +'('+ year +')',basetv_url + url,'episodes',thumb,year,favtype)
nmatch=re.compile('<span class="currentpage">.+?</span></li><li><a href="(.+?)">(.+?)</a></li><li>').findall(link)
if len(nmatch) > 0:
for pageurl,pageno in nmatch:
addDir('Page'+ pageno,basetv_url + pageurl,'sgindex',artwork +'nextpage.jpg','','dir')
main.AUTO_VIEW('movies')
def SGEPISODES(url,name,thumb):
params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb}
dlfoldername = name
mainimg = thumb
show = name
link = net.http_GET(url).content
matchurl=re.compile('<div class="season_page">\n\t\t\t\t\t\t<a href="(.+?)" >(.+?)</a>').findall(link)
for url,snumber in matchurl:
favtype = 'episodes'
#main.addDir(snumber,url,'sgepisodelist',thumb,'',favtype)
main.addEPNOCLEANDir(snumber,url,thumb,'sgepisodelist',show,dlfoldername,mainimg,'','')
main.AUTO_VIEW('movies')
def SGEPISODELIST(url,name,thumb):
params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb}
dlfoldername = name
mainimg = thumb
show = name
url2=url
link = net.http_GET(url).content
#match=re.compile('<a href="(.+?)">» S(.+?) - E(.+?) (.+?)</a><span>(.+?)</span>').findall(link)
match=re.compile('<a href="(.+?)">» S(.+?) - E(.+?) (.+?)</a>').findall(link)
for url,season,epnum,epname in match:
s = 'S'+season
e = 'E'+epnum
se = s+e
name = se + ' ' + epname
favtype = 'episodes'
main.addEPNOCLEANDir(name,url2+'/season'+season+'/episode'+epnum+'/searchresult',thumb,'sgtvlinkpage',show,dlfoldername,mainimg,season,epnum)
main.AUTO_VIEW('movies')
'''def SGEPISODELIST(url,name,thumb):
params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb}
#dlfoldername = name
mainimg = thumb
link = net.http_GET(url).content
match=re.compile('<div class=".+?" style=".+?" >Season(.+?) Episode(.+?)- <span><a href = ".+?">.+?</a></span></div><div class=".+?" >(.+?)</div><div class = ".+?"></div><div style=".+?"><a href="(.+?)"><img src="(.+?)" width=".+?" height=".+?" alt=".+?" title = "(.+?)" ></a>').findall(link)
for season,epnum, date, url, thumb, epname in match:
s = 'S'+season
e = 'E'+epnum
se = s+e
name = se + ' ' + epname
favtype = 'episodes'
main.addEPNOCLEANDir(name,url,thumb,'sgtvlinkpage',show,dlfoldername,mainimg,season,epnum)
main.AUTO_VIEW('movies') '''
def SGTVLINKPAGE(url,name,thumb,mainimg):
params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb, 'dlfoldername':dlfoldername,'mainimg':mainimg}
inc = 0
linkbase = 'http://seriesgate.me'
mainimg = mainimg
#link = net.http_GET(url).content
#match=re.compile('href="(.+?)">More Links').findall(link)
#for surl in match:
#url = linkbase + surl
#url = url +'searchresult/'
print 'host url look is' + url
if inc < 50:
link = net.http_GET(url).content
#hostmatch=re.compile('<a rel="nofollow" href="(.+?)" TARGET="_blank" >(.+?)</a>').findall(link)
hostmatch=re.compile('hre_watch_tt" href="(.+?)">').findall(link)
#for urls,sourcename in hostmatch:
for urls in hostmatch:
print 'Pre HMF url is ' +urls
hmf = urlresolver.HostedMediaFile(urls)
##########################################
print 'URLS is ' +urls
if hmf:
#try:
host = hmf.get_host()
hthumb = main.GETHOSTTHUMB(host)
#dlurl = urlresolver.resolve(vidUrl)
data = main.GRABTVMETA(name,'')
thumb = data['cover_url']
favtype = 'movie'
hostname = main.GETHOSTNAME(host)
try:
main.addTVDLDir(name+hostname,urls,'vidpage',hthumb,data,dlfoldername,favtype,mainimg)
inc +=1
except:
continue
#Start Search Function
def _get_keyboard( default="", heading="", hidden=False ):
""" shows a keyboard and returns a value """
keyboard = xbmc.Keyboard( default, heading, hidden )
keyboard.doModal()
if ( keyboard.isConfirmed() ):
return unicode( keyboard.getText(), "utf-8" )
return default
def SEARCHSGTV(url):
searchUrl = url
vq = _get_keyboard( heading="Searching for TV Shows" )
if ( not vq ): return False, 0
title = urllib.quote_plus(vq)
searchUrl += title + '&criteria=tag'
print "Searching URL: " + searchUrl
SGSEARCHINDEX(searchUrl)
main.AUTO_VIEW('movies')
def SGSEARCHINDEX (url):
link = net.http_GET(url).content
match=re.compile('</a><div class = ".+?" style=".+?"><div class = ".+?"><a href = "(.+?)">(.+?)</a>').findall(link)
#match=re.compile('<a href="(.+?)">» (.+?) - (.+?) (.+?)</a>').findall(link)
if len(match) > 0:
for url,name in match:
#for url,season,episode,name in match:
inc = 0
#movie_name = fullyear[:-6]
#year = fullyear[-6:]
#movie_name = movie_name.decode('UTF-8','ignore')
data = main.GRABTVMETA(name,'')
thumb = data['cover_url']
yeargrab = data['year']
year = str(yeargrab)
dlfoldername = name
favtype = 'tvshow'
addDir(name,basetv_url + url,'sgepisodelist',thumb,data,favtype)
#main.addSDir(movie_name +'('+ year +')',basetv_url + url,'episodes',thumb,year,favtype)
nmatch=re.compile('<span class="currentpage">.+?</span></li><li><a href="(.+?)">(.+?)</a></li><li>').findall(link)
if len(nmatch) > 0:
for pageurl,pageno in nmatch:
addDir('Page'+ pageno,basetv_url + pageurl,'movieindex',artwork +'nextpage.jpg','','dir')
main.AUTO_VIEW('movies')
def addDir(name,url,mode,thumb,labels,favtype):
#name = nameCleaner(name)
params = {'url':url, 'mode':mode, 'name':name, 'thumb':thumb, 'dlfoldername':dlfoldername, 'mainimg':mainimg}
contextMenuItems = []
gomode=mode
contextMenuItems.append(('[COLOR red]Add to CLIQ Favorites[/COLOR]', 'XBMC.RunPlugin(%s)' % addon.build_plugin_url({'mode': 'addsttofavs', 'name': name,'url': url,'thumb': thumb,'gomode': gomode})))
contextMenuItems.append(('[COLOR red]Remove From CLIQ Favorites[/COLOR]', 'XBMC.RunPlugin(%s)' % addon.build_plugin_url({'mode': 'removestfromfavs', 'name': name,'url': url,'thumb': thumb,'gomode': gomode})))
sitethumb = thumb
sitename = name
fanart = 'http://cliqaddon.com/support/commoncore/tvaddons/moviedb/showgunart/images/fanart/fanart.jpg'
try:
name = data['title']
thumb = data['cover_url']
fanart = data['backdrop_url']
except:
name = sitename
if thumb == '':
thumb = sitethumb
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=thumb)
liz.setInfo( type="Video", infoLabels=labels )
if favtype == 'movie':
contextMenuItems.append(('[COLOR gold]Movie Information[/COLOR]', 'XBMC.Action(Info)'))
elif favtype == 'tvshow':
contextMenuItems.append(('[COLOR gold]TV Show Information[/COLOR]', 'XBMC.Action(Info)'))
elif favtype == 'episode':
contextMenuItems.append(('[COLOR gold]Episode Information[/COLOR]', 'XBMC.Action(Info)'))
liz.addContextMenuItems(contextMenuItems, replaceItems=False)
try:
liz.setProperty( "Fanart_Image", labels['backdrop_url'] )
except:
liz.setProperty( "Fanart_Image", fanart )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
| [
"[email protected]"
] | |
76f5af84fbd35b8169fa79d19c04247b0d84fd00 | 504c9c2b0d29d946079e11644761ad354fc79715 | /_build/jupyter_execute/B_資訊設會必修的12堂Python通識課_何敏煌_博碩_2019/ch08.py | f2a128622353e864fd6e20aae0e1d2efe70f9b56 | [] | no_license | AaronCHH/jb_pysqlite | 2b5b79327778705f8a941b0c5628e9eba0f5be2a | 832a70b936800a380c1da0884eed9f7fa0dc2aee | refs/heads/main | 2023-03-12T23:17:22.534445 | 2021-03-06T15:51:10 | 2021-03-06T15:51:10 | 340,876,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,816 | py |
# Ch08 Working with databases
# Display the student score table
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from score;")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
# Enter student scores
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("Student ID: ")
chi = input("Chinese score: ")
eng = input("English score: ")
mat = input("Math score: ")
his = input("History score: ")
geo = input("Geography score: ")
sql_str = "insert into score(stuno, chi, eng, mat, his, geo) values('{}',{},{},{},{},{});".format(
stuno, chi, eng, mat, his, geo)
conn.execute(sql_str)
conn.commit()
conn.close()
# Enter student records
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("Student ID: ")
while stuno!="-1":
    name = input("Name: ")
    gender = input("Gender: ")
    clsno = input("Class no.: ")
    tel = input("Phone: ")
    pid = input("Parent ID no.: ")
    sql_str = "insert into studata(stuno, name, gender, clsno, tel, pid) values('{}','{}','{}','{}','{}','{}');".format(
        stuno, name, gender, clsno, tel, pid)
    conn.execute(sql_str)
    stuno = input("Student ID: ")
conn.commit()
conn.close()
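# The same insert written with sqlite3 parameter binding, a safer variant of
# the format() call above (identical schema assumed):
#
#     conn.execute(
#         "insert into score(stuno, chi, eng, mat, his, geo) values(?,?,?,?,?,?);",
#         (stuno, chi, eng, mat, his, geo))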
# Display the student profile table
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from studata;")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
# Display the full score table (with total and average)
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select stuno, chi, eng, mat, his, geo, chi+eng+mat+his+geo, (chi+eng+mat+his+geo)/5 from score;")
print("StuNo\tChi\tEng\tMat\tHis\tGeo\tTotal\tAvg")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
# Display the average of each subject
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select stuno, avg(chi), avg(eng), avg(mat), avg(his), avg(geo) from score;")
print("StuNo\tChi\tEng\tMat\tHis\tGeo")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
# Display scores by student name (note: no join condition, so this yields a cartesian product)
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select studata.name, score.chi, score.eng from score, studata;")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
# Display scores by student name -- using INNER JOIN
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select studata.name, score.chi, score.eng from score inner join studata on score.stuno = studata.stuno;")
for row in rows:
for field in row:
print("{}\t".format(field), end="")
print()
conn.close()
# Score editing program
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
stuno = input("Enter the student ID whose scores you want to edit: ")
rows = conn.execute("select stuno, chi, eng, mat, his, geo from score where stuno='{}'".format(stuno))
row = rows.fetchone()
if row is not None:
    print("StuNo\tChi\tEng\tMat\tHis\tGeo")
    for field in row:
        print("{}\t".format(field), end="")
    print()
    chi = input("Chinese = ")
    eng = input("English = ")
    mat = input("Math = ")
    his = input("History = ")
    geo = input("Geography = ")
sql_str = "update score set stuno='{}', chi={}, eng={}, mat={}, his={}, geo={} where stuno='{}';".format(
stuno, chi, eng, mat, his, geo, stuno)
conn.execute(sql_str)
conn.commit()
conn.close()
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
rows = conn.execute("select * from score;")
print(type(rows))
print(dir(rows))
print(type(rows.fetchone()))
conn.close()
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
cur = conn.cursor()
cur.execute("select * from score;")
print(type(cur.fetchone()))
print(cur.fetchone())
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
cur = conn.cursor()
cur.execute("select * from score;")
first3_records = cur.fetchmany(3)
all_records = cur.fetchall()
print(first3_records)
print(all_records)
conn.close()
import sqlite3
dbfile = "school.db"
conn = sqlite3.connect(dbfile)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("select * from score;")
rows = cur.fetchall()
print(rows[0].keys())
print(type(rows))
print(type(rows[0]))
print("學號\t國文\t英文")
for row in rows:
print("{}\t{}\t{}".format(row['stuno'], row['chi'], row['eng'])) | [
"[email protected]"
] | |
c63a4270aaeefd93934953d35eb9f8f3316bf194 | 8e52c27f1b2823db67db4438b2b7e22c18254eca | /gluon/gluoncv2/models/alexnet.py | 626397e4fda1fb1630db7caf578a3149a8c93c39 | [
"MIT"
] | permissive | earhian/imgclsmob | 5582f5f2d4062b620eecc28d5c4c9245fea47291 | c87c0942420876941868c016211073dec4392e4d | refs/heads/master | 2020-04-12T02:13:55.258601 | 2018-12-17T20:38:19 | 2018-12-17T20:38:19 | 162,242,486 | 1 | 0 | MIT | 2018-12-18T06:40:42 | 2018-12-18T06:40:41 | null | UTF-8 | Python | false | false | 8,438 | py |
"""
AlexNet, implemented in Gluon.
Original paper: 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
"""
__all__ = ['AlexNet', 'alexnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class AlexConv(HybridBlock):
"""
AlexNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
**kwargs):
super(AlexConv, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=True,
in_channels=in_channels)
self.activ = nn.Activation('relu')
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.activ(x)
return x
class AlexDense(HybridBlock):
"""
AlexNet specific dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(AlexDense, self).__init__(**kwargs)
with self.name_scope():
self.fc = nn.Dense(
units=out_channels,
weight_initializer="normal",
in_units=in_channels)
self.activ = nn.Activation('relu')
self.dropout = nn.Dropout(rate=0.5)
def hybrid_forward(self, F, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class AlexOutputBlock(HybridBlock):
"""
AlexNet specific output block.
Parameters:
----------
in_channels : int
Number of input channels.
classes : int
Number of classification classes.
"""
def __init__(self,
in_channels,
classes,
**kwargs):
super(AlexOutputBlock, self).__init__(**kwargs)
mid_channels = 4096
with self.name_scope():
self.fc1 = AlexDense(
in_channels=in_channels,
out_channels=mid_channels)
self.fc2 = AlexDense(
in_channels=mid_channels,
out_channels=mid_channels)
self.fc3 = nn.Dense(
units=classes,
weight_initializer="normal",
in_units=mid_channels)
def hybrid_forward(self, F, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
class AlexNet(HybridBlock):
"""
AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
kernel_sizes : list of list of int
Convolution window sizes for each unit.
strides : list of list of int or tuple/list of 2 int
Strides of the convolution for each unit.
paddings : list of list of int or tuple/list of 2 int
Padding value for convolution layer for each unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
kernel_sizes,
strides,
paddings,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(AlexNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix='stage{}_'.format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
stage.add(AlexConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_sizes[i][j],
strides=strides[i][j],
padding=paddings[i][j]))
in_channels = out_channels
stage.add(nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0))
self.features.add(stage)
self.output = nn.HybridSequential(prefix='')
self.output.add(nn.Flatten())
in_channels = in_channels * 6 * 6
self.output.add(AlexOutputBlock(
in_channels=in_channels,
classes=classes))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_alexnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join('~', '.mxnet', 'models'),
**kwargs):
"""
Create AlexNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [[64], [192], [384, 256, 256]]
kernel_sizes = [[11], [5], [3, 3, 3]]
strides = [[4], [1], [1, 1, 1]]
paddings = [[2], [2], [1, 1, 1]]
net = AlexNet(
channels=channels,
kernel_sizes=kernel_sizes,
strides=strides,
paddings=paddings,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def alexnet(**kwargs):
"""
AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_alexnet(model_name="alexnet", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
alexnet,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != alexnet or weight_count == 61100840)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
b81f41162f15e29f8b808b8521fb7a1cf808a28c | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /sale_crm/__manifest__.py | 2ee01b74f3caf0f9facf83b0f86f5c29ce5119434cc21475ea | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py |
# -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sales order by case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
'depends': ['sale_management', 'crm'],
'data': [
'security/ir.model.access.csv',
'views/partner_views.xml',
'views/sale_order_views.xml',
'views/crm_lead_views.xml',
],
'auto_install': True,
}
| [
"[email protected]"
] | |
484644bbb880fdcf085f5e5d6641f10a5231a625 | 08bfc8a1f8e44adc624d1f1c6250a3d9635f99de | /SDKs/swig/Examples/python/varargs/runme.py | fe1e28e881e6fbf9f29462308bf61efca2de0209 | [] | no_license | Personwithhat/CE_SDKs | cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02 | 7afbd2f7767c9c5e95912a1af42b37c24d57f0d4 | refs/heads/master | 2020-04-09T22:14:56.917176 | 2019-07-04T00:19:11 | 2019-07-04T00:19:11 | 160,623,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py |
version https://git-lfs.github.com/spec/v1
oid sha256:f2ae65c42f8358298afb53751299f1957fed0218e6a36f16022a63aa74858f95
size 705
| [
"[email protected]"
] | |
734748a7d00403f32a4378d028e322462aeeabe3 | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200421_python2/day14_py200606/tuple_1.py | c02f948ae526bf8546174abec4408a4458357833 | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py |
"""
tuple
read-only list
"""
# create a tuple
my_tuple1 = (1, 2, 3, 4, 5, 6, 7, 8)
print(my_tuple1)
my_tuple2 = ()
print(my_tuple2)
# create a tuple with only one element
my_tuple3 = (1)        # parentheses alone do NOT make a tuple: this is the int 1
print(my_tuple3)
my_tuple3 = ('abc')    # likewise, this is just the string 'abc'
print(my_tuple3)
my_tuple3 = 1
my_tuple3 = (1,)       # the trailing comma is what makes it a tuple
print(my_tuple3)
# create nested tuple
my_tuple4 = (1, 2, 3)
print(my_tuple4)
my_tuple4 = (('a','b'), 2, ('c','d'))
print(my_tuple4)
my_tuple4 = (('a','b'), ('c','d'), ('c','d'))
print(my_tuple4)
# create mix tuple
my_tuple5 = (['a','b'], ('c','d'), ('c','d'))
my_tuple5 = (['a','b'], ([1,2],'d'), ('c','d'))
# compare
# student profile collection
# pre-set scouting path
a = [(), (), ()]
# saving-slot in a game
b = ([], [], [])
# create a tuple by auto-packing
my_tuple = 1,2,'a'
print(my_tuple, type(my_tuple))
# unpacking
x, y, z = my_tuple
print(x)
print(y)
print(z)
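# extra demo (not part of the original lesson): a tuple really is a "read-only
# list" -- item assignment raises TypeError
demo_tuple = (1, 2, 3)
try:
    demo_tuple[0] = 99
except TypeError as e:
    print("tuples are immutable:", e)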
| [
"[email protected]"
] | |
1d74ef8462950a6d0001f53e3884fb6d831e1a36 | e7729e83f4caa78586a57de7c651b8e705e73305 | /app/flags/agents/flag_6.py | 45cf12711db4e7a9ee386fa13e8ae664ea83a475 | [] | no_license | dekoder/training | 873674b985a0f2f8d0e3740f3b2004da2e0af02d | c7509ae9d13ba1ebbd127aeb4dadcaf88ffd9749 | refs/heads/master | 2023-01-01T01:00:56.709768 | 2020-10-29T19:51:19 | 2020-10-29T19:51:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | name = 'Contact points'
challenge = 'Deploy a new agent, using a different contact point than your first agent'
extra_info = """If an adversary deploys all of their agents on a host using the same protocol, say HTTP, then when their agent is
detected and shut down, the defenders will likely close access to the C2 over that protocol. Therefore, an adversary
will want multiple agents on a host, each using a different protocol to talk to the C2. """
async def verify(services):
contacts = set([agent.contact for agent in await services.get('data_svc').locate('agents')])
if len(contacts) > 1:
return True
return False
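# Hypothetical illustration (not part of the flag itself): verify() reduces to
# "more than one distinct contact protocol among the deployed agents". Mock
# objects stand in for the real data-service agents; kept in comments so the
# module stays side-effect free when loaded by the framework.
#
#     class MockAgent:
#         def __init__(self, contact):
#             self.contact = contact
#
#     agents = [MockAgent('HTTP'), MockAgent('TCP')]
#     assert len({a.contact for a in agents}) > 1  # flag satisfied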
| [
"[email protected]"
] | |
13eafdf4cca9a65dfa2e6bccb504ab6397013fb7 | d5292505eb7b8b93eca743eb187a04ea58d6b6a3 | /venv/Lib/site-packages/networkx/algorithms/operators/unary.py | 71a6303f16c9db7a764e15fa906e9421b5937b55 | [
"Unlicense"
] | permissive | waleko/facerecognition | 9b017b14e0a943cd09844247d67e92f7b6d658fa | ea13b121d0b86646571f3a875c614d6bb4038f6a | refs/heads/exp | 2021-06-03T10:57:55.577962 | 2018-09-04T19:45:18 | 2018-09-04T19:45:18 | 131,740,335 | 5 | 1 | Unlicense | 2020-01-19T10:45:25 | 2018-05-01T17:10:42 | Python | UTF-8 | Python | false | false | 1,646 | py | """Unary operations on graphs"""
# Copyright (C) 2004-2018 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.utils import not_implemented_for
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['complement', 'reverse']
def complement(G):
"""Return the graph complement of G.
Parameters
----------
G : graph
A NetworkX graph
Returns
-------
GC : A new graph.
Notes
    -----
Note that complement() does not create self-loops and also
does not produce parallel edges for MultiGraphs.
Graph, node, and edge data are not propagated to the new graph.
"""
R = G.fresh_copy()
R.add_nodes_from(G)
R.add_edges_from(((n, n2)
for n, nbrs in G.adjacency()
for n2 in G if n2 not in nbrs
if n != n2))
return R
def reverse(G, copy=True):
"""Return the reverse directed graph of G.
Parameters
----------
G : directed graph
A NetworkX directed graph
copy : bool
If True, then a new graph is returned. If False, then the graph is
reversed in place.
Returns
-------
H : directed graph
The reversed G.
"""
if not G.is_directed():
raise nx.NetworkXError("Cannot reverse an undirected graph.")
else:
return G.reverse(copy=copy)
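# Minimal usage sketch (added for illustration; uses only the public networkx
# API assumed by the functions above):
#
#     G = nx.path_graph(4)      # 0-1-2-3
#     GC = complement(G)        # has an edge exactly where G does not
#     D = nx.DiGraph([(0, 1)])
#     H = reverse(D)            # H contains the edge (1, 0)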
| [
"[email protected]"
] | |
3e53ef1658987ecc2bc55594ea180866af5b582c | 7c8f6edd87cbee33cf998e9d2cc673fdcd39dd5a | /bots/Voodtwo/python/voodoo.py | 149d114051c2baf9989c6c3621aadc1cea98e223 | [] | no_license | tarehart/RLBotSpikeLeague | 89ce96417d8e201dcfc2f67ed5c1c81c7941131b | 311b3753e770cc642fdde87b6d4083db4072af88 | refs/heads/master | 2020-07-04T11:45:30.564487 | 2019-08-24T05:31:55 | 2019-08-24T05:31:55 | 202,278,639 | 0 | 3 | null | 2019-08-23T14:31:27 | 2019-08-14T05:09:20 | Python | UTF-8 | Python | false | false | 823 | py | from rlbot.agents.base_agent import BOT_CONFIG_AGENT_HEADER
from rlbot.agents.executable_with_socket_agent import ExecutableWithSocketAgent
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject
class Voodoo(ExecutableWithSocketAgent):
def get_port(self) -> int:
return 19231
def load_config(self, config_header: ConfigHeader):
self.executable_path = config_header.getpath('java_executable_path')
self.logger.info("Java executable is configured as {}".format(self.executable_path))
@staticmethod
def create_agent_configurations(config: ConfigObject):
params = config.get_header(BOT_CONFIG_AGENT_HEADER)
params.add_value('java_executable_path', str, default=None,
description='Relative path to the executable that runs java.')
| [
"[email protected]"
] | |
60a604d51abe28c15f4cbe9b135d530edf6eb603 | f87d1ce970ed414f62b90d79d8cf5a38556da592 | /repetory_api/migrations/0011_auto_20170609_1056.py | 670d2d9ff2b9b11106c16fd09dc242ea35f2ab32 | [] | no_license | zhangxu0307/repertory-rest | 331d58009c15e014d1a5e39447219817d77b08d9 | dc48a8e1e484254e1daa0712ffe66a52ec896ea7 | refs/heads/master | 2021-07-13T22:30:00.246833 | 2017-10-19T11:27:30 | 2017-10-19T11:27:30 | 107,536,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,712 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-09 02:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('repetory_api', '0010_auto_20170609_1044'),
]
operations = [
migrations.RemoveField(
model_name='material',
name='materailYear',
),
migrations.RemoveField(
model_name='material',
name='materialBand',
),
migrations.RemoveField(
model_name='material',
name='materialMark',
),
migrations.RemoveField(
model_name='material',
name='materialOriginal',
),
migrations.RemoveField(
model_name='material',
name='materialPostion',
),
migrations.RemoveField(
model_name='material',
name='materialState',
),
migrations.RemoveField(
model_name='material',
name='materialUnit',
),
migrations.AddField(
model_name='materialinput',
name='materailYear',
field=models.DateTimeField(blank=True, null=True, verbose_name='\u6750\u6599\u5e74\u4efd'),
),
migrations.AddField(
model_name='materialinput',
name='materialBand',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u54c1\u724c'),
),
migrations.AddField(
model_name='materialinput',
name='materialMark',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u578b\u53f7'),
),
migrations.AddField(
model_name='materialinput',
name='materialOriginal',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u539f\u4ea7\u5730'),
),
migrations.AddField(
model_name='materialinput',
name='materialPostion',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u4f4d\u7f6e'),
),
migrations.AddField(
model_name='materialinput',
name='materialState',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='\u6750\u6599\u72b6\u6001'),
),
migrations.AddField(
model_name='materialinput',
name='materialUnit',
field=models.DecimalField(decimal_places=4, default=0, max_digits=8, verbose_name='\u6750\u6599\u5355\u4f4d\u539f\u503c'),
),
]
| [
"[email protected]"
] | |
c1a278d0c191ec9f7a09ffb015bef1cb08eebb82 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil3029.py | 4d24e8f4ba438bbba1f8ddf9e36daac828244176 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,914 | py | # qubit number=4
# total number=41
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(3) # number=19
prog += CZ(0,3) # number=20
prog += H(3) # number=21
prog += CNOT(0,3) # number=23
prog += X(3) # number=24
prog += CNOT(0,3) # number=25
prog += CNOT(0,3) # number=17
prog += RX(-0.48380526865282825,3) # number=26
prog += H(1) # number=2
prog += Y(3) # number=18
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(1) # number=34
prog += CZ(0,1) # number=35
prog += H(1) # number=36
prog += CNOT(0,1) # number=31
prog += CNOT(0,1) # number=38
prog += X(1) # number=39
prog += CNOT(0,1) # number=40
prog += CNOT(0,1) # number=33
prog += CNOT(0,1) # number=30
prog += H(3) # number=8
prog += H(3) # number=37
prog += H(0) # number=9
prog += Y(2) # number=10
prog += X(2) # number=22
prog += Y(2) # number=11
prog += X(0) # number=13
prog += X(0) # number=14
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil3029.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
0a5a10fc5960abab4709c50c8d9d9a98632a00ae | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /alembic/versions/00036_11dbcd6e5ee3_.py | 23c7b78690e2f34d72284e8b9134eea9c3b21604 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 1,392 | py | """empty message
Revision ID: 11dbcd6e5ee3
Revises: 5aa994117f07
Create Date: 2017-09-28 04:06:18.968893
"""
# revision identifiers, used by Alembic.
revision = '11dbcd6e5ee3'
down_revision = '5aa994117f07'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE nu_release_item SET fetch_attempts = 0 WHERE fetch_attempts IS NULL")
op.execute("commit")
op.alter_column('nu_release_item', 'fetch_attempts',
existing_type=sa.INTEGER(),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('nu_release_item', 'fetch_attempts',
existing_type=sa.INTEGER(),
nullable=True)
### end Alembic commands ###
| [
"[email protected]"
] | |
fc0fecc42711bdc8005c76234c04e40af133500a | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/scripts/zd/ats_ZD_Combo_CLI_Application_Visibility_Standard_OPEN.py | 4c9c5d9a1708723af600c8124e9f074a6b2e5ab3 | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,649 | py | """
Created on 2014-06
@author: [email protected]
"""
import sys
import random
import libZD_TestSuite as testsuite
from RuckusAutoTest.common import lib_KwList as kwlist
from RuckusAutoTest.common import Ratutils as utils
def define_wlan_cfg():
key_string_wpa2 = utils.make_random_string(random.randint(8, 63), "hex")
wlan_cfg = {
'ssid': 'Application_Visibility_OPEN',
'type': 'standard',
'auth': 'PSK',
'wpa_ver': 'WPA2',
'encryption': 'AES',
'key_index': '',
'key_string': key_string_wpa2,
'sta_auth': 'PSK',
'sta_wpa_ver': 'WPA2',
'sta_encryption': 'AES',
'enable_application_visibility': True
}
return wlan_cfg
def define_test_cfg(cfg,enable_tunnel):
test_cfgs = []
target_ip_addr = '172.16.10.252'
target_addr_for_denial_policy = 'www.example.net'
radio_mode = cfg['radio_mode']
sta_radio_mode = radio_mode
if sta_radio_mode == 'bg':
sta_radio_mode = 'g'
sta_tag = 'sta%s' % radio_mode
ap_tag = 'ap%s' % radio_mode
wlan_cfg = define_wlan_cfg()
case_name_suffix = ''
if enable_tunnel:
wlan_cfg['do_tunnel'] = True
case_name_suffix = '_with_tunnel'
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = 'Remove all the WLANs from ZD'
test_cfgs.append(({}, test_name, common_name, 0, False))
test_name = 'CB_ZD_Create_Station'
common_name = 'Create target station'
test_cfgs.append(({'sta_ip_addr':cfg['target_station'],
'sta_tag': sta_tag}, test_name, common_name, 0, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = 'Remove all WlANs from station'
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 0, False))
test_name = 'CB_ZD_Config_AP_Radio'
common_name = 'Config All APs Radio - Disable WLAN Service'
test_params = {'cfg_type': 'init',
'all_ap_mac_list': cfg['all_ap_mac_list']}
test_cfgs.append((test_params, test_name, common_name, 0, False))
test_name = 'CB_ZD_Create_Active_AP'
common_name = 'Create active AP'
test_cfgs.append(({'active_ap':cfg['active_ap'],
'ap_tag': ap_tag}, test_name, common_name, 0, False))
test_name = 'CB_ZD_Config_AP_Radio'
common_name = 'Config active AP Radio %s - Enable WLAN Service' % (radio_mode)
test_params = {'cfg_type': 'config',
'ap_tag': ap_tag,
'ap_cfg': {'radio': radio_mode, 'wlan_service': True},
}
test_cfgs.append((test_params, test_name, common_name, 0, False))
test_name = 'CB_ZD_CLI_Application_Visibility_Init_Env'
common_name = 'Try to delete all application visibility rules.'
test_params = {}
test_cfgs.append((test_params, test_name, common_name, 0, False))
#testcase 1
test_case_name = '[user_app_open_standard%s]'%case_name_suffix
user_app_rule_cfg = {'rule_description':'user_app_open_none',
'dest_ip':target_ip_addr,
'dest_port':'12345',
'netmask':'255.255.255.0',
'protocol':'udp'}
test_name = 'CB_ZD_CLI_Add_User_Defined_App'
common_name = '%s Add a user app.'% (test_case_name)
test_params = {'user_app_cfg':[user_app_rule_cfg],
'negative': False,}
test_cfgs.append((test_params,test_name, common_name, 1, False))
test_name = 'CB_ZD_Create_Wlan'
common_name = '%sCreate WLAN on ZD'% (test_case_name)
test_cfgs.append(({'wlan_cfg_list':[wlan_cfg],
'enable_wlan_on_default_wlan_group': True,
}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Associate_Station_1'
common_name = '%sAssociate the station to the WLAN'% (test_case_name)
test_cfgs.append(({'wlan_cfg': wlan_cfg,
'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_Station_Wifi_Addr_1'
common_name = '%sGet WiFi address of the station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Station_Info_V2'
common_name = '%sVerify client information Authorized status in ZD'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'ap_tag': ap_tag,
'status': 'Authorized',
'wlan_cfg': wlan_cfg,
'radio_mode':sta_radio_mode,},
test_name, common_name, 2, False))
test_name = 'CB_ZD_Client_Ping_Dest'
common_name = '%sVerify station pings to the server successfully'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'condition': 'allowed',
'target': target_ip_addr}, test_name, common_name, 2, False))
test_name = 'CB_Server_Start_Iperf'
common_name = '%sStart iperf server on linux PC'% (test_case_name)
test_cfgs.append(({'server_addr':'',
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':'',
'tos':'',
'multicast_srv':False,
'port':12345 }, test_name, common_name, 2, False))
test_name = 'CB_Station_Start_Iperf'
common_name = '%sStart iperf client and send traffic to server'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag,
'server_addr':target_ip_addr,
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':60,
'tos':'',
'multicast_srv':False,
'port':12345 }, test_name, common_name, 2, False))
test_name = 'CB_Server_Stop_Iperf'
common_name = '%sStop iperf server on linux PC'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, False))
test_name = 'CB_Station_Stop_Iperf'
common_name = '%sStop iperf client on station'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Application_Visibility_Info'
common_name = '%sVerify application info in Monitor Clients page'% (test_case_name)
test_cfgs.append(({'application_description':'user_app_open_none'}, test_name, common_name, 2, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = '%sRemove all WlANs from station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, True))
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = '%sRemove all the WLANs from ZD'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, True))
test_name = 'CB_ZD_CLI_Del_User_Defined_App'
common_name = '%s Delete all user apps.'% (test_case_name)
test_params = {}
test_cfgs.append((test_params,test_name, common_name, 2, True))
#testcase 2
test_case_name = '[port_mapping_open_standard%s]'%case_name_suffix
port_mapping_rule_cfg = {'rule_description':'port_mapping_open_none','protocol':'udp','port':'54321'}
test_name = 'CB_ZD_CLI_Add_Port_Mapping_Policy'
common_name = '%s Add a port mapping rule.'% (test_case_name)
test_params = {'port_mapping_cfg':[port_mapping_rule_cfg],}
test_cfgs.append((test_params,test_name, common_name, 1, False))
test_name = 'CB_ZD_Create_Wlan'
common_name = '%sCreate WLAN on ZD'% (test_case_name)
test_cfgs.append(({'wlan_cfg_list':[wlan_cfg],
'enable_wlan_on_default_wlan_group': True,
}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Associate_Station_1'
common_name = '%sAssociate the station to the WLAN'% (test_case_name)
test_cfgs.append(({'wlan_cfg': wlan_cfg,
'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_Station_Wifi_Addr_1'
common_name = '%sGet WiFi address of the station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Station_Info_V2'
common_name = '%sVerify client information Authorized status in ZD'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'ap_tag': ap_tag,
'status': 'Authorized',
'wlan_cfg': wlan_cfg,
'radio_mode':sta_radio_mode,},
test_name, common_name, 2, False))
test_name = 'CB_ZD_Client_Ping_Dest'
common_name = '%sVerify station pings to the server successfully'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'condition': 'allowed',
'target': target_ip_addr}, test_name, common_name, 2, False))
test_name = 'CB_Server_Start_Iperf'
common_name = '%sStart iperf server on linux PC'% (test_case_name)
test_cfgs.append(({'server_addr':'',
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':'',
'tos':'',
'multicast_srv':False,
'port':54321 }, test_name, common_name, 2, False))
test_name = 'CB_Station_Start_Iperf'
common_name = '%sStart iperf client and send traffic to server'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag,
'server_addr':target_ip_addr,
'test_udp': True,
'packet_len':'',
'bw':'',
'timeout':60,
'tos':'',
'multicast_srv':False,
'port':54321 }, test_name, common_name, 2, False))
test_name = 'CB_Server_Stop_Iperf'
common_name = '%sStop iperf server on linux PC'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, False))
test_name = 'CB_Station_Stop_Iperf'
common_name = '%sStop iperf client on station'% (test_case_name)
test_cfgs.append(({'sta_tag':sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Application_Visibility_Info'
common_name = '%sVerify application info in Monitor Clients page'% (test_case_name)
test_cfgs.append(({'application_description':'port_mapping_open_none',}, test_name, common_name, 2, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = '%sRemove all WlANs from station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, True))
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = '%sRemove all the WLANs from ZD'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, True))
test_name = 'CB_ZD_CLI_Del_Port_Mapping_Policy'
common_name = '%s Delete all port mapping policies.'% (test_case_name)
test_params = {}
test_cfgs.append((test_params,test_name, common_name, 2, True))
#testcase 3
test_case_name = '[denial_policy_open_standard%s]'%case_name_suffix
denial_policy_cfg = {'policy_description': 'test_app_denial_policy',
'policy_name': 'test_app_denial_policy',
'rules': [{'application': 'Port', 'rule_description': 80, 'rule_id': 1},
{'application': 'HTTP hostname', 'rule_description': 'www.example.net', 'rule_id': 2}]}
test_name = 'CB_ZD_CLI_Add_App_Denial_Policy'
common_name = '%s Add a denial policy.'% (test_case_name)
test_params = {'denial_policy_cfg':[denial_policy_cfg],}
test_cfgs.append((test_params,test_name, common_name, 1, False))
test_name = 'CB_ZD_Create_Wlan'
common_name = '%sCreate WLAN on ZD'% (test_case_name)
test_cfgs.append(({'wlan_cfg_list':[wlan_cfg],
'enable_wlan_on_default_wlan_group': True,
}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Associate_Station_1'
common_name = '%sAssociate the station to the WLAN'% (test_case_name)
test_cfgs.append(({'wlan_cfg': wlan_cfg,
'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Get_Station_Wifi_Addr_1'
common_name = '%sGet WiFi address of the station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, False))
test_name = 'CB_ZD_Verify_Station_Info_V2'
common_name = '%sVerify client information Authorized status in ZD'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag,
'ap_tag': ap_tag,
'status': 'Authorized',
'wlan_cfg': wlan_cfg,
'radio_mode':sta_radio_mode,},
test_name, common_name, 2, False))
test_name = 'CB_Station_Ping_Dest_Is_Allowed'
common_name = '%sVerify station pinging to the server succeeds'%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'dest_ip': target_addr_for_denial_policy,}, test_name, common_name, 2, False))
test_name = 'CB_Station_Connect_To_Server_Port'
common_name = "%sVerify station connecting to server's port succeeds"%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'server_ip': '172.16.10.252','dest_port':80}, test_name, common_name, 2, False))
#edit wlan to enable denial policy
test_name = 'CB_ZD_Edit_Wlan'
common_name = '%sEdit wlan, to select a denial policy' % test_case_name
param_cfg = {'wlan_ssid': wlan_cfg['ssid'], 'new_wlan_cfg': {'application_denial_policy':'test_app_denial_policy'}}
test_cfgs.append((param_cfg,test_name, common_name, 2, False))
test_name = 'CB_Station_Ping_Dest_Is_Denied'
common_name = '%sVerify station pinging to the server fails'%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'dest_ip': target_addr_for_denial_policy,}, test_name, common_name, 2, False))
test_name = 'CB_Station_Connect_To_Server_Port'
common_name = "%sVerify station connecting to server's port fails"%(test_case_name)
test_cfgs.append(({'sta_tag': sta_tag, 'server_ip': '172.16.10.252','dest_port':80,'negative':True}, test_name, common_name, 2, False))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = '%sRemove all WlANs from station'% (test_case_name)
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 2, True))
test_name = 'CB_ZD_Remove_All_Wlans'
common_name = '%sRemove all the WLANs from ZD'% (test_case_name)
test_cfgs.append(({}, test_name, common_name, 2, True))
test_name = 'CB_ZD_CLI_Del_App_Denial_Policy'
common_name = '%s Delete all denial policies.'% (test_case_name)
test_params = {}
test_cfgs.append((test_params,test_name, common_name, 2, True))
#clean_up
test_name = 'CB_ZD_Config_AP_Radio'
common_name = 'Config All APs Radio - Enable WLAN Service'
test_params = {'cfg_type': 'teardown',
'all_ap_mac_list': cfg['all_ap_mac_list']}
test_cfgs.append((test_params, test_name, common_name, 0, True))
test_name = 'CB_Station_Remove_All_Wlans'
common_name = 'Remove all WlANs from station for the next test'
test_cfgs.append(({'sta_tag': sta_tag}, test_name, common_name, 0, True))
test_name = 'CB_ZD_CLI_Application_Visibility_Init_Env'
common_name = 'Try to delete all application visibility rules for next test.'
test_params = {}
test_cfgs.append((test_params, test_name, common_name, 0, True))
return test_cfgs
def check_max_length(test_cfgs):
for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
if len(common_name) > 120:
raise Exception('common_name[%s] in case [%s] is too long, more than 120 characters' % (common_name, testname))
def check_duplicated_common_name(test_cfgs):
common_name_list = []
duplicate_flag = False
for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
if common_name in common_name_list:
            duplicate_flag = True
print '####################'
print common_name
print '####################'
else:
common_name_list.append(common_name)
return duplicate_flag
def createTestSuite(**kwargs):
ts_cfg = dict(interactive_mode=True,
station=(0, "g"),
targetap=False,
testsuite_name="",
)
ts_cfg.update(kwargs)
mtb = testsuite.getMeshTestbed(**kwargs)
tbcfg = testsuite.getTestbedConfig(mtb)
sta_ip_list = tbcfg['sta_ip_list']
ap_sym_dict = tbcfg['ap_sym_dict']
all_ap_mac_list = tbcfg['ap_mac_list']
if ts_cfg["interactive_mode"]:
print '\nOnly the following AP models support application visibility:'
print ' 1.ZF-7762-AC,ZF-7762-S-AC'
print ' 2.ZF-7782,ZF-7782-s,ZF-7782-n,ZF-7782-e'
print ' 3.ZF-7982'
print ' 4.sc8800-s-ac,sc8800-s'
print ' 5.ZF-7055'
print ' 6.ZF-7352'
print ' 7.ZF-7372,ZF-7372-e'
print ' 8.ZF-7781-m,ZF-7781cm'
active_ap_list = testsuite.getActiveAp(ap_sym_dict)
target_sta = testsuite.getTargetStation(sta_ip_list, "Pick wireless station: ")
target_sta_radio = testsuite.get_target_sta_radio()
else:
target_sta = sta_ip_list[ts_cfg["station"][0]]
target_sta_radio = ts_cfg["station"][1]
if kwargs["targetap"]:
active_ap_list = sorted(ap_sym_dict.keys())
active_ap = active_ap_list[0]
tcfg = {
'target_station':'%s' % target_sta,
'radio_mode': target_sta_radio,
'active_ap':active_ap,
'all_ap_mac_list': all_ap_mac_list,
}
tunnel_mode = raw_input("\n\
Do you want to enable tunnel to do test?\n\
1. Yes\n\
2. No\n\
    Default selection is 2. Input your choice:")
if tunnel_mode != '1':
enable_tunnel = False
else: enable_tunnel = True
test_cfgs = define_test_cfg(tcfg,enable_tunnel)
check_max_length(test_cfgs)
check_duplicated_common_name(test_cfgs)
ts_suffix = ''
if enable_tunnel: ts_suffix = ' - tunneled'
if ts_cfg["testsuite_name"]:
ts_name = ts_cfg["testsuite_name"]
else:
ts_name = "Application_Visibility - Standard - OPEN%s"%ts_suffix
ts = testsuite.get_testsuite(ts_name, "Application_Visibility - Standard - OPEN%s"%ts_suffix , combotest=True)
test_order = 1
test_added = 0
for test_params, testname, common_name, exc_level, is_cleanup in test_cfgs:
if testsuite.addTestCase(ts, testname, common_name, test_params, test_order, exc_level, is_cleanup) > 0:
test_added += 1
test_order += 1
print "Add test case with test name: %s\n\t\common name: %s" % (testname, common_name)
print "\n-- Summary: added %d test cases into test suite '%s'" % (test_added, ts.name)
if __name__ == "__main__":
_dict = kwlist.as_dict(sys.argv[1:])
createTestSuite(**_dict)
| [
"[email protected]"
] | |
5d00ab45b31431b87c4bc649952d35c2f2a94f9a | 2b249f2be3a06b647c58ed09f14d1c0cc77b0309 | /cluster/pkg/swarmer.py | 059f3d7ad0f2e3b332823c5df2ff5527b94be16d | [] | permissive | Hrishi5/ACI-EnhancedEndpointTracker | 7dad677146ae4a26e1e2f212ad6e6eead92e3513 | a4de84c5fc00549e6539dbc1d8d927c74a704dcc | refs/heads/2.0 | 2020-04-01T18:18:24.119980 | 2018-12-14T06:17:15 | 2018-12-14T06:17:15 | 148,533,258 | 0 | 0 | MIT | 2018-09-12T19:46:19 | 2018-09-12T19:46:19 | null | UTF-8 | Python | false | false | 22,407 | py |
from .connection import Connection
from .lib import run_command
from .lib import pretty_print
import getpass
import json
import logging
import re
import time
# module level logging
logger = logging.getLogger(__name__)
class Swarmer(object):
def __init__(self, config, username=None, password=None):
        # receives an instance of ClusterConfig
self.config = config
self.username = username
self.password = password
self.nodes = {}
self.node_id = None # local node-id
self.node_addr = None # local node-addr
self.node_socket = None # local node-addr+port for registering worker nodes
self.token = None # registration token
# reindex config.nodes with string id's to match against string labels
config_nodes = {}
for nid in self.config.nodes:
config_nodes["%s" % nid] = self.config.nodes[nid]
self.config.nodes = config_nodes
def get_credentials(self):
# prompt user for username/password if not previously provided
while self.username is None or len(self.username)==0:
self.username = raw_input("Enter ssh username: ").strip()
while self.password is None or len(self.password)==0:
self.password = getpass.getpass("Enter ssh password: ").strip()
def get_connection(self, hostname):
# return ssh connection object to provided hostname, raise exception on error
logger.debug("get connection to %s", hostname)
self.get_credentials()
c = Connection(hostname)
c.username = self.username
c.password = self.password
c.protocol = "ssh"
c.port = 22
c.prompt = "[#>\$] *$"
if not c.login(max_attempts=3):
raise Exception("failed to connect to node %s@%s" % (self.username, hostname))
return c
def init_swarm(self):
# determine the swarm status of this node. If in a swarm but not the manager, raise an error
# If in a swarm AND the manager, then validate status matches config.
# If in not in a swarm, then assume this non-initialized system
js = self.get_swarm_info()
self.node_id = js["NodeID"]
self.node_addr = js["NodeAddr"]
managers = js["RemoteManagers"]
manager_addr = None
if len(self.node_id) > 0:
logger.debug("node %s is part of an existing swarm", self.node_addr)
self.set_node_socket(managers)
if self.node_socket is None:
err_msg = "This node is not a docker swarm manager. "
err_msg+= "Please execute on the node-1"
raise Exception(err_msg)
else:
# need to initialize this node as a swarm master
logger.info("initializing swarm master")
if not run_command("docker swarm init"):
raise Exception("failed to initialize node as swarm master")
# get new swarm info
js = self.get_swarm_info()
self.node_id = js["NodeID"]
self.node_addr = js["NodeAddr"]
managers = js["RemoteManagers"]
self.set_node_socket(managers)
if self.node_socket is None:
raise Exception("failed to init swarm manager, no Addr found in RemoteManagers")
# validated that swarm is initialized and we're executing on a manager node. Need to get
# token for deploying to new workers
token = run_command("docker swarm join-token worker -q")
if token is None:
raise Exception("failed to get swarm token from manager")
self.token = token.strip()
logger.debug("swarm token: %s", self.token)
# get list of current nodes IDs
self.get_nodes()
lnode = self.nodes.get(self.node_id, None)
if lnode is None:
raise Exception("unable to find local id %s in docker nodes", self.node_id)
# check label for current node is '1', if not add it
node_label = lnode.labels.get("node", None)
if node_label is None:
logger.debug("adding label '1' to local node")
cmd = "docker node update --label-add node=1 %s" % self.node_id
if run_command(cmd) is None:
raise Exception("failed to add docker node label node=1 to %s" % self.node_id)
lnode.labels["node"] = "1"
elif "%s"%node_label != "1":
err_msg = "This node(%s) has node-id set to %s. Please run on node-1" % (
self.node_id, node_label)
raise Exception(err_msg)
else:
logger.debug("node(%s) already assigned with label 1", self.node_id)
# index nodes by addr and label id, raise error on duplicate
index_addr = {}
index_label = {}
for nid in self.nodes:
n = self.nodes[nid]
if n.addr in index_addr:
raise Exception("duplicate docker node address: %s between %s and %s" % (n.addr,
index_addr[n.addr].node_id, nid))
node_label = n.labels.get("node", None)
if node_label is None:
# existing node without a label should not exists, we could try to fix it here but
# that's a bit out of scope. Will force user to manually fix it for now...
err_msg = "Node(%s) exists within swarm but does not have a label. " % nid
err_msg+= "Manually add the appropriate id label via:\n"
err_msg+= " docker node update --label-add node=<id> %s" % nid
raise Exception(err_msg)
node_label = "%s" % node_label
if node_label in index_label:
raise Exception("duplicate docker label node=%s between %s and %s" % (node_label,
index_label[node_label].node_id, nid))
index_addr[n.addr] = n
index_label[node_label] = n
logger.debug("index_label: %s", index_label)
# validate each node in the config or add it if missing
for node_label in sorted(self.config.nodes):
# already validate we're on node-id 1, never need to add 1 as worker
if node_label == "1": continue
hostname = self.config.nodes[node_label]["hostname"]
if node_label not in index_label:
swarm_node_id = self.add_worker(hostname, node_label)
cmd = "docker node update --label-add node=%s %s" % (node_label, swarm_node_id)
if run_command(cmd) is None:
raise Exception("failed to add docker node label node=%s to %s" % (node_label,
swarm_node_id))
logger.info("docker cluster initialized with %s node(s)", len(self.config.nodes))
def add_worker(self, hostname, nid):
""" attempt to connect to remote node and add to docker swarm """
# prompt user for credentials here if not set...
logger.info("Adding worker to cluster (id:%s, hostname:%s)", nid, hostname)
c = self.get_connection(hostname)
cmd = "docker swarm join --token %s %s" % (self.token, self.node_socket)
ret = c.cmd(cmd, timeout=60)
if ret != "prompt":
raise Exception("failed to add worker(%s) %s: %s" % (nid, hostname, ret))
if not re.search("This node joined a swarm", c.output):
raise Exception("failed to add worker(%s) %s: %s" % (nid, hostname, c.output))
# hopefully node was added, grab the NodeID from the swarm and then make sure it is seen
# on the master node (over ssh so outputs contain prompt and full command)
cmd = "docker info --format '{{.Swarm.NodeID}}'"
ret = c.cmd(cmd)
if ret != "prompt":
raise Exception("failed to determine Swarm.NodeID for worker(%s) %s" % (nid, hostname))
for l in c.output.split("\n"):
r1 = re.search("^(?P<node_id>[a-zA-Z0-9]{25})$", l.strip())
if r1 is not None:
logger.debug("Swarm.NodeID %s for worker(%s) %s", r1.group("node_id"),nid,hostname)
return r1.group("node_id")
raise Exception("unable to extract Swarm.NodeID for new worker(%s) %s"% (nid, hostname))
def set_node_socket(self, managers):
""" from docker swarm RemoteManagers list, find the socket connection (Addr) for the
provided node_id. Return None on error
"""
self.node_socket = None
if managers is not None:
for m in managers:
if "NodeID" in m and "Addr" in m and m["NodeID"] == self.node_id:
logger.debug("node %s matches manager %s", self.node_id, m)
self.node_socket = m["Addr"]
return
logger.debug("node %s not in RemoteManagers list", self.node_id)
def get_swarm_info(self):
""" get and validate swarm info from 'docker info' command
return dict {
"NodeID": "",
"NodeAddr": "",
"RemoteManagers": "",
}
"""
# get/validate swarm info from 'docker info' command. Return
info = run_command("docker info --format '{{json .}}'")
if info is None:
raise Exception("failed to get docker info, is docker installed?")
js = json.loads(info)
logger.debug("local node docker info:%s", pretty_print(js))
if "Swarm" not in js or "NodeID" not in js["Swarm"] or "NodeAddr" not in js["Swarm"] or \
"RemoteManagers" not in js["Swarm"]:
version = js.get("ServerVersion", "n/a")
raise Exception("no Swarm info, unsupported docker version: %s" % version)
return {
"NodeID": js["Swarm"]["NodeID"],
"NodeAddr": js["Swarm"]["NodeAddr"],
"RemoteManagers": js["Swarm"]["RemoteManagers"],
}
def get_nodes(self):
""" read docker nodes and update self.nodes """
logger.debug("get docker node info")
lines = run_command("docker node ls --format '{{json .}}'")
if lines is None:
raise Exception("unable to get docker node info")
for l in lines.split("\n"):
if len(l) == 0: continue
try:
logger.debug("node: %s", l)
node = DockerNode(**json.loads(l))
if node.node_id is not None:
self.nodes[node.node_id] = node
logger.debug("new node: %s", node)
except ValueError as e:
logger.debug("failed to decode node: '%s'", l)
def deploy_service(self):
""" deploy docker service referencing config file and verify everything is running """
logger.info("deploying app services, please wait...")
cmd = "docker stack deploy -c %s %s" % (self.config.compose_file, self.config.app_name)
if run_command(cmd) is None:
raise Exception("failed to deploy stack")
check_count = 8
check_interval = 15
all_services_running = True
while check_count > 0:
check_count-= 1
all_services_running = True
# check that all the deployed services have at least one replica up
cmd = "docker service ls --format '{{json .}}'"
out = run_command(cmd)
if out is None:
raise Exception("failed to validate services are running")
for l in out.split("\n"):
if len(l.strip()) == 0: continue
try:
js = json.loads(l)
if re.search("^%s_" % re.escape(self.config.app_name), js["Name"]):
replicas = re.search("(?P<c>[0-9]+)/(?P<t>[0-9]+)",js["Replicas"])
if replicas is not None:
if int(replicas.group("c")) < int(replicas.group("t")):
err_msg = "failed to deploy service %s (%s/%s)" % (js["Name"],
replicas.group("c"), replicas.group("t"))
# if this is last check interation, raise an error
if check_count <= 0: raise Exception(err_msg)
all_services_running = False
logger.debug(err_msg)
logger.debug("service %s success: %s", js["Name"], js["Replicas"])
else:
logger.debug("skipping check for service %s", js["Name"])
except (ValueError,KeyError) as e:
logger.warn("failed to parse docker service line: %s", l)
if not all_services_running:
logger.debug("one or more services pending, re-check in %s seconds", check_interval)
time.sleep(check_interval)
else: break
logger.info("app services deployed")
logger.debug("pausing for 15 seconds to give all services time to actually start")
time.sleep(15)
def init_db(self):
""" need to initialize all replication sets for mongo db based on user config
ssh to intended replica primary (replica 0) and initialize replica
"""
self.init_db_cfg()
# pause for 15 seconds to ensure that replica set is ready
logger.debug("pausing for 15 seconds to ensure replica is up")
time.sleep(15)
self.init_db_shards()
def init_db_cfg(self):
""" initialize cfg server replica set """
logger.info("initialize db config replica set")
        # find all 'db_cfg' services along with replica '0' info
rs = {"configsvr": True, "members":[]}
db_port = None
replica_0_node = None
replica_0_name = None
for svc_name in self.config.services:
svc = self.config.services[svc_name]
if svc.service_type == "db_cfg":
if "_id" not in rs: rs["_id"] = svc.replica
if svc.replica_number is None or svc.port_number is None:
raise Exception("service has invalid replica or port number: %s" % svc)
host = self.config.nodes.get("%s" % svc.node, None)
if host is None:
raise Exception("failed to determine host for service: %s" % svc)
member = {
"_id": svc.replica_number,
"host": "%s:%s" % (svc_name, svc.port_number)
}
if svc.replica_number == 0:
replica_0_node = host
replica_0_name = svc_name
db_port = svc.port_number
member["priority"] = 2
else:
member["priority"] = 1
rs["members"].append(member)
if replica_0_node is None or replica_0_name is None:
raise Exception("failed to determine replica 0 db configsrv")
cmd = 'docker exec -it '
cmd+= '$(docker ps -qf label=com.docker.swarm.service.name=%s_%s) ' % (
self.config.app_name, replica_0_name)
cmd+= 'mongo localhost:%s --eval \'rs.initiate(%s)\'' % (db_port, json.dumps(rs))
logger.debug("initiate cfg replication set cmd: %s", cmd)
# cfg server is statically pinned to node-1
if "%s" % replica_0_node["id"] == "1":
# hard to parse return json since there's other non-json characters printed so we'll
# just search for "ok" : 1
ret = run_command(cmd)
if ret is None or not re.search("['\"]ok['\"] *: *1 *", ret):
logger.warn("rs.initiate may not have completed successfully, cmd:%s\nresult:\n%s",
cmd, ret)
else:
raise Exception("expected cfg server replica 0 to be on node-1, currently on %s" % (
replica_0_node))
def init_db_shards(self):
""" initialize each shard replication set on replica-0 node owner """
logger.info("initialize db shards")
# get all service type db_sh and organize into replication sets
        shards = {} # indexed by shard replica-name, contains node-0 (id and hostname) along
# with 'rs' which is initiate dict
for svc_name in self.config.services:
svc = self.config.services[svc_name]
if svc.service_type == "db_sh":
if svc.replica_number is None or svc.port_number is None:
raise Exception("service has invalid replica or port number: %s" % svc)
if svc.replica not in shards:
shards[svc.replica] = {
"node-0": None,
"svc_name": None,
"svc_port": None,
"rs": {"_id": svc.replica, "members":[]}
}
host = self.config.nodes.get("%s" % svc.node, None)
if host is None:
raise Exception("failed to determine host for service: %s" % svc)
member = {
"_id": svc.replica_number,
"host": "%s:%s" % (svc_name, svc.port_number)
}
if svc.replica_number == 0:
shards[svc.replica]["node-0"] = host
shards[svc.replica]["svc_name"] = svc.name
shards[svc.replica]["svc_port"] = svc.port_number
member["priority"] = 2
else:
member["priority"] = 1
shards[svc.replica]["rs"]["members"].append(member)
for shard_name in shards:
rs = shards[shard_name]["rs"]
node_0 = shards[shard_name]["node-0"]
if node_0 is None:
raise Exception("failed to find replica 0 node for shard %s" % shard_name)
cmd = 'docker exec -it '
cmd+= '$(docker ps -qf label=com.docker.swarm.service.name=%s_%s) ' % (
self.config.app_name, shards[shard_name]["svc_name"])
cmd+= 'mongo localhost:%s --eval \'rs.initiate(%s)\'' % (shards[shard_name]["svc_port"],
json.dumps(rs))
logger.debug("command on %s: %s", node_0["id"], cmd)
if "%s" % node_0["id"] == "1":
# command is executed on local host
ret = run_command(cmd)
if ret is None or not re.search("['\"]ok['\"] *: *1 *", ret):
err_msg="rs.initiate may not have completed successfully for shard %s"%shard_name
err_msg+= ", node (id:%s, hostname:%s)" % (node_0["id"], node_0["hostname"])
err_msg+= "\ncmd: %s\nresult: %s" % (cmd, ret)
logger.warn(err_msg)
else:
c = self.get_connection(node_0["hostname"])
ret = c.cmd(cmd)
if ret != "prompt" or not re.search("['\"]ok['\"] *: *1 *", c.output):
err_msg="rs.initiate may not have completed successfully for shard %s"%shard_name
err_msg+= ", (node id: %s, hostname: %s)" % (node_0["id"], node_0["hostname"])
err_msg+= "\ncmd: %s\nresult: %s" % (cmd, "\n".join(c.output.split("\n")[:-1]))
logger.warn(err_msg)
# pause for 15 seconds to ensure that replica set is ready
logger.debug("pausing for 15 seconds to ensure all replica is up")
time.sleep(15)
# add each shard to mongo-router - note, there's an instance of mongos with service name
# 'db' on all nodes in the cluster so this command is always locally executed
for shard_name in shards:
svc_name = shards[shard_name]["svc_name"]
svc_port = shards[shard_name]["svc_port"]
cmd = 'docker exec -it '
cmd+= '$(docker ps -qf label=com.docker.swarm.service.name=%s_db) '%self.config.app_name
cmd+= 'mongo localhost:%s --eval \'sh.addShard("%s/%s:%s")\'' % (
self.config.mongos_port, shard_name, svc_name, svc_port)
ret = run_command(cmd)
if ret is None or not re.search("['\"]ok['\"] *: *1 *", ret):
err_msg="sh.addShard may not have completed successfully for shard %s"%shard_name
err_msg+= "\ncmd: %s\nresult: %s" % (cmd, ret)
logger.warn(err_msg)
class DockerNode(object):
def __init__(self, **kwargs):
self.labels = {}
self.role = None
self.addr = None
self.node_id = kwargs.get("ID", None)
self.hostname = kwargs.get("Hostname", None)
self.availability = kwargs.get("Availability", None)
self.status = kwargs.get("Status", None)
if self.node_id is not None:
inspect = run_command("docker node inspect %s --format '{{json .}}'" % self.node_id)
if inspect is not None:
try:
logger.debug("inspect: %s", inspect)
js = json.loads(inspect)
if "Status" in js:
if "Addr" in js["Status"]:
self.addr = js["Status"]["Addr"]
if "State" in js["Status"]:
self.status = js["Status"]["State"]
if "Spec" in js:
if "Availability" in js["Spec"]:
self.availability = js["Spec"]["Availability"]
if "Role" in js["Spec"]:
self.role = js["Spec"]["Role"]
if "Labels" in js["Spec"]:
if type(js["Spec"]["Labels"]) is not dict:
logger.debug("invalid Labels for %s: %s", self.node_id, js["Spec"])
else:
self.labels = js["Spec"]["Labels"]
except ValueError as e:
logger.debug("failed to decode inspect(%s): %s", self.node_id, inspect)
def __repr__(self):
return "id:%s, role:%s, addr:%s, status:%s, avail:%s, labels:%s" % (
self.node_id, self.role, self.addr, self.status, self.availability, self.labels
)
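# Rough end-to-end sketch (illustrative only; `cluster_config`, the username and
# the password are hypothetical placeholders):
#
#     swarmer = Swarmer(cluster_config, username="admin", password="secret")
#     swarmer.init_swarm()        # init manager on node-1, join and label workers
#     swarmer.deploy_service()    # docker stack deploy + replica checks
#     swarmer.init_db()           # initiate mongo cfg/shard replica sets, add shards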
| [
"[email protected]"
] | |
c5d385b41cade2187400881bf390d7ffe5eb5c55 | bd867af5245366ee0abfd0f659fcb42170fff8ca | /hackerRank/algorithms/DiagonalDifference/diagonal_difference.py | 954cf6bd5cfc0ee3735dcd2733472402344f7d21 | [] | no_license | kruart/coding_challenges | 04736a6b66da813fd973e7a57aa084bbdab31183 | 395ae60ab392e49bb5bc2f0a4eef1dfd232899bb | refs/heads/master | 2021-06-16T08:51:21.815334 | 2019-11-07T08:39:13 | 2019-11-07T08:39:13 | 153,890,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # https://www.hackerrank.com/challenges/diagonal-difference/problem
def diagonal_difference(arr):
    # absolute difference between the main-diagonal sum and the anti-diagonal sum
    return abs(sum([arr[i][i] - arr[i][len(arr)-i-1] for i in range(len(arr))]))
def main():
matrix1 = [
[1, 2, 3],
[4, 5, 6],
[9, 8, 9]
]
matrix2 = [
[11, 2, 4],
[4, 5, 6],
[10, 8, -12]
]
print(diagonal_difference(matrix1)) # 15 - 17 = 2
print(diagonal_difference(matrix2)) # 4 - 19 = 15
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a82c76f942927a67392aa0710e1f1969930ee6cf | bbf025a5f8596e5513bd723dc78aa36c46e2c51b | /dfs + tree/graph.py | 66496a7005f463b2e1716261d4179eac0bb238f2 | [] | no_license | AlanFermat/leetcode | 6209bb5cf2d1b19e3fe7b619e1230f75bb0152ab | cacba4abaca9c4bad8e8d12526336115067dc6a0 | refs/heads/master | 2021-07-11T04:00:00.594820 | 2020-06-22T21:31:02 | 2020-06-22T21:31:02 | 142,341,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | class Graph:
    def __init__(self, mapping=None):
        '''
        Constructs a new empty graph (or wraps a given adjacency mapping).
        '''
        # use None as the default to avoid the shared-mutable-default pitfall of mapping={}
        self.graph = mapping if mapping is not None else {}
def nodes(self):
'''
Returns a list of all nodes in the graph.
'''
        return list(self.graph.keys())
def get_neighbors(self, node):
'''
Given a particular node, returns a list of all neighbors in the graph.
'''
return self.graph[node]
def add_node(self, node):
'''
Adds the given node to the graph.
'''
self.graph[node] = set()
def add_edge(self, node1, node2):
'''
Adds an edge between the given pair of nodes, adding the nodes themselves first if they are not already in the graph.
'''
        if node1 not in self.graph:
            self.add_node(node1)
        if node2 not in self.graph:
            self.add_node(node2)
        self.graph[node1].add(node2)
        self.graph[node2].add(node1)
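# Brief usage example (added for illustration; the node labels are arbitrary):
if __name__ == '__main__':
    g = Graph()
    g.add_edge('a', 'b')
    g.add_edge('a', 'c')
    print(sorted(g.get_neighbors('a')))  # ['b', 'c']
| [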
"[email protected]"
] | |
f2bfc11338590eec04ff10e1911a56f28c3461f0 | e34cbf5fce48f661d08221c095750240dbd88caf | /python/day06/re_module.py | edd0ec1139439c775c119d49c71c7b07ae65d1f5 | [] | no_license | willianflasky/growup | 2f994b815b636e2582594375e90dbcb2aa37288e | 1db031a901e25bbe13f2d0db767cd28c76ac47f5 | refs/heads/master | 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 | C | UTF-8 | Python | false | false | 612 | py | #!/usr/bin/env python
# -*-coding:utf8-*-
# __author__ = "willian"
import re
# match only from the start of the string; rarely used
re.match("\d+", "341221")
# match the first occurrence
re.search("\d+", "341221")
# match all occurrences
re.findall("\d+", "341221")
# split on commas
re.split(",", "341,221")
# replace matches; replaces all by default, count limits the number of replacements
re.sub("\d{4}", "1995", "1399,2017", count=1)
# re.I (ignore case)
# print(re.search("[a-z]", "Alex", flags=re.I))
# re.M (multi-line: ^ and $ match at each line)
# print(re.search("^is", "my name\nis alex", flags=re.M))
# re.S (dot also matches newline, so multiple lines match together)
# print(re.search(".+", "my \nname", flags=re.S))
| [
"[email protected]"
] | |
276f494e824843392c3efb25c438e23b280c6dbd | 0754e2e7aa1ffb90b54d563ce5a9317e41cfebf9 | /ml/m03_xor.py | 2f5fac7cee0e1b1116a7a60ebc02f9efee5e76ae | [] | no_license | ChaeMyungSeock/Study | 62dcf4b13696b1f483c816af576ea8883c57e531 | 6f726a6ecb43387e4a3b9d068a9c491b115c74c0 | refs/heads/master | 2023-01-24T20:59:52.053394 | 2020-12-07T14:54:34 | 2020-12-07T14:54:34 | 263,255,793 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from sklearn import svm
# 1. data
x_data = [[0, 0], [1,0], [0,1], [1,1]]
y_data = [0, 1, 1, 0]
# 2. model
# the model itself is a single line; it grows through the parameter values you pass in
model = LinearSVC()
# 3. train
model.fit(x_data, y_data)
# 4. evaluate / predict
x_test = [[0,0], [1,0], [0,1], [1,1]]
y_predict = model.predict(x_test)
acc = accuracy_score([0,1,1,0], y_predict)
print(x_test, "prediction results: ", y_predict)
print("acc = ", acc)
#
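# Follow-up sketch (the point of this lesson appears to be that a linear SVM
# cannot separate XOR, so acc above is typically below 1.0). A kernel SVM can
# usually fit these four points; this reuses the `svm` import above.
model2 = svm.SVC(kernel='rbf')
model2.fit(x_data, y_data)
print("SVC acc = ", accuracy_score(y_data, model2.predict(x_test)))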
| [
"[email protected]"
] | |
353fbe7250bf1beac4646624a021763b5c94b92a | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/Learn/PyCharm/Algorithmic Toolbox/Algorithmic Warm Up/Last Digit of the Sum of Fibonacci Numbers/last_digit_of_the_sum_of_fibonacci_numbers_unit_tests.py | bb3384e3158b2445f6adca669ed4c4fac09f64be | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a6a99b9bfea384a8802695a7f6eafeab6ae6e1cd091ebf62c01e6e6c0ecac93e
size 662
| [
"[email protected]"
] | |
6c46999ddcfe4f6028d29dcdd2d2bb61c6a59501 | 650461f8804d7bd3c3f76d53a4f0b203b6f2788e | /PPool/__init__.py | 4e1a8022187ec02b92b5b21a70840fca3ef4427d | [
"Apache-2.0"
] | permissive | oeg-upm/PPool | 839953798f27249d3c9b492adc313afd9f2160c4 | 1c5557c37d86b5c22179b2204d68e7256d2a5c08 | refs/heads/master | 2020-03-25T02:56:28.305760 | 2018-09-26T07:04:28 | 2018-09-26T07:04:28 | 143,314,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | name = "PPool"
| [
"[email protected]"
] | |
7c393120ee51e757a0b0c2bc246dc2a4c934dc23 | 08706df7e3712ebec7afd2d2f8f964ae9d485386 | /server/patients/migrations/0016_attribute_resource.py | 3c95316f5e9d660ee4b386204d0e49c148dcc89e | [] | no_license | nickdotreid/take-on-transplant | 9129c9ab7c1206291fc1ca616c18c44cd7519587 | bf901b987121093787383f3d3726f87dddf4d5fd | refs/heads/master | 2023-08-27T06:14:54.521168 | 2021-11-02T21:41:04 | 2021-11-02T21:41:04 | 298,403,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | # Generated by Django 3.1.1 on 2020-11-24 02:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('resources', '0006_auto_20201120_1722'),
('patients', '0015_issue_posttransplantissue_pretransplantissue'),
]
operations = [
migrations.AddField(
model_name='attribute',
name='resource',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='resources.resource'),
),
]
| [
"[email protected]"
] | |
0598cc55bb3cc9cd48235f6dee023526aede8599 | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /airflow/migrations/versions/0075_2_0_0_add_description_field_to_connection.py | 4c3f5835dcbfdf9b443396cbcceb764f421fbf89 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 2,008 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add description field to ``connection`` table
Revision ID: 61ec73d9401f
Revises: 2c6edca13270
Create Date: 2020-09-10 14:56:30.279248
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '61ec73d9401f'
down_revision = '2c6edca13270'
branch_labels = None
depends_on = None
airflow_version = '2.0.0'
def upgrade():
"""Apply Add description field to ``connection`` table"""
conn = op.get_bind()
with op.batch_alter_table('connection') as batch_op:
if conn.dialect.name == "mysql":
# Handles case where on mysql with utf8mb4 this would exceed the size of row
# We have to set text type in this migration even if originally it was string
# This is permanently fixed in the follow-up migration 64a7d6477aae
batch_op.add_column(sa.Column('description', sa.Text(length=5000), nullable=True))
else:
batch_op.add_column(sa.Column('description', sa.String(length=5000), nullable=True))
def downgrade():
"""Unapply Add description field to ``connection`` table"""
with op.batch_alter_table('connection', schema=None) as batch_op:
batch_op.drop_column('description')
| [
"[email protected]"
] | |
c997ae3f2e974662ca89bdc82bccbd2658d4404b | 73f7cc0e71bfd38d3bfe97367324f1e7a5d8b451 | /engine_code/gapi/modules/proxy/cloud/parse.py | 0e1d8a64f87ac9893d254692c67c63c5b528386c | [] | no_license | cash2one/my-test | ccc0ae860f936262a601c1b579d3c85196b562f9 | 8bd23f5963f4dc7398b7670e28768a3533bd5d14 | refs/heads/master | 2021-01-18T03:20:30.889045 | 2017-01-19T02:52:02 | 2017-01-19T02:52:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,357 | py | #!/usr/bin/python
# -*- coding=utf-8 -*-
from xml.etree.ElementTree import ElementTree,Element
def read_xml(in_path):
    '''Read and parse an XML file.
    in_path: path to the XML file
    return: ElementTree'''
    tree = ElementTree()
    tree.parse(in_path)
    return tree
def write_xml(tree, out_path):
    '''Write the XML tree out to a file.
    tree: the XML tree
    out_path: output path'''
    tree.write(out_path, encoding="utf-8")
    print '.....'
def if_match(node, kv_map):
    '''Check whether a node has all of the given attributes with matching values.
    node: the node to check
    kv_map: map of attribute names to expected values'''
for key in kv_map:
if node.get(key) != kv_map.get(key):
return False
return True
#---------------search -----
def find_nodes(tree, path):
    '''Find all nodes matching the given path.
    tree: the XML tree
    path: node path'''
return tree.findall(path)
def get_node_by_keyvalue(nodelist, kv_map):
    '''Locate and return the nodes whose attributes match the given map.
    nodelist: list of candidate nodes
    kv_map: map of attribute names and values to match'''
result_nodes = []
for node in nodelist:
if if_match(node, kv_map):
result_nodes.append(node)
return result_nodes
#---------------change -----
def change_node_properties(nodelist, kv_map, is_delete=False):
    '''Modify / add / delete node attributes and attribute values.
    nodelist: list of nodes
    kv_map: map of attribute names and values'''
for node in nodelist:
for key in kv_map:
if is_delete:
if key in node.attrib:
del node.attrib[key]
else:
node.set(key, kv_map.get(key))
def change_node_text(nodelist, text, is_add=False, is_delete=False):
    '''Change / append / delete a node's text.
    nodelist: list of nodes
    text: the new text'''
for node in nodelist:
if is_add:
node.text += text
elif is_delete:
node.text = ""
else:
node.text = text
def create_node(tag, property_map, content):
    '''Create a new node.
    tag: node tag
    property_map: map of attributes and values
    content: text content inside the node's closing tags
    return: the new node'''
element = Element(tag, property_map)
element.text = content
return element
def add_child_node(nodelist, element):
    '''Append a child node to each node in the list.
    nodelist: list of parent nodes
    element: the child node'''
for node in nodelist:
node.append(element)
def del_node_by_tagkeyvalue(nodelist, tag, kv_map):
'''同过属性及属性值定位一个节点,并删除之
nodelist: 父节点列表
tag:子节点标签
kv_map: 属性及属性值列表'''
for parent_node in nodelist:
children = parent_node.getchildren()
for child in children:
if child.tag == tag and if_match(child, kv_map):
parent_node.remove(child)
#if __name__ == "__main__":
#
#    #1. Read the XML file
#    tree = read_xml("./test.xml")
#    print 'tree',tree
#
#    #2. Modify attributes
#    #A. Find the parent nodes
#    nodes = find_nodes(tree, "processers/processer")
#    #B. Locate the child nodes precisely by attribute
#    result_nodes = get_node_by_keyvalue(nodes, {"name":"BProcesser"})
#    #C. Modify node attributes
#    change_node_properties(result_nodes, {"age": "1"})
#    #D. Delete node attributes
#    change_node_properties(result_nodes, {"value":""}, True)
#
#    #3. Modify nodes
#    #A. Create a new node
#    a = create_node("person", {"age":"15","money":"200000"}, "this is the first content")
#    #B. Insert it under the parent nodes
#    add_child_node(result_nodes, a)
#
#    #4. Delete nodes
#    # Locate the parent nodes
#    del_parent_nodes = find_nodes(tree, "processers/services/service")
#    # Locate the child node precisely and delete it
#    target_del_node = del_node_by_tagkeyvalue(del_parent_nodes, "chain", {"sequency" : "chain1"})
#
#    #5. Modify node text
#    # Locate the nodes
#    text_nodes = get_node_by_keyvalue(find_nodes(tree, "processers/services/service/chain"), {"sequency":"chain3"})
#    change_node_text(text_nodes, "new text")
#
#    #6. Write out the result file
#    write_xml(tree, "./out.xml")
| [
"[email protected]"
] | |
bfcfe9c39e88787a47af7b24c492c7cb2ba75116 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03150/s056018673.py | ba3699fc1ecf9d7f7a828e88f30db87b5e18b4da | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | S = input()
ans = "NO"
for i in range(len(S)):
for j in range(len(S)):
if S[0:i] + S[i+j:len(S)] == "keyence":
print("YES")
exit()
print(ans) | [
"[email protected]"
] | |
3e90c7f5b279e7d86b365e1a1faeb32f2420825d | 0529196c4d0f8ac25afa8d657413d4fc1e6dd241 | /runnie0427/02965/2965.py2.py | fead6e9c86c1bc1e100db0a5a2029668e08104b8 | [] | no_license | riyuna/boj | af9e1054737816ec64cbef5df4927c749808d04e | 06420dd38d4ac8e7faa9e26172b30c9a3d4e7f91 | refs/heads/master | 2023-03-17T17:47:37.198570 | 2021-03-09T06:11:41 | 2021-03-09T06:11:41 | 345,656,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,370 | py | <!DOCTYPE html>
<html lang="ko">
<head><title>Baekjoon Online Judge</title></head>
<body>
<div class="error-v1"><span class="error-v1-title">404</span><span>Not found</span></div>
<div class="text-center"><span>If an attached source file from the lecture slides returns a 404 error, copy and paste the link instead.</span></div>
</body>
</html> | [
"[email protected]"
] | |
1edcceffcfbf8947bb55c85896d44b45eddc8739 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.HNS/Serif_16/pdf_to_json_test_Latn.HNS_Serif_16.py | 14b2d82b21a61c2d50f3845e482493f91f58415d | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.HNS/Serif_16/udhr_Latn.HNS_Serif_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"[email protected]"
] | |
98649096ac72586c4cc39e7e4b5b32871381a937 | ffbad21b9e8a92f9669ebaa1e0542d9bf114f414 | /akshare/bank/bank_banker.py | e14042b63153052d133088af489debe4c662e335 | [
"MIT"
] | permissive | cqzhao/akshare | 6bccdb4eceae633609bb6a797760a5d05ed0165a | cd740d050015edd26590cc0f3d493d7dc57ea79b | refs/heads/master | 2023-03-02T01:24:19.238315 | 2021-01-25T04:06:27 | 2021-01-25T04:06:27 | 311,243,910 | 0 | 0 | MIT | 2021-01-25T04:06:28 | 2020-11-09T06:26:44 | null | UTF-8 | Python | false | false | 9,970 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/1/14 15:56
Desc: thebankerdatabase
https://www.thebankerdatabase.com/index.cfm/search/ranking
"""
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
def bank_rank_banker() -> pd.DataFrame:
"""
    Top 25 banks in the global ranking
    https://www.thebankerdatabase.com/index.cfm/search/ranking
    :return: top 25 banks in the global ranking
:rtype: pandas.DataFrame
"""
url = "https://www.thebankerdatabase.com/index.cfm/search/index.cfm"
headers = {
"accept": "application/json, text/javascript, */*; q=0.01",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"content-length": "5906",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"cookie": "CFID=4066679; CFTOKEN=757b91f9e32ccf96-DABAED1E-5056-81CB-AC16B7759B219C5F; __utmz=11608689.1610550237.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmv=11608689.|1=User%20Type=Anonymous=1; X-Mapping-mcmjnkih=105487F00B86D7352E95B0FD5E7117FE; JSESSIONID=AAFB1EFAC538A6591033D322503118E6.cfusion; LIVEPAGEHEIGHT=600; LIVEPAGEWIDTH=800; __utma=11608689.1485486898.1610550237.1610550237.1610609939.2; __utmc=11608689; __utmt=1; __utmb=11608689.1.10.1610609939; CFGLOBALS=urltoken%3DCFID%23%3D4066679%26CFTOKEN%23%3D757b91f9e32ccf96%2DDABAED1E%2D5056%2D81CB%2DAC16B7759B219C5F%26jsessionid%23%3DAAFB1EFAC538A6591033D322503118E6%2Ecfusion%23lastvisit%3D%7Bts%20%272021%2D01%2D14%2007%3A39%3A01%27%7D%23hitcount%3D44%23timecreated%3D%7Bts%20%272021%2D01%2D13%2015%3A03%3A42%27%7D%23cftoken%3D757b91f9e32ccf96%2DDABAED1E%2D5056%2D81CB%2DAC16B7759B219C5F%23cfid%3D4066679%23",
"origin": "https://www.thebankerdatabase.com",
"pragma": "no-cache",
"referer": "https://www.thebankerdatabase.com/index.cfm/search/ranking",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
"x-requested-with": "XMLHttpRequest",
}
params = {
"fuseaction": "search.search_results_json",
"ajax": "1",
"ranking": "1",
}
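    # The payload below mimics a jQuery DataTables server-side request:
    # each "columns[i][...]" group describes one column of the ranking
    # table (field name, searchability, ordering), while the draw/start/
    # length fields drive paging.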
payload = {
"draw": "4",
"columns[0][data]": "bank_id",
"columns[0][name]": "bank_id",
"columns[0][searchable]": "true",
"columns[0][orderable]": "false",
"columns[0][search][value]": "",
"columns[0][search][regex]": "false",
"columns[1][data]": "primary_ranking",
"columns[1][name]": "primary_ranking",
"columns[1][searchable]": "true",
"columns[1][orderable]": "1",
"columns[1][search][value]": "",
"columns[1][search][regex]": "false",
"columns[2][data]": "previous_ranking",
"columns[2][name]": "previous_ranking",
"columns[2][searchable]": "true",
"columns[2][orderable]": "1",
"columns[2][search][value]": "",
"columns[2][search][regex]": "false",
"columns[3][data]": "current_name",
"columns[3][name]": "current_name",
"columns[3][searchable]": "true",
"columns[3][orderable]": "1",
"columns[3][search][value]": "",
"columns[3][search][regex]": "false",
"columns[4][data]": "country_name",
"columns[4][name]": "country_name",
"columns[4][searchable]": "true",
"columns[4][orderable]": "1",
"columns[4][search][value]": "",
"columns[4][search][regex]": "false",
"columns[5][data]": "yearend_datetime",
"columns[5][name]": "yearend_datetime",
"columns[5][searchable]": "true",
"columns[5][orderable]": "1",
"columns[5][search][value]": "",
"columns[5][search][regex]": "false",
"columns[6][data]": "DP2",
"columns[6][name]": "DP2",
"columns[6][searchable]": "true",
"columns[6][orderable]": "1",
"columns[6][search][value]": "",
"columns[6][search][regex]": "false",
"columns[7][data]": "DP2_change",
"columns[7][name]": "DP2_change",
"columns[7][searchable]": "true",
"columns[7][orderable]": "1",
"columns[7][search][value]": "",
"columns[7][search][regex]": "false",
"columns[8][data]": "DP2_rank",
"columns[8][name]": "DP2_rank",
"columns[8][searchable]": "true",
"columns[8][orderable]": "1",
"columns[8][search][value]": "",
"columns[8][search][regex]": "false",
"columns[9][data]": "DP6",
"columns[9][name]": "DP6",
"columns[9][searchable]": "true",
"columns[9][orderable]": "1",
"columns[9][search][value]": "",
"columns[9][search][regex]": "false",
"columns[10][data]": "DP6_change",
"columns[10][name]": "DP6_change",
"columns[10][searchable]": "true",
"columns[10][orderable]": "1",
"columns[10][search][value]": "",
"columns[10][search][regex]": "false",
"columns[11][data]": "DP6_rank",
"columns[11][name]": "DP6_rank",
"columns[11][searchable]": "true",
"columns[11][orderable]": "1",
"columns[11][search][value]": "",
"columns[11][search][regex]": "false",
"columns[12][data]": "DP1",
"columns[12][name]": "DP1",
"columns[12][searchable]": "true",
"columns[12][orderable]": "1",
"columns[12][search][value]": "",
"columns[12][search][regex]": "false",
"columns[13][data]": "DP1_change",
"columns[13][name]": "DP1_change",
"columns[13][searchable]": "true",
"columns[13][orderable]": "1",
"columns[13][search][value]": "",
"columns[13][search][regex]": "false",
"columns[14][data]": "DP12",
"columns[14][name]": "DP12",
"columns[14][searchable]": "true",
"columns[14][orderable]": "1",
"columns[14][search][value]": "",
"columns[14][search][regex]": "false",
"columns[15][data]": "DP48",
"columns[15][name]": "DP48",
"columns[15][searchable]": "true",
"columns[15][orderable]": "1",
"columns[15][search][value]": "",
"columns[15][search][regex]": "false",
"columns[16][data]": "DP48_rank",
"columns[16][name]": "DP48_rank",
"columns[16][searchable]": "true",
"columns[16][orderable]": "1",
"columns[16][search][value]": "",
"columns[16][search][regex]": "false",
"columns[17][data]": "DP130",
"columns[17][name]": "DP130",
"columns[17][searchable]": "true",
"columns[17][orderable]": "1",
"columns[17][search][value]": "",
"columns[17][search][regex]": "false",
"columns[18][data]": "DP130_rank",
"columns[18][name]": "DP130_rank",
"columns[18][searchable]": "true",
"columns[18][orderable]": "1",
"columns[18][search][value]": "",
"columns[18][search][regex]": "false",
"columns[19][data]": "DP13",
"columns[19][name]": "DP13",
"columns[19][searchable]": "true",
"columns[19][orderable]": "1",
"columns[19][search][value]": "",
"columns[19][search][regex]": "false",
"columns[20][data]": "DP13_rank",
"columns[20][name]": "DP13_rank",
"columns[20][searchable]": "true",
"columns[20][orderable]": "1",
"columns[20][search][value]": "",
"columns[20][search][regex]": "false",
"columns[21][data]": "DP8",
"columns[21][name]": "DP8",
"columns[21][searchable]": "true",
"columns[21][orderable]": "1",
"columns[21][search][value]": "",
"columns[21][search][regex]": "false",
"columns[22][data]": "DP49",
"columns[22][name]": "DP49",
"columns[22][searchable]": "true",
"columns[22][orderable]": "1",
"columns[22][search][value]": "",
"columns[22][search][regex]": "false",
"columns[23][data]": "DP49_rank",
"columns[23][name]": "DP49_rank",
"columns[23][searchable]": "true",
"columns[23][orderable]": "1",
"columns[23][search][value]": "",
"columns[23][search][regex]": "false",
"columns[24][data]": "DP131",
"columns[24][name]": "DP131",
"columns[24][searchable]": "true",
"columns[24][orderable]": "1",
"columns[24][search][value]": "",
"columns[24][search][regex]": "false",
"columns[25][data]": "DP132",
"columns[25][name]": "DP132",
"columns[25][searchable]": "true",
"columns[25][orderable]": "1",
"columns[25][search][value]": "",
"columns[25][search][regex]": "false",
"order[0][column]": "0",
"order[0][dir]": "asc",
"start": "0",
"length": "100",
"search[value]": "",
"search[regex]": "false",
}
r = requests.post(url, params=params, data=payload, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"])
del temp_df["columnlist"]
del temp_df["bank_id"]
bank_url_list = [
"https://www.thebankerdatabase.com/"
+ BeautifulSoup(item, "lxml").find("a")["href"]
for item in temp_df["current_name"]
]
bank_name_list = []
for item in tqdm(bank_url_list):
r = requests.get(item)
soup = BeautifulSoup(r.text, "lxml")
bank_name = soup.find("h1", attrs={"class": "bank"}).find("span").text
bank_name_list.append(bank_name)
temp_df["current_name"] = bank_name_list
temp_df["yearend_datetime"] = pd.to_datetime(temp_df["yearend_datetime"])
return temp_df
if __name__ == "__main__":
bank_rank_banker_df = bank_rank_banker()
print(bank_rank_banker_df)
| [
"[email protected]"
] | |
ae5f27b58b42509c2fb6f82e2e426f521420b5dd | d87f6d9e769709def3efcf30230cd8bf6ac2cef7 | /WWTest/autotest/config/xkz/youyanyace/globalconfig/globalConfig.py | e6cc20a18999b112dc5f12dade19633d8c3165fc | [] | no_license | wawj901124/centos8xitong | 876dcc45b895871119404ad1899ca59ab5dd90b6 | 81fc0d1151e3172ceec2093b035d2cd921e1a433 | refs/heads/master | 2023-02-23T22:33:22.314433 | 2021-01-31T01:54:35 | 2021-01-31T01:54:35 | 290,476,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | class GlobalConfig(object):
ISONLINE = False
ONLINE_WEB_YUMING= ""
ONLINE_LOGIN_ACCOUNT = ""
ONLINE_LOGIN_PASSWORD = ""
TEST_WEB_YUMING = "http://111.207.18.22:22044/"
TEST_LOGIN_ACCOUNT = "admin"
TEST_LOGIN_PASSWORD = "admin123A"
COOKIE_FILE_NAME = "youyanyacelogincookie.json"
gc = GlobalConfig()
| [
"wawj900805"
] | wawj900805 |
8fad67f8ce8ce001bfb436e710258ff19d7ff81a | 6849f09504c1b9e7e6b4bdc2a924f84ec98ec432 | /webapp/manage.py | 62c14e20c068799663d30d3c0e974d9a606680f0 | [
"Apache-2.0"
] | permissive | likit/lab-instrument-booking-app | a1c9d16635b8cff3511901d5510560349e8e5911 | c21b42342376dc54fdd11a7f87bc7609e6204020 | refs/heads/master | 2021-01-02T09:14:33.291562 | 2015-06-28T14:57:39 | 2015-06-28T14:57:39 | 37,254,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | #!/usr/bin/env python
import os
from app import create_app, mongo
from flask.ext.script import Manager, Shell
# from flask.ext.migrate import Migrate, MigrateCommand
from werkzeug.security import generate_password_hash
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
# migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=mongo.db,)
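# Running `python manage.py shell` then exposes `app` and `db` in the
# interactive namespace without manual imports.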
@manager.command
def test():
"""Run the unit tests"""
import unittest
tests = unittest.TestLoader().discover('.')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def initdb():
"""Init the database"""
mongo.db.drop_collection('users')
password = generate_password_hash('testpass')
user = {
'name': 'Foo',
'lastname': 'Jiang',
'email': '[email protected]',
'password': password,
'pi_email': '[email protected]',
'status': 'undergrad',
}
# password = generate_password_hash('testpass')
# admin = {
# 'email': '[email protected]',
# 'password': password,
# }
# mongo.db.admins.insert(admin, safe=True)
mongo.db.users.insert(user, safe=True)
manager.add_command('shell', Shell(make_context=make_shell_context))
# manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| [
"[email protected]"
] | |
3f3eebddf1980d557d39e2eef82f0f178cb64734 | 2990b0841b63f300a722107933c01c7237a7976b | /all_xuef/code/sicp_code_python/2.2/exer2_36.py | 45876f0bdd6ff5e412e92460f44c40c00c5394aa | [] | no_license | xuefengCrown/Files_01_xuef | 8ede04751689e0495e3691fc5d8682da4d382b4d | 677329b0189149cb07e7ba934612ad2b3e38ae35 | refs/heads/master | 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """
exer2.36 accumulate_n
((1 2 3) (4 5 6) (7 8 9) (10 11 12))-->(22 26 30)
"""
import exer2_33 as funcs
import accumulate as accu
import operator as oper
def accumulate_n(op, init, seqs):
# 每个序列等长度,所以如果第一个处理完了,意味着都处理完了
if len(seqs[0])==0: return []
return funcs._append([accu.accumulate(op,
init,
list(map(lambda seq:seq[0], seqs)))],
accumulate_n(op,
init,
list(map(lambda seq:seq[1:], seqs))))
def test():
seqs = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
print(accumulate_n(oper.add, 0, seqs))
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
2f2897da3ab199c97a2904a7bc4488f42042c775 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/bwobsolete_helpers/PyGUI/FocusManager.py | d63a96b2d06f8769fe8fa7654f13ed3704920427 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,004 | py | # 2017.05.04 15:20:45 Střední Evropa (letní čas)
# Embedded file name: scripts/client/bwobsolete_helpers/PyGUI/FocusManager.py
_focusedComponent = None
def getFocusedComponent():
global _focusedComponent
return _focusedComponent
def setFocusedComponent(newFocus):
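    # Drop the focus flag on the previously focused component (if any)
    # before marking the new component as focused.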
global _focusedComponent
if newFocus != _focusedComponent:
if _focusedComponent is not None:
_focusedComponent.focus = False
_focusedComponent = newFocus
if newFocus is not None:
newFocus.focus = True
return
def isFocusedComponent(component):
if _focusedComponent is None or component is None:
return _focusedComponent is component
else:
return _focusedComponent.__str__() == component.__str__()
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\bwobsolete_helpers\PyGUI\FocusManager.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:20:45 Střední Evropa (letní čas)
| [
"[email protected]"
] | |
77cc7c9dea37d25900a3ef81b6fe8e5c4ac325d8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02259/s333912873.py | 70af1be9d0001017dd4c907d0d466616f478bb16 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | def bubbles(N):
count = 0
for i in range(len(N)):
for j in range(len(N)-1, i, -1):
if N[j] < N[j-1]:
N[j], N[j-1] = N[j-1], N[j]
count += 1
c = 1
for i in N:
print(i, end='')
if c < len(N):
print(' ', end='')
c += 1
print('')
return count
n = int(input())
numbers = list(map(int, input().split()))
print(bubbles(numbers)) | [
"[email protected]"
] | |
5dfd1f2fa0a20f7374881feaa573ca57dd325796 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_040/ch152_2020_04_13_20_50_06_154418.py | 4e37d65e1c26223db4521858483a177d0b8585da | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def verifica_preco(x,y,z):
dic1 = {}
dic2 = {}
for x, cor in y.items():
dic1[x] = cor
for cor2, valor in z.items():
dic2[cor2] = valor
if cor == cor2:
return valor
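# Example (hypothetical data):
#   verifica_preco('camiseta', {'camiseta': 'azul'}, {'azul': 19.9})  # -> 19.9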
| [
"[email protected]"
] | |
b9169e937fabc228e29384360ef65944f5973688 | 1d87b6e7cd7879fefeaa8f475045de1cc1bc2bf5 | /podder_task_foundation/logging/log_setting.py | db6d2ddd99e112b75acd67189097e92a65cda131 | [] | no_license | nagisa-sakamoto/podder-task-foundation | 2ecb24e07bbfcc1121661fb7d9e7005faf9093e0 | 8de453bf8f89d5ddcb8e82d394f73f3a8f715329 | refs/heads/main | 2022-12-30T01:10:23.051183 | 2020-10-20T08:13:55 | 2020-10-20T08:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | import logging
import os
from typing import Any, Dict
from podder_task_foundation.config import Config
class LogSetting:
TASK_NAME_PATH = 'task_name.ini'
DEFAULT_FORMAT = '[%(asctime)s.%(msecs)03d] %(levelname)s - %(message)s'
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
_log_setting = None
def __init__(self, mode: str, config: Config):
self._mode = mode
self._config = config
def load(self):
if LogSetting._log_setting is None:
LogSetting._log_setting = self._load_log_yml()
return LogSetting._log_setting
def _get_config(self, key: str, default: Any) -> Any:
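        # Resolution order: "log.<key>" first, then "pipeline.<key>",
        # then the supplied default.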
value = self._config.get("log." + key)
if value is not None:
return value
value = self._config.get("pipeline." + key)
if value is not None:
return value
return default
def _load_log_yml(self) -> Dict:
if os.path.exists(self.TASK_NAME_PATH):
with open(self.TASK_NAME_PATH, 'r') as stream:
task_name = stream.read()
else:
task_name = self._get_config('app.name', '')
settings = {
'task_name': task_name,
'default_log_format': self.DEFAULT_FORMAT,
'date_format': self.DATE_FORMAT,
'task_log_format': self._get_config('task_log_format', self.DEFAULT_FORMAT),
'server_log_format': self._get_config('server_log_format', self.DEFAULT_FORMAT),
'color_task_log_format': self._get_config('color_task_log_format', self.DEFAULT_FORMAT),
'color_server_log_format': self._get_config('color_server_log_format',
self.DEFAULT_FORMAT),
'task_log_level': self._get_config('task_log_level', logging.DEBUG),
'server_log_level': self._get_config('server_log_level', logging.DEBUG),
'log_colors': self._get_config('log_colors', {}),
'secondary_log_colors': self._get_config('secondary_log_colors', {}),
}
return settings
| [
"[email protected]"
] | |
d4bbd03fe42ba9327eb0c52142ba5c84766cd36c | ec153cf6c65b02d8d714e042bbdcf476001c6332 | /keystone/common/fernet_utils.py | cb7a69863bc71fd65b38a3e1a3ff689a24470723 | [] | no_license | bopopescu/dashboard | c4322f7602a9ba589400212aaef865ed4ffa8bdb | a74b4a549cd7d516dd9a0f5f2e17d06679c13bf6 | refs/heads/master | 2022-11-21T15:56:42.755310 | 2017-07-05T12:04:14 | 2017-07-05T12:04:17 | 281,596,428 | 0 | 0 | null | 2020-07-22T06:38:37 | 2020-07-22T06:38:36 | null | UTF-8 | Python | false | false | 11,370 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import stat
from cryptography import fernet
from oslo_log import log
import keystone.conf
from keystone.i18n import _LE, _LW, _LI
LOG = log.getLogger(__name__)
CONF = keystone.conf.CONF
# NOTE(lbragstad): In the event there are no encryption keys on disk, let's use
# a default one until a proper key repository is set up. This allows operators
# to gracefully upgrade from Mitaka to Newton without a key repository,
# especially in multi-node deployments. The NULL_KEY is specific to credential
# encryption only and has absolutely no beneficial purpose outside of easing
# upgrades.
NULL_KEY = base64.urlsafe_b64encode(b'\x00' * 32)
class FernetUtils(object):
def __init__(self, key_repository=None, max_active_keys=None):
self.key_repository = key_repository
self.max_active_keys = max_active_keys
def validate_key_repository(self, requires_write=False):
"""Validate permissions on the key repository directory."""
# NOTE(lbragstad): We shouldn't need to check if the directory was
# passed in as None because we don't set allow_no_values to True.
# ensure current user has sufficient access to the key repository
is_valid = (os.access(self.key_repository, os.R_OK) and
os.access(self.key_repository, os.X_OK))
if requires_write:
is_valid = (is_valid and
os.access(self.key_repository, os.W_OK))
if not is_valid:
LOG.error(
_LE('Either [fernet_tokens] key_repository does not exist or '
'Keystone does not have sufficient permission to access '
'it: %s'), self.key_repository)
else:
# ensure the key repository isn't world-readable
stat_info = os.stat(self.key_repository)
if(stat_info.st_mode & stat.S_IROTH or
stat_info.st_mode & stat.S_IXOTH):
LOG.warning(_LW(
'key_repository is world readable: %s'),
self.key_repository)
return is_valid
def create_key_directory(self, keystone_user_id=None,
keystone_group_id=None):
"""Attempt to create the key directory if it doesn't exist."""
if not os.access(self.key_repository, os.F_OK):
LOG.info(_LI(
'key_repository does not appear to exist; attempting to '
'create it'))
try:
os.makedirs(self.key_repository, 0o700)
except OSError:
LOG.error(_LE(
'Failed to create key_repository: either it already '
'exists or you don\'t have sufficient permissions to '
'create it'))
if keystone_user_id and keystone_group_id:
os.chown(
self.key_repository,
keystone_user_id,
keystone_group_id)
elif keystone_user_id or keystone_group_id:
LOG.warning(_LW(
'Unable to change the ownership of key_repository without '
'a keystone user ID and keystone group ID both being '
'provided: %s') % self.key_repository)
def _create_new_key(self, keystone_user_id, keystone_group_id):
"""Securely create a new encryption key.
Create a new key that is readable by the Keystone group and Keystone
user.
"""
key = fernet.Fernet.generate_key() # key is bytes
# This ensures the key created is not world-readable
old_umask = os.umask(0o177)
if keystone_user_id and keystone_group_id:
old_egid = os.getegid()
old_euid = os.geteuid()
os.setegid(keystone_group_id)
os.seteuid(keystone_user_id)
elif keystone_user_id or keystone_group_id:
LOG.warning(_LW(
'Unable to change the ownership of the new key without a '
'keystone user ID and keystone group ID both being provided: '
'%s') %
self.key_repository)
# Determine the file name of the new key
key_file = os.path.join(self.key_repository, '0')
try:
with open(key_file, 'w') as f:
# convert key to str for the file.
f.write(key.decode('utf-8'))
finally:
# After writing the key, set the umask back to it's original value.
# Do the same with group and user identifiers if a Keystone group
# or user was supplied.
os.umask(old_umask)
if keystone_user_id and keystone_group_id:
os.seteuid(old_euid)
os.setegid(old_egid)
LOG.info(_LI('Created a new key: %s'), key_file)
def initialize_key_repository(self, keystone_user_id=None,
keystone_group_id=None):
"""Create a key repository and bootstrap it with a key.
:param keystone_user_id: User ID of the Keystone user.
:param keystone_group_id: Group ID of the Keystone user.
"""
# make sure we have work to do before proceeding
if os.access(os.path.join(self.key_repository, '0'),
os.F_OK):
LOG.info(_LI('Key repository is already initialized; aborting.'))
return
# bootstrap an existing key
self._create_new_key(keystone_user_id, keystone_group_id)
# ensure that we end up with a primary and secondary key
self.rotate_keys(keystone_user_id, keystone_group_id)
def rotate_keys(self, keystone_user_id=None, keystone_group_id=None):
"""Create a new primary key and revoke excess active keys.
:param keystone_user_id: User ID of the Keystone user.
:param keystone_group_id: Group ID of the Keystone user.
Key rotation utilizes the following behaviors:
- The highest key number is used as the primary key (used for
encryption).
- All keys can be used for decryption.
- New keys are always created as key "0," which serves as a placeholder
before promoting it to be the primary key.
This strategy allows you to safely perform rotation on one node in a
cluster, before syncing the results of the rotation to all other nodes
(during both key rotation and synchronization, all nodes must recognize
all primary keys).
"""
# read the list of key files
key_files = dict()
for filename in os.listdir(self.key_repository):
path = os.path.join(self.key_repository, str(filename))
if os.path.isfile(path):
try:
key_id = int(filename)
except ValueError: # nosec : name isn't a number
pass
else:
key_files[key_id] = path
LOG.info(_LI('Starting key rotation with %(count)s key files: '
'%(list)s'), {
'count': len(key_files),
'list': list(key_files.values())})
# determine the number of the new primary key
current_primary_key = max(key_files.keys())
LOG.info(_LI('Current primary key is: %s'), current_primary_key)
new_primary_key = current_primary_key + 1
LOG.info(_LI('Next primary key will be: %s'), new_primary_key)
# promote the next primary key to be the primary
os.rename(
os.path.join(self.key_repository, '0'),
os.path.join(self.key_repository, str(new_primary_key))
)
key_files.pop(0)
key_files[new_primary_key] = os.path.join(
self.key_repository,
str(new_primary_key))
LOG.info(_LI('Promoted key 0 to be the primary: %s'), new_primary_key)
# add a new key to the rotation, which will be the *next* primary
self._create_new_key(keystone_user_id, keystone_group_id)
max_active_keys = self.max_active_keys
# purge excess keys
# Note that key_files doesn't contain the new active key that was
# created, only the old active keys.
keys = sorted(key_files.keys(), reverse=True)
while len(keys) > (max_active_keys - 1):
index_to_purge = keys.pop()
key_to_purge = key_files[index_to_purge]
LOG.info(_LI('Excess key to purge: %s'), key_to_purge)
os.remove(key_to_purge)
def load_keys(self, use_null_key=False):
"""Load keys from disk into a list.
The first key in the list is the primary key used for encryption. All
other keys are active secondary keys that can be used for decrypting
tokens.
:param use_null_key: If true, a known key containing null bytes will be
appended to the list of returned keys.
"""
if not self.validate_key_repository():
if use_null_key:
return [NULL_KEY]
return []
# build a dictionary of key_number:encryption_key pairs
keys = dict()
for filename in os.listdir(self.key_repository):
path = os.path.join(self.key_repository, str(filename))
if os.path.isfile(path):
with open(path, 'r') as key_file:
try:
key_id = int(filename)
except ValueError: # nosec : filename isn't a number,
# ignore this file since it's not a key.
pass
else:
keys[key_id] = key_file.read()
if len(keys) != self.max_active_keys:
# Once the number of keys matches max_active_keys, this log entry
# is too repetitive to be useful. Also note that it only makes
# sense to log this message for tokens since credentials doesn't
# have a `max_active_key` configuration option.
if self.key_repository == CONF.fernet_tokens.key_repository:
LOG.debug(
'Loaded %(count)d Fernet keys from %(dir)s, but '
'`[fernet_tokens] max_active_keys = %(max)d`; perhaps '
'there have not been enough key rotations to reach '
'`max_active_keys` yet?', {
'count': len(keys),
'max': self.max_active_keys,
'dir': self.key_repository})
# return the encryption_keys, sorted by key number, descending
key_list = [keys[x] for x in sorted(keys.keys(), reverse=True)]
if use_null_key:
key_list.append(NULL_KEY)
return key_list
| [
"[email protected]"
] | |
dd29ba0161560b2e89b22a3616b0cd936035b9cb | 2589e080a2cc76bae58963576ebd76fc024bb64e | /Snakefile | 39024ec9c31e8fa55fdce4689f6a3f81b6f6f5fc | [
"Apache-2.0"
] | permissive | inambioinfo/2020plus | eb0d8932d3d0748d9676430c9d22af5c50727b60 | 5c1bda3cfe59719509408f96c473d6d9d582442f | refs/heads/master | 2020-03-28T05:18:48.417528 | 2018-08-02T15:04:32 | 2018-08-02T15:04:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,862 | from os.path import join
# configuration file
if 'config' not in vars() or not config or 'ntrees' not in config:
configfile: "config.yaml"
# output directory
output_dir=config["output_dir"]
# MAF file containing mutations
mutations=config["mutations"]
# pre-trained classifier
trained_classifier=config["trained_classifier"]
# flag for CV
cv="--cv"
# number of trees in RF
ntrees=config['ntrees']
ntrees2=5*ntrees
# params for simulations
num_iter=10
ids=list(map(str, range(1, num_iter+1)))
# minimum recurrent missense
min_recur=3
###################################
# Top-level rules
###################################
rule all:
input: join(output_dir, "output/results/r_random_forest_prediction.txt")
# same rule is "all", but semantically more meaningful
rule predict:
"""
Predict on a pan-cancer set of somatic mutations from multiple cancer types.
This command will simultaneous train 20/20+ and make predictions using
gene hold-out cross-validation. The predict command uses the following parameters:
Input
-----
mutations : MAF file
MAF file containing mutations. Please see http://probabilistic2020.readthedocs.io/en/latest/tutorial.html#mutations for details on file format.
Output
------
output_dir : directory
Path of directory to save output. The results are save in the
"output/results/r_random_forest_prediction.txt" file.
"""
input: join(output_dir, "output/results/r_random_forest_prediction.txt")
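# Example invocation (paths are placeholders):
#   snakemake -p predict --cores 10 --config mutations=mutations.maf output_dir=out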
# top-level rule to only train the 20/20+ random forest
rule train:
"""
Train a 20/20+ model to predict cancer driver genes. The trained model can
be used for subsequent prediction. The train command uses the following parameters:
Input
-----
mutations : MAF file
MAF file containing mutations. Please see http://probabilistic2020.readthedocs.io/en/latest/tutorial.html#mutations for details on file format.
Output
------
output_dir : directory
Path to file directory to save output. The saved model file from
20/20+ will be named 2020plus.Rdata by default.
"""
input: join(output_dir, "2020plus.Rdata")
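# Example invocation (paths are placeholders):
#   snakemake -p train --cores 10 --config mutations=mutations.maf output_dir=out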
# use an already trained 20/20+ random forest to predict new data
rule pretrained_predict:
"""
Predict cancer driver genes using a pre-trained 20/20+ model from the "train" command. The pretrained_predict command uses the following parameters:
Input
-----
mutations : MAF file
MAF file containing mutations. Please see http://probabilistic2020.readthedocs.io/en/latest/tutorial.html#mutations for details on file format.
trained_classifier : .Rdata file
File path of saved R workspace containing the trained 20/20+ model.
Output
------
output_dir : directory
File path of directory to save output. The results are save in the
"pretrained_output/results/r_random_forest_prediction.txt" file.
"""
input: join(output_dir, "pretrained_output/results/r_random_forest_prediction.txt")
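# Example invocation (paths are placeholders):
#   snakemake -p pretrained_predict --cores 10 \
#     --config mutations=mutations.maf trained_classifier=data/2020plus_100k.Rdata output_dir=out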
rule help:
"""
Print list of all targets with help.
"""
run:
print('Input and output parameters are specified via the command line or in the config.yaml file. If done via the command line, e.g., the "trained_classifier" option would be specified by the following argument:\n\n--config trained_classifier="data/2020plus_100k.Rdata"\n\nMultiple options can follow after the --config flag.\n')
myhelp = ['predict', 'train', 'pretrained_predict', 'help']
for myrule in workflow.rules:
if myrule.name in myhelp:
print('='*len(myrule.name))
print(myrule.name)
print('='*len(myrule.name))
print(myrule.docstring)
print('See "snakemake --help" for additional snakemake command line help documentation.\n')
###################################
# Code for calculating empirical null
# distribution based on simulations
###################################
# Simulate MAF files for subsequent running by oncogene/tsg test
rule simMaf:
input:
MUTATIONS=mutations
params:
min_recur=min_recur,
data_dir=config["data_dir"]
output:
join(output_dir, "simulated_summary/chasm_sim_maf{iter,[0-9]+}.txt")
shell:
"mut_annotate --log-level=INFO "
" -b {params.data_dir}/snvboxGenes.bed -i {params.data_dir}/snvboxGenes.fa -c 1.5 "
" -m {input.MUTATIONS} -p 0 -n 1 --maf --seed=$(({wildcards.iter}*42)) "
" -r {params.min_recur} --unique -o {output}"
# calculate summarized features for the simulated mutations
rule simSummary:
input:
MUTATIONS=mutations
params:
min_recur=min_recur,
data_dir=config["data_dir"]
output:
join(output_dir, "simulated_summary/chasm_sim_summary{iter}.txt")
shell:
"mut_annotate --log-level=INFO "
" -b {params.data_dir}/snvboxGenes.bed -i {params.data_dir}/snvboxGenes.fa "
" -c 1.5 -m {input.MUTATIONS} -p 0 -n 1 --summary --seed=$(({wildcards.iter}*42)) "
" --score-dir={params.data_dir}/scores "
" --unique -r {params.min_recur} -o {output}"
# run probabilistic2020 tsg statistical test on simulated MAF
rule simTsg:
input:
join(output_dir, "simulated_summary/chasm_sim_maf{iter}.txt")
params:
num_sim=config["NUMSIMULATIONS"],
data_dir=config["data_dir"]
threads: 10
output:
join(output_dir, "simulated_summary/tsg_sim{iter}.txt")
shell:
"probabilistic2020 --log-level=INFO tsg "
" -c 1.5 -n {params.num_sim} -b {params.data_dir}/snvboxGenes.bed "
" -m {input} -i {params.data_dir}/snvboxGenes.fa -p {threads} -d 1 "
" -o {output} "
# run probabilistic2020 oncogene statistical test on simulated MAF
rule simOg:
input:
mutations=join(output_dir, "simulated_summary/chasm_sim_maf{iter}.txt")
params:
min_recur=min_recur,
num_sim=config["NUMSIMULATIONS"],
data_dir=config["data_dir"]
threads: 10
output:
join(output_dir, "simulated_summary/oncogene_sim{iter}.txt")
shell:
"probabilistic2020 --log-level=INFO oncogene "
" -c 1.5 -n {params.num_sim} -b {params.data_dir}/snvboxGenes.bed "
" -m {input.mutations} -i {params.data_dir}/snvboxGenes.fa -p {threads} "
" --score-dir={params.data_dir}/scores -r {params.min_recur} "
" -o {output}"
# Combine the results from simOg, simTsg, and simSummary
rule simFeatures:
input:
summary=join(output_dir, "simulated_summary/chasm_sim_summary{iter}.txt"),
og=join(output_dir, "simulated_summary/oncogene_sim{iter}.txt"),
tsg=join(output_dir, "simulated_summary/tsg_sim{iter}.txt")
params:
data_dir=config["data_dir"]
output:
join(output_dir, "simulated_summary/simulated_features{iter}.txt")
shell:
"python `which 2020plus.py` features "
" -s {input.summary} --tsg-test {input.tsg} -og-test {input.og} "
" -o {output}"
# final processing of the simulation results
rule finishSim:
input:
expand(join(output_dir, "simulated_summary/simulated_features{iter}.txt"), iter=ids)
output:
join(output_dir, "simulated_summary/simulated_features.txt")
shell:
'cat {input} | awk -F"\t" \'{{OFS="\t"}} NR == 1 || !/^gene/\' - > ' + output_dir + '/simulated_summary/tmp_simulated_features.txt ; '
'cat '+output_dir+'/simulated_summary/tmp_simulated_features.txt | awk -F"\t" \'{{OFS="\t"}}{{if(NR != 1) printf (NR"\t"); if(NR!=1) for(i=2; i<NF; i++) printf ($i"\t"); if(NR != 1) print $i; if(NR==1) print $0}}\' - > {output}'
###################################
# Code for calculating results on
# actually observed mutations
###################################
# calculate summarized features for the observed mutations
rule summary:
input:
mutations=mutations
params:
min_recur=min_recur,
data_dir=config["data_dir"]
output:
join(output_dir, "summary.txt")
shell:
"mut_annotate --log-level=INFO "
" -b {params.data_dir}/snvboxGenes.bed -i {params.data_dir}/snvboxGenes.fa "
" -c 1.5 -m {input.mutations} -p 0 -n 0 --summary "
" --score-dir={params.data_dir}/scores "
" --unique -r {params.min_recur} -o {output}"
# run probabilistic2020 tsg statistical test on MAF
rule tsg:
input:
mutations
params:
num_sim=config["NUMSIMULATIONS"],
data_dir=config["data_dir"]
threads: 10
output:
join(output_dir, "tsg.txt")
shell:
"probabilistic2020 -v --log-level=INFO tsg "
" -c 1.5 -n {params.num_sim} -b {params.data_dir}/snvboxGenes.bed "
" -m {input} -i {params.data_dir}/snvboxGenes.fa -p {threads} -d 1 "
" -o {output} "
# run probabilistic2020 oncogene statistical test on MAF
rule og:
input:
mutations=mutations
params:
min_recur=min_recur,
num_sim=config["NUMSIMULATIONS"],
data_dir=config["data_dir"]
threads: 10
output:
join(output_dir, "oncogene.txt")
shell:
"probabilistic2020 -v --log-level=INFO oncogene "
" -c 1.5 -n {params.num_sim} -b {params.data_dir}/snvboxGenes.bed "
" -m {input.mutations} -i {params.data_dir}/snvboxGenes.fa -p {threads} "
" --unique --score-dir={params.data_dir}/scores -r {params.min_recur} "
" -o {output}"
# Combine the results from og, tsg, and summary
rule features:
input:
summary=join(output_dir, "summary.txt"),
og=join(output_dir, "oncogene.txt"),
tsg=join(output_dir, "tsg.txt")
params:
data_dir=config["data_dir"]
output:
join(output_dir, "features.txt")
shell:
"python `which 2020plus.py` features "
" -s {input.summary} --tsg-test {input.tsg} -og-test {input.og} "
" -o {output}"
# perform prediction by random forest
# in this case the data is pan-cancer
# and so a cross-validation loop is performed
rule cv_predict:
input:
features=join(output_dir, "features.txt"),
sim_features=join(output_dir, "simulated_summary/simulated_features.txt"),
params:
ntrees=ntrees,
ntrees2=ntrees2,
data_dir=config["data_dir"],
output_dir=config["output_dir"]
output:
join(output_dir, "output/results/r_random_forest_prediction.txt"),
join(output_dir, "trained.Rdata")
shell:
"""
python `which 2020plus.py` --log-level=INFO train -d .7 -o 1.0 -n {{params.ntrees2}} -r {outdir}/trained.Rdata --features={{input.features}} --random-seed 71
python `which 2020plus.py` --log-level=INFO classify --trained-classifier {outdir}/trained.Rdata --null-distribution {outdir}/simulated_null_dist.txt --features {{input.sim_features}} --simulated
python `which 2020plus.py` --out-dir {outdir}/output --log-level=INFO classify -n {{params.ntrees}} -d .7 -o 1.0 --features {{input.features}} --null-distribution {outdir}/simulated_null_dist.txt --random-seed 71
""".format(outdir=output_dir)
#############################
# Rules for just training on
# pan-cancer data
#############################
rule train_pancan:
input:
features=join(output_dir, "features.txt")
params:
ntrees=ntrees,
data_dir=config["data_dir"],
output_dir=config["output_dir"]
output:
join(output_dir, "2020plus.Rdata")
shell:
"""
python `which 2020plus.py` --log-level=INFO train -d .7 -o 1.0 -n {{params.ntrees}} --features={{input.features}} {cv} --random-seed 71 -r {outdir}/2020plus.Rdata
""".format(outdir=output_dir, cv=cv)
#############################
# Rules for predicting using
# a trained classifier on a separate
# mutation data set
#############################
rule predict_test:
input:
trained_classifier=trained_classifier,
features=join(output_dir, "features.txt"),
sim_features=join(output_dir, "simulated_summary/simulated_features.txt"),
params:
ntrees=ntrees,
output:
join(output_dir, "pretrained_output/results/r_random_forest_prediction.txt")
shell:
"""
python `which 2020plus.py` --log-level=INFO classify --trained-classifier {{input.trained_classifier}} --null-distribution {outdir}/simulated_null_dist.txt --features {{input.sim_features}} --simulated {cv}
python `which 2020plus.py` --out-dir {outdir}/pretrained_output --log-level=INFO classify -n {{params.ntrees}} --trained-classifier {{input.trained_classifier}} -d .7 -o 1.0 --features {{input.features}} --null-distribution {outdir}/simulated_null_dist.txt --random-seed 71 {cv}
""".format(outdir=output_dir, cv=cv)
| [
"[email protected]"
] | ||
a500d0d54970ec25831ee58b453f03daf5f02059 | 306baa2ad596e3962e427d587e7b0d4175a1e48e | /configs/ttfnetv3/ttfv3net_r34_0114_3l_128_48_s16twice_basicup_aug_10x.py | 847551575ebcdb8c878b17ac7b992f8214941afd | [
"Apache-2.0"
] | permissive | mrsempress/mmdetection | 9c7ed7ed0c9f1d6200f79a2ab14fc0c8fe32c18a | cb650560c97a2fe56a9b369a1abc8ec17e06583a | refs/heads/master | 2022-04-24T04:34:30.959082 | 2020-04-26T07:52:23 | 2020-04-26T07:52:23 | 258,957,856 | 0 | 0 | Apache-2.0 | 2020-04-26T06:33:32 | 2020-04-26T06:33:32 | null | UTF-8 | Python | false | false | 4,154 | py | # model settings
model = dict(
type='TTFNet',
pretrained='modelzoo://resnet34',
backbone=dict(
type='ResNet',
depth=34,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
style='pytorch'),
neck=None,
bbox_head=dict(
type='TTFv3Head',
inplanes=(64, 128, 256, 512),
planes=(256, 128, 64),
down_ratio=(16, 8, 4),
hm_head_channels=((128, 128), (128, 128), (64, 64)),
wh_head_channels=((32, 32), (32, 32), (32, 32)),
num_classes=81,
shortcut_cfg=(1, 2, 3),
s16_shortcut_twice=True,
wh_scale_factor=(8., 8., 8.),
alpha=0.6,
beta=0.6,
hm_weight=(1.4, 1.4, 1.),
wh_weight=(7., 7., 5.),
length_range=((128, 512), (48, 128), (1, 48)),
train_branch=(True, True, True),
inf_branch=(True, True, True),
use_simple_nms=True,
fast_nms=False,
up_conv_cfg=dict(type='BasicBlock'),
max_objs=128,
conv_cfg=None,
norm_cfg=dict(type='BN')))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(debug=False)
test_cfg = dict(score_thr=0.01, max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.002,
momentum=0.9,
weight_decay=0.0004,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[90, 110])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_in_n_epoch=[63, 90])
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 120
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ttfv3net_r34_10x'
load_from = 'work_dirs/2001/0215_ttfv334_0114_3l_128_48_s16twice_basicup2_aug_10x/work_dirs/ttfv3net_r34_10x_0217_1444/epoch_120_iter_127630.pth'
resume_from = None
workflow = [('train', 1)]
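# Usage sketch (an assumption about the surrounding mmdetection repo, not part
# of this config): training would typically be launched with the standard
# entry point, e.g.
#   python tools/train.py configs/ttfnetv3/ttfv3net_r34_0114_3l_128_48_s16twice_basicup_aug_10x.py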
| [
"[email protected]"
] | |
424604fc081fe31949ac6a2ea5e3618af401701a | b6e34dec0831f43d442e89c64f521f77bb2438b2 | /fbta/fbta_sequence.py | e2b01b7dfc4e19723ab1165e36d73b524e70cf67 | [] | no_license | kandation/FBTAFast | a1a38e09d5964915d46492f84f8fa0fead43185c | 505a2f232ef7ef9b6fc153357fb4eec5480cd92a | refs/heads/master | 2022-12-31T02:43:09.339384 | 2020-04-22T19:13:54 | 2020-04-22T19:13:54 | 212,693,322 | 0 | 0 | null | 2020-04-22T19:13:56 | 2019-10-03T22:34:15 | HTML | UTF-8 | Python | false | false | 4,209 | py | import time
from pprint import pprint
from fbta_04_activity_to_card import FBTAActivityToCardsNew
from fbta_05_cards_download_manager import FBTACardsDownloadManager
from fbta_02_clusters import FBTAClusterInfo
from fbta_06_photos_download_manager import FBTAPhotosDownloadManager
from fbta_07_dataft import FBTADataft
from fbta_120_album_count_manager import FBTAAlbumCountManager
from fbta_configs import FBTAConfigs
from fbta_03_history_download_manager import FBTAHistoryDownloadManager
from fbta_mkdir import FBTAMkdir
from fbta_node_master import FBTANodeMaster
from fbta_sequence_func import FBTASequenceFunction
from fbta_settings import FBTASettings
from fbta_01_yearbox import FBTAYearBox
class FBTASequence(FBTASequenceFunction):
def __init__(self, setting: FBTASettings, configs: FBTAConfigs):
FBTASequenceFunction.__init__(self, setting, configs)
self.__node_master: FBTANodeMaster = FBTANodeMaster.NONE
self.__node_yearbox = None
self.__node_cluster_info: FBTAClusterInfo = None
def start(self):
self._warnningTimeOptimize()
self.__px0_initDirectory()
self.__p00_generateMasterNode(0)
self._showFinishedProcessEndNotify(0)
self.__p01_processYearBox(1)
self._showFinishedProcessEndNotify(1)
self.__p02_processsClustersInfo(2)
self._showFinishedProcessEndNotify(2)
self.__p03_processDownloader(3)
self._showFinishedProcessEndNotify(3)
self.__p04_processDatabaseAsCard(4)
self._showFinishedProcessEndNotify(4)
self.__p05_processCardAsPost(5)
self._showFinishedProcessEndNotify(5)
self.__processDonloadPhotos(6)
self._showFinishedProcessEndNotify(6)
self.__processDataft(7)
self._showFinishedProcessEndNotify(7)
self.__p08_processAlbumCount(8)
self._showFinishedProcessEndNotify(8)
        print('END TEST')
exit()
def __px0_initDirectory(self):
self.__mkdirClass = FBTAMkdir(self._settings, self._configs)
self.__mkdirClass.startProjectDir()
def __p00_generateMasterNode(self, step):
if self._isInTestStep(step):
self.__node_master = FBTANodeMaster(self._settings, self._configs)
self.__node_master.start()
def __p01_processYearBox(self, step):
if self._isInTestStep(step):
self.__node_yearbox = FBTAYearBox(self.__node_master)
cond = self._settings.renew_index
cond = cond or not self.__node_yearbox.hasYearboxFile(self._settings.dir_data_path)
if cond:
self.__node_yearbox.run()
self.__node_yearbox.save(self._settings.dir_data_path)
else:
self.__node_yearbox.load(self._settings.dir_data_path)
def __p02_processsClustersInfo(self, step):
if self._isInTestStep(step):
self.__node_cluster_info = FBTAClusterInfo(self._settings, self._configs, self.__node_yearbox)
self.__node_cluster_info.run()
def __p03_processDownloader(self, step):
if self._isInTestStep(step):
# Step01 Download Activity
dl = FBTAHistoryDownloadManager(self.__node_master,
self.__node_cluster_info.clusters)
dl.main()
def __p04_processDatabaseAsCard(self, step):
if self._isInTestStep(step):
analysis = FBTAActivityToCardsNew(self._settings, self._configs)
analysis.main()
def __p05_processCardAsPost(self, step):
if self._isInTestStep(step):
order = FBTACardsDownloadManager(self.__node_master)
order.main()
def __processDonloadPhotos(self, step):
if self._isInTestStep(step):
photos = FBTAPhotosDownloadManager(self.__node_master)
photos.main()
def __p08_processAlbumCount(self, step):
if self._isInTestStep(step):
album_count = FBTAAlbumCountManager(self.__node_master)
album_count.main()
def __processDataft(self, step):
if self._isInTestStep(step):
dataft = FBTADataft(self.__node_master)
dataft.main() | [
"[email protected]"
] | |
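# Illustrative driver for the FBTASequence module above (added sketch, not part
# of the repo; whether FBTASettings and FBTAConfigs are constructible without
# arguments is an assumption about the rest of the package).
# from fbta_sequence import FBTASequence
# from fbta_settings import FBTASettings
# from fbta_configs import FBTAConfigs
# FBTASequence(FBTASettings(), FBTAConfigs()).start()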
c6b360f08562aaddf5900e08cd01d476537105f1 | 0edb94d9de7222d31ac8350a8cc330179f69ef60 | /urls.py | 5b7e936fb82222a7f0a25bc6333ac4cee7b25143 | [] | no_license | ondrejsika/django-1.6-blank-project | 0f503fd661ec38fd3a9977d2e8fb4772d1c6da80 | 51b59c0a3102d8601c0490d2ee0e3b65afee0b33 | refs/heads/master | 2016-09-11T00:06:38.522221 | 2015-04-12T18:54:28 | 2015-04-12T18:54:28 | 26,711,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'twistedexample.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| [
"[email protected]"
] | |
d259ae82743f9dd80d1891b6d940a00ed317e4c1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03815/s988732380.py | cb8e305d533c05ff2909fab480bd1c9ceab07cc4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | x = int(input())
a = x//11
b = x%11
if b == 0: print(2*a)
elif b > 6: print(2*a+2)
else: print(2*a+1) | [
"[email protected]"
] | |
94b4e8d8f567da5caa24b59eddc1e618e7006a22 | 12c41119156dd3783c3801e07f5f973289f26bb0 | /aliyun-python-sdk-dbs/aliyunsdkdbs/request/v20190306/ModifyBackupPlanNameRequest.py | ba55c010034c829610d62b54c02abad12cceb8cd | [
"Apache-2.0"
] | permissive | toywei/aliyun-openapi-python-sdk | bfe0893da38af9b222ce072fd7587d5b6cdce204 | ce8f683e3201fca8c473512267f50a34f71e31d3 | refs/heads/master | 2020-08-07T23:42:00.053692 | 2019-10-08T08:50:21 | 2019-10-08T08:50:21 | 213,626,962 | 1 | 0 | NOASSERTION | 2019-10-08T11:43:15 | 2019-10-08T11:43:15 | null | UTF-8 | Python | false | false | 1,697 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyBackupPlanNameRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dbs', '2019-03-06', 'ModifyBackupPlanName','cbs')
def get_BackupPlanName(self):
return self.get_query_params().get('BackupPlanName')
def set_BackupPlanName(self,BackupPlanName):
self.add_query_param('BackupPlanName',BackupPlanName)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_BackupPlanId(self):
return self.get_query_params().get('BackupPlanId')
def set_BackupPlanId(self,BackupPlanId):
self.add_query_param('BackupPlanId',BackupPlanId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | [
"[email protected]"
] | |
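# Minimal usage sketch for the ModifyBackupPlanName request above (added; the
# credentials, region and ids below are placeholders, not values from the SDK).
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = ModifyBackupPlanNameRequest()
request.set_BackupPlanId('dbs-xxxxxxxx')
request.set_BackupPlanName('nightly-plan')
print(client.do_action_with_exception(request))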
1db358528dfe7eb150bfcf52b137cce3df1bb254 | 027dd49b92ee92c8faa5ea05bce95d28efd2268d | /Documents/django/crudView/crudapp/migrations/0001_initial.py | 03b4d87a72cb9fdab2c706d0b7c2ab583aa93a89 | [] | no_license | arunkumar27-ank-tech/RestAPIcrud | 0ac06a4f0b6cf3373eb76b815e3cd6c5748610d5 | 387c5fad78f4b72cfbbe47d06e79c1a15038ad69 | refs/heads/master | 2023-06-13T21:44:08.157685 | 2021-07-06T14:11:12 | 2021-07-06T14:11:12 | 383,477,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 3.1.5 on 2021-07-06 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('completed', models.BooleanField(default=False)),
],
),
]
| [
"[email protected]"
] | |
0527bee5e87be348d59d9a2dceebb0b42f5a6ea2 | c2be395eac600d0d853de03cd67070bd8391038f | /ofm_request_reverse_rd/__manifest__.py | 39a1213f2bec2e986a4933fa317933ec0a2efee5 | [] | no_license | amendoncabh/salary_emp | 960cfdb4df48df70ab361886039c790840a5e8d2 | 2ac2dd9461271153cb2ee406bf70a29f614c25f1 | refs/heads/master | 2022-03-30T22:35:10.704092 | 2020-01-05T16:23:20 | 2020-01-05T16:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | # -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Trinity Roots :: OFM Request Approve Reverse RD",
"summary": "For updating related project modules",
"version": "8.0.1.0.0",
"category": "Uncategorized",
"description": """
MODULE
======
* This module MUST be depended by related project module.
* If this module is updated, All related module will be updated too.
""",
"website": "http://www.trinityroots.co.th/",
"author": "Trinity Roots",
"license": "AGPL-3",
"application": False,
"installable": True,
"external_dependencies": {
"python": [],
"bin": [],
},
# any module necessary for this one to work correctly
'depends': [
'base',
'web_notify',
'pos_customize',
'ofm_inventory_ext',
'tr_core_update',
],
# always loaded
'data': [
'security/request_reverse_rd_security.xml',
'security/ir.model.access.csv',
'views/ofm_request_reverse_view.xml',
'views/stock_view.xml',
'wizard/reason_reject_wizard_view.xml',
'wizard/reason_approve_wizard_view.xml',
],
}
| [
"[email protected]"
] | |
2aa8324aee23f64603e3406c3de9441e9cb98c51 | 4b4544e5860bf2776ef578ba8e91dd34a9cf2b80 | /nodejs/patches/pkgsrc/lang/nodejs/patches/patch-deps_cares_cares.gyp | ba1548a9de0c9d65a856346f95ff4d5904181d81 | [
"CC0-1.0"
] | permissive | nabla-containers/rumprun-packages | 1e00e5cf0b6995f1772e8dff6b20d7d064ac71cf | 687c6dab278ff3dba68b914e1ed0511eb5525551 | refs/heads/solo5 | 2021-07-08T10:42:24.436007 | 2019-02-21T22:39:36 | 2019-02-21T22:43:57 | 137,268,640 | 1 | 4 | NOASSERTION | 2019-02-20T02:29:18 | 2018-06-13T20:44:12 | Makefile | UTF-8 | Python | false | false | 647 | gyp | $NetBSD: patch-deps_cares_cares.gyp,v 1.1 2013/05/22 15:17:07 mspo Exp $
Add support for NetBSD.
--- deps/cares/cares.gyp.orig 2013-03-14 10:55:24.000000000 +0900
+++ deps/cares/cares.gyp 2013-03-14 10:55:47.000000000 +0900
@@ -140,6 +140,10 @@
'include_dirs': [ 'config/freebsd' ],
'sources': [ 'config/freebsd/ares_config.h' ]
}],
+ [ 'OS=="netbsd"', {
+ 'include_dirs': [ 'config/netbsd' ],
+ 'sources': [ 'config/netbsd/ares_config.h' ]
+ }],
[ 'OS=="openbsd"', {
'include_dirs': [ 'config/openbsd' ],
'sources': [ 'config/openbsd/ares_config.h' ]
| [
"[email protected]"
] | |
ed240b64758a709dbdc34b22204a6fef55cf355e | b51ac97fc0dcb19c401f92a48e8657de9a2b86db | /tccli/services/tke/tke_client.py | a2c1e960dff0206080bec664527d89c408e09bbe | [
"Apache-2.0"
] | permissive | tarnover/tencentcloud-cli | 657b97d7b07997cff98456c111847d97324d9372 | 5b0537913a33884a20d7663405a8aa1c2276b41a | refs/heads/master | 2020-05-03T12:54:37.091798 | 2019-04-05T16:23:21 | 2019-04-05T16:23:21 | 178,639,639 | 0 | 0 | Apache-2.0 | 2019-04-05T05:36:09 | 2019-03-31T03:49:37 | Python | UTF-8 | Python | false | false | 12,592 | py | # -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.tke.v20180525 import tke_client as tke_client_v20180525
from tencentcloud.tke.v20180525 import models as models_v20180525
from tccli.services.tke import v20180525
from tccli.services.tke.v20180525 import help as v20180525_help
def doAddExistedInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("AddExistedInstances", g_param[OptionsDefine.Version])
return
param = {
"ClusterId": Utils.try_to_json(argv, "--ClusterId"),
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceAdvancedSettings": Utils.try_to_json(argv, "--InstanceAdvancedSettings"),
"EnhancedService": Utils.try_to_json(argv, "--EnhancedService"),
"LoginSettings": Utils.try_to_json(argv, "--LoginSettings"),
"SecurityGroupIds": Utils.try_to_json(argv, "--SecurityGroupIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.AddExistedInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.AddExistedInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeClusters(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeClusters", g_param[OptionsDefine.Version])
return
param = {
"ClusterIds": Utils.try_to_json(argv, "--ClusterIds"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"Filters": Utils.try_to_json(argv, "--Filters"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeClustersRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeClusters(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteClusterInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DeleteClusterInstances", g_param[OptionsDefine.Version])
return
param = {
"ClusterId": Utils.try_to_json(argv, "--ClusterId"),
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
"InstanceDeleteMode": Utils.try_to_json(argv, "--InstanceDeleteMode"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteClusterInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DeleteClusterInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeClusterInstances(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeClusterInstances", g_param[OptionsDefine.Version])
return
param = {
"ClusterId": Utils.try_to_json(argv, "--ClusterId"),
"Offset": Utils.try_to_json(argv, "--Offset"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeClusterInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeClusterInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20180525": tke_client_v20180525,
}
MODELS_MAP = {
"v20180525": models_v20180525,
}
ACTION_MAP = {
"AddExistedInstances": doAddExistedInstances,
"DescribeClusters": doDescribeClusters,
"DeleteClusterInstances": doDeleteClusterInstances,
"DescribeClusterInstances": doDescribeClusterInstances,
}
AVAILABLE_VERSION_LIST = [
v20180525.version,
]
AVAILABLE_VERSIONS = {
'v' + v20180525.version.replace('-', ''): {"help": v20180525_help.INFO,"desc": v20180525_help.DESC},
}
def tke_action(argv, arglist):
if "help" in argv:
versions = sorted(AVAILABLE_VERSIONS.keys())
opt_v = "--" + OptionsDefine.Version
version = versions[-1]
if opt_v in argv:
version = 'v' + argv[opt_v].replace('-', '')
if version not in versions:
print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return
action_str = ""
docs = AVAILABLE_VERSIONS[version]["help"]
desc = AVAILABLE_VERSIONS[version]["desc"]
for action, info in docs.items():
action_str += " %s\n" % action
action_str += Utils.split_str(" ", info["desc"], 120)
helpstr = HelpTemplate.SERVICE % {"name": "tke", "desc": desc, "actions": action_str}
print(helpstr)
else:
print(ErrorMsg.FEW_ARG)
def version_merge():
help_merge = {}
for v in AVAILABLE_VERSIONS:
for action in AVAILABLE_VERSIONS[v]["help"]:
if action not in help_merge:
help_merge[action] = {}
help_merge[action]["cb"] = ACTION_MAP[action]
help_merge[action]["params"] = []
for param in AVAILABLE_VERSIONS[v]["help"][action]["params"]:
if param["name"] not in help_merge[action]["params"]:
help_merge[action]["params"].append(param["name"])
return help_merge
def register_arg(command):
cmd = NiceCommand("tke", tke_action)
command.reg_cmd(cmd)
cmd.reg_opt("help", "bool")
cmd.reg_opt(OptionsDefine.Version, "string")
help_merge = version_merge()
for actionName, action in help_merge.items():
c = NiceCommand(actionName, action["cb"])
cmd.reg_cmd(c)
c.reg_opt("help", "bool")
for param in action["params"]:
c.reg_opt("--" + param, "string")
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
c.reg_opt(stropt, "string")
def parse_global_arg(argv):
params = {}
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
if stropt in argv:
params[opt] = argv[stropt]
else:
params[opt] = None
if params[OptionsDefine.Version]:
params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')
config_handle = Configure()
profile = config_handle.profile
if ("--" + OptionsDefine.Profile) in argv:
profile = argv[("--" + OptionsDefine.Profile)]
is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
config = {}
cred = {}
if is_conexist:
config = config_handle._load_json_msg(conf_path)
if is_creexist:
cred = config_handle._load_json_msg(cred_path)
for param in params.keys():
if param == OptionsDefine.Version:
continue
if params[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]:
if param in cred:
params[param] = cred[param]
else:
raise Exception("%s is invalid" % param)
else:
if param in config:
params[param] = config[param]
elif param == OptionsDefine.Region:
raise Exception("%s is invalid" % OptionsDefine.Region)
try:
if params[OptionsDefine.Version] is None:
version = config["tke"][OptionsDefine.Version]
params[OptionsDefine.Version] = "v" + version.replace('-', '')
if params[OptionsDefine.Endpoint] is None:
params[OptionsDefine.Endpoint] = config["tke"][OptionsDefine.Endpoint]
except Exception as err:
raise Exception("config file:%s error, %s" % (conf_path, str(err)))
versions = sorted(AVAILABLE_VERSIONS.keys())
if params[OptionsDefine.Version] not in versions:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return params
def show_help(action, version):
docs = AVAILABLE_VERSIONS[version]["help"][action]
desc = AVAILABLE_VERSIONS[version]["desc"]
docstr = ""
for param in docs["params"]:
docstr += " %s\n" % ("--" + param["name"])
docstr += Utils.split_str(" ", param["desc"], 120)
helpmsg = HelpTemplate.ACTION % {"name": action, "service": "tke", "desc": desc, "params": docstr}
print(helpmsg)
def get_actions_info():
config = Configure()
new_version = max(AVAILABLE_VERSIONS.keys())
version = new_version
try:
profile = config._load_json_msg(os.path.join(config.cli_path, "default.configure"))
version = profile["tke"]["version"]
version = "v" + version.replace('-', '')
except Exception:
pass
if version not in AVAILABLE_VERSIONS.keys():
version = new_version
return AVAILABLE_VERSIONS[version]["help"]
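# Added usage note: the do* handlers above back the `tccli tke <Action>`
# commands, e.g. (flags are illustrative, not taken from this file):
#   tccli tke DescribeClusters --Limit 20
# assuming credentials and region were configured for tccli beforehand.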
| [
"[email protected]"
] | |
def2133f683035964fdbf030fa9a9bec0085cb22 | f1fcaf58e53792db786bf6ffb87f67b815ed600e | /Chapter8.py | 4e1e60f78cbb1e010b37949f78d483331693bc96 | [] | no_license | stephenosullivan/effective-python3 | 8e414d0aa64eb2a599ba661056809830b6e4a39f | c933b3f80021f9ba3d1f0ad608f563a106d89bd8 | refs/heads/master | 2021-01-13T07:39:56.418989 | 2015-10-04T01:27:26 | 2015-10-04T01:27:26 | 39,714,317 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | __author__ = 'stephenosullivan'
class Item55:
"""
Use repr Strings for debugging output
"""
def __init__(self):
a = "string"
print(a)
print(repr(a))
print(eval(repr(a)))
print('%r' % a)
a = Opaque(5,4)
print(a)
b = BetterClass(6,7)
print(b)
print(a.__dict__)
class Opaque:
def __init__(self, x, y):
self.x = x
self.y = y
class BetterClass:
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "BetterClass(%s, %s)" % (self.x, self.y)
class Item56:
"""
Test everything with unittest
"""
def __init__(self):
return
def to_str(data):
if isinstance(data, str):
return data
elif isinstance(data, bytes):
return data.decode('utf-8')
else:
raise TypeError('Must supply string or bytes, ' 'found: %r' % data)
from unittest import TestCase, main
class UtilsTestCase(TestCase):
def test_to_str_bytes(self):
self.assertEqual('hello', to_str(b'hello'))
def test_to_str_str(self):
self.assertEqual('hello', to_str('hello'))
def test_to_str_bad(self):
self.assertRaises(TypeError, to_str, object())
if __name__ == "__main__":
sol = Item55()
main()
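# Added note: running `python Chapter8.py` prints the Item55 repr demo first,
# then unittest.main() discovers UtilsTestCase; all three to_str tests are
# expected to pass.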
| [
"[email protected]"
] | |
4344b251328ece82d57f22c21563a169e723a2c2 | f94e54d3085cd07a6f4972f2111574ad95fe4d89 | /utils/iotools.py | 406433fab0cdf5f54f662d8821bdadfae2017c15 | [] | no_license | banskt/statsfpl | b4e67ca4ed09a8cdc927ec4cb4ad570d891ad395 | b442208fa4d07e3a097445c75a4fd2f8098440ff | refs/heads/master | 2021-06-30T01:54:05.461439 | 2020-09-07T09:41:04 | 2020-09-07T09:41:04 | 143,441,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | import numpy as np
import collections
import csv
from utils.containers import FixtureInfo
def get_fixtures(filename, scores):
fixtures = [{} for x in range(38)]
with open(filename) as csvfile:
instream = csv.reader(csvfile, delimiter = ',')
for row in instream:
team = row[0].strip()
teamscores = scores[team]
for gw in range(1, 39):
opp = row[gw].split()[0].strip()
loc = row[gw].split()[1].strip()[1]
athome = False
if loc == 'H':
athome = True
score = teamscores[opp][loc]
fixtures[gw - 1][team] = FixtureInfo(gegen = opp, athome = athome, prob = score)
return fixtures
def convert_scores_mat(sdict, teams, nanval = 0.5):
n = len(teams)
home = np.zeros((n, n))
away = np.zeros((n, n))
for i, t1 in enumerate(teams):
for j, t2 in enumerate(teams):
home[i, j] = sdict[t1][t2]['H']
away[i, j] = sdict[t1][t2]['A']
vmin = min(np.min(home), np.min(away))
vmax = max(np.max(home), np.max(away))
delta = vmax - vmin
home = (home - vmin) / delta
away = (away - vmin) / delta
home[np.diag_indices_from(home)] = nanval
away[np.diag_indices_from(away)] = nanval
return home, away
def get_scores(filename, nanval = 0.5):
scores = {}
points = {}
teams = list()
ATTACKH = 0
ATTACKA = 1
DEFENDH = 2
DEFENDA = 3
with open(filename) as csvfile:
instream = csv.reader(csvfile, delimiter = ',')
next(instream, None)
for row in instream:
team = row[0].strip()
teams.append(team)
points[team] = [float(x.strip()) for x in row[1:]]
for team in teams:
scores[team] = {}
for opp in teams:
scores[team][opp] = {}
if opp == team:
scores[team][opp]['H'] = 0
scores[team][opp]['A'] = 0
else:
scores[team][opp]['H'] = points[team][DEFENDH] - points[opp][ATTACKA]
scores[team][opp]['A'] = points[team][DEFENDA] - points[opp][ATTACKH]
home, away = convert_scores_mat(scores, teams, nanval = nanval)
for i, team in enumerate(teams):
for j, opp in enumerate(teams):
scores[team][opp]['H'] = home[i, j]
scores[team][opp]['A'] = away[i, j]
return teams, scores
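# Hypothetical usage (added): the CSV paths below are placeholders, not files
# shipped with this module.
if __name__ == '__main__':
    teams, scores = get_scores('data/team_scores.csv')
    fixtures = get_fixtures('data/fixtures.csv', scores)
    print(fixtures[0][teams[0]])  # FixtureInfo for the first team, gameweek 1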
| [
"[email protected]"
] | |
decc14ec6c9e00b0fbed6e000b45d1b1efb74fa2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2335/60705/241811.py | ab11ecca95033a28315b69c996ee7f5b73163e7e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | x = int(input())
y = int(input())
count = 0
s = {x}
while y not in s:
s2 = set()
for i in s:
s2.add(2*i)
s2.add(i - 1)
for j in s2:
s.add(j)
count += 1
print(count) | [
"[email protected]"
] | |
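# Added alternative to the submission above (not part of it): with the two
# operations "double" and "subtract one", working backwards from y (halve when
# even, otherwise add one) reaches some value <= x, and the remaining gap is
# closed with single decrements. This gives the same minimum count without the
# exponential frontier of the set-based search.
def min_ops(x, y):
    count = 0
    while y > x:
        y = y // 2 if y % 2 == 0 else y + 1
        count += 1
    return count + (x - y)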
d6e6e537527df94454a1ffa739957e917b26d616 | 8ce656578e04369cea75c81b529b977fb1d58d94 | /bank_guarantee/helpers/copy_request.py | 6d2267189010e53aa1b00484d02d30b070321647 | [] | no_license | JJvzd/django_exp | f9a08c40a6a7535777a8b5005daafe581d8fe1dc | b1df4681e67aad49a1ce6426682df66b81465cb6 | refs/heads/master | 2023-05-31T13:21:24.178394 | 2021-06-22T10:19:43 | 2021-06-22T10:19:43 | 379,227,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,328 | py | import os
import requests
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.functional import cached_property
from sentry_sdk import capture_exception
from accounting_report.fields import QuarterData
from accounting_report.models import Quarter
from bank_guarantee.models import RequestStatus, Request
from base_request.models import RequestTender
from clients.models import Client, Agent
from common.helpers import generate_password
from questionnaire.models import (
PassportDetails, Profile, LicensesSRO, KindOfActivity, BankAccount,
ProfilePartnerLegalEntities
)
from users.models import Role, User
class CopyRequest:
from_url = r'http://tenderhelp.ru/'
token = os.environ.get('TOKEN_FOR_COPY_REQUEST')
def __init__(self, from_request_id, from_url=None, token=None):
self.from_request_id = from_request_id
if from_url:
self.from_url = from_url
if token:
self.token = token
def get_data(self, url, params=None):
if params is None:
params = {}
result = requests.get(
self.from_url + url,
params=params,
headers={'Authorization': 'Token %s' % self.token},
verify=False
)
try:
return result.json()
except Exception as error:
capture_exception(error)
raise error
def get_request_data(self):
url = r'api/requests/bank_guarantee/%s/' % str(self.from_request_id)
result = self.get_data(url)
return result.get('request')
def get_profile_data(self):
url = r'api/requests/bank_guarantee/%s/profile/' % str(self.from_request_id)
result = self.get_data(url)
return result.get('profile')
def get_accountint_report_data(self, client_id):
url = r'api/accounting_report/%s/' % client_id
result = self.get_data(url)
return result.get('quarters')
@cached_property
def agent(self):
agent = Agent.objects.filter(inn=5010050218).first()
if agent is None:
agent = Agent.objects.first()
if agent is None:
            raise Exception('No agents have been created')
return agent
@cached_property
def agent_user(self):
user = self.agent.user_set.first()
if user is None:
            raise Exception('Agent with INN %s has no users' % self.agent.inn)
return user
@cached_property
def manager(self):
user = User.objects.filter(roles__name=Role.MANAGER).first()
if user is None:
            raise Exception('There are no managers in the system, create one!')
return user
@cached_property
def manager_fio(self):
return self.manager.full_name
def save_bank_accounts(self, profile, bank_accounts):
bank_accounts_save = []
for bank_account_data in bank_accounts:
bank_account_id = bank_account_data.get('id')
bank_account_data.pop('profile', None)
if bank_account_id:
bank_account = BankAccount.objects.filter(id=bank_account_id).first()
self.update_from_dict(bank_account, bank_account_data)
else:
bank_account = profile.bankaccount_set.create()
self.update_from_dict(bank_account, bank_account_data)
bank_accounts_save.append(bank_account.id)
BankAccount.objects.filter(profile=profile).exclude(
id__in=bank_accounts_save
).delete()
def save_activities(self, profile, activities):
activities_save = []
for activity_data in activities:
activity_id = activity_data.get('id')
activity_data.pop('profile', None)
if activity_id:
activity = KindOfActivity.objects.filter(id=activity_id).first()
self.update_from_dict(activity, activity_data)
else:
activity = profile.kindofactivity_set.create()
self.update_from_dict(activity, activity_data)
activities_save.append(activity.id)
KindOfActivity.objects.filter(profile=profile).exclude(
id__in=activities_save
).delete()
def save_licenses(self, profile, licenses):
licenses_sro_save = []
for license_sro_data in licenses:
license_sro_id = license_sro_data.pop('id', None)
license_sro_data.pop('profile', None)
if license_sro_id:
license_sro = LicensesSRO.objects.filter(id=license_sro_id).first()
self.update_from_dict(license_sro, license_sro_data)
else:
license_sro = profile.licensessro_set.create()
self.update_from_dict(license_sro, license_sro_data)
licenses_sro_save.append(license_sro.id)
LicensesSRO.objects.filter(profile=profile).exclude(
id__in=licenses_sro_save
).delete()
def person_empty(self, person):
return all([not value for key, value in person.items() if
key in ['first_name', 'last_name', 'middle_name', 'fiz_inn']])
def update_from_dict(self, obj, data):
if data:
for key, value in data.items():
if hasattr(obj, key):
if obj._meta.get_field(key).__class__ is models.DateField:
if not value:
value = None
if key not in ['id']:
setattr(obj, key, value)
obj.save()
def save_passport(self, passport_data):
passport_id = passport_data.pop('id', None)
if not passport_id:
passport = PassportDetails.objects.create()
else:
passport = PassportDetails.objects.filter(id=passport_id).first()
self.update_from_dict(passport, passport_data)
return passport
def save_persons(self, profile: Profile, persons):
persons_save = []
for person_data in persons:
if not self.person_empty(person_data):
if person_data['resident'] is None:
person_data['resident'] = False
passport_data = person_data.pop('passport', {})
passport = self.save_passport(passport_data)
person_data.update({'passport': passport})
                # no need to save this for a regular participant
person_data.pop('document_gen_dir', {})
person_data.pop('profile', None)
person_id = person_data.pop('id', None)
if person_id:
person = profile.profilepartnerindividual_set.filter(
id=person_id
).first()
else:
person = profile.profilepartnerindividual_set.create()
self.update_from_dict(person, person_data)
persons_save.append(person_id)
profile.profilepartnerindividual_set.exclude(
Q(id__in=persons_save) | Q(is_general_director=True) | Q(is_booker=True)
).delete()
@staticmethod
def persons_without_general_director(persons):
return [p for p in persons if not p['is_general_director']]
def save_general_director(self, profile, general_director):
if general_director:
general_director_id = general_director.pop('id', None)
passport_data = general_director.pop('passport', {})
passport = self.save_passport(passport_data)
general_director.update({'passport': passport})
general_director.update({'profile': profile})
document_gen_dir_data = general_director.pop('document_gen_dir', {})
gen_dir = None
if general_director_id:
gen_dir = profile.profilepartnerindividual_set.filter(
id=general_director_id
).first()
if not gen_dir:
gen_dir = profile.profilepartnerindividual_set.create(
is_general_director=True
)
self.update_from_dict(gen_dir, general_director)
self.update_from_dict(gen_dir.document_gen_dir, document_gen_dir_data)
profile.profilepartnerindividual_set.filter(
is_general_director=True
).exclude(id=gen_dir.id).delete()
def save_legal_sharehoders(self, profile, legal_shareholders):
legal_shareholders_save = []
for legal_shareholder_data in legal_shareholders:
legal_shareholder_id = legal_shareholder_data.get('id')
legal_shareholder_data.pop('profile', None)
if legal_shareholder_data.get('passport'):
passport_data = legal_shareholder_data.get('passport')
if passport_data.get('id'):
passport = PassportDetails.objects.filter(
id=passport_data.pop('id')
).first()
else:
passport = PassportDetails.objects.create()
self.update_from_dict(passport, passport_data)
legal_shareholder_data.update({'passport': passport})
if legal_shareholder_id:
legal_shareholder = ProfilePartnerLegalEntities.objects.filter(
id=legal_shareholder_id
).first()
self.update_from_dict(legal_shareholder, legal_shareholder_data)
else:
legal_shareholder = profile.profilepartnerlegalentities_set.create()
self.update_from_dict(legal_shareholder, legal_shareholder_data)
legal_shareholders_save.append(legal_shareholder.id)
ProfilePartnerLegalEntities.objects.filter(profile=profile).exclude(
id__in=legal_shareholders_save
).delete()
@classmethod
def clear_id(cls, data):
if isinstance(data, list):
for d in data:
cls.clear_id(d)
if isinstance(data, dict):
temp = list(data.keys())
for key in temp:
if key in ['id', 'profile'] and isinstance(data[key], int):
del data[key]
else:
cls.clear_id(data[key])
def update_profile(self, client):
profile = client.profile
profile_data = self.get_profile_data()
self.clear_id(profile_data)
if profile.general_director:
profile_data['general_director']['id'] = profile.general_director.id
profile_data['general_director']['passport']['id'] = profile.general_director.passport.id # noqa
profile_data['general_director']['document_gen_dir']['id'] = profile.general_director.document_gen_dir.id # noqa
self.save_bank_accounts(
profile=profile,
bank_accounts=profile_data.pop('bank_accounts') or []
)
self.save_activities(
profile=profile,
activities=profile_data.pop('activities') or []
)
self.save_licenses(
profile=profile,
licenses=profile_data.pop('licenses_sro') or []
)
self.save_general_director(
profile=profile,
general_director=profile_data.pop('general_director') or {}
)
self.save_persons(
profile=profile,
persons=self.persons_without_general_director(
profile_data.pop('persons') or []
)
)
self.save_legal_sharehoders(
profile=profile,
legal_shareholders=profile_data.pop('legal_shareholders') or []
)
profile_data.pop('booker', None)
self.update_from_dict(client.profile, profile_data)
def update_accounting_report(self, new_client, old_client_id):
data = self.get_accountint_report_data(old_client_id)
for d in data:
quarter, create = Quarter.objects.update_or_create(
quarter=d['quarter'],
year=d['year'],
client=new_client,
defaults={'data': QuarterData(d['data']), 'no_data': d['no_data']}
)
def get_client(self, data):
inn = data['inn']
kpp = data['kpp']
data['agent_company'] = self.agent
data['agent_user'] = self.agent_user
data['manager'] = self.manager
data['managet_fio'] = self.manager_fio
client_id = data['id']
fields_for_delete = [
'id',
'inn',
'kpp',
'profile',
'agent_user_id',
'agent_company_id',
'agent_company_inn',
'agent_company_short_name',
'email',
'last_login',
'managet_fio',
'phone',
]
for field in fields_for_delete:
del data[field]
client, create = Client.objects.update_or_create(inn=inn, kpp=kpp, defaults=data)
self.update_profile(client)
self.update_accounting_report(client, client_id)
if create:
self.create_user(client)
return client
def create_user(self, client, roles=[Role.CLIENT]):
email = '%[email protected]' % client.inn
password = generate_password()
user = User.objects.create_user(email, password=password)
user.client = client
user.roles.set(Role.objects.filter(name__in=roles))
user.save()
@staticmethod
def get_tender(data):
fields_for_delete = [
'read_only_fields',
'procuring_amount',
'placement',
'get_federal_law_display'
]
for field in fields_for_delete:
del data[field]
return RequestTender.objects.create(**data)
@staticmethod
def get_status():
return RequestStatus.objects.get(code=RequestStatus.CODE_DRAFT)
def copy_request(self):
data = self.get_request_data()
fields_for_delete = [
'id',
'bank',
'offer',
'offer_additional_fields',
'assigned',
'additional_status',
'rating',
'base_request',
'decision_maker',
'request_number',
'request_number_in_bank',
'is_signed',
'status_changed_date',
'sent_to_bank_date',
'created_date',
'updated_date',
'agent_user_id',
]
for field in fields_for_delete:
data.pop(field, None)
data['banks_commissions'] = '{}'
data['client'] = self.get_client(data['client'])
data['tender'] = self.get_tender(data['tender'])
data['status'] = self.get_status()
data['agent'] = self.agent
data['agent_user'] = self.agent.user_set.first()
data['interval_to'] = timezone.datetime(*[
int(i) for i in data['interval_to'].split('-')
])
data['interval_from'] = timezone.datetime(*[
int(i) for i in data['interval_from'].split('-')
])
Request.objects.create(**data)
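# Illustrative invocation (added; not part of the module). The request id is a
# placeholder, and TOKEN_FOR_COPY_REQUEST must be set in the environment.
if __name__ == '__main__':
    CopyRequest(from_request_id=12345).copy_request()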
| [
"[email protected]"
] | |
788ef0052aafc50928a425010a71836954b38794 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/ChainedComparison9.py | b9dfa664cb719771acc687073432f02f50687c0f | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 24 | py | 0 < x and True and x < 2 | [
"[email protected]"
] | |
a30badd10e968213b68d1cab709d7f6258ff4478 | 921c29354a9065a4f76f816c2b2ec68457f66aef | /todo/tests/test_task.py | e4752471e00904681378c98b7d75e47dcc6c54c8 | [] | no_license | AmrAnwar/ToDoList | 520fa0529090183832dfd8c274fb3e7dad4d7a3b | de5e9e9887dee857e6169184aa9c7b74f31d32c4 | refs/heads/master | 2020-04-11T15:51:39.869491 | 2018-12-15T17:20:11 | 2018-12-15T17:20:11 | 161,905,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | from .test_init import InitTest
class TestList(InitTest):
def setUp(self):
super(TestList, self).setUp()
def test_get_task(self):
res = self.client.get(self.task.get_absolute_url())
self.assertEqual(res.status_code, 404)
self.client.login(username="anwar", password="password")
res = self.client.get(self.task.get_absolute_url())
self.assertEqual(res.status_code, 404)
self.client.login(username="guest", password="password")
res = self.client.get(self.task.get_absolute_url())
self.assertEqual(res.status_code, 200)
def test_update(self):
self.client.login(username="guest", password="password")
data = {
"title": "test-title"
}
self.client.post(self.task.get_absolute_url(), data=data)
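        # Added sketch: assertions the update test could make afterwards
        # (field name taken from the data dict above; left commented out
        # because the view's update behaviour is not shown here).
        # self.task.refresh_from_db()
        # self.assertEqual(self.task.title, 'test-title')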
| [
"[email protected]"
] | |
8de9d49675be983416774ae4bf4609d2d1d95145 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/142/usersdata/227/62295/submittedfiles/av2_p3_civil.py | 280c0440154f4d960bd1fc3ba353a60f8deb5e93 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | # -*- coding: utf-8 -*-
def media(a):
soma = 0
for i in range(0,len(a),1):
soma = soma + a[i]
media = soma/len(a)
return (media)
# WRITE THE REMAINING FUNCTIONS
def somaA(x,y):
    mx = media(x)
    my = media(y)
    soma = 0
    for i in range(0,len(x),1):
        soma = soma + (x[i]-mx)*(y[i]-my)
    return (soma)
def somaD(a):
    # sum of squared deviations from the mean
    m = media(a)
    soma = 0
    for i in range(0,len(a),1):
        soma = soma + (a[i]-m)**2
    return (soma)
def entradaLista(n):
    a = []
    for i in range(0,n,1):
        valor = float(input('Enter a value: '))
        a.append(valor)
    return (a)
n = int(input('Enter the list size: '))
x = entradaLista(n)
y = entradaLista(n)
p = somaA(x,y)/((somaD(x)*somaD(y))**0.5)
print('%.4f' % p)
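# Added cross-check (assumes numpy is available): p is the Pearson correlation
# coefficient, so it should match:
#   import numpy as np
#   np.corrcoef(x, y)[0, 1]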
| [
"[email protected]"
] | |
a9e2684649859d6b87e451d62c77a2a7bc594f57 | 5b4b1866571453f78db5b06a08ff0eda17b91b04 | /test/vanilla/Expected/AcceptanceTests/Url/url/operations/_path_items_operations.py | 8514033f930cc55c1503dfc44161367f746472ce | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | koek67/autorest.azure-functions-python | ba345f1d194ca7431daab1210a0cd801d4946991 | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | refs/heads/main | 2022-12-20T13:27:56.405901 | 2020-09-30T08:23:11 | 2020-09-30T08:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,257 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PathItemsOperations(object):
"""PathItemsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~url.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_all_with_values(
self,
path_item_string_path, # type: str
local_string_path, # type: str
path_item_string_query=None, # type: Optional[str]
local_string_query=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery='globalStringQuery',
pathItemStringQuery='pathItemStringQuery', localStringQuery='localStringQuery'.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:param path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
parameter.
:type path_item_string_query: str
:param local_string_query: should contain value 'localStringQuery'.
:type local_string_query: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_all_with_values.metadata['url'] # type: ignore
path_format_arguments = {
'pathItemStringPath': self._serialize.url("path_item_string_path", path_item_string_path, 'str'),
'globalStringPath': self._serialize.url("self._config.global_string_path", self._config.global_string_path, 'str'),
'localStringPath': self._serialize.url("local_string_path", local_string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters['pathItemStringQuery'] = self._serialize.query("path_item_string_query", path_item_string_query, 'str')
if self._config.global_string_query is not None:
query_parameters['globalStringQuery'] = self._serialize.query("self._config.global_string_query", self._config.global_string_query, 'str')
if local_string_query is not None:
query_parameters['localStringQuery'] = self._serialize.query("local_string_query", local_string_query, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_all_with_values.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/pathItemStringQuery/localStringQuery'} # type: ignore
@distributed_trace
def get_global_query_null(
self,
path_item_string_path, # type: str
local_string_path, # type: str
path_item_string_query=None, # type: Optional[str]
local_string_query=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery=null,
pathItemStringQuery='pathItemStringQuery', localStringQuery='localStringQuery'.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:param path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
parameter.
:type path_item_string_query: str
:param local_string_query: should contain value 'localStringQuery'.
:type local_string_query: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_global_query_null.metadata['url'] # type: ignore
path_format_arguments = {
'pathItemStringPath': self._serialize.url("path_item_string_path", path_item_string_path, 'str'),
'globalStringPath': self._serialize.url("self._config.global_string_path", self._config.global_string_path, 'str'),
'localStringPath': self._serialize.url("local_string_path", local_string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters['pathItemStringQuery'] = self._serialize.query("path_item_string_query", path_item_string_query, 'str')
if self._config.global_string_query is not None:
query_parameters['globalStringQuery'] = self._serialize.query("self._config.global_string_query", self._config.global_string_query, 'str')
if local_string_query is not None:
query_parameters['localStringQuery'] = self._serialize.query("local_string_query", local_string_query, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_global_query_null.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/localStringQuery'} # type: ignore
@distributed_trace
def get_global_and_local_query_null(
self,
path_item_string_path, # type: str
local_string_path, # type: str
path_item_string_query=None, # type: Optional[str]
local_string_query=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""send globalStringPath=globalStringPath, pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery=null,
pathItemStringQuery='pathItemStringQuery', localStringQuery=null.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:param path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
parameter.
:type path_item_string_query: str
:param local_string_query: should contain null value.
:type local_string_query: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_global_and_local_query_null.metadata['url'] # type: ignore
path_format_arguments = {
'pathItemStringPath': self._serialize.url("path_item_string_path", path_item_string_path, 'str'),
'globalStringPath': self._serialize.url("self._config.global_string_path", self._config.global_string_path, 'str'),
'localStringPath': self._serialize.url("local_string_path", local_string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters['pathItemStringQuery'] = self._serialize.query("path_item_string_query", path_item_string_query, 'str')
if self._config.global_string_query is not None:
query_parameters['globalStringQuery'] = self._serialize.query("self._config.global_string_query", self._config.global_string_query, 'str')
if local_string_query is not None:
query_parameters['localStringQuery'] = self._serialize.query("local_string_query", local_string_query, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_global_and_local_query_null.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/null'} # type: ignore
@distributed_trace
def get_local_path_item_query_null(
self,
path_item_string_path, # type: str
local_string_path, # type: str
path_item_string_query=None, # type: Optional[str]
local_string_query=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
localStringPath='localStringPath', globalStringQuery='globalStringQuery',
pathItemStringQuery=null, localStringQuery=null.
:param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
:type path_item_string_path: str
:param local_string_path: should contain value 'localStringPath'.
:type local_string_path: str
:param path_item_string_query: should contain value null.
:type path_item_string_query: str
:param local_string_query: should contain value null.
:type local_string_query: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.get_local_path_item_query_null.metadata['url'] # type: ignore
path_format_arguments = {
'pathItemStringPath': self._serialize.url("path_item_string_path", path_item_string_path, 'str'),
'globalStringPath': self._serialize.url("self._config.global_string_path", self._config.global_string_path, 'str'),
'localStringPath': self._serialize.url("local_string_path", local_string_path, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if path_item_string_query is not None:
query_parameters['pathItemStringQuery'] = self._serialize.query("path_item_string_query", path_item_string_query, 'str')
if self._config.global_string_query is not None:
query_parameters['globalStringQuery'] = self._serialize.query("self._config.global_string_query", self._config.global_string_query, 'str')
if local_string_query is not None:
query_parameters['localStringQuery'] = self._serialize.query("local_string_query", local_string_query, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
get_local_path_item_query_null.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/null/null'} # type: ignore
| [
"[email protected]"
] | |
e7f4f24803a27a38a46f361243a674a5236a571a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03274/s010264861.py | c2235d0bdb18733aa448f6ca2a63b3cad841e71a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import collections
n,k = map(int, raw_input().split(' '))
xis = map(int, raw_input().split(' '))
ais = [xi for xi in xis if xi >= 0]
bis = [-xi for xi in xis if xi < 0][::-1]
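# Candle-selection strategy: ais holds the non-negative positions (nearest
# first), bis the absolute values of the negative ones (nearest first).
# Any optimal set of k candles is a prefix of each side; walking the
# cheaper side first and doubling back costs min(2*a + b, a + 2*b).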
m = ais[k -1] if k-1 < len(ais) else +float('inf')
m = min(m, bis[k -1] if k-1 < len(bis) else +float('inf'))
for i in range(len(ais)):
if i + 1 == k: break
if 0 <= k - (i+1) -1 < len(bis):
m = min(m, 2*ais[i] + bis[k - (i+1) -1])
m = min(m, ais[i] + 2*bis[k - (i+1) -1])
print m | [
"[email protected]"
] | |
75687bab192a3f68f275a053b3ee4aa69bc1955b | 523fb785bda41e33546c929a5c2de6c93f98b434 | /专题学习/树/BinaryTreePathDivideConquer.py | 63bc4f925a2a8db053b249b643773310f578e34c | [] | no_license | lizhe960118/TowardOffer | afd2029f8f9a1e782fe56ca0ff1fa8fb37892d0e | a0608d34c6ed96c9071cc3b9bdf70c95cef8fcbd | refs/heads/master | 2020-04-27T10:33:21.452707 | 2019-05-02T10:47:01 | 2019-05-02T10:47:01 | 174,259,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | """
Definition of TreeNode:
"""
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param root: the root of the binary tree
@return: all root-to-leaf paths
"""
def binaryTreePaths(self, root):
paths = []
if root is None:
return paths
left_paths = self.binaryTreePaths(root.left)
right_paths = self.binaryTreePaths(root.right)
for path in left_paths:
paths.append(str(root.val) + '->' + path)
for path in right_paths:
paths.append(str(root.val) + '->' + path)
        # if the node is a leaf, no child paths were collected
if len(paths) == 0:
paths.append(str(root.val))
return paths
if __name__ == '__main__':
node1 = TreeNode(1)
node2 = TreeNode(2)
node3 = TreeNode(3)
node4 = TreeNode(4)
node5 = TreeNode(5)
node1.left = node2
node1.right = node3
node2.left = node4
node2.right = node5
root = node1
print(Solution().binaryTreePaths(root)) | [
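    # prints: ['1->2->4', '1->2->5', '1->3']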
"[email protected]"
] | |
4eccb52479c5050a8cb64f03d50f62ec22ebf031 | 083d93a621f0fd411aabd9b1607e83aedd588d2c | /etg/propgridiface.py | 847a0e054be21e3f253b5d29483057e83d7d49fc | [] | no_license | jns4u/Phoenix | 0a8e5b50326d37048aa58d11023308517ace525b | 478e192ccf0d75a04b78c6600963614d1039dd53 | refs/heads/master | 2021-01-09T06:20:02.546100 | 2017-02-05T03:33:00 | 2017-02-05T03:33:00 | 80,965,252 | 1 | 0 | null | 2017-02-05T03:10:08 | 2017-02-05T03:10:08 | null | UTF-8 | Python | false | false | 2,648 | py | #---------------------------------------------------------------------------
# Name: etg/propgridiface.py
# Author: Robin Dunn
#
# Created: 23-Feb-2015
# Copyright: (c) 2015 by Total Control Software
# License: wxWindows License
#---------------------------------------------------------------------------
import etgtools
import etgtools.tweaker_tools as tools
PACKAGE = "wx"
MODULE = "_propgrid"
NAME = "propgridiface" # Base name of the file to generate to for this script
DOCSTRING = ""
# The classes and/or the basename of the Doxygen XML files to be processed by
# this script.
ITEMS = [ 'wxPGPropArgCls',
'wxPropertyGridInterface',
]
#---------------------------------------------------------------------------
def run():
# Parse the XML file(s) building a collection of Extractor objects
module = etgtools.ModuleDef(PACKAGE, MODULE, NAME, DOCSTRING)
etgtools.parseDoxyXML(module, ITEMS)
#-----------------------------------------------------------------
# Tweak the parsed meta objects in the module object as needed for
# customizing the generated code and docstrings.
c = module.find('wxPGPropArgCls')
assert isinstance(c, etgtools.ClassDef)
c.find('GetPtr').overloads[0].ignore()
c = module.find('wxPropertyGridInterface')
c.abstract = True
for m in c.findAll('GetIterator'):
if m.type == 'wxPropertyGridConstIterator':
m.ignore()
c.find('SetPropertyValue').findOverload('int value').ignore()
c.find('SetPropertyValue').findOverload('bool value').ignore()
c.find('SetPropertyValue').findOverload('wxLongLong_t value').ignore()
c.find('SetPropertyValue').findOverload('wxULongLong_t value').ignore()
c.find('SetPropertyValue').findOverload('wxObject *value').ignore()
module.addItem(
tools.wxArrayPtrWrapperTemplate('wxArrayPGProperty', 'wxPGProperty', module))
# wxPGPropArg is a typedef for "const wxPGPropArgCls&" so having the
# wrappers treat it as a normal type can be problematic. ("new cannot be
# applied to a reference type", etc.) Let's just ignore it an replace it
# everywhere for the real type.
module.find('wxPGPropArg').ignore()
for item in module.allItems():
if hasattr(item, 'type') and item.type == 'wxPGPropArg':
item.type = 'const wxPGPropArgCls &'
#-----------------------------------------------------------------
tools.doCommonTweaks(module)
tools.runGenerators(module)
#---------------------------------------------------------------------------
if __name__ == '__main__':
run()
| [
"[email protected]"
] | |
f613d66153900cdfab69753db317f2b3e2792278 | 64c8d431c751b1b7a7cb7224107ee40f67fbc982 | /code/python/external/pi3d/constants/__init__.py | a0dfd912eed894eb189c79ca89c51473d892341d | [
"MIT"
] | permissive | silky/echomesh | 6ac4755e4ff5ea3aa2b2b671c0979068c7605116 | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | refs/heads/master | 2021-01-12T20:26:59.294649 | 2013-11-16T23:29:05 | 2013-11-16T23:29:05 | 14,458,268 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | from __future__ import absolute_import, division, print_function, unicode_literals
"""
pi3d.constants contains constant values, mainly integers, from OpenGL ES 2.0.
"""
VERSION = '0.06'
STARTUP_MESSAGE = """
Pi3D module - version %(version)s
Copyright (c) Tim Skillman, 2012-2013
Copyright (c) Patrick Gaunt, 2012-2013
Copyright (c) Tom Ritchford, 2012-2013
Updates available from www.github.com/tipam/pi3d
""" % {'version': VERSION}
VERBOSE = False
# TODO: get rid of verbose in favor of logging.
# Pick up our constants extracted from the header files with prepare_constants.py
from pi3d.constants.egl import *
from pi3d.constants.gl2 import *
from pi3d.constants.gl2ext import *
from pi3d.constants.gl import *
# Define some extra constants that the automatic extraction misses.
EGL_DEFAULT_DISPLAY = 0
EGL_NO_CONTEXT = 0
EGL_NO_DISPLAY = 0
EGL_NO_SURFACE = 0
DISPMANX_PROTECTION_NONE = 0
# Lastly, load the libraries.
def _load_library(name):
"""Try to load a shared library, report an error on failure."""
try:
import ctypes
return ctypes.CDLL('lib%s.so' % name)
except:
from echomesh.util import Log
Log.logger(__name__).error("Couldn't load library %s" % name)
bcm = _load_library('bcm_host')
opengles = _load_library('GLESv2')
openegl = _load_library('EGL')
| [
"[email protected]"
] | |
49acb7c799821f6f485dc8243c3203145bd9385f | c6db8eccba0f863e464fa23e7c8c5f27d6da277b | /CS/Programming_Languages/Python/Modules/exterior/topics/gui/dearPyGUI/tutorials/_3_item_usage/_3_3_configuration_state_info/configure_items.py | ea4e913f1183433230106f6806d466fcd30d277d | [] | no_license | corridda/Studies | ceabb94f48bd03a31e4414e9af841d6a9b007cf9 | 1aacf52f2762e05a416c9e73ebe20794cb5d21cf | refs/heads/master | 2023-02-05T18:51:04.217528 | 2023-01-28T09:21:03 | 2023-01-28T09:21:03 | 216,492,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | import dearpygui.dearpygui as dpg
dpg.create_context()
with dpg.window(label="Tutorial"):
# configuration set when button is created
dpg.add_button(label="Apply", width=300)
# user data and callback set any time after button has been created
btn = dpg.add_button(label="Apply 2")
dpg.set_item_label(btn, "Button 57")
dpg.set_item_width(btn, 200)
dpg.show_item_registry()
dpg.create_viewport(title='Custom Title', width=800, height=600)
dpg.setup_dearpygui()
dpg.show_viewport()
dpg.start_dearpygui()
dpg.destroy_context()
| [
"[email protected]"
] | |
2d2a0919eaf9d4900549e260e76a29a86aff5212 | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - lab9/set 2/VICTORIA ALEXANDRA ALERS_19376_assignsubmission_file_Lab9/VICTORIA ALEXANDRA ALERS_19376_assignsubmission_file_Lab9.py | 23b6a370b40815a0efeb963db12ada5ea16a12bf | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #1________________________________
from math import sin, exp
class F:
def __init__(self, a, w):
self.a = a
self.w = w
def value(self, x):
a = self.a
w = self.w
return exp(-a * w) * sin(w * x)
from math import pi
f = F(a=1.0, w=0.1)
print (f.value(x=pi))
f.a = 2
print (f.value(pi))
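# prints approximately 0.2796, then 0.2530 once a has been set to 2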
#2---------------------------------------------
class Simple:
def __init__(self, i):
#super().__init__()
self.i=i
def double(self):
self.i = self.i + self.i
s1=Simple(4)
for i in range(4):
s1.double()
print(s1.i)
s2=Simple('Hello')
s2.double(); s2.double()
print(s2.i)
s2.i=100
print(s2.i)
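# prints 64, then HelloHelloHelloHello, then 100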
| [
"[email protected]"
] | |
0844bbe4a8d7dc1254333da5414f1afff5a87ca7 | f4b694982027ac362de1e9d6755f2943d0355a06 | /DECSKS-24_--_Boundary_conditions_revisited_on_a_cell_centered_perspective/DECSKS/main.py | 64e3792512a19fa7015a7435945c2d43bd03da74 | [] | no_license | dsirajud/IPython-notebooks | 55275e44191c16f5393571522787993f931cfd98 | 6ad9d978c611558525fc9d716af101dc841a393b | refs/heads/master | 2021-01-15T15:33:57.119172 | 2016-07-13T20:08:29 | 2016-07-13T20:08:29 | 35,054,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,042 | py | #!/usr/bin/env python
#============================================================================##
#DECSKS - DEterministic Convected Scheme Kinetic Solver for Boltzmann systems #
#-----------------------------------------------------------------------------#
# 1D1V Vlasov-Poisson system, two species #
# #
# __author__ = David Sirajuddin #
# __version__ = 2.3 #
# __email__ = [email protected] #
# __status__ = in development #
# #
# Python code is crafted with some attention to PEP8 style standards #
# https://www.python.org/dev/peps/pep-0008/ for #
# #
# Python 2.7.3 #
# NumPy 1.11.0.dev0+fe64f97 #
# #
# is not compatible with Python 3+ and/or earlier Numpy releases #
# #
# coding conventions: #
# packages/modules -- lowercase, no underscore #
# classes -- CapWords #
# instances -- lowercase (if no symbol conflict), with underscores #
# functions -- lowercase, with underscores unless proper name #
# #
# other (local) conventions: #
# numpy arrays -- lowercase (if no symbol conflict), with underscores #
# iterators -- i, j, n used according to f(x_i, v_j, t^n), #
# phase space var -- z = {x, y, z, vx, vy, z} #
#=============================================================================#
import _mypath # adds relative path to sys.path for flexible deployment
import DECSKS
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import time
# =========================================================================== #
#rm_plots = int(raw_input('remove ALL plot files after simulation is done (1 = yes, 0 = no)?: '))
rm_plots = 0
tic = time.clock()
sim_params = DECSKS.lib.read.inputfile('./etc/params_s18-20.dat')
# both species will use same grid x, vx. Can reuse the same vx and ax here
# given serial implementation. In parallel applications, distinct vx_i, vx_e
# ax_i, ax_e may be desirable depending on how the parallelization is approached
x = DECSKS.lib.domain.Setup(sim_params, var = 'x')
vx = DECSKS.lib.domain.Setup(sim_params, var = 'v', dim = 'x')
ax = DECSKS.lib.domain.Setup(sim_params, 'a', 'x')
t = DECSKS.lib.domain.Setup(sim_params, var = 't')
# set up two species
fe, fi = DECSKS.lib.density.setup(sim_params, t, x, vx) # NOTE mu and tau in ion density must match those just below
ne_avg = np.sum(fe)*x.width * vx.width / x.L
print ne_avg
# store total mass for conservation checks, TODO do not need the x phase space variable pass in this function
sim_params['me_0'] = np.sum(fe)
sim_params['mi_0'] = np.sum(fi)
print "TIME ZERO, masses are"
print "fe = %g" % np.sum(fe)
print "fi = %g" % np.sum(fi)
# CAUTION: make sure you use the same mu and tau in lib.density for fe, fi as indicated in params.dat input deck
# as the mu specified in params.dat will be used to compute the ion acceleration ax ~ 1 / mu * Ex compared to the
# electron acceleration term ax ~ -Ex
print sim_params['BC']['f']['x']['type']
print sim_params['BC']['f']['vx']['type']
print sim_params['BC']['f']['x']['lower']
print sim_params['BC']['f']['x']['upper']
print sim_params['BC']['f']['vx']['lower']
print sim_params['BC']['f']['vx']['upper']
#print sim_params['compute_electric_potential_phi_handle'][x.str] # = None if fourier solver
# print sim_params['phi_BC']['x'] # = None if fourier solver
Plot = DECSKS.lib.plots.PlotSetup(fe, 0, t, x, vx, sim_params, species = 'electron')
Plot(n = 0)
Plot = DECSKS.lib.plots.PlotSetup(fi, 0, t, x, vx, sim_params, species = 'ion')
Plot(n = 0)
#Ex_2D = eval(sim_params['compute_electric_field_orchestrator_handle']['x'])(fe, fi, x, vx, 0, sim_params)
#Ex = Ex_2D[:,0] # all columns of the original 2D array are the same, choose the zeroeth column arbitrarily
#phi = eval(sim_params['compute_electric_potential_phi_handle'][x.str])(fe, fi, x, vx, 0, sim_params)
#phi = phi[:,0]
#print phi.shape
DECSKS.lib.diagnostics.calcs_and_writeout(sim_params,fe, fi, 0, x, vx, sim_params['mu'])
#matplotlib.pyplot.plot(x.gridvalues, phi, linewidth = 2, color = 'blue')
#matplotlib.pyplot.grid()
##matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
#matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
#matplotlib.pyplot.ylabel(r'$\phi (t^n,x)$', fontsize = 18)
#matplotlib.pyplot.title(r's18-21d Electric potential $\phi (x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], 0.0, 0))
#it_str = 'it%05d' % 0
#matplotlib.pyplot.savefig('./plots/' + 'phi_s18-21_' + it_str)
#matplotlib.pyplot.clf()
#matplotlib.pyplot.plot(x.gridvalues, Ex, linewidth = 2, color = 'blue')
#matplotlib.pyplot.grid()
#matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
#matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
#matplotlib.pyplot.ylabel(r'$E (t^n,x)$', fontsize = 18)
#matplotlib.pyplot.title(r's18-21d Electric field $E (x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], 0.0, 0))
#it_str = 'it%05d' % 0
#matplotlib.pyplot.savefig('./plots/' + 'Ex_s18-21_' + it_str)
#matplotlib.pyplot.clf()
#print sim_params['sigma']['x']['lower']
#print sim_params['sigma']['x']['upper']
print 'simulation has started, status updates are broadcasted after each timestep'
print t.stepnumbers
for n in t.stepnumbers:
fe, fi = DECSKS.lib.split.scheme(
fe, fi,
t, x, vx, ax,
n,
sim_params
)
# sim_params['sigma_n']['x']['lower'][n] = sim_params['sigma']['x']['lower']
# sim_params['sigma_n']['x']['upper'][n] = sim_params['sigma']['x']['upper']
# print sim_params['sigma']['x']['lower']
# print sim_params['sigma']['x']['upper']
Plot = DECSKS.lib.plots.PlotSetup(fe, n, t, x, vx, sim_params, species = 'electron')
Plot(n)
Plot = DECSKS.lib.plots.PlotSetup(fi, n, t, x, vx, sim_params, species = 'ion')
Plot(n)
# Ex_2D = eval(sim_params['compute_electric_field_orchestrator_handle']['x'])(fe, fi, x, vx, n, sim_params)
# Ex = Ex_2D[:,0] # all columns of the original 2D array are the same, choose the zeroeth column arbitrarily
# phi = eval(sim_params['compute_electric_potential_phi_handle'][x.str])(fe, fi, x, vx, 0, sim_params)
# phi = phi[:,0]
# matplotlib.pyplot.plot(x.gridvalues, phi, linewidth = 2, color = 'blue')
# matplotlib.pyplot.grid()
# # matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
# matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
# matplotlib.pyplot.ylabel(r'$\phi (t^n,x)$', fontsize = 18)
# matplotlib.pyplot.title(r's18-21d Electric potential $\phi (t^n, x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], n*t.width, n))
# it_str = 'it%05d' % n
# matplotlib.pyplot.savefig('./plots/' + 'phi_s18-21_' + it_str)
# matplotlib.pyplot.clf()
# matplotlib.pyplot.plot(x.gridvalues, Ex, linewidth = 2, color = 'blue')
# matplotlib.pyplot.grid()
# # matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
# matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
# matplotlib.pyplot.ylabel(r'$E (t^n,x)$', fontsize = 18)
# matplotlib.pyplot.title(r's18-21d Electric field $E (x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], n*t.width, n))
# it_str = 'it%05d' % n
# matplotlib.pyplot.savefig('./plots/' + 'Ex_s18-21_' + it_str)
# matplotlib.pyplot.clf()
# calcs performed and outputs written only if "record outputs? = yes"
# in ./etc/params.dat
DECSKS.lib.diagnostics.calcs_and_writeout(sim_params,fe, fi, n, x, vx, sim_params['mu'])
DECSKS.lib.status.check_and_clean(t, n, tic, rm_plots)
#sigma_n_left = sim_params['sigma_n']['x']['lower']
#sigma_n_right = sim_params['sigma_n']['x']['upper']
#plt.plot(t.times, sigma_n_left, linewidth = 2, label = r'$\sigma (t, x= -10)$')
#plt.plot(t.times,sigma_n_right, linewidth = 2, label = r'$\sigma (t, x= +10)$')
#plt.grid()
#plt.xlabel(r'time step $n$', fontsize = 18)
#plt.ylabel(r'$\sigma (t,x)$', fontsize = 18)
#plt.legend(loc = 'best')
#phi_left = sim_params['sigma_n']['x']['lower'] # E = -1/2 sigma, phi = 1/2 sigma, here sigma = ni - ne
#phi_right = sim_params['sigma_n']['x']['upper']
#plt.plot(trange,phi_left, linewidth = 2, label = r'$\phi (t, x= -10)$')
#plt.plot(trange,phi_right, linewidth = 2, label = r'$\phi (t, x= +10)$')
#plt.grid()
#plt.xlabel(r'time step $n$', fontsize = 18)
#plt.ylabel(r'$\phi (t,x)$', fontsize = 18)
#plt.legend(loc = 'best')
#plt.show()
toc = time.clock()
simtime = toc - tic
print "simulation completed in %g seconds = %g minutes = %g hours " % (
simtime,
simtime/60.,
simtime/3600.)
# =============================================================================== #
# END
| [
"[email protected]"
] | |
db65f69a9e0e554a65106f54ff445628c3458f7c | 839d8d7ccfa54d046e22e31a2c6e86a520ee0fb5 | /icore/high/thread/thread_queue.py | 7e1d562ecf087f475cb24370b431819ad85ae3b5 | [] | no_license | Erich6917/python_corepython | 7b584dda737ef914780decca5dd401aa33328af5 | 0176c9be2684b838cf9613db40a45af213fa20d1 | refs/heads/master | 2023-02-11T12:46:31.789212 | 2021-01-05T06:21:24 | 2021-01-05T06:21:24 | 102,881,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | # -*- coding: utf-8 -*-
# @Time : 2017/12/29
# @Author : LIYUAN134
# @File : thread_queue.py
# @Comment :
#
# -*- coding: UTF-8 -*-
import Queue
import threading
import time
exitFlag = 0
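# shared flag polled by the worker threads; set to 1 to tell them to exit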
class myThread(threading.Thread):
def __init__(self, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
def run(self):
print "Starting " + self.name
process_data(self.name, self.q)
print "Exiting " + self.name
def process_data(threadName, q):
while not exitFlag:
queueLock.acquire()
if not workQueue.empty():
data = q.get()
queueLock.release()
print "%s processing %s" % (threadName, data)
else:
queueLock.release()
time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five"]
queueLock = threading.Lock()
workQueue = Queue.Queue(10)
threads = []
threadID = 1
# Create the new threads
for tName in threadList:
thread = myThread(threadID, tName, workQueue)
thread.start()
threads.append(thread)
threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
workQueue.put(word)
queueLock.release()
# Wait for the queue to empty
while not workQueue.empty():
pass
# Notify the threads that it is time to exit
exitFlag = 1
# Wait for all threads to complete
for t in threads:
t.join()
print "Exiting Main Thread"
| [
"[email protected]"
] | |
d5cd69bc39db446dab3c1bfa0714fd10795d9b13 | 107941a50c3adc621563fe0254fd407ea38d752e | /spider_03.py | ff843e21ab654378dec18c1fae8d152647acbf11 | [] | no_license | zhangliang852469/spider_ | 758a4820f8bd25ef6ad0edbd5a4efbaaa410ae08 | 718208c4d8e6752bbe8d66a209e6d7446c81d139 | refs/heads/master | 2020-04-05T07:12:03.790358 | 2018-11-08T07:17:22 | 2018-11-08T07:17:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""节点交互 """
from selenium import webdriver
import time
# browser = webdriver.Chrome()
# browser.get('https://www.taobao.com')
# input = browser.find_element_by_id('q')
# input.send_keys('iPhone')
# time.sleep(1)
# input.clear()
# input.send_keys('iPad')
# button = browser.find_element_by_class_name('btn-search')
# button.click()
"""在这里我们首先驱动浏览器打开淘宝,然后用 find_element_by_id() 方法获取输入框,
然后用 send_keys() 方法输入 iPhone 文字,等待一秒之后用 clear() 方法清空输入框,
再次调用 send_keys() 方法输入 iPad 文字,之后再用 find_element_by_class_name()
方法获取搜索按钮,最后调用 click() 方法完成搜索动作。"""
browser = webdriver.Chrome()
browser.get('https://www.taobao.com')
input = browser.find_element_by_id('q')
input.send_keys('iPhone')
time.sleep(1)
input.clear()
input.send_keys('iPad')
button = browser.find_element_by_class_name('btn-search')
button.click()
| [
"[email protected]"
] | |
f46477242fa911d6e3c8332e24eb1cc7e38b0750 | 99d436394e47571160340c95d527ecadaae83541 | /algorithms_questions/ch14_sorting/q26_2.py | 1d3dbab643ddebf9f47062db3e7538e2d0eb1102 | [] | no_license | LeeSeok-Jun/Algorithms | b47ba4de5580302e9e2399bcf85d245ebeb1b93d | 0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8 | refs/heads/main | 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | """
Card sorting - attempt 3
"""
# Time limit for solving: 30 minutes
# 2021/01/21 14:57 ~ 15:15
# Failed at first - the incorrect version is left commented out below
import heapq
n = int(input())
data = []
for _ in range(n):
heapq.heappush(data, int(input()))
"""
sum_value = heapq.heappop(data)
while data:
now = heapq.heappop(data)
sum_value += now
heapq.heappush(data, sum_value)
print(sum_value)
"""
result = 0
while len(data) != 1:
one = heapq.heappop(data)
two = heapq.heappop(data)
sum_value = one + two
result += sum_value
    heapq.heappush(data, sum_value)  # push the merged pile back, not the running total
print(result)
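# e.g. piles of 10, 20 and 40 cards: merge 10+20 (30 comparisons), then
# 30+40 (70 comparisons), for a minimum total of 100.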
| [
"[email protected]"
] | |
84a44293453107c4c6dd00597d3f3f1c970b6484 | de4e8e0f33dbd8bb39784907b420f05b2d62f65a | /test/test_sub_step_type.py | e57e83c7a9007b51213a5ff59a81bf9107ecdcc5 | [
"BSD-3-Clause"
] | permissive | hpcc-systems/uptrends-python | 489d7b513d1eeaf57569081363861010492a85e6 | 2e05ba851a4e65bde3c40514f499c475465bef90 | refs/heads/master | 2022-11-15T05:32:38.638456 | 2020-07-10T18:48:45 | 2020-07-10T18:48:45 | 256,216,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | # coding: utf-8
"""
Uptrends API v4
This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import uptrends
from uptrends.models.sub_step_type import SubStepType # noqa: E501
from uptrends.rest import ApiException
class TestSubStepType(unittest.TestCase):
"""SubStepType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSubStepType(self):
"""Test SubStepType"""
# FIXME: construct object with mandatory attributes with example values
# model = uptrends.models.sub_step_type.SubStepType() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
808df8fd000d1100b98a1532e9de2156af996c80 | 8ee9a85496208ed5f4331d437ec44cc17f7bce08 | /FinalPractice/SemanticSegmentation/U_net/model.py | 76320a94bd7d5507c70b0f20bc245ce78291b4fc | [] | no_license | Ollitros/ComputerVision | aa93527ef0172874a0034b61d1cae6c31f514734 | b0ec5d9f94406b4f8164d0ef7180226156ea1194 | refs/heads/master | 2020-04-03T19:23:06.898807 | 2019-06-13T04:56:29 | 2019-06-13T04:56:29 | 155,521,472 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | from tensorflow.keras.layers import Conv2D, MaxPool2D, UpSampling2D, \
Input, BatchNormalization, concatenate, Activation
from tensorflow.keras.models import Model
def model():
inputs = Input(shape=(128, 128, 3))
# 128
down1 = Conv2D(64, (3, 3), padding='same')(inputs)
down1 = BatchNormalization()(down1)
down1 = Activation('relu')(down1)
down1 = Conv2D(64, (3, 3), padding='same')(down1)
down1 = BatchNormalization()(down1)
down1 = Activation('relu')(down1)
down1_pool = MaxPool2D()(down1)
# 64
down2 = Conv2D(128, (3, 3), padding='same')(down1_pool)
down2 = BatchNormalization()(down2)
down2 = Activation('relu')(down2)
down2 = Conv2D(128, (3, 3), padding='same')(down2)
down2 = BatchNormalization()(down2)
down2 = Activation('relu')(down2)
down2_pool = MaxPool2D()(down2)
# 32
down3 = Conv2D(256, (3, 3), padding='same')(down2_pool)
down3 = BatchNormalization()(down3)
down3 = Activation('relu')(down3)
down3 = Conv2D(256, (3, 3), padding='same')(down3)
down3 = BatchNormalization()(down3)
down3 = Activation('relu')(down3)
down3_pool = MaxPool2D()(down3)
# 16
down4 = Conv2D(512, (3, 3), padding='same')(down3_pool)
down4 = BatchNormalization()(down4)
down4 = Activation('relu')(down4)
down4 = Conv2D(512, (3, 3), padding='same')(down4)
down4 = BatchNormalization()(down4)
down4 = Activation('relu')(down4)
down4_pool = MaxPool2D()(down4)
# 8
center = Conv2D(1024, (3, 3), padding='same')(down4_pool)
center = BatchNormalization()(center)
center = Activation('relu')(center)
center = Conv2D(1024, (3, 3), padding='same')(center)
center = BatchNormalization()(center)
center = Activation('relu')(center)
# center
up4 = UpSampling2D((2, 2))(center)
up4 = concatenate([down4, up4], axis=3)
up4 = Conv2D(512, (3, 3), padding='same')(up4)
up4 = BatchNormalization()(up4)
up4 = Activation('relu')(up4)
up4 = Conv2D(512, (3, 3), padding='same')(up4)
up4 = BatchNormalization()(up4)
up4 = Activation('relu')(up4)
up4 = Conv2D(512, (3, 3), padding='same')(up4)
up4 = BatchNormalization()(up4)
up4 = Activation('relu')(up4)
# 16
up3 = UpSampling2D((2, 2))(up4)
up3 = concatenate([down3, up3], axis=3)
up3 = Conv2D(256, (3, 3), padding='same')(up3)
up3 = BatchNormalization()(up3)
up3 = Activation('relu')(up3)
up3 = Conv2D(256, (3, 3), padding='same')(up3)
up3 = BatchNormalization()(up3)
up3 = Activation('relu')(up3)
up3 = Conv2D(256, (3, 3), padding='same')(up3)
up3 = BatchNormalization()(up3)
up3 = Activation('relu')(up3)
# 32
up2 = UpSampling2D((2, 2))(up3)
up2 = concatenate([down2, up2], axis=3)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
up1 = UpSampling2D((2, 2))(up2)
up1 = concatenate([down1, up1], axis=3)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
classify = Conv2D(1, (1, 1), activation='sigmoid')(up1)
model = Model(inputs=inputs, outputs=classify)
return model
| [
"[email protected]"
] | |
24a1f766afd91bb14af906157c92b21157847e12 | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/Gather-Deployment-master/tensorflow/24.pyflink/notebooks/udf.py | c5bd455da383c118815ae7e7411968c2b4808d33 | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,501 | py | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
import collections
import functools
import inspect
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, _to_java_type
from pyflink.util import utils
__all__ = ['FunctionContext', 'ScalarFunction', 'udf']
class FunctionContext(object):
"""
Used to obtain global runtime information about the context in which the
user-defined function is executed. The information includes the metric group,
and global job parameters, etc.
"""
pass
class UserDefinedFunction(abc.ABC):
"""
Base interface for user-defined function.
"""
def open(self, function_context):
"""
Initialization method for the function. It is called before the actual working methods
and thus suitable for one time setup work.
:param function_context: the context of the function
:type function_context: FunctionContext
"""
pass
def close(self):
"""
Tear-down method for the user code. It is called after the last call to the main
working methods.
"""
pass
def is_deterministic(self):
"""
Returns information about the determinism of the function's results.
It returns true if and only if a call to this function is guaranteed to
always return the same result given the same parameters. true is assumed by default.
If the function is not pure functional like random(), date(), now(),
this method must return false.
:return: the determinism of the function's results.
:rtype: bool
"""
return True
class ScalarFunction(UserDefinedFunction):
"""
Base interface for user-defined scalar function. A user-defined scalar functions maps zero, one,
or multiple scalar values to a new scalar value.
"""
@abc.abstractmethod
def eval(self, *args):
"""
Method which defines the logic of the scalar function.
"""
pass
class DelegatingScalarFunction(ScalarFunction):
"""
Helper scalar function implementation for lambda expression and python function. It's for
internal use only.
"""
def __init__(self, func):
self.func = func
def eval(self, *args):
return self.func(*args)
class UserDefinedFunctionWrapper(object):
"""
Wrapper for Python user-defined function. It handles things like converting lambda
functions to user-defined functions, creating the Java user-defined function representation,
etc. It's for internal use only.
"""
def __init__(
self, func, input_types, result_type, deterministic = None, name = None
):
if inspect.isclass(func) or (
not isinstance(func, UserDefinedFunction) and not callable(func)
):
raise TypeError(
'Invalid function: not a function or callable (__call__ is not defined): {0}'.format(
type(func)
)
)
        if not isinstance(input_types, collections.abc.Iterable):
input_types = [input_types]
for input_type in input_types:
if not isinstance(input_type, DataType):
raise TypeError(
'Invalid input_type: input_type should be DataType but contains {}'.format(
input_type
)
)
if not isinstance(result_type, DataType):
raise TypeError(
'Invalid returnType: returnType should be DataType but is {}'.format(
result_type
)
)
self._func = func
self._input_types = input_types
self._result_type = result_type
self._judf_placeholder = None
self._name = name or (
func.__name__
if hasattr(func, '__name__')
else func.__class__.__name__
)
if (
deterministic is not None
and isinstance(func, UserDefinedFunction)
and deterministic != func.is_deterministic()
):
raise ValueError(
'Inconsistent deterministic: {} and {}'.format(
deterministic, func.is_deterministic()
)
)
# default deterministic is True
self._deterministic = (
deterministic
if deterministic is not None
else (
func.is_deterministic()
if isinstance(func, UserDefinedFunction)
else True
)
)
def _judf(self, is_blink_planner, table_config):
if self._judf_placeholder is None:
self._judf_placeholder = self._create_judf(
is_blink_planner, table_config
)
return self._judf_placeholder
def _create_judf(self, is_blink_planner, table_config):
func = self._func
if not isinstance(self._func, UserDefinedFunction):
func = DelegatingScalarFunction(self._func)
import cloudpickle
serialized_func = cloudpickle.dumps(func)
gateway = get_gateway()
j_input_types = utils.to_jarray(
gateway.jvm.TypeInformation,
[_to_java_type(i) for i in self._input_types],
)
j_result_type = _to_java_type(self._result_type)
if is_blink_planner:
PythonTableUtils = (
gateway.jvm.org.apache.flink.table.planner.utils.python.PythonTableUtils
)
j_scalar_function = PythonTableUtils.createPythonScalarFunction(
table_config,
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
self._deterministic,
_get_python_env(),
)
else:
PythonTableUtils = gateway.jvm.PythonTableUtils
j_scalar_function = PythonTableUtils.createPythonScalarFunction(
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
self._deterministic,
_get_python_env(),
)
return j_scalar_function
# TODO: support to configure the python execution environment
def _get_python_env():
gateway = get_gateway()
exec_type = (
gateway.jvm.org.apache.flink.table.functions.python.PythonEnv.ExecType.PROCESS
)
return gateway.jvm.org.apache.flink.table.functions.python.PythonEnv(
exec_type
)
def _create_udf(f, input_types, result_type, deterministic, name):
return UserDefinedFunctionWrapper(
f, input_types, result_type, deterministic, name
)
def udf(
f = None,
input_types = None,
result_type = None,
deterministic = None,
name = None,
):
"""
Helper method for creating a user-defined function.
Example:
::
>>> add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())
>>> @udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()],
... result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())
:param f: lambda function or user-defined function.
:type f: function or UserDefinedFunction or type
:param input_types: the input data types.
:type input_types: list[DataType] or DataType
:param result_type: the result data type.
:type result_type: DataType
:param name: the function name.
:type name: str
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:type deterministic: bool
:return: UserDefinedFunctionWrapper or function.
:rtype: UserDefinedFunctionWrapper or function
"""
# decorator
if f is None:
return functools.partial(
_create_udf,
input_types = input_types,
result_type = result_type,
deterministic = deterministic,
name = name,
)
else:
return _create_udf(f, input_types, result_type, deterministic, name)
| [
"[email protected]"
] | |
6c137c6126c25690c337197affaf147d9e37e27b | e38f7b5d46fd8a65c15e49488fc075e5c62943c9 | /pychron/processing/fits/interpolation_fit_selector.py | d87ba575e0cfa7ff8c9751b69ae6c15e42f3c200 | [] | no_license | INGPAN/pychron | 3e13f9d15667e62c347f5b40af366096ee41c051 | 8592f9fc722f037a61b0b783d587633e22f11f2f | refs/heads/master | 2021-08-15T00:50:21.392117 | 2015-01-19T20:07:41 | 2015-01-19T20:07:41 | 111,054,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | #===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
#============= standard library imports ========================
#============= local library imports ==========================
from pychron.processing.fits.fit import Fit
from pychron.processing.fits.fit_selector import FitSelector
from pychron.pychron_constants import FIT_TYPES_INTERPOLATE
class InterpolationFit(Fit):
def _get_fit_types(self):
return FIT_TYPES_INTERPOLATE
class InterpolationFitSelector(FitSelector):
fit_klass = InterpolationFit
fit_types = FIT_TYPES_INTERPOLATE
#============= EOF =============================================
| [
"[email protected]"
] | |
cdba6787cb45d6b039a3639d858ae0c457771963 | 9773059260c1f9395d182f7a65760b0917794a7f | /venv/bin/easy_install | 7b6db3a0dec0c3e534d13076015638fb1340631b | [] | no_license | Ruldane/DjangoCountWords | 05a452db5640b0efbff8f8e75061ed8dc2a40f6e | e54efb72caf678b7682642ce29b54b7d68170fa2 | refs/heads/master | 2020-06-10T19:50:41.289276 | 2019-06-25T14:54:39 | 2019-06-25T14:54:39 | 193,728,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | #!/home/ruldane/PycharmProjects/countword/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | ||
46780ffe28ee6581b83e37f84a8955507f9583fc | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/HackerRank-Mathematics/Constructing a Number.py | c87d42e4fdaa23fd44e0b8922ae34ab56bbcd61e | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #!/bin/python3
import math
import os
import random
import re
import sys
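# A number is divisible by 3 exactly when its digit sum is, and reordering
# the digits never changes that sum - so some arrangement works iff sum(a) % 3 == 0.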
def canConstruct(a):
return "Yes" if sum(a) % 3 == 0 else "No"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
a = list(map(int, input().rstrip().split()))
result = canConstruct(a)
fptr.write(result + '\n')
fptr.close()
| [
"[email protected]"
] | |
4abb7fabbd57ff0a857464e0b5557d97d45f5452 | 7a9034fa0698e9b6481c5de35ffd91c96d7552e9 | /personal_site/settings.py | 744f94f86bf4b5ecbb9947fff3a52102ef65e017 | [] | no_license | k4u5h4L/personal_site | 0e3144b62d9be0e08cf803cc5378c75f40425735 | 807867332e9bca759e2de8a28eb1840d2dd6a451 | refs/heads/main | 2023-02-07T07:52:11.031056 | 2020-12-19T16:36:38 | 2020-12-19T16:36:38 | 322,577,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,785 | py | """
Django settings for personal_site project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import json
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&q_x8wc#6ahgx(yk58au#nide7=58-xd$h)^0=x-g)&r+=x)mb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'portfolio',
'users',
'blog',
'django_filters',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'personal_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'personal_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "blog/static")
APPEND_SLASH = False
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_URL = 'landing_page'
LOGIN_REDIRECT_URL = 'home_page'
LOGOUT_REDIRECT_URL = 'landing_page'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
with open(f'{os.getcwd()}/personal_site/config.json') as fp:
email_cred = json.load(fp)
EMAIL_HOST_USER = email_cred['EMAIL_USR']
EMAIL_HOST_PASSWORD = email_cred['EMAI_PASSWD']
EMAIL_USE_TLS = True
| [
"[email protected]"
] | |
16f2f9490ec4a93471d45983ce6e3dc1fb71d1e2 | 2a03132e5742ea5d4012327194bc7ec4e7096194 | /tools/actions_local_runner.py | 4082abda527b90fe1b0a7da2a316ef5323d1dabd | [
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | jmnarloch/pytorch | 11f7be7b6d47b11868ede673879f651084f4e976 | 74c12da4517c789bea737dc947d6adc755f63176 | refs/heads/master | 2023-05-05T07:40:18.135517 | 2021-05-24T04:34:55 | 2021-05-24T04:36:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,495 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
import sys
import os
import argparse
import yaml
import asyncio
import shutil
import re
import fnmatch
import shlex
import configparser
from typing import List, Dict, Any, Optional, Tuple, Union
REPO_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class col:
HEADER = "\033[95m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
RESET = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def should_color() -> bool:
return hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
def color(the_color: str, text: str) -> str:
if should_color():
return col.BOLD + the_color + str(text) + col.RESET
else:
return text
def cprint(the_color: str, text: str) -> None:
if should_color():
print(color(the_color, text))
else:
print(text)
def git(args: List[str]) -> List[str]:
p = subprocess.run(
["git"] + args,
cwd=REPO_ROOT,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
)
lines = p.stdout.decode().strip().split("\n")
return [line.strip() for line in lines]
def find_changed_files() -> List[str]:
untracked = []
for line in git(["status", "--porcelain"]):
# Untracked files start with ??, so grab all of those
if line.startswith("?? "):
untracked.append(line.replace("?? ", ""))
# Modified, unstaged
modified = git(["diff", "--name-only"])
# Modified, staged
cached = git(["diff", "--cached", "--name-only"])
# Committed
merge_base = git(["merge-base", "origin/master", "HEAD"])[0]
diff_with_origin = git(["diff", "--name-only", merge_base, "HEAD"])
# De-duplicate
all_files = set()
for x in untracked + cached + modified + diff_with_origin:
stripped = x.strip()
if stripped != "" and os.path.exists(stripped):
all_files.add(stripped)
return list(all_files)
def print_results(job_name: str, passed: bool, streams: List[str]) -> None:
header(job_name, passed)
for stream in streams:
stream = stream.strip()
if stream != "":
print(stream)
async def shell_cmd(
cmd: Union[str, List[str]],
env: Optional[Dict[str, Any]] = None,
redirect: bool = True,
) -> Tuple[bool, str, str]:
if isinstance(cmd, list):
cmd_str = ' '.join(shlex.quote(arg) for arg in cmd)
else:
cmd_str = cmd
proc = await asyncio.create_subprocess_shell(
cmd_str,
shell=True,
cwd=REPO_ROOT,
env=env,
stdout=subprocess.PIPE if redirect else None,
stderr=subprocess.PIPE if redirect else None,
executable=shutil.which("bash"),
)
stdout, stderr = await proc.communicate()
passed = proc.returncode == 0
if not redirect:
return passed, "", ""
return passed, stdout.decode().strip(), stderr.decode().strip()
def header(name: str, passed: bool) -> None:
PASS = color(col.GREEN, "✓")
FAIL = color(col.RED, "x")
icon = PASS if passed else FAIL
print(f"{icon} {color(col.BLUE, name)}")
def get_flake_excludes() -> List[str]:
config = configparser.ConfigParser()
config.read(os.path.join(REPO_ROOT, ".flake8"))
excludes = re.split(r',\s*', config["flake8"]["exclude"].strip())
excludes = [e.strip() for e in excludes if e.strip() != ""]
return excludes
async def run_flake8(files: Optional[List[str]], quiet: bool) -> bool:
cmd = ["flake8"]
excludes = get_flake_excludes()
def should_include(name: str) -> bool:
for exclude in excludes:
if fnmatch.fnmatch(name, pat=exclude):
return False
if name.startswith(exclude) or ("./" + name).startswith(exclude):
return False
return True
if files is not None:
files = [f for f in files if should_include(f)]
if len(files) == 0:
print_results("flake8", True, [])
return True
# Running quicklint, pass in an explicit list of files (unlike mypy,
# flake8 will still use .flake8 to filter this list by the 'exclude's
# in the config
cmd += files
passed, stdout, stderr = await shell_cmd(cmd)
print_results("flake8", passed, [stdout, stderr])
return passed
async def run_mypy(files: Optional[List[str]], quiet: bool) -> bool:
env = os.environ.copy()
if should_color():
# Secret env variable: https://github.com/python/mypy/issues/7771
env["MYPY_FORCE_COLOR"] = "1"
if files is not None:
# Running quick lint, use mypy-wrapper instead so it checks that the files
# actually should be linted
passed, stdout, stderr = await shell_cmd(
[sys.executable, "tools/mypy_wrapper.py"] + [
os.path.join(REPO_ROOT, f) for f in files
],
env=env,
)
print_results("mypy (skipped typestub generation)", passed, [
stdout + "\n",
stderr + "\n",
])
return passed
# Not running quicklint, so use lint.yml
_, _, _ = await shell_cmd(
[
sys.executable,
"tools/actions_local_runner.py",
"--job",
"mypy",
"--file",
".github/workflows/lint.yml",
"--step",
"Run autogen",
],
redirect=False,
env=env,
)
passed, _, _ = await shell_cmd(
[
sys.executable,
"tools/actions_local_runner.py",
"--job",
"mypy",
"--file",
".github/workflows/lint.yml",
"--step",
"Run mypy",
],
redirect=False,
env=env,
)
return passed
async def run_shellcheck(files: Optional[List[str]], quiet: bool) -> bool:
if files is not None:
# The files list should already be filtered by '--file-filter ".sh"' when
# calling this script
passed, stdout, stderr = await shell_cmd(
["tools/run_shellcheck.sh"] + [
os.path.join(REPO_ROOT, f) for f in files
],
)
print_results("shellcheck: Run ShellCheck", passed, [
stdout + "\n",
stderr + "\n",
])
return passed
# Not running quicklint, so use lint.yml
passed, _, _ = await shell_cmd(
[
sys.executable,
"tools/actions_local_runner.py",
"--job",
"shellcheck",
"--file",
".github/workflows/lint.yml",
"--step",
"Run ShellCheck",
],
redirect=False,
)
return passed
async def run_step(
step: Dict[str, Any], job_name: str, files: Optional[List[str]], quiet: bool
) -> bool:
env = os.environ.copy()
env["GITHUB_WORKSPACE"] = "/tmp"
script = step["run"]
if quiet:
# TODO: Either lint that GHA scripts only use 'set -eux' or make this more
# resilient
script = script.replace("set -eux", "set -eu")
script = re.sub(r"^time ", "", script, flags=re.MULTILINE)
name = f'{job_name}: {step["name"]}'
passed, stderr, stdout = await shell_cmd(script, env=env)
if not passed:
print_results(name, passed, [stdout, stderr])
else:
print_results(name, passed, [])
return passed
async def run_steps(
steps: List[Dict[str, Any]], job_name: str, files: Optional[List[str]], quiet: bool
) -> bool:
coros = [run_step(step, job_name, files, quiet) for step in steps]
return all(await asyncio.gather(*coros))
def relevant_changed_files(file_filters: Optional[List[str]]) -> Optional[List[str]]:
changed_files: Optional[List[str]] = None
try:
changed_files = sorted(find_changed_files())
except Exception:
# If the git commands failed for some reason, bail out and use the whole list
print(
"Could not query git for changed files, falling back to testing all files instead",
file=sys.stderr,
)
return None
if file_filters is None:
return changed_files
else:
relevant_files = []
for f in changed_files:
for file_filter in file_filters:
if f.endswith(file_filter):
relevant_files.append(f)
break
return relevant_files
def grab_specific_steps(
steps_to_grab: List[str], job: Dict[str, Any]
) -> List[Dict[str, Any]]:
relevant_steps = []
for step in steps_to_grab:
for actual_step in job["steps"]:
if actual_step["name"].lower().strip() == step.lower().strip():
relevant_steps.append(actual_step)
break
if len(relevant_steps) != len(steps_to_grab):
raise RuntimeError(f"Missing steps:\n{relevant_steps}\n{steps_to_grab}")
return relevant_steps
def main() -> None:
parser = argparse.ArgumentParser(
description="Pull shell scripts out of GitHub actions and run them"
)
parser.add_argument("--file", help="YAML file with actions")
parser.add_argument(
"--file-filter",
help="only pass through files with this extension",
nargs="*",
)
parser.add_argument(
"--changed-only",
help="only run on changed files",
action="store_true",
default=False,
)
parser.add_argument("--job", help="job name", required=True)
parser.add_argument(
"--no-quiet", help="output commands", action="store_true", default=False
)
parser.add_argument("--step", action="append", help="steps to run (in order)")
args = parser.parse_args()
relevant_files = None
quiet = not args.no_quiet
if args.changed_only:
relevant_files = relevant_changed_files(args.file_filter)
if args.file is None:
# If there is no .yml file provided, fall back to the list of known
        # jobs. We use this for flake8 and mypy since they run differently
# locally than in CI due to 'make quicklint'
if args.job not in ad_hoc_steps:
raise RuntimeError(
f"Job {args.job} not found and no .yml file was provided"
)
future = ad_hoc_steps[args.job](relevant_files, quiet)
else:
if args.step is None:
            raise RuntimeError("At least one --step must be provided")
action = yaml.safe_load(open(args.file, "r"))
if "jobs" not in action:
raise RuntimeError(f"top level key 'jobs' not found in {args.file}")
jobs = action["jobs"]
if args.job not in jobs:
raise RuntimeError(f"job '{args.job}' not found in {args.file}")
job = jobs[args.job]
# Pull the relevant sections out of the provided .yml file and run them
relevant_steps = grab_specific_steps(args.step, job)
future = run_steps(relevant_steps, args.job, relevant_files, quiet)
loop = asyncio.get_event_loop()
loop.run_until_complete(future)
# These are run differently locally in order to enable quicklint, so dispatch
# out to special handlers instead of using lint.yml
ad_hoc_steps = {
"mypy": run_mypy,
"flake8-py3": run_flake8,
"shellcheck": run_shellcheck,
}
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| [
"[email protected]"
] | |
5c03b63199ce12903d3e2e9941d7d45034431151 | de95e9ace929f6279f5364260630e4bf7a658c1c | /recursion.py | bb3d67bb866d8fbe991317696a4fae7cd83a89fe | [] | no_license | ludwigwittgenstein2/Algorithms-Python | ceaf0739b8582f7bd749a9b3f52f283765044744 | c5bed8b2e398c218d1f36e72b05a3f5545cf783a | refs/heads/master | 2021-06-19T11:40:31.012268 | 2017-07-02T04:59:20 | 2017-07-02T04:59:20 | 75,953,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | def factorial(n):
if n == 0:
return 1
else:
recurse = factorial(n - 1)
result = n*recurse
return result
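# Quick check (added example):
if __name__ == "__main__":
    print(factorial(5))  # 120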
| [
"[email protected]"
] | |
706e98389051b53c21fed428b65f6748aea8884a | 18f8a1c7122c0b320f17ea31192439779a8c63e8 | /zoom/component.py | 4c33c561fec51ddfa4e1a08ca62ab77817d6130e | [
"MIT"
] | permissive | RyanLainchbury/zoom | d49afa8d3506fca2c6e426707bd60ba640420a45 | 684a16f4fe3cea3d26f2d520c743a871ca84ecc5 | refs/heads/master | 2020-12-25T19:03:12.881247 | 2017-06-09T07:29:27 | 2017-06-09T07:29:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,879 | py | """
zoom.component
Components encapsulate all of the parts that are required to make a
component appear on a page. This can include HTML, CSS and Javascript
parts and associated libraries.
Components parts are assembled in the way that kind of part
needs to be treated. For example HTML parts are simply joined
together in order and returned. CSS parts on the other hand are
joined together but any duplicate parts are ignored.
When a caller supplies JS or CSS as part of the component being assembled
these extra parts are submitted to the system to be included in thier
proper place within a response (typically a page template).
The Component object is currently experimental and is intended to be used
in future releases.
"""
import logging
import threading
from zoom.utils import OrderedSet, pp
# TODO: rename this to context (or system?)
composition = threading.local()
class Component(object):
"""component of a page response
>>> c = Component()
>>> c
<Component: {'html': []}>
>>> c += 'test'
>>> c
<Component: {'html': ['test']}>
>>> c += dict(css='mycss')
>>> c
<Component: {'css': OrderedSet(['mycss']), 'html': ['test']}>
>>> c += dict(css='mycss')
>>> c
<Component: {'css': OrderedSet(['mycss']), 'html': ['test']}>
>>> c += 'test2'
>>> sorted(c.parts.items())
[('css', OrderedSet(['mycss'])), ('html', ['test', 'test2'])]
>>> Component() + 'test1' + 'test2'
<Component: {'html': ['test1', 'test2']}>
>>> Component() + 'test1' + dict(css='mycss')
<Component: {'css': OrderedSet(['mycss']), 'html': ['test1']}>
>>> Component('test1', Component('test2'))
<Component: {'html': ['test1', 'test2']}>
>>> Component(
... Component('test1', css='css1'),
... Component('test2', Component('test3', css='css3')),
... )
<Component: {'css': OrderedSet(['css1', 'css3']), 'html': ['test1', 'test2', 'test3']}>
>>> Component((Component('test1', css='css1'), Component('test2', css='css2')))
<Component: {'css': OrderedSet(['css1', 'css2']), 'html': ['test1', 'test2']}>
>>> Component(Component('test1', css='css1'), Component('test2', css='css2'))
<Component: {'css': OrderedSet(['css1', 'css2']), 'html': ['test1', 'test2']}>
>>> composition.parts = Component()
>>> c = Component(Component('test1', css='css1'), Component('test2', css='css2'))
>>> c.render()
'test1test2'
>>> page2 = \\
... Component() + \\
... '<h1>Title</h1>' + \\
... dict(css='mycss') + \\
... dict(js='myjs') + \\
... 'page body goes here'
>>> t = (
... "<Component: {'css': OrderedSet(['mycss']), "
... "'html': ['<h1>Title</h1>', 'page body goes here'], "
... "'js': OrderedSet(['myjs'])}>"
... )
>>> #print(repr(page2) + '\\n' + t)
>>> repr(page2) == t
True
"""
# pylint: disable=too-few-public-methods
def __init__(self, *args, **kwargs):
"""construct a Component
>>> Component()
<Component: {'html': []}>
>>> Component('body')
<Component: {'html': ['body']}>
>>> Component('body', css='css1')
<Component: {'css': OrderedSet(['css1']), 'html': ['body']}>
>>> t = Component('body', css='css1', js='js1')
>>> repr(t) == (
... "<Component: {"
... "'css': OrderedSet(['css1']), "
... "'html': ['body'], "
... "'js': OrderedSet(['js1'])"
... "}>"
... )
True
"""
def is_iterable(obj):
"""Returns True if object is an iterable but not a string"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
def flatten(items):
"""Returns list of items with sublists incorporated into list"""
items_as_iterables = list(is_iterable(i) and i or (i,) for i in items)
return [i for j in items_as_iterables for i in j]
self.parts = {
'html': [],
}
for arg in flatten(args):
self += arg
self += kwargs
def __iadd__(self, other):
"""add something to a component
>>> page = Component('<h1>Title</h1>')
>>> page += dict(css='mycss')
>>> page += 'page body goes here'
>>> page += dict(js='myjs')
>>> result = (
... "<Component: {"
... "'css': OrderedSet(['mycss']), "
... "'html': ['<h1>Title</h1>', 'page body goes here'], "
... "'js': OrderedSet(['myjs'])"
... "}>"
... )
>>> #print(page)
>>> #print(result)
>>> result == repr(page)
True
>>> page = Component('test')
>>> page += dict(html='text')
>>> page
<Component: {'html': ['test', 'text']}>
"""
def rendered(obj):
"""call the render method if necessary"""
if not isinstance(obj, Component) and hasattr(obj, 'render'):
return obj.render()
return obj
other = rendered(other)
if isinstance(other, str):
self.parts['html'].append(other)
elif isinstance(other, dict):
for key, value in other.items():
part = self.parts.setdefault(key, OrderedSet())
if key == 'html':
if isinstance(value, list):
part.extend(value)
else:
part.append(value)
else:
if isinstance(value, list):
part |= value
else:
part |= [value]
elif isinstance(other, Component):
for key, value in other.parts.items():
part = self.parts.setdefault(key, OrderedSet())
if key == 'html':
part.extend(value)
else:
part |= value
return self
def __add__(self, other):
"""add a component to something else
>>> (Component() + 'test1' + dict(css='mycss')) + 'test2'
<Component: {'css': OrderedSet(['mycss']), 'html': ['test1', 'test2']}>
>>> Component() + 'test1' + dict(css='mycss') + dict(css='css2')
<Component: {'css': OrderedSet(['mycss', 'css2']), 'html': ['test1']}>
"""
result = Component()
result += self
result += other
return result
def __repr__(self):
return '<Component: {{{}}}>'.format(
', '.join(
'{!r}: {!r}'.format(i, j)
for i, j in sorted(self.parts.items())
)
)
def render(self):
"""renders the component"""
composition.parts += self
return ''.join(self.parts['html'])
def __str__(self):
return self.render()
component = Component
def compose(*args, **kwargs):
"""Compose a response - DEPRECATED"""
composition.parts += component(**kwargs)
return ''.join(args)
def handler(request, handler, *rest):
"""Component handler"""
pop = request.session.__dict__.pop
composition.parts = Component(
success=pop('system_successes', []),
warning=pop('system_warnings', []),
error=pop('system_errors', []),
)
result = handler(request, *rest)
logger = logging.getLogger(__name__)
logger.debug('component middleware')
# TODO: clean this up, use a single alerts list with an alert type value
success_alerts = composition.parts.parts.get('success')
if success_alerts:
if not hasattr(request.session, 'system_successes'):
request.session.system_successes = []
request.session.system_successes = list(success_alerts)
warning_alerts = composition.parts.parts.get('warning')
if warning_alerts:
if not hasattr(request.session, 'system_warnings'):
request.session.system_warnings = []
request.session.system_warnings = list(warning_alerts)
error_alerts = composition.parts.parts.get('error')
if error_alerts:
if not hasattr(request.session, 'system_errors'):
request.session.system_errors = []
request.session.system_errors = list(error_alerts)
return result
# def component(*args, **kwargs):
# """assemble parts of a component
#
# >>> system.setup()
# >>> system.css
# OrderedSet()
#
# >>> component('test', css='mycss')
# 'test'
# >>> system.css
# OrderedSet(['mycss'])
#
# >>> component(100, css='mycss')
# '100'
#
# >>> component(css='mycss', html='test')
# 'test'
# >>> system.css
# OrderedSet(['mycss'])
#
# >>> component('test', html='more', css='mycss')
# 'testmore'
# >>> system.css
# OrderedSet(['mycss'])
#
# >>> component('test', 'two', css=['mycss','css2'], js='myjs')
# 'testtwo'
# >>> system.css
# OrderedSet(['mycss', 'css2'])
# >>> system.js
# OrderedSet(['myjs'])
#
# >>> component('test', js='js2')
# 'test'
# >>> system.js
# OrderedSet(['myjs', 'js2'])
#
# >>> component(['test1'], ('test2',), 'test3')
# 'test1test2test3'
#
# >>> from mvc import DynamicView
# >>> class MyThing(DynamicView):
# ... def __str__(self):
# ... return self.model
# >>> hasattr(MyThing('test'), '__iter__')
# False
# >>> component(['test1'], ('test2',), 'test3', MyThing('test4'))
# 'test1test2test3test4'
# >>> component(MyThing('test4'))
# 'test4'
# >>> component(MyThing('test4'), MyThing('test5'))
# 'test4test5'
# >>> component((MyThing('test4'), MyThing('test5')))
# 'test4test5'
# >>> args = (MyThing('test4'), MyThing('test5'))
# >>> component(args)
# 'test4test5'
# >>> component(*list(args))
# 'test4test5'
#
# >>> system.setup()
# >>> component('test', js=[])
# 'test'
# >>> system.js
# OrderedSet()
# """
# def is_iterable(item):
# return hasattr(item, '__iter__')
#
# def as_iterable(item):
# return not is_iterable(item) and (item,) or item
#
# def flatten(items):
# items_as_iterables = list(is_iterable(i) and i or (i,) for i in items)
# return [i for j in items_as_iterables for i in j]
#
# parts = {
# 'html': flatten(args),
# }
# for key, value in kwargs.items():
# part = parts.setdefault(key, OrderedSet())
# if key == 'html':
# part.extend(as_iterable(value))
# else:
# part |= OrderedSet(as_iterable(value))
# for key in ['css', 'js', 'styles', 'libs', 'head', 'tail']:
# part = getattr(system, key)
# part |= parts.get(key, [])
# return ''.join(map(str, parts['html']))
| [
"[email protected]"
] | |
267bb492f6f1d1c52316995189ee560e6d5fac8b | cbbd5ae034bfc4a81a49af0fb7712516136afa6a | /PycharmProjects/Sensel/MISC/plot_contact_point_dynamic.py | c9c6a4d57c05bf7f73c33ae49037fdcb550ba242 | [] | no_license | pratikaher88/SenselWork | fafe12037ae8349510f29b3dc60130d26992ea77 | d6f17bca7d2ac6ec6621f9b1b1540ca9e80eb2f7 | refs/heads/master | 2020-03-22T09:12:19.559029 | 2019-09-08T19:25:15 | 2019-09-08T19:25:15 | 139,822,527 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,496 | py | #!/usr/bin/env python
##########################################################################
# MIT License
#
# Copyright (c) 2013-2017 Sensel, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
##########################################################################
import sys
from SenselUse import sensel,sensel_register_map
import binascii
import threading
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import style
style.use('fivethirtyeight')
fig=plt.figure()
ax1=fig.add_subplot(1,1,1)
global enter_pressed
X=[]
Y=[]
def waitForEnter():
global enter_pressed
input("Press Enter to exit...")
enter_pressed = True
return
def openSensel():
handle = None
(error, device_list) = sensel.getDeviceList()
if device_list.num_devices != 0:
(error, handle) = sensel.openDeviceByID(device_list.devices[0].idx)
return handle
def initFrame():
error = sensel.setFrameContent(handle, sensel.FRAME_CONTENT_PRESSURE_MASK | sensel.FRAME_CONTENT_CONTACTS_MASK)
sensel.setContactsMask(handle, sensel.CONTACT_MASK_ELLIPSE | sensel.CONTACT_MASK_BOUNDING_BOX)
# sensel.writeReg(handle, sensel_register_map.SENSEL_REG_BASELINE_DYNAMIC_ENABLED, 1, [0])
(error, frame) = sensel.allocateFrameData(handle)
error = sensel.startScanning(handle)
return frame
# def initFrameForContacts():
# error = sensel.setFrameContent(handle, sensel.FRAME_CONTENT_CONTACTS_MASK)
# (error, frame) = sensel.allocateFrameData(handle)
# error = sensel.startScanning(handle)
# return frame
def scanFrames(frame, info):
error = sensel.readSensor(handle)
(error, num_frames) = sensel.getNumAvailableFrames(handle)
for i in range(num_frames):
error = sensel.getFrame(handle, frame)
printFrame(frame, info)
def printFrame(frame, info):
# total_force = 0.0
# for n in range(info.num_rows * info.num_cols):
# total_force += frame.force_array[n]
# print("Total Force: " + str(total_force))
if frame.n_contacts > 0:
print("\nNum Contacts: ", frame.n_contacts)
for n in range(frame.n_contacts):
c = frame.contacts[n]
print("Contact ID: ", c.id)
print("X_pos",c.x_pos)
print("Y_pos",c.y_pos)
X.append(c.x_pos)
Y.append(c.y_pos)
plt.ion()
animated_plot = plt.plot(X, Y, 'ro')[0]
for i in range(len(X)):
animated_plot.set_xdata(X[0:i])
animated_plot.set_ydata(Y[0:i])
plt.draw()
plt.pause(0.0001)
# f = open('sampleText', 'a')
# f.write(str(c.x_pos)+','+str(c.y_pos)+'\n')
# animate(c.x_pos,c.y_pos)
# plt.scatter(c.x_pos, c.y_pos)
# ani = animation.FuncAnimation(plt.figure(), plt.scatter(c.x_pos,c.y_pos), interval=1000)
# plt.show(block=False)
total_force = 0.0
for n in range(info.num_rows * info.num_cols):
total_force += frame.force_array[n]
print("Total Force", total_force)
if c.state == sensel.CONTACT_START:
sensel.setLEDBrightness(handle, c.id, 100)
# Gives force at contact begin
# for n in range(info.num_rows * info.num_cols):
# total_force += frame.force_array[n]
elif c.state == sensel.CONTACT_END:
sensel.setLEDBrightness(handle, c.id, 0)
def closeSensel(frame):
error = sensel.freeFrameData(handle, frame)
error = sensel.stopScanning(handle)
error = sensel.close(handle)
if __name__ == "__main__":
global enter_pressed
enter_pressed = False
plt.xlim(0, 230)
plt.ylim(0, 130)
# plt.scatter(X, Y)
plt.gca().invert_yaxis()
# plt.show(block=False)
handle = openSensel()
if handle != None:
(error, info) = sensel.getSensorInfo(handle)
frame = initFrame()
t = threading.Thread(target=waitForEnter)
t.start()
while (enter_pressed == False):
scanFrames(frame, info)
closeSensel(frame)
# plt.xlim(0, 230)
# plt.ylim(0, 130)
plt.scatter(X, Y)
# plt.gca().invert_yaxis()
# ani = animation.FuncAnimation(fig, animatethis, interval=1000)
# plt.show()
# with open('sampleText', "w"):
# pass
print(X)
print(Y)
| [
"[email protected]"
] | |
4c533330fc30bad9170734f0a1c30bbcfc8d9a59 | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Main/Cam_Main/Cam_Item_Layout_20190117213541.py | 1cc9c0f906fa3ff3efcc249cec01387c59eb07fa | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,830 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
import plugin.Qt as Qt
if Qt.__binding__.startswith('PyQt'):
from Qt import _uic as uic
return uic.loadUiType(uiFile)
elif Qt.__binding__ == 'PySide':
import pysideuic as uic
else:
import pyside2uic as uic
import xml.etree.ElementTree as xml
from cStringIO import StringIO
parsed = xml.parse(uiFile)
widget_class = parsed.find('widget').get('class')
form_class = parsed.find('class').text
with open(uiFile, 'r') as f:
o = StringIO()
frame = {}
uic.compileUi(f, o, indent=0)
pyc = compile(o.getvalue(), '<string>', 'exec')
exec pyc in frame
# Fetch the base_class and form class based on their type
# in the xml from designer
form_class = frame['Ui_%s'%form_class]
base_class = eval('%s'%widget_class)
return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Item_Layout.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item_Layout(form_class,base_class):
def __init__(self):
super(Cam_Item_Layout,self).__init__()
self.setupUi(self)
self.Item_Add_BTN.clicked.connect(self.Item_Add_Fn)
self.Item_Clear_BTN.clicked.connect(self.Item_Clear_Fn)
def Item_Add_Fn(self):
Cam_Item(self)
def Item_Clear_Fn(self):
for i,child in enumerate(self.Item_Layout.children()):
if i != 0:
child.deleteLater()
UI_PATH = os.path.join(DIR,"ui","Cam_Item.ui")
form_class , base_class = loadUiType(UI_PATH)
class Cam_Item(form_class,base_class):
def __init__(self,parent):
super(Cam_Item,self).__init__()
self.setupUi(self)
self.Cam_Del_BTN.clicked.connect(self.Cam_Del_BTN_Fn)
TotalCount = len(parent.Item_Layout.children())
parent.Item_Layout.layout().insertWidget(TotalCount-1,self)
self.Cam_LE.setText("Cam_Item_%s" % TotalCount)
self.Cam_Num_Label.setText(u"镜头%s" % TotalCount)
self.setObjectName("Cam_Item_%s" % TotalCount)
self.num = TotalCount
def Cam_Del_BTN_Fn(self):
self.deleteLater()
# # TotalCount = len(self.parent().children())
ChildrenList = self.parent().children()
# print ChildrenList
# for i in range(self.num,len(ChildrenList)):
# if i+1 == len(ChildrenList):
# del(ChildrenList[i])
# break
# ChildrenList[i] = ChildrenList[i+1]
# print ChildrenList
# count = 0
# for child in ChildrenList:
# if count != 0:
# child.Cam_Num_Label.setText(u"%s" % count)
# child.setObjectName("Cam_Item_%s" % count)
# print count
# count += 1
# for i,child in enumerate(ChildrenList):
# if i != 0:
# print u"%s" % i
# child.Cam_Num_Label.setText(u"%s" % i)
# child.setObjectName("Cam_Item_%s" % i)
index = 999999
for i,child in enumerate(ChildrenList):
if i != 0:
child.Cam_Num_Label.setText(u"镜头%s" % i)
child.setObjectName("Cam_Item_%s" % i)
if i < self.num:
child.Cam_Num_Label.setText(u"镜头%s" % (i-1))
                    child.setObjectName("Cam_Item_%s" % (i - 1))
| [
"[email protected]"
] | |
cf24680305aff81ff86ab5ebb28a06a585343af1 | cbfddfdf5c7fa8354162efe50b41f84e55aff118 | /venv/lib/python3.7/site-packages/apscheduler/executors/debug.py | ac739aebcef52bb0b824e66c1fcfc7693b4fab6a | [
"MIT",
"Apache-2.0"
] | permissive | tclerico/SAAC | 8d2245221dd135aea67c5e079ac7eaf542b25e2f | 2f52007ae8043096662e76da828a84e87f71091e | refs/heads/master | 2022-12-09T21:56:33.430404 | 2019-02-20T14:23:51 | 2019-02-20T14:23:51 | 153,152,229 | 3 | 0 | MIT | 2022-09-16T17:52:47 | 2018-10-15T17:13:29 | Python | UTF-8 | Python | false | false | 573 | py | import sys
from apscheduler.executors.base import BaseExecutor, run_job
class DebugExecutor(BaseExecutor):
"""
A special executor that executes the target callable directly instead of deferring it to a
thread or process.
Plugin alias: ``debug``
"""
def _do_submit_job(self, job, run_times):
try:
events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
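# Example (added sketch; the scheduler wiring below is an assumption for
# illustration, not part of this module):
#
#   from apscheduler.schedulers.background import BackgroundScheduler
#
#   scheduler = BackgroundScheduler(executors={'default': DebugExecutor()})
#   scheduler.add_job(lambda: print('tick'), 'interval', seconds=1)
#   scheduler.start()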
| [
"[email protected]"
] | |
fda997527f91121c4f1bffd1b3f2b0ddcc3dc4fa | 1d7eec692553afc411ec1e7325634f71a2aed291 | /backend/curriculum_tracking/migrations/0007_auto_20200710_1319.py | 9c49a0b43cc247004d1e90d0e0992ef9482c6d27 | [] | no_license | Andy-Nkumane/Tilde | a41a2a65b3901b92263ae94d527de403f59a5caf | 80de97edaf99f4831ca8cb989b93e3be5e09fdd6 | refs/heads/develop | 2023-05-09T10:02:41.240517 | 2021-05-28T09:20:51 | 2021-05-28T09:20:51 | 299,501,586 | 0 | 0 | null | 2020-10-25T22:37:30 | 2020-09-29T04:10:48 | Python | UTF-8 | Python | false | false | 1,043 | py | # Generated by Django 2.1.5 on 2020-07-10 13:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('curriculum_tracking', '0006_auto_20200701_0539'),
]
operations = [
migrations.AddField(
model_name='recruitproject',
name='code_review_competent_since_last_review_request',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='recruitproject',
name='code_review_excellent_since_last_review_request',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='recruitproject',
name='code_review_ny_competent_since_last_review_request',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='recruitproject',
name='code_review_red_flag_since_last_review_request',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
966665af55225f40fdd4da19c28dd883a43f62ff | 3c8bc614c9f09db5efce54af3cbcaf78e0f48b54 | /0x0B-python-input_output/4-append_write.py | e5256329fd3346953966d0bb9bdd0fec8b45629c | [] | no_license | davidknoppers/holbertonschool-higher_level_programming | 7848d301c4bf5c1fa285314392adfb577d6d082f | beaf6e5ece426c2086f34763e50c3ce0f56923ac | refs/heads/master | 2021-04-29T10:10:27.071278 | 2017-05-03T02:46:44 | 2017-05-03T02:46:44 | 77,847,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | #!/usr/bin/python3
"""
One function in this module
append_write opens a file and appends some text to it
"""
def append_write(filename="", text=""):
"""
open file
put some text at the end of it
close that file
"""
with open(filename, mode='a', encoding="utf-8") as myFile:
chars_written = myFile.write(text)
return chars_written
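# Example (added; the file name is illustrative):
#
#   count = append_write("notes.txt", "one more line\n")
#   print(count)  # number of characters appended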
| [
"[email protected]"
] | |
51e143411179c72bfa7bbfbd9bd7d7bd04103a16 | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/model_test.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/quantize/python/common.py | 78f1c2bb4f48920369f7dab6876ead2faead3890 | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 185 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/contrib/quantize/python/common.py | [
"[email protected]"
] | |
e1164b25df69866a6cb1d50cfb9672d8d6217e7a | a9e81c87022fdde86d47a4ec1e74791da8aa0e30 | /python-learning/libraries/pyqt5/base/layouts/complex-layout.py | b774d4700b1104671fb8542f99d2d70b4238e84f | [
"Apache-2.0"
] | permissive | ymli1997/deeplearning-notes | c5c6926431b7efc1c6823d85e3eb470f3c986494 | f2317d80cd998305814f988e5000241797205b63 | refs/heads/master | 2020-07-29T11:15:43.689307 | 2018-05-05T10:58:18 | 2018-05-05T10:58:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | # -*- coding: utf-8 -*-
'''
Complex layout: nesting VBox, HBox and Grid layouts
'''
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
class Form(QMainWindow):
def __init__(self,parent=None):
super().__init__(parent)
centerWidget = QWidget()
        defaultLayout = QVBoxLayout()
vboxlayout = QVBoxLayout()
hboxlayout = QHBoxLayout()
gridlayout = QGridLayout()
        # Add the widgets
buttons = []
for i in range(5):
buttons.append(QPushButton("Grid Button %d" %(i)))
vboxlayout.addWidget(QPushButton("VBox Button %d" %(i)))
hboxlayout.addWidget(QPushButton("HBox Button %d" %(i)))
gridlayout.addWidget(buttons[0],0,0)
gridlayout.addWidget(buttons[1],0,1)
        gridlayout.addWidget(buttons[2],1,0,1,2)  # spans 1 row, 2 columns
gridlayout.addWidget(buttons[3],2,0)
gridlayout.addWidget(buttons[4],2,1)
        defaultLayout.addLayout(vboxlayout)
        defaultLayout.addLayout(gridlayout)
        defaultLayout.addLayout(hboxlayout)
        centerWidget.setLayout(defaultLayout)
self.setCentralWidget(centerWidget)
self.resize(640,480)
        self.setWindowTitle("PyQt5 - Complex Layout")
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Form()
ex.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
1b9da0c86e0e095737f906fdf95ded574b5a0f3c | 7ba5ec9aa9ddca3f9b3384fc4457b0a865c2a0a1 | /src/301.py | 55d8e16e1a35748acecac34a5c82e9d8d714e5c4 | [] | no_license | ecurtin2/Project-Euler | 71f79ee90a9abd0943421677d78a6c087419e500 | 79479da7a45b3ae67c0c7ea24da5f7d43c6f25d3 | refs/heads/master | 2021-03-19T14:52:57.045443 | 2018-04-12T22:05:37 | 2018-04-12T22:05:37 | 100,059,180 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | """
Nim is a game played with heaps of stones, where two players take it in turn to remove any number of stones from any heap until no stones remain.
We'll consider the three-heap normal-play version of Nim, which works as follows:
- At the start of the game there are three heaps of stones.
- On his turn the player removes any positive number of stones from any single heap.
- The first player unable to move (because no stones remain) loses.
If (n1,n2,n3) indicates a Nim position consisting of heaps of size n1, n2 and n3 then there is a simple function X(n1,n2,n3) — that you may look up or attempt to deduce for yourself — that returns:
zero if, with perfect strategy, the player about to move will eventually lose; or
non-zero if, with perfect strategy, the player about to move will eventually win. For example X(1,2,3) = 0 because, no matter what the current player does, his opponent can respond with a move that leaves two heaps of equal size, at which point every move by the current player can be mirrored by his opponent until no stones remain; so the current player loses. To illustrate:
- current player moves to (1,2,1)
- opponent moves to (1,0,1)
- current player moves to (0,0,1)
- opponent moves to (0,0,0), and so wins.
For how many positive integers n ≤ 2^30 does X(n,2n,3n) = 0 ?
"""
import numpy as np
import time
def X(n):
n = int(n)
return bool(n ^ (2 * n) ^ (3 * n))
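# Note (added): X is the nim-sum of the three heap sizes. By the
# Sprague-Grundy theory of Nim, the player to move loses exactly when the
# nim-sum is zero, so the problem asks how many n give n ^ 2n ^ 3n == 0.
# (The problem bound is 2**30; the run below uses N = 2**28.)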
N = 2**28
t = time.time()
n = np.arange(1, N)
x = np.bitwise_xor(n, np.bitwise_xor(2*n, 3*n)).astype(bool)
total = np.sum(~x)  # count the positions whose nim-sum is zero
print("Numpy done in {:10.8f} seconds.".format(time.time() - t))
print(total)
#t = time.time()
#total = sum(not X(i) for i in range(1, N))
#print("Python done in {:10.8f} seconds.".format(time.time() - t))
#print(total) | [
"[email protected]"
] | |
a24baa4d6bc822d4b1281390833220aec3d84176 | 2aa47f47fb81798afdf41437844cbbea8e9de66c | /02pythonBase/day10/res/exercise/mysum.py | f01610139cf297dab58d87dd27cd73d8d71c2bb2 | [] | no_license | nykh2010/python_note | 83f2eb8979f2fb25b4845faa313dbd6b90b36f40 | 5e7877c9f7bf29969072f05b98277ef3ba090969 | refs/heads/master | 2020-04-27T23:10:16.578094 | 2019-03-23T02:43:14 | 2019-03-23T02:43:14 | 174,765,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # 练习:
# Write a function mysum that accepts any number of numeric arguments;
# the function returns the sum of all its arguments:
# def mysum(*args):
#     ...  # <<<--- implement this part yourself
# print(mysum(1, 2, 3, 4))  # 10
# print(mysum(1, 2, 3, 4, 5))  # 15
def mysum(*args):
    print("mysum defined on line 11 was called!")
    s = 0  # running total
    for x in args:
        s += x
    return s
def mysum(*args):
    print("mysum defined on line 17 was called!")
    return sum(args)
print(mysum(1, 2, 3, 4))  # 10
print(mysum(1, 2, 3, 4, 5))  # 15
| [
"[email protected]"
] | |
40dfe239746f14da2cd97adf27df4e81ed29da65 | 4f7140c62cc649373379941224072c9e6b707ef7 | /examples/prompts/clock-input.py | fd1f8760ed2e77af6f65fbae97fa63ce949125fe | [
"BSD-3-Clause"
] | permissive | qianhaohu/python-prompt-toolkit | 03d282a0a6a258a08ef822bc342a6b7fb65667f7 | 237cf46ff50c8a689a72e3dfe664dfe69bffd245 | refs/heads/master | 2020-05-16T05:19:38.934218 | 2019-04-16T19:10:07 | 2019-04-16T19:40:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | #!/usr/bin/env python
"""
Example of a 'dynamic' prompt. On that shows the current time in the prompt.
"""
from __future__ import unicode_literals
import datetime
from prompt_toolkit.shortcuts import prompt
def get_prompt():
" Tokens to be shown before the prompt. "
now = datetime.datetime.now()
return [
('bg:#008800 #ffffff', '%s:%s:%s' % (now.hour, now.minute, now.second)),
('bg:cornsilk fg:maroon', ' Enter something: ')
]
def main():
    # refresh_interval re-renders the prompt twice per second so the clock ticks.
    result = prompt(get_prompt, refresh_interval=.5)
print('You said: %s' % result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
5d9c0e1d64a54205baad6cd4387d049d7075add4 | 96622790b66e45926b79bc524ec75a0f4d53a7eb | /src/misc-preprocessing-scripts/maeToPdb.py | 607322e4c183184fce0eb9365a2c80401d9cb81f | [] | no_license | akma327/GPCR-WaterDynamics | a8c2e13e18f953b6af66a3e669052cb3eacd346b | 685f4dea0605d65c003bf952afd964df6e605b06 | refs/heads/master | 2021-01-22T07:42:42.539496 | 2017-05-27T07:23:44 | 2017-05-27T07:23:44 | 92,574,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,514 | py | # Author: Anthony Kai Kwang Ma
# Email: [email protected]
# maeToPdb.py
# MAE To PDB File Converter
# Usage:
# python maeToPdb.py <input path to mae> <output path for pdb> <optional pdb file name>
# <input path to mae> Provide the absolute path to the mae file name
# <output path for pdb> Provide the directory path to store the pdb
# <optional pdb file name> Default is to rename the pdb to the same prefix as mae, but user can specify new name
# Example:
#
import vmd, molecule
import sys
import os
PROPER_USAGE_STR = """
# Usage:
# python maeToPdb.py <input path to mae> <output path for pdb> <optional pdb file name>
# <input path to mae> Provide the absolute path to the mae file name
# <output path for pdb> Provide the directory path to store the pdb
# <optional pdb file name> Default is to rename the pdb to the same prefix as mae, but user can specify new name
# Example:
# INPUT_MAE_PATH="/scratch/PI/rondror/DesRes-Simulations/ordered-from-DesRes/nature2013/DESRES-Trajectory_nature2013-AA-all/DESRES-Trajectory_nature2013-AA-58-all/nature2013-AA-58-all/nature2013-AA-58-all.mae"
# OUTPUT_PDB_PATH="/scratch/PI/rondror/akma327/noncovalent_Interaction_Scripts/DynamicInteractions/tools"
# PDB_FILE_NAME="nature2013-AA-58-new.pdb"
# python maeToPdb.py $INPUT_MAE_PATH $OUTPUT_PDB_PATH $PDB_FILE_NAME """
MIN_NUM_ARGS = 3
# import vmd, molecule
# input_mae_path= "nature2011-B-all.mae"
# output_pdb_file_path = "step5_assembly.pdb"
# molid = molecule.load('mae', input_mae_path)
# molecule.write(molid, 'pdb', output_pdb_file_path)
# import mdtraj as md
# t = md.load('step5_assembly.pdb')
def maeToPdb(input_mae_path, output_pdb_file_path):
molid = molecule.load('mae', input_mae_path)
molecule.write(molid, 'pdb', output_pdb_file_path)
print("Finished Conversion for: " + str(input_mae_path))
if __name__ == "__main__":
if(len(sys.argv) < MIN_NUM_ARGS):
print("Invalid Arguments")
print(PROPER_USAGE_STR)
exit(0)
    input_mae_path = sys.argv[1]
    output_pdb_path = sys.argv[2]
    # Honour the optional PDB file name; default to the .mae prefix otherwise.
    if len(sys.argv) > MIN_NUM_ARGS:
        pdb_name = sys.argv[3]
    else:
        pdb_name = os.path.splitext(os.path.basename(input_mae_path))[0] + ".pdb"
    output_pdb_file_path = os.path.join(output_pdb_path, pdb_name)
    print(input_mae_path, output_pdb_file_path)
    maeToPdb(input_mae_path, output_pdb_file_path)
| [
"[email protected]"
] | |
d6d93cb282a9b64ae57a7522d83152c22b1aae24 | 6814b9b28204fa58f77598d01c760ddeb4b66353 | /baselines/jft/experiments/jft300m_vit_base16_heteroscedastic_finetune_cifar.py | 8384bfa093ad4ce0435cbcdfa7302096e6fa5720 | [
"Apache-2.0"
] | permissive | qiao-maoying/uncertainty-baselines | a499951ea1450323e00fe03891ba8f781fe1cdc7 | 54dce3711b559ae3955a8a7d05c88eb982dea470 | refs/heads/main | 2023-07-17T23:17:10.867509 | 2021-08-18T20:32:11 | 2021-08-18T20:32:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Finetune a ViT-B/16 heteroscedastic model on CIFAR-10.
"""
# pylint: enable=line-too-long
import ml_collections
# TODO(dusenberrymw): Open-source remaining imports.
def get_sweep(hyper):
return hyper.product([])
def get_config():
"""Config for training a patch-transformer on JFT."""
config = ml_collections.ConfigDict()
# Fine-tuning dataset
config.dataset = 'cifar10'
config.val_split = 'train[98%:]'
config.train_split = 'train[:98%]'
config.num_classes = 10
BATCH_SIZE = 512 # pylint: disable=invalid-name
config.batch_size = BATCH_SIZE
config.total_steps = 10_000
INPUT_RES = 384 # pylint: disable=invalid-name
pp_common = '|value_range(-1, 1)'
# pp_common += f'|onehot({config.num_classes})'
# To use ancestor 'smearing', use this line instead:
pp_common += f'|onehot({config.num_classes}, key="label", key_result="labels")' # pylint: disable=line-too-long
pp_common += '|keep("image", "labels")'
config.pp_train = f'decode|inception_crop({INPUT_RES})|flip_lr' + pp_common
config.pp_eval = f'decode|resize({INPUT_RES})' + pp_common
config.shuffle_buffer_size = 50_000 # Per host, so small-ish is ok.
config.log_training_steps = 10
config.log_eval_steps = 100
# NOTE: eval is very fast O(seconds) so it's fine to run it often.
config.checkpoint_steps = 1000
config.checkpoint_timeout = 1
config.prefetch_to_device = 2
config.trial = 0
# Model section
# pre-trained model ckpt file
# !!! The below section should be modified per experiment
config.model_init = '/path/to/pretrained_model_ckpt.npz'
# Model definition to be copied from the pre-training config
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = [16, 16]
config.model.hidden_size = 768
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.attention_dropout_rate = 0.
config.model.transformer.dropout_rate = 0.
config.model.transformer.mlp_dim = 3072
config.model.transformer.num_heads = 12
config.model.transformer.num_layers = 12
config.model.classifier = 'token' # Or 'gap'
# This is "no head" fine-tuning, which we use by default
config.model.representation_size = None
# # Heteroscedastic
config.model.multiclass = True
config.model.temperature = 3.0
config.model.mc_samples = 1000
config.model.num_factors = 3
config.model.param_efficient = True
config.model.return_locs = True # set True to fine-tune a homoscedastic model
# Optimizer section
config.optim_name = 'Momentum'
config.optim = ml_collections.ConfigDict()
config.grad_clip_norm = 1.0
config.weight_decay = None # No explicit weight decay
config.loss = 'softmax_xent' # or 'sigmoid_xent'
config.lr = ml_collections.ConfigDict()
config.lr.base = 0.001
config.lr.warmup_steps = 500
config.lr.decay_type = 'cosine'
config.lr.scale_with_batchsize = False
config.args = {}
return config
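# Example (added sketch): configs like this are typically consumed through
# ml_collections' config flags from a training script, e.g.
#
#   from ml_collections import config_flags
#   config_flags.DEFINE_config_file('config', 'path/to/this_config.py')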
| [
"[email protected]"
] | |
5cffb5a2fb9d408a8f4fe88b0e46d790428e9c92 | 1bde114a847c629701e3acd004be5788594e0ef1 | /Examples/Decorator/alldecorators/CoffeeShop.py | 9e4861b473c3803d1d2a0b2ad0b382e4cce35f7a | [] | no_license | BruceEckel/ThinkingInPython | 0b234cad088ee144bb8511e1e7db9fd5bba78877 | 76a1310deaa51e02e9f83ab74520b8269aac6fff | refs/heads/master | 2022-02-21T23:01:40.544505 | 2022-02-08T22:26:52 | 2022-02-08T22:26:52 | 97,673,620 | 106 | 33 | null | 2022-02-08T22:26:53 | 2017-07-19T04:43:50 | Python | UTF-8 | Python | false | false | 1,722 | py | # Decorator/alldecorators/CoffeeShop.py
# Coffee example using decorators
class DrinkComponent:
def getDescription(self):
return self.__class__.__name__
def getTotalCost(self):
return self.__class__.cost
class Mug(DrinkComponent):
cost = 0.0
class Decorator(DrinkComponent):
def __init__(self, drinkComponent):
self.component = drinkComponent
def getTotalCost(self):
return self.component.getTotalCost() + \
DrinkComponent.getTotalCost(self)
def getDescription(self):
return self.component.getDescription() + \
' ' + DrinkComponent.getDescription(self)
class Espresso(Decorator):
cost = 0.75
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class Decaf(Decorator):
cost = 0.0
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class FoamedMilk(Decorator):
cost = 0.25
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class SteamedMilk(Decorator):
cost = 0.25
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class Whipped(Decorator):
cost = 0.25
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class Chocolate(Decorator):
cost = 0.25
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
cappuccino = Espresso(FoamedMilk(Mug()))
print(cappuccino.getDescription().strip() +
      ": $" + repr(cappuccino.getTotalCost()))
cafeMocha = Espresso(SteamedMilk(Chocolate(
Whipped(Decaf(Mug())))))
print(cafeMocha.getDescription().strip() +
      ": $" + repr(cafeMocha.getTotalCost()))
| [
"[email protected]"
] | |
a7c8fa0bda79edadd701b585eff8e09a773467c6 | e7c3d2b1fd7702b950e31beed752dd5db2d127bd | /code/super_pandigital_numbers/sol_571.py | 50c76cee64069e830d981145a37c35c2cc3edff5 | [
"Apache-2.0"
] | permissive | Ved005/project-euler-solutions | bbadfc681f5ba4b5de7809c60eb313897d27acfd | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | refs/heads/master | 2021-09-25T08:58:32.797677 | 2018-10-20T05:40:58 | 2018-10-20T05:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py |
# -*- coding: utf-8 -*-
'''
File name: code\super_pandigital_numbers\sol_571.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #571 :: Super Pandigital Numbers
#
# For more information see:
# https://projecteuler.net/problem=571
# Problem Statement
'''
A positive number is pandigital in base b if it contains all digits from 0 to b - 1 at least once when written in base b.
A n-super-pandigital number is a number that is simultaneously pandigital in all bases from 2 to n inclusively.
For example 978 = 11110100102 = 11000203 = 331024 = 124035 is the smallest 5-super-pandigital number.
Similarly, 1093265784 is the smallest 10-super-pandigital number.
The sum of the 10 smallest 10-super-pandigital numbers is 20319792309.
What is the sum of the 10 smallest 12-super-pandigital numbers?
'''
# Solution
# Solution Approach
'''
'''
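# A minimal building block (added sketch, not a full solution): a base-b
# pandigitality test. A complete solver would generate candidates in
# increasing numeric order (e.g. permutations of the base-12 digits) and
# check the remaining bases 2..11 with a helper like this one.
def is_pandigital(n, base):
    seen = set()
    while n:
        seen.add(n % base)
        n //= base
    return len(seen) == base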
| [
"[email protected]"
] | |
086f15693af91521b68d827e7613c2ac26e02baf | 7f57c12349eb4046c40c48acb35b0f0a51a344f6 | /2015/PopulatingNextRightPointersInEachNode_v1.py | 3577626399488b0ca50d165ddf85bbb001892a21 | [] | no_license | everbird/leetcode-py | 0a1135952a93b93c02dcb9766a45e481337f1131 | b093920748012cddb77258b1900c6c177579bff8 | refs/heads/master | 2022-12-13T07:53:31.895212 | 2022-12-10T00:48:39 | 2022-12-10T00:48:39 | 11,116,752 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | #!/usr/bin/env python
# encoding: utf-8
# Definition for binary tree with next pointer.
class TreeLinkNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
def __repr__(self):
return '<{}>'.format(self.val)
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
self.dfs(root)
    def dfs(self, root):
        # Wire the `next` pointers of a perfect binary tree, top-down.
        if not root:
            return
        if root.left:
            # A left child's neighbour is always its sibling.
            root.left.next = root.right
        if root.next and root.right:
            # A right child's neighbour is the left child of the parent's
            # `next` node, i.e. the adjacent subtree on the same level.
            root.right.next = root.next.left
self.dfs(root.left)
self.dfs(root.right)
def levelorder(self, root):
queue = [root]
while queue:
n = queue.pop()
print n, n.val, n.next, '<<<'
if n.left:
queue = [n.left] + queue
if n.right:
queue = [n.right] + queue
if __name__ == '__main__':
s = Solution()
n1 = TreeLinkNode(1)
n2 = TreeLinkNode(2)
n3 = TreeLinkNode(3)
n4 = TreeLinkNode(4)
n5 = TreeLinkNode(5)
n6 = TreeLinkNode(6)
n7 = TreeLinkNode(7)
root = n1
n1.left = n2
n1.right = n3
n2.left = n4
n2.right = n5
n3.left = n6
n3.right = n7
s.connect(root)
s.levelorder(root)
| [
"[email protected]"
] | |
37a55e826ebb167071a7c6afe9b42c8b3264506b | b24e45267a8d01b7d3584d062ac9441b01fd7b35 | /Usuario/.history/views_20191023114840.py | 870eeb90df98b7537147ae14418728dfb2b3fb07 | [] | no_license | slalbertojesus/merixo-rest | 1707b198f31293ced38930a31ab524c0f9a6696c | 5c12790fd5bc7ec457baad07260ca26a8641785d | refs/heads/master | 2022-12-10T18:56:36.346159 | 2020-05-02T00:42:39 | 2020-05-02T00:42:39 | 212,175,889 | 0 | 0 | null | 2022-12-08T07:00:07 | 2019-10-01T18:56:45 | Python | UTF-8 | Python | false | false | 2,211 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.permissions import AllowAny
from .models import Usuario
from .serializers import UsuarioSerializer
SUCCESS = 'exito'
ERROR = 'error'
DELETE_SUCCESS = 'eliminado'
UPDATE_SUCCESS = 'actualizado'
CREATE_SUCCESS = 'creado'
@api_view(['GET', ])
def api_detail_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador = identificador)
    except Usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = UsuarioSerializer(usuario)
return Response(serializer.data)
@api_view(['PUT',])
def api_update_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador = identificador)
    except Usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'PUT':
serializer = UsuarioSerializer(usuario, data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
data[SUCCESS] = UPDATE_SUCCESS
return Response(data=data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE',])
def api_delete_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador=identificador)
    except Usuario.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'DELETE':
operation = usuario.delete()
data = {}
if operation:
data[SUCCESS] = DELETE_SUCCESS
return Response(data=data)
@api_view(['POST'])
@permission_classes([AllowAny])
def api_create_usuario_view(request, identificador):
try:
usuario = Usuario.objects.get(identificador = identificador)
    except Usuario.DoesNotExist:
        if request.method == 'POST':
            serializer = UsuarioSerializer(data=request.data)
data = {}
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
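# Example URL configuration (added sketch; route paths are assumptions):
#
#   from django.urls import path
#   from Usuario import views
#
#   urlpatterns = [
#       path('usuario/<str:identificador>/', views.api_detail_usuario_view),
#       path('usuario/<str:identificador>/update/', views.api_update_usuario_view),
#       path('usuario/<str:identificador>/delete/', views.api_delete_usuario_view),
#       path('usuario/<str:identificador>/create/', views.api_create_usuario_view),
#   ]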
| [
"[email protected]"
] | |
d2333889ef1fc70d5e7c0a79e6f3112aa752306a | 6fc84acaaf012f6cbbcb918390a4ed5508f84414 | /opalWebsrv/test.py | 5f5f02fee393637efbf17662b5ee5d476b2f476d | [] | no_license | missinglpf/MAS_finite_consenus | 43f03bdb2417c6da98cb5ff5a6b8b888ec1944b3 | a83e8709dd12e5965ef4a5b413d056a434dd1245 | refs/heads/master | 2020-08-01T03:42:44.747402 | 2018-06-25T06:01:10 | 2018-06-25T06:01:10 | 210,850,495 | 3 | 0 | null | 2019-09-25T13:20:32 | 2019-09-25T13:20:32 | null | UTF-8 | Python | false | false | 2,298 | py | #! /usr/bin/python
import struct
import socket
import urllib
import subprocess
import sys
import time
import os
import traceback
def portIsOpened(hostip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((hostip,port))
    sock.close()  # close the probe socket so repeated checks do not leak fds
    return result == 0
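# Wire format note (added): the '<hih{n}d' struct patterns used below are
# little-endian: a 16-bit int, a 32-bit int, a 16-bit int, then n IEEE-754
# doubles. In the server's replies the third field equals 8 * nbIn, i.e. the
# payload size in bytes.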
def fakeOpalCom(vals, the_format_in, the_format_out, hostip, port):
sock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
d4= struct.pack(the_format_out, *vals)
sent=sock.sendto(d4, (hostip, port))
print "Opal sends", vals
rawdata,server=sock.recvfrom(4096)
sock.close()
data = struct.unpack(the_format_in, rawdata)
print "Opal recvd", data
return data
def testsrv(http_port, opal_port, nbIn, nbOut):
print "Testing with a new set"
assert(not(portIsOpened('127.0.0.1',http_port)))
assert(not(portIsOpened('127.0.0.1',opal_port)))
p = subprocess.Popen([os.getcwd()+ "/opalWebSrv.py", "-s", "-I", str(nbIn), "-O", str(nbOut)], bufsize=1024, stdin=sys.stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
time.sleep(0.5)
the_format_in='<hih'+str(nbIn)+'d'
the_format_out='<hih'+str(nbOut)+'d'
HTTP_PORT=str(8000)
UDP_IP='127.0.0.1'
UDP_PORT=50000
vals=[1,2,3]+range(1,nbOut+1)
opalret = fakeOpalCom(vals, the_format_in, the_format_out, UDP_IP, UDP_PORT)
assert(opalret==tuple([1,0,8*nbIn]+[0 for i in range(nbIn)]))
f=urllib.urlopen('http://localhost:'+HTTP_PORT+'/asyncsrv/set?valin0=12.5&valin1=40.2')
print f.read()
f=urllib.urlopen('http://localhost:'+HTTP_PORT+'/asyncsrv/get?name0=valout0&name1=valout1')
ret=f.read()
print ret
assert(ret=='{"valout0": 1.0, "valout1": 2.0}')
vals=[1,2,3,10.]+range(2,nbOut+1)
opalret = fakeOpalCom(vals, the_format_in, the_format_out, UDP_IP, UDP_PORT)
assert(opalret==tuple([1,1,8*nbIn]+[12.5,40.2]+ [0 for i in range(nbIn-2)]))
f=urllib.urlopen('http://localhost:'+HTTP_PORT+'/asyncsrv/get?name0=valout0&name1=valout1')
assert(f.read()=='{"valout0": 10.0, "valout1": 2.0}')
except Exception as error:
p.kill()
traceback.print_exc()
raise(error)
p.kill()
params_list = [
{"http_port": 8000, "opal_port": 50000,"nbIn":16, "nbOut":16},
{"http_port": 8001, "opal_port": 50001,"nbIn":10, "nbOut":12}
]
for params in params_list:
testsrv(**params)
print "Testing succeeded"
| [
"[email protected]"
] | |
43dcac20edd103067c8fa3fce010b8162d077b2a | 552ba370742e346dbb1cf7c7bf4b99648a17979b | /tbx/services/blocks.py | cbd8282834c2d89bfbac3f75334fcd64d1e9a9a5 | [
"MIT"
] | permissive | arush15june/wagtail-torchbox | 73e5cdae81b524bd1ee9c563cdc8a7b5315a809e | c4d06e096c72bd8007975dc016133024f9d27fab | refs/heads/master | 2022-12-25T05:39:32.309635 | 2020-08-13T14:50:42 | 2020-08-13T14:50:42 | 299,591,277 | 0 | 0 | MIT | 2020-09-29T11:08:49 | 2020-09-29T11:08:48 | null | UTF-8 | Python | false | false | 3,242 | py | from wagtail.core.blocks import (CharBlock, ListBlock, PageChooserBlock,
RichTextBlock, StreamBlock, StructBlock,
TextBlock, URLBlock)
from wagtail.images.blocks import ImageChooserBlock
from tbx.core.blocks import PullQuoteBlock
class CaseStudyBlock(StructBlock):
title = CharBlock(required=True)
intro = TextBlock(required=True)
case_studies = ListBlock(StructBlock([
('page', PageChooserBlock('work.WorkPage')),
('title', CharBlock(required=False)),
('descriptive_title', CharBlock(required=False)),
('image', ImageChooserBlock(required=False)),
]))
class Meta:
template = 'blocks/services/case_study_block.html'
class HighlightBlock(StructBlock):
title = CharBlock(required=True)
intro = RichTextBlock(required=False)
highlights = ListBlock(TextBlock())
class Meta:
template = 'blocks/services/highlight_block.html'
class StepByStepBlock(StructBlock):
title = CharBlock(required=True)
intro = TextBlock(required=False)
steps = ListBlock(StructBlock([
('subtitle', CharBlock(required=False)),
('title', CharBlock(required=True)),
('icon', CharBlock(max_length=9000, required=True, help_text='Paste SVG code here')),
('description', RichTextBlock(required=True))
]))
class Meta:
template = 'blocks/services/step_by_step_block.html'
class PeopleBlock(StructBlock):
title = CharBlock(required=True)
intro = RichTextBlock(required=True)
people = ListBlock(PageChooserBlock())
class Meta:
template = 'blocks/services/people_block.html'
class FeaturedPagesBlock(StructBlock):
title = CharBlock()
pages = ListBlock(StructBlock([
('page', PageChooserBlock()),
('image', ImageChooserBlock()),
('text', TextBlock()),
('sub_text', CharBlock(max_length=100)),
]))
class Meta:
template = 'blocks/services/featured_pages_block.html'
class SignUpFormPageBlock(StructBlock):
page = PageChooserBlock('sign_up_form.SignUpFormPage')
def get_context(self, value, parent_context=None):
context = super(SignUpFormPageBlock, self).get_context(value, parent_context)
context['form'] = value['page'].sign_up_form_class()
return context
class Meta:
icon = 'doc-full'
template = 'blocks/services/sign_up_form_page_block.html'
class LogosBlock(StructBlock):
title = CharBlock()
intro = CharBlock()
logos = ListBlock(StructBlock((
('image', ImageChooserBlock()),
('link_page', PageChooserBlock(required=False)),
('link_external', URLBlock(required=False)),
)))
class Meta:
icon = 'site'
template = 'blocks/services/logos_block.html'
class ServicePageBlock(StreamBlock):
paragraph = RichTextBlock(icon="pilcrow")
case_studies = CaseStudyBlock()
highlights = HighlightBlock()
pull_quote = PullQuoteBlock(template='blocks/services/pull_quote_block.html')
process = StepByStepBlock()
people = PeopleBlock()
featured_pages = FeaturedPagesBlock()
sign_up_form_page = SignUpFormPageBlock()
logos = LogosBlock()
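# Example usage (added sketch; the page model is an assumption):
#
#   from wagtail.core.fields import StreamField
#
#   class ServicePage(Page):
#       streamfield = StreamField(ServicePageBlock())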
| [
"[email protected]"
] | |
d5e6beb44c4d3eabfbc1f90c7e6154546b5390be | 3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9 | /RecoMuon/TrackingTools/python/__init__.py | 46c2d8c095c7740e0d099c19bde145fc026b6c15 | [] | no_license | sextonkennedy/cmssw-ib | c2e85b5ffa1269505597025e55db4ffee896a6c3 | e04f4c26752e0775bd3cffd3a936b288ee7b0268 | HEAD | 2016-09-01T20:09:33.163593 | 2013-04-26T12:05:17 | 2013-04-29T16:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | #Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/RecoMuon/TrackingTools/',1)[0])+'/cfipython/slc6_amd64_gcc480/RecoMuon/TrackingTools')
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.