| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1-132 |
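The rows below follow this Stack-style per-file schema. As a minimal, hypothetical sketch (the dataset ID is a placeholder, not taken from this page), rows with this schema could be streamed and filtered with the `datasets` library:

```python
from datasets import load_dataset

# Placeholder dataset ID -- substitute the repo this card actually belongs to.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

# Keep small, permissively licensed, human-written Python files.
small_py = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_generated"]
    and row["length_bytes"] < 10_000
)

for row in small_py.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```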
7df9dcc7b35ce702c5fdf33e237c3bb866b1708a | afbaa5685bf737ec7d16fee2bab54ae13caf96f9 | /geekbang/core/ch17/Demo1.py | 98dd62e83056057241e556d48e785f0e1f247874 | []
| no_license | ykdsg/myPython | 9dcc9afe6f595e51b72257875d66ada1ba04bba6 | 77d2eaa2acb172664b632cc2720cef62dff8f235 | refs/heads/master | 2023-06-10T20:11:08.061075 | 2023-06-03T11:39:53 | 2023-06-03T11:39:53 | 10,655,956 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | def func(message):
print('got a message:{}'.format(message))
# assign the function to a variable
send_message = func
send_message('hello world')
def get_message(message):
return 'got a message:' + message
def root_call(func, message):
print(func(message))
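# Hypothetical usage (added, not in the original file): root_call receives the
# function object itself, so the call below prints 'got a message:hello world'.
root_call(get_message, 'hello world')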
| [
"[email protected]"
]
| |
93b24835b5b197d6dfa82c2fe29f8f644663b0b2 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/FairMOT/src/lib/datasets/dataset/jde.py | 3e7156e32fca35205bf1eac8143fcd1fc065e94c | [
"GPL-1.0-or-later",
"BSD-3-Clause",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 23,094 | py | # BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
import glob
import math
import os
import os.path as osp
import random
import time
from collections import OrderedDict
import cv2
import json
import numpy as np
import torch
import copy
from torch.utils.data import Dataset
from torchvision.transforms import transforms as T
from cython_bbox import bbox_overlaps as bbox_ious
from opts import opts
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.utils import xyxy2xywh, generate_anchors, xywh2xyxy, encode_delta
class LoadImages: # for inference
def __init__(self, path, img_size=(1088, 608)):
if os.path.isdir(path):
image_format = ['.jpg', '.jpeg', '.png', '.tif']
self.files = sorted(glob.glob('%s/*.*' % path))
self.files = list(filter(lambda x: os.path.splitext(x)[1].lower() in image_format, self.files))
elif os.path.isfile(path):
self.files = [path]
self.nF = len(self.files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
assert self.nF > 0, 'No images found in ' + path
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == self.nF:
raise StopIteration
img_path = self.files[self.count]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return img_path, img, img0
def __getitem__(self, idx):
idx = idx % self.nF
img_path = self.files[idx]
# Read image
img0 = cv2.imread(img_path) # BGR
assert img0 is not None, 'Failed to load ' + img_path
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
return img_path, img, img0
def __len__(self):
return self.nF # number of files
class LoadVideo: # for inference
def __init__(self, path, img_size=(1088, 608)):
self.cap = cv2.VideoCapture(path)
self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
self.vw = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.vh = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.vn = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
self.width = img_size[0]
self.height = img_size[1]
self.count = 0
self.w, self.h = 1920, 1080
        print('Length of the video: {:d} frames'.format(self.vn))
def get_size(self, vw, vh, dw, dh):
wa, ha = float(dw) / vw, float(dh) / vh
a = min(wa, ha)
return int(vw * a), int(vh * a)
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if self.count == len(self):
raise StopIteration
# Read image
res, img0 = self.cap.read() # BGR
assert img0 is not None, 'Failed to load frame {:d}'.format(self.count)
img0 = cv2.resize(img0, (self.w, self.h))
# Padded resize
img, _, _, _ = letterbox(img0, height=self.height, width=self.width)
# Normalize RGB
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img, dtype=np.float32)
img /= 255.0
# cv2.imwrite(img_path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return self.count, img, img0
def __len__(self):
return self.vn # number of files
class LoadImagesAndLabels: # for training
def __init__(self, path, img_size=(1088, 608), augment=False, transforms=None):
with open(path, 'r') as file:
self.img_files = file.readlines()
self.img_files = [x.replace('\n', '') for x in self.img_files]
self.img_files = list(filter(lambda x: len(x) > 0, self.img_files))
self.label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files]
self.nF = len(self.img_files) # number of image files
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
def __getitem__(self, files_index):
img_path = self.img_files[files_index]
label_path = self.label_files[files_index]
return self.get_data(img_path, label_path)
def get_data(self, img_path, label_path):
height = self.height
width = self.width
img = cv2.imread(img_path) # BGR
if img is None:
raise ValueError('File corrupt {}'.format(img_path))
augment_hsv = True
if self.augment and augment_hsv:
# SV augmentation by 50%
fraction = 0.50
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
S = img_hsv[:, :, 1].astype(np.float32)
V = img_hsv[:, :, 2].astype(np.float32)
a = (random.random() * 2 - 1) * fraction + 1
S *= a
if a > 1:
np.clip(S, a_min=0, a_max=255, out=S)
a = (random.random() * 2 - 1) * fraction + 1
V *= a
if a > 1:
np.clip(V, a_min=0, a_max=255, out=V)
img_hsv[:, :, 1] = S.astype(np.uint8)
img_hsv[:, :, 2] = V.astype(np.uint8)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)
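            # Note (added): with fraction = 0.50 the gain a is drawn from
            # [0.5, 1.5]; clipping back to [0, 255] is only needed when a > 1,
            # since scaling by a <= 1 cannot push S or V above the uint8 range.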
h, w, _ = img.shape
img, ratio, padw, padh = letterbox(img, height=height, width=width)
# Load labels
if os.path.isfile(label_path):
labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
# Normalized xywh to pixel xyxy format
labels = labels0.copy()
labels[:, 2] = ratio * w * (labels0[:, 2] - labels0[:, 4] / 2) + padw
labels[:, 3] = ratio * h * (labels0[:, 3] - labels0[:, 5] / 2) + padh
labels[:, 4] = ratio * w * (labels0[:, 2] + labels0[:, 4] / 2) + padw
labels[:, 5] = ratio * h * (labels0[:, 3] + labels0[:, 5] / 2) + padh
else:
labels = np.array([])
# Augment image and labels
if self.augment:
img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.50, 1.20))
plotFlag = False
if plotFlag:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.figure(figsize=(50, 50))
plt.imshow(img[:, :, ::-1])
plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-')
plt.axis('off')
plt.savefig('test.jpg')
time.sleep(10)
nL = len(labels)
if nL > 0:
# convert xyxy to xywh
labels[:, 2:6] = xyxy2xywh(labels[:, 2:6].copy()) # / height
labels[:, 2] /= width
labels[:, 3] /= height
labels[:, 4] /= width
labels[:, 5] /= height
if self.augment:
# random left-right flip
lr_flip = True
            if lr_flip and random.random() > 0.5:
img = np.fliplr(img)
if nL > 0:
labels[:, 2] = 1 - labels[:, 2]
img = np.ascontiguousarray(img[:, :, ::-1]) # BGR to RGB
if self.transforms is not None:
img = self.transforms(img)
return img, labels, img_path, (h, w)
def __len__(self):
return self.nF # number of batches
def letterbox(img, height=608, width=1088,
color=(127.5, 127.5, 127.5)): # resize a rectangular image to a padded rectangular
shape = img.shape[:2] # shape = [height, width]
ratio = min(float(height) / shape[0], float(width) / shape[1])
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio)) # new_shape = [width, height]
dw = (width - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded rectangular
return img, ratio, dw, dh
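# Example (added, assuming a 1920x1080 frame letterboxed to 1088x608):
# ratio = min(608/1080, 1088/1920) ~= 0.5630, so the image is resized to
# 1081x608 and dw = 3.5; round(dw - 0.1) / round(dw + 0.1) then pad 3 px on
# the left and 4 px on the right to reach the full 1088x608.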
def random_affine(img, targets=None, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
borderValue=(127.5, 127.5, 127.5)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
border = 0 # width of added border (optional)
height = img.shape[0]
width = img.shape[1]
# Rotation and Scale
R = np.eye(3)
a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
# a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
s = random.random() * (scale[1] - scale[0]) + scale[0]
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(img, M, dsize=(width, height), flags=cv2.INTER_LINEAR,
borderValue=borderValue) # BGR order borderValue
# Return warped points also
if targets is not None:
if len(targets) > 0:
n = targets.shape[0]
points = targets[:, 2:6].copy()
area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = points[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
#np.clip(xy[:, 0], 0, width, out=xy[:, 0])
#np.clip(xy[:, 2], 0, width, out=xy[:, 2])
#np.clip(xy[:, 1], 0, height, out=xy[:, 1])
#np.clip(xy[:, 3], 0, height, out=xy[:, 3])
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
targets = targets[i]
targets[:, 2:6] = xy[i]
targets = targets[targets[:, 2] < width]
targets = targets[targets[:, 4] > 0]
targets = targets[targets[:, 3] < height]
targets = targets[targets[:, 5] > 0]
return imw, targets, M
else:
return imw
def collate_fn(batch):
imgs, labels, paths, sizes = zip(*batch)
batch_size = len(labels)
imgs = torch.stack(imgs, 0)
max_box_len = max([l.shape[0] for l in labels])
labels = [torch.from_numpy(l) for l in labels]
filled_labels = torch.zeros(batch_size, max_box_len, 6)
labels_len = torch.zeros(batch_size)
for i in range(batch_size):
isize = labels[i].shape[0]
if len(labels[i]) > 0:
filled_labels[i, :isize, :] = labels[i]
labels_len[i] = isize
return imgs, filled_labels, paths, sizes, labels_len.unsqueeze(1)
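# Example (added): for a batch of 4 images with 3, 7, 0 and 5 boxes each,
# max_box_len = 7, filled_labels has shape (4, 7, 6) (zero-padded), and
# labels_len.unsqueeze(1) has shape (4, 1) holding [3, 7, 0, 5].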
class JointDataset(LoadImagesAndLabels): # for training
default_resolution = [1088, 608]
mean = None
std = None
num_classes = 1
def __init__(self, opt, root, paths, img_size=(1088, 608), augment=False, transforms=None):
self.opt = opt
dataset_names = paths.keys()
self.img_files = OrderedDict()
self.label_files = OrderedDict()
self.tid_num = OrderedDict()
self.tid_start_index = OrderedDict()
self.num_classes = 1
for ds, path in paths.items():
with open(path, 'r') as file:
self.img_files[ds] = file.readlines()
self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
self.label_files[ds] = [
x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files[ds]]
for ds, label_paths in self.label_files.items():
max_index = -1
for lp in label_paths:
lb = np.loadtxt(lp)
if len(lb) < 1:
continue
if len(lb.shape) < 2:
img_max = lb[1]
else:
img_max = np.max(lb[:, 1])
if img_max > max_index:
max_index = img_max
self.tid_num[ds] = max_index + 1
last_index = 0
for i, (k, v) in enumerate(self.tid_num.items()):
self.tid_start_index[k] = last_index
last_index += v
self.nID = int(last_index + 1)
self.nds = [len(x) for x in self.img_files.values()]
self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
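        # Example (added): if the per-dataset image counts are nds = [100, 50, 80],
        # then cds = [0, 100, 150] -- the global start offset of each dataset,
        # which __getitem__ below uses to map a flat index to (dataset, local index).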
self.nF = sum(self.nds)
self.width = img_size[0]
self.height = img_size[1]
self.max_objs = opt.K
self.augment = augment
self.transforms = transforms
print('=' * 80)
print('dataset summary')
print(self.tid_num)
print('total # identities:', self.nID)
print('start index')
print(self.tid_start_index)
print('=' * 80)
def __getitem__(self, files_index):
for i, c in enumerate(self.cds):
if files_index >= c:
ds = list(self.label_files.keys())[i]
start_index = c
img_path = self.img_files[ds][files_index - start_index]
label_path = self.label_files[ds][files_index - start_index]
imgs, labels, img_path, (input_h, input_w) = self.get_data(img_path, label_path)
for i, _ in enumerate(labels):
if labels[i, 1] > -1:
labels[i, 1] += self.tid_start_index[ds]
output_h = imgs.shape[1] // self.opt.down_ratio
output_w = imgs.shape[2] // self.opt.down_ratio
num_classes = self.num_classes
num_objs = labels.shape[0]
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
if self.opt.ltrb:
wh = np.zeros((self.max_objs, 4), dtype=np.float32)
else:
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs, ), dtype=np.int64)
reg_mask = np.zeros((self.max_objs, ), dtype=np.uint8)
ids = np.zeros((self.max_objs, ), dtype=np.int64)
bbox_xys = np.zeros((self.max_objs, 4), dtype=np.float32)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else draw_umich_gaussian
for k in range(num_objs):
label = labels[k]
bbox = label[2:]
cls_id = int(label[0])
bbox[[0, 2]] = bbox[[0, 2]] * output_w
bbox[[1, 3]] = bbox[[1, 3]] * output_h
bbox_amodal = copy.deepcopy(bbox)
bbox_amodal[0] = bbox_amodal[0] - bbox_amodal[2] / 2.
bbox_amodal[1] = bbox_amodal[1] - bbox_amodal[3] / 2.
bbox_amodal[2] = bbox_amodal[0] + bbox_amodal[2]
bbox_amodal[3] = bbox_amodal[1] + bbox_amodal[3]
bbox[0] = np.clip(bbox[0], 0, output_w - 1)
bbox[1] = np.clip(bbox[1], 0, output_h - 1)
h = bbox[3]
w = bbox[2]
bbox_xy = copy.deepcopy(bbox)
bbox_xy[0] = bbox_xy[0] - bbox_xy[2] / 2
bbox_xy[1] = bbox_xy[1] - bbox_xy[3] / 2
bbox_xy[2] = bbox_xy[0] + bbox_xy[2]
bbox_xy[3] = bbox_xy[1] + bbox_xy[3]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
radius = 6 if self.opt.mse_loss else radius
#radius = max(1, int(radius)) if self.opt.mse_loss else radius
ct = np.array(
[bbox[0], bbox[1]], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(hm[cls_id], ct_int, radius)
if self.opt.ltrb:
wh[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]
else:
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
ids[k] = label[1]
bbox_xys[k] = bbox_xy
ret = {'input': imgs, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh, 'reg': reg, 'ids': ids, 'bbox': bbox_xys}
return ret
class DetDataset(LoadImagesAndLabels): # for training
def __init__(self, root, paths, img_size=(1088, 608), augment=False, transforms=None):
dataset_names = paths.keys()
self.img_files = OrderedDict()
self.label_files = OrderedDict()
self.tid_num = OrderedDict()
self.tid_start_index = OrderedDict()
for ds, path in paths.items():
with open(path, 'r') as file:
self.img_files[ds] = file.readlines()
self.img_files[ds] = [osp.join(root, x.strip()) for x in self.img_files[ds]]
self.img_files[ds] = list(filter(lambda x: len(x) > 0, self.img_files[ds]))
self.label_files[ds] = [
x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt')
for x in self.img_files[ds]]
for ds, label_paths in self.label_files.items():
max_index = -1
for lp in label_paths:
lb = np.loadtxt(lp)
if len(lb) < 1:
continue
if len(lb.shape) < 2:
img_max = lb[1]
else:
img_max = np.max(lb[:, 1])
if img_max > max_index:
max_index = img_max
self.tid_num[ds] = max_index + 1
last_index = 0
for i, (k, v) in enumerate(self.tid_num.items()):
self.tid_start_index[k] = last_index
last_index += v
self.nID = int(last_index + 1)
self.nds = [len(x) for x in self.img_files.values()]
self.cds = [sum(self.nds[:i]) for i in range(len(self.nds))]
self.nF = sum(self.nds)
self.width = img_size[0]
self.height = img_size[1]
self.augment = augment
self.transforms = transforms
print('=' * 80)
print('dataset summary')
print(self.tid_num)
print('total # identities:', self.nID)
print('start index')
print(self.tid_start_index)
print('=' * 80)
def __getitem__(self, files_index):
for i, c in enumerate(self.cds):
if files_index >= c:
ds = list(self.label_files.keys())[i]
start_index = c
img_path = self.img_files[ds][files_index - start_index]
label_path = self.label_files[ds][files_index - start_index]
if os.path.isfile(label_path):
labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
imgs, labels, img_path, (h, w) = self.get_data(img_path, label_path)
for i, _ in enumerate(labels):
if labels[i, 1] > -1:
labels[i, 1] += self.tid_start_index[ds]
return imgs, labels0, img_path, (h, w)
| [
"[email protected]"
]
| |
f205af874bfd19c543b990383520db2dc51ce796 | 297c30dc0120c2920c86c8257bc530db1bb1114a | /Application/Application_Pandas/panda_DataFrame_Test_1.py | e7ad323507d330adb254dc3f79e9571b82741412 | []
| no_license | whoiszyc/Repo_python | 76e248b350a3f109c53bfb1f3abe59b903a98e46 | bdc3f39883aed5b2e85624525c662c00f60d35e3 | refs/heads/master | 2021-07-06T04:48:04.973680 | 2020-07-27T03:55:58 | 2020-07-27T03:55:58 | 139,599,645 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | import numpy as np
import pandas as pd
# use string as key
mydict0 = [{'a': 10, 'b': 20, 'c': 30, 'd': 40},{'a': 100, 'b': 200, 'c': 300, 'd': 400},{'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
# use int number as key
mydict1 = [{0: 10, 1: 20, 2: 30, 3: 40},{0: 100, 1: 200, 2: 300, 3: 400},{0: 1000, 1: 2000, 2: 3000, 3: 4000 }]
# test the first data frame
df=pd.DataFrame(mydict0)
print(df)
# general information of the data frame
print('Total number of data entries in the data frame is {}'.format(df.size))
print('Dimension of data entries in the data frame is {} by {}'.format(df.shape[0], df.shape[1]))
# get entry by location
print('Second column of the data frame')
print(df.iloc[:,1])
print('Second to third column of the data frame')
print(df.iloc[:, 1:3])  # slice stop is exclusive, so 1:3 selects columns 2-3
print('Second to third row of the data frame')
print(df.iloc[1:3, :])
# get entry by key
print('The column that key equals to "a" is:')
print(df['a'])
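# With mydict0 above, this prints the Series 10, 100, 1000 (Name: a, dtype: int64).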
# save data frame to csv
df.to_csv('test_1.csv')
df.to_excel('test_1.xls')
# test the second data frame
# get entry by key
df=pd.DataFrame(mydict1)
print(df)
print('The column that key equals to 0 is:')
print(df[0])
# save data frame to csv
df.to_csv('test_2.csv', encoding='utf-8')
df.to_excel('test_2.xls') | [
"[email protected]"
]
| |
476e8f2d422cf9b9348b2be998dbf5b010ef7f87 | 620b58e17d4851e43bd1270cabc8c26f43629a7b | /lib/candy_editor/AssetEditor/EngineAsset/ShaderAsset.py | fd3756f724e956b29e72c34bc709b54269fc049f | [
"MIT"
]
| permissive | lihaochen910/Candy | 78b9862cf06748b365b6fb35ac23f0e7a00ab558 | d12cb964768459c22f30c22531d3e1734901e814 | refs/heads/master | 2022-11-25T19:12:34.533828 | 2021-11-07T16:11:07 | 2021-11-07T16:11:07 | 141,284,960 | 1 | 1 | NOASSERTION | 2022-11-22T09:20:08 | 2018-07-17T12:12:02 | Lua | UTF-8 | Python | false | false | 2,551 | py | import os.path
import logging
import subprocess
import shutil
import json
from candy_editor.core import *
from candy_editor.moai.MOAIRuntime import _CANDY
##----------------------------------------------------------------##
class ShaderAssetManager ( AssetManager ):
def getName ( self ):
return 'asset_manager.shader'
def getMetaType ( self ):
return 'script'
def acceptAssetFile ( self, filePath ):
if not os.path.isfile ( filePath ): return False
name, ext = os.path.splitext ( filePath )
if not ext in [ '.shader' ]: return False
return True
def importAsset ( self, node, reload = False ):
node.assetType = 'shader'
node.setObjectFile ( 'def', node.getFilePath () )
return True
# def onRegister ( self ):
#check builtin shaders
# def editAsset (self, node):
# editor = app.getModule ( 'framebuffer_editor' )
# if not editor:
# return alertMessage ( 'Editor not load', 'shader Editor not found!' )
# editor.openAsset ( node )
##----------------------------------------------------------------##
class ShaderAssetCreator ( AssetCreator ):
def getAssetType ( self ):
return 'shader'
def getLabel ( self ):
return 'Shader'
def createAsset ( self, name, contextNode, assetType ):
ext = '.shader'
filename = name + ext
if contextNode.isType ( 'folder' ):
nodepath = contextNode.getChildPath ( filename )
else:
nodepath = contextNode.getSiblingPath ( filename )
fullpath = AssetLibrary.get ().getAbsPath ( nodepath )
_CANDY.createEmptySerialization ( fullpath, 'candy.Shader' )
return nodepath
class ShaderScriptAssetManager ( AssetManager ):
def getName ( self ):
return 'asset_manager.shader_script'
def getMetaType ( self ):
return 'script'
def acceptAssetFile ( self, filePath ):
if not os.path.isfile ( filePath ): return False
name, ext = os.path.splitext ( filePath )
if not ext in [ '.vsh', '.fsh' ]: return False
return True
def importAsset ( self, node, reload = False ):
name, ext = os.path.splitext ( node.getFilePath () )
if ext == '.vsh':
node.assetType = 'vsh'
elif ext == '.fsh':
node.assetType = 'fsh'
node.setObjectFile ( 'src', node.getFilePath () )
return True
##----------------------------------------------------------------##
ShaderAssetManager ().register ()
ShaderAssetCreator ().register ()
ShaderScriptAssetManager ().register ()
AssetLibrary.get ().setAssetIcon ( 'shader', 'shader' )
AssetLibrary.get ().setAssetIcon ( 'vsh', 'text-red' )
AssetLibrary.get ().setAssetIcon ( 'fsh', 'text-yellow' )
| [
"[email protected]"
]
| |
619e237a4faf772e892747ff7c3e8b05f3a6b00e | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/Dongola/wafi/account_custom_wafi/account_custom.py | f19ef70a021724ff48985fea969d72415668236c | []
| no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,557 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2011-2012 NCTR (<http://www.nctr.sd>).
#
##############################################################################
from openerp.osv import osv, fields, orm
from datetime import datetime
from datetime import timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
from openerp.tools.translate import _
#----------------------------------------------------------
# Account Account (Inherit)
#----------------------------------------------------------
class account_account(osv.Model):
_inherit = "account.account"
_columns = {
'ceiling': fields.float('Ceiling'),
'min_ceiling': fields.float('Minimum Ceiling'),
'payment_ceiling': fields.float('Payment Ceiling'),
}
class account_journal(osv.Model):
_inherit = 'account.journal'
def check_ceiling(self, cr, uid, ids, context=None):
journal = self.browse(cr, uid, ids, context=context)[0]
recipient_partners = []
for user in journal.user_id:
recipient_partners.append(
(4, user.partner_id.id)
)
ceil_msg = []
msg = ""
flag= False
if journal.default_debit_account_id.balance >= journal.default_debit_account_id.ceiling :
            ceil_msg.append(_(" Maximum ceiling %s for %s ' %s ' has been exceeded") % (journal.default_debit_account_id.ceiling, journal.default_debit_account_id.name, journal.default_debit_account_id.balance))
flag = True
if journal.default_credit_account_id.balance >= journal.default_credit_account_id.ceiling:
            ceil_msg.append(_("\nMaximum ceiling %s for %s ' %s ' has been exceeded") % (journal.default_credit_account_id.ceiling, journal.default_credit_account_id.name, journal.default_credit_account_id.balance))
flag = True
#raise orm.except_orm(_('Warning !'), _('(Maximum ceiling %s for %s " %s " has been exceed')%(account.ceiling,account.name,account.balance))
if journal.default_debit_account_id.balance <= journal.default_debit_account_id.min_ceiling:
            ceil_msg.append(_("\nMinimum ceiling %s for %s ' %s ' has been exceeded") % (journal.default_debit_account_id.min_ceiling, journal.default_debit_account_id.name, journal.default_debit_account_id.balance))
flag = True
if journal.default_credit_account_id.balance <= journal.default_credit_account_id.min_ceiling:
            ceil_msg.append(_("\nMinimum ceiling %s for %s ' %s ' has been exceeded") % (journal.default_credit_account_id.min_ceiling, journal.default_credit_account_id.name, journal.default_credit_account_id.balance))
flag = True
        if flag:
            msg = ','.join(ceil_msg)
post_vars = {'subject': "notification about ceiling",
'body': msg,
'partner_ids': recipient_partners,} # Where "4" adds the ID to the list
# of followers and "3" is the partner ID
thread_pool = self.pool.get('mail.thread')
thread_pool.message_post(
cr, uid, False,
type="notification",
subtype="mt_comment",
context=context,
**post_vars)
            raise orm.except_orm(_('Warning !'), msg)
return True
class account_period(osv.Model):
_inherit = "account.period"
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
"""
        Inherit name_search to display only open periods,
        unless closed periods are requested by passing closed=True in the context.
@return: super name_search
"""
if args is None:
args = []
if context is None:
context = {}
if not context.get('closed',False):
args.append(('state', '=', 'draft'))
return super(account_period, self).name_search(cr, uid, name, args=args, operator='ilike', context=context, limit=limit)
def action_draft(self, cr, uid, ids, context=None):
"""
Inherit action_draft method to prevent reopening statement
@return: super action_draft
"""
if self.search(cr, uid, [('id', 'in', ids), ('fiscalyear_id.state', '!=', 'draft')], context=context):
raise osv.except_osv(_('Warning!'), _('You can not re-open a period which belongs to closed fiscal year'))
return super(account_period, self).action_draft(cr, uid, ids, context)
class account_fiscalyear(osv.Model):
"""
    Inherit the fiscal year model to modify its states according to government requirements.
"""
_inherit = "account.fiscalyear"
_columns = {
'state': fields.selection([('draft', 'Open'), ('locked_temp', 'Locked Temporarily'),
('open_ext_period' , 'Open Extension Period'),
('close_ext_period', 'Close Extension Period'),
('first_lock', 'First Lock'), ('done', 'Final Lock')],
'State',size=64, readonly=True),
}
def action_locked_temporarily(self, cr, uid, ids, context=None):
"""
        Check whether all of the fiscal year's periods are closed.
@return: change record state to 'locked temporarily' or raise exception
"""
if self.pool.get('account.period').search(cr, uid, [('state','=','draft'),('fiscalyear_id','in',ids)], context=context):
raise orm.except_orm(_('Error'), _('You Must Close Open Periods First'))
return self.write(cr, uid, ids, {'state': 'locked_temp'}, context=context)
def action_close_extension_period(self, cr, uid, ids, context=None):
"""
@return Change record state to 'Close Extension Period'.
"""
return self.write(cr, uid, ids, {'state': 'close_ext_period'}, context=context)
def action_first_lock(self, cr, uid, ids, context=None):
"""
@return: Change record state to 'First Lock'.
"""
self.write(cr, uid, ids, {'state': 'first_lock'}, context=context)
return {
'id': 'account_custom.action_account_pl_close',
'context': {'default_fiscalyear_id': ids}
}
#----------------------------------------------------------
# Account Move Line(Inherit)
#----------------------------------------------------------
class account_move_line(osv.Model):
_inherit = 'account.move.line'
def _query_get(self, cr, uid, obj='l', context=None):
"""
use in account arabic reports and chart of account to balance the credit and debit
@return: string of the where statement
"""
fiscalyear_obj = self.pool.get('account.fiscalyear')
company_obj = self.pool.get('res.company')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
initial_bal = context.get('initial_bal', False)
fiscalyear_ids = []
if context is None:
context = {}
#Only Valid Move Lines (BALANCE MOVES)
query = obj+".state <> 'draft' "
#Filter by Company
if context.get('company_id', False):
query += " AND " +obj+".company_id = %s" % context['company_id']
if context.get('unit_type', False):
if context.get('unit_type', False) == 'ministry':
company_ids = company_obj.search(cr,uid, [ ('type', '=', 'other')])
elif context.get('unit_type', False) == 'locality':
company_ids = company_obj.search(cr,uid, [ ('type', '=', 'loc_sub')])
else:
types=('other','loc_sub')
company_ids = company_obj.search(cr,uid, [ ('type', 'in', types)])
company_ids2 = ','.join(map(str, company_ids))
query += " AND " +obj+".company_id in (%s)" % company_ids2
#Filter by Move State
if context.get('state', False):
if type(context['state']) in (list,tuple) :
query += " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE state !='reversed') "
# query += " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE state IN ("+st+")) "
elif context['state'].lower() != 'all':
query += " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state != '"+context['state']+"') "
#Get Selected FiscalYear
if not context.get('fiscalyear', False):
if context.get('all_fiscalyear', False):
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('company_id', 'in', company_ids)])
else:
if context.get('date_from', False):
#fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
date_from=context.get('date_from', False)
date_from2 = datetime.strptime( date_from, '%Y-%m-%d')
f_code=date_from2.year
fiscalyear_ids = fiscalyear_obj.search(cr,uid, [('company_id', 'in', company_ids), ('code', '=', f_code)])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('company_id', 'in', company_ids)])
else:
#make the context['fiscalyear'] in one dimention list or ids
fiscalyear_ids = type(context['fiscalyear']) is list and context['fiscalyear'] or [context['fiscalyear']]
fiscalyear_clause = (','.join(map(str, fiscalyear_ids)))
#Duration Filters
if context.get('date_from', False) and context.get('date_to', False):
if initial_bal:
init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), ('fiscalyear_id', 'in', fiscalyear_ids)])
date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start
query += " AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) ) " % (fiscalyear_clause,)
date_from=context['date_from']
if context.get('date_from', False)==date_start:
date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)
date_from= date_1+timedelta(days=1)
query += " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date <='%s') " %(context['date_from'],)
query += " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date <'%s') " %(date_from,)
else:
if context['type']=='statement':
query += " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '%s' AND date <= '%s') "%(context['date_from'],context['date_to'])
elif context['type']=='balance':
init_period = fiscalperiod_obj.search(cr, uid, [('special', '=', True), ('fiscalyear_id', 'in', fiscalyear_ids)])
date_start = fiscalperiod_obj.browse(cr, uid, init_period[0], context=context).date_start
date_from=context['date_from']
if context.get('date_from', False)==date_start:
date_1 = datetime.strptime(date_from, DEFAULT_SERVER_DATE_FORMAT)
date_from= date_1+timedelta(days=1)
query += " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date > '%s' AND date <= '%s') "%(date_from,context['date_to'])
query += " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '%s' AND date <= '%s') "%(context['date_from'],context['date_to'])
if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False) and context.get('type', False)!='statement':
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id), ('fiscalyear_id', 'in', fiscalyear_ids)], order='date_start')
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period[0], first_period[first_period.index(context['period_from'])-1])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods', False) and context.get('type', False)!='statement':
period_ids = ','.join(map(str, context['periods']))
query += " AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) " % (fiscalyear_clause, period_ids)
else:
sub_query = ""
if not context.get('date_from', False) or context.get('period_from', False):
special = initial_bal and (not context.get('date_from', False))
sub_query = "AND special = %s"%(special,)
query += " AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) %s) " % (fiscalyear_clause, sub_query)
#Filter by Journal
#situation_journal = set(journal_obj.search(cr, uid, [('type', '=', 'situation')], context=context))
#selected_journals = set(context.get('journal_ids', False) or journal_obj.search(cr, uid, [], context=context))
#TEST: situation journal when opening balance & not
#journal_ids = context.get('selected_journals', False) and selected_journals or \
# (initial_bal and list(selected_journals | situation_journal) or list(selected_journals-situation_journal))
# if journal_ids:
# query += ' AND '+obj+'.journal_id IN (%s) ' % ','.join(map(str, journal_ids))
#if not context.get('selected_journals', False) and not initial_bal and situation_journal:
#query += ' AND '+obj+'.journal_id NOT IN (%s) ' % ','.join(map(str, situation_journal))
#Filter by chart of Account
if context.get('chart_account_id', False):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query += ' AND '+obj+'.account_id IN (%s) ' % ','.join(map(str, child_ids))
#Filter by Move Line Statement
if 'statement_id' in context:
if context.get('statement_id', False):
query += ' AND '+obj+'.statement_id IN (%s) ' % ','.join(map(str, context['statement_id']))
else:
query += ' AND '+obj+'.statement_id IS NULL '
#Filter by Move Line
if context.get('move_line_ids', False):
query += ' AND '+obj+'.id IN (%s) ' % ','.join(map(str, context['move_line_ids']))
#Filter by Analytic Account Type
if context.get('analytic_display', False):
query += ' AND '+obj+".analytic_account_id IN (SELECT id FROM account_analytic_account WHERE analytic_type=%s) " % (context.get('analytic_display', False).id,)
return query
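    # Example (added, hypothetical context): {'company_id': 1, 'state': 'posted'}
    # produces a clause that starts as
    #   l.state <> 'draft'  AND l.company_id = 1
    #   AND l.move_id IN (SELECT id FROM account_move WHERE account_move.state != 'posted')
    # before the fiscal year / period filters are appended.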
class account_voucher(osv.osv):
"""
Customize account voucher.
"""
_inherit='account.voucher'
_columns = {
'invoice_id': fields.many2one('account.invoice','Invoice'),
}
class res_company(osv.Model):
"""
Inherit company model to add restricted payment scheduler as configurable option
"""
_inherit = "res.company"
_columns = {
'interval_number': fields.integer('Interval Number'),
}
_defaults = {
'interval_number': 2,
}
#----------------------------------------------------------
# Account Config (Inherit)
#----------------------------------------------------------
class account_config_settings(osv.osv_memory):
"""
Inherit account configuration setting model to define and display
    the restricted payment scheduler field's value
"""
_inherit = 'account.config.settings'
_columns = {
'interval_number': fields.related('company_id', 'interval_number', type='integer', string='Interval Number'),
}
def onchange_company_id(self, cr, uid, ids, company_id, context=None):
"""
Update dict. of values to set interval_number depend on company_id
@param company_id: user company_id
@return: dict. of new values
"""
# update related fields
values =super(account_config_settings,self).onchange_company_id(cr, uid, ids, company_id, context=context).get('value',{})
if company_id:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
values.update({
'interval_number': company.interval_number
})
return {'value': values}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
]
| |
9840040315f9fdf4d3c22de338e2ace8d80de7a0 | fad702beb35d587278010e570a923bc84a4dda4a | /code/pyorg/scripts/tests/uni_2nd_speedup.py | 13519cc3f8dc80adcdd125cde94a260a0bee67ba | [
"Apache-2.0"
]
| permissive | anmartinezs/pyseg_system | f7769ec3dcaf243895ec1cf13ac6e1da1ab2a92a | 1370bfedae2ad5e6cdd1dc08395eb9e95b4a8596 | refs/heads/master | 2023-02-23T06:23:10.087737 | 2023-01-30T13:24:36 | 2023-01-30T13:24:36 | 227,147,753 | 15 | 4 | NOASSERTION | 2023-02-10T17:18:20 | 2019-12-10T14:58:22 | C | UTF-8 | Python | false | false | 7,837 | py | """
Measures the speed-up of computing univariate 2nd order models and simulating CSRV instances
"""
################# Package import
import os
import sys
import math
import time
import numpy as np
import multiprocessing as mp
from scipy.optimize import curve_fit
from pyorg.surf.model import ModelCSRV, gen_tlist
from pyorg.surf.utils import disperse_io
from matplotlib import pyplot as plt, rcParams
plt.switch_backend('agg')
###### Global variables
__author__ = 'Antonio Martinez-Sanchez'
########################################################################################
# PARAMETERS
########################################################################################
try:
root_path = sys.argv[1]
except IndexError:
root_path = os.path.split(os.path.abspath(__file__))[0] + '/../../../tests'
out_dir = root_path + '/results'
# Synthetic data generation variables
sdat_surf = root_path + '/../pyorg/surf/test/in/sph_rad_5_surf.vtp'
sdat_tomo_shape = (500, 500, 100)
sdat_n_tomos = 5
sdat_n_sims = None # 20
sdat_n_part_tomo = 600 # 200
# Analysis variables
ana_npr_rg = [1, 2, 4, 8, 16, 24, 32, 36] # [1, 2, 4, 16] # It must start with 1
ana_rad_rg = np.arange(4, 250, 1) # np.arange(4, 180, 3)
ana_shell_thick = None
ana_fmm = False # True
# Plotting settings
rcParams['axes.labelsize'] = 14
rcParams['xtick.labelsize'] = 14
rcParams['ytick.labelsize'] = 14
rcParams['patch.linewidth'] = 2
########################################################################################
# HELPING FUNCTIONS
########################################################################################
def gen_rect_voi_array(shape):
"""
Generates a rectangular array VOI
:param shape: 3-tuple with the length of the three rectangle sides
:return: a binary ndarray object
"""
seg = np.zeros(shape=np.asarray(shape) + 1, dtype=bool)
seg[1:shape[0], 1:shape[1], 1:shape[2]] = True
return seg
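# Example (added): shape=(500, 500, 100) allocates a (501, 501, 101) boolean
# array whose block seg[1:500, 1:500, 1:100] is True, leaving a one-voxel
# False border around the whole VOI.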
def amdahls(x, p):
"""
    Computes Amdahl's law speed-up
:param x: is the speedup of the part of the task that benefits from improved system resources
:param p: is the proportion of execution time that the part benefiting from improved resources originally occupied
:return: the computed speed-up
"""
return 1. / (1. - p + p/x)
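# Example (added): with a parallel fraction p = 0.95, x = 16 processes give
# 1 / (0.05 + 0.95 / 16) ~= 9.14x -- well below the ideal 16x, which is why
# the fitted speed-up curves flatten as the process count grows.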
########################################################################################
# MAIN ROUTINE
########################################################################################
########## Print initial message
print('Test for measuring univariate 2nd order and simulations computation speed-up.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('\tSynthetic data generations settings: ')
print('\t\t-Particle surface path: ' + str(sdat_surf))
print('\t\t-Tomogram shape: ' + str(sdat_tomo_shape))
print('\t\t-Number of tomograms: ' + str(sdat_n_tomos))
if sdat_n_sims is None:
print('\t\t-Number of simulations per tomogram are set to the number of processess.')
else:
print('\t\t-Number of simulations per tomogram: ' + str(sdat_n_sims))
print('\t\t-Number of particles per tomogram: ' + str(sdat_n_part_tomo))
print('\tAnalysis settings: ')
print('\t\t-Number of parallel processes to check: ' + str(ana_npr_rg))
print('\t\t-Scale samplings array: ' + str(ana_rad_rg))
if ana_shell_thick is None:
print('\t\t-Functions L is computed.')
else:
print('\t\t-Function O is computed with shell thickness: ' + str(ana_shell_thick))
if ana_fmm:
print('\t\t-Geodesic metric.')
else:
print('\t\t-Euclidean metric.')
print('')
######### Main process
print('Main Routine: ')
print('\t-Initialization...')
voi = gen_rect_voi_array(sdat_tomo_shape)
part = disperse_io.load_poly(sdat_surf)
model_csrv = ModelCSRV()
ltomos_csrv = gen_tlist(sdat_n_tomos, sdat_n_part_tomo, model_csrv, voi, sdat_surf, mode_emb='center',
                        npr=max(ana_npr_rg))
cu_i = 1. / float(sdat_n_tomos * sdat_n_part_tomo)
cpus = mp.cpu_count()
print('\t\t+CPUs found: ' + str(cpus))
# Loop for the of processors
print('\t-Measurements loops: ')
comp_times = np.zeros(shape=len(ana_npr_rg), dtype=np.float32)
sim_times = np.zeros(shape=len(ana_npr_rg), dtype=np.float32)
for i, npr in enumerate(ana_npr_rg):
print('\t\t+Number of processes: ' + str(npr))
# Computations loop
comp_time, sim_time = 0, 0
for tkey in ltomos_csrv.get_tomo_fname_list():
hold_time = time.time()
hold_tomo = ltomos_csrv.get_tomo_by_key(tkey)
hold_tomo.compute_uni_2nd_order(ana_rad_rg, thick=None, border=True, conv_iter=None, max_iter=None, fmm=ana_fmm,
npr=npr)
comp_time += (time.time() - hold_time)
if sdat_n_sims is None:
hold_n_sims = npr
else:
hold_n_sims = sdat_n_sims
cu_sim_i = 1. / float(sdat_n_tomos * sdat_n_part_tomo * hold_n_sims)
hold_time = time.time()
hold_sim = hold_tomo.simulate_uni_2nd_order(hold_n_sims, model_csrv, part, 'center', ana_rad_rg, thick=None,
border=True, conv_iter=None, max_iter=None, fmm=ana_fmm,
npr=npr)
sim_time += (time.time() - hold_time)
comp_times[i], sim_times[i] = comp_time * cu_i, sim_time * cu_sim_i
print('\t\t\t*Computation time per c.u.: ' + str(comp_times[i]) + ' [secs]')
print('\t\t\t*Computation time per c.u. and null-model simulations time: ' + str(sim_times[i]) + ' [secs]')
print('\tPlotting: ')
# plt.figure()
# plt.xlabel('# processes')
# plt.ylabel('Time/c.u. [s]')
# plt.plot(ana_npr_rg, comp_times, linewidth=2.0, linestyle='-', color='b', label='C')
# plt.plot(ana_npr_rg, sim_times, linewidth=2.0, linestyle='-', color='g', label='C+S')
# plt.tight_layout()
# plt.legend(loc=0)
# if out_dir is not None:
# out_fig_times = out_dir + '/times.png'
# print '\t\t-Storing the time figure in: ' + out_fig_times
# plt.savefig(out_fig_times)
# else:
# plt.show(block=True)
# plt.close()
# Speed up fitting:
processes = np.asarray(ana_npr_rg, dtype=float)
processes_ex = np.logspace(0, np.log2(cpus), num=50, base=2)
sup_comp = comp_times[0] / comp_times
sup_sim = sim_times[0] / sim_times
popt_comp, pcov_comp = curve_fit(amdahls, processes, sup_comp)
popt_sim, pcov_sim = curve_fit(amdahls, processes, sup_sim)
sup_comp_f = amdahls(processes_ex, popt_comp)
sup_sim_f = amdahls(processes_ex, popt_sim)
fig, ax1 = plt.subplots()
ax1.set_xlabel('# processes')
ax1.set_ylabel('Time/c.u. [s]')
# ax1.set_xlim((1, processes_ex.max()))
ax1.plot(ana_npr_rg, comp_times, linewidth=2.0, linestyle='--', color='b', label='C Time')
ax1.plot(ana_npr_rg, sim_times, linewidth=2.0, linestyle='--', color='g', label='C&S Time')
ax2 = ax1.twinx()
ax2.set_ylabel('Speedup')
# plt.plot(processes_ex, processes_ex, linewidth=1.0, linestyle='--', color='k', label='IDEAL')
# plt.plot((16, 16), (0, 16), linewidth=1.0, linestyle='-.', color='k')
# plt.plot((36, 36), (0, 36), linewidth=1.0, linestyle='-.', color='k')
ax2.plot(processes, sup_comp, linewidth=4.0, linestyle='-', marker='*', color='b', label='C Speedup')
# ax2.plot(processes_ex, sup_comp_f, linewidth=2.0, linestyle='-', color='b', label='C Speedup')
ax2.plot(processes, sup_sim, linewidth=4.0, linestyle='-', marker='s', color='g', label='C&S Speedup')
# ax2.plot(processes_ex, sup_sim_f, linewidth=2.0, linestyle='-', color='g', label='C&S Speedup')
# ax2.set_ylim((1, processes_ex.max()))
fig.tight_layout()
# fig.legend(loc=9)
if out_dir is not None:
out_fig_speed = out_dir + '/speed_up_time.png'
print('\t\t-Storing the time figure in: ' + out_fig_speed)
plt.savefig(out_fig_speed)
else:
plt.show(block=True)
plt.close()
print('Terminated. (' + time.strftime("%c") + ')')
| [
"[email protected]"
]
| |
b0d451dea5d617604a2cb9d1c05eab2bd487e4d5 | bae75bf1de75fb1b76e19b0d32c778e566de570a | /smodels-database/8TeV/CMS/CMS-EXO-12-026/validation/TRHadUM1_2EqMassAx.py | 096085bd68122b2c206bfd2aa0ce12b8264addfb | []
| no_license | andlessa/RDM | 78ae5cbadda1875c24e1bb726096b05c61627249 | ac6b242871894fee492e089d378806c2c2e7aad8 | refs/heads/master | 2023-08-16T00:47:14.415434 | 2021-09-21T20:54:25 | 2021-09-21T20:54:25 | 228,639,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,099 | py | validationData = [{'slhafile': 'TRHadUM1_1000_1000.slha', 'error': 'no results', 'axes': {'x': 1000.0, 'y': 1000.0}}, {'slhafile': 'TRHadUM1_1020_1020.slha', 'error': 'no results', 'axes': {'x': 1020.0, 'y': 1020.0}}, {'slhafile': 'TRHadUM1_1040_1040.slha', 'error': 'no results', 'axes': {'x': 1040.0, 'y': 1040.0}}, {'slhafile': 'TRHadUM1_1060_1060.slha', 'error': 'no results', 'axes': {'x': 1060.0, 'y': 1060.0}}, {'slhafile': 'TRHadUM1_1080_1080.slha', 'error': 'no results', 'axes': {'x': 1080.0, 'y': 1080.0}}, {'slhafile': 'TRHadUM1_1100_1100.slha', 'error': 'no results', 'axes': {'x': 1100.0, 'y': 1100.0}}, {'slhafile': 'TRHadUM1_1120_1120.slha', 'error': 'no results', 'axes': {'x': 1120.0, 'y': 1120.0}}, {'slhafile': 'TRHadUM1_1140_1140.slha', 'error': 'no results', 'axes': {'x': 1140.0, 'y': 1140.0}}, {'slhafile': 'TRHadUM1_1160_1160.slha', 'error': 'no results', 'axes': {'x': 1160.0, 'y': 1160.0}}, {'slhafile': 'TRHadUM1_1180_1180.slha', 'error': 'no results', 'axes': {'x': 1180.0, 'y': 1180.0}}, {'slhafile': 'TRHadUM1_1200_1200.slha', 'error': 'no results', 'axes': {'x': 1200.0, 'y': 1200.0}}, {'slhafile': 'TRHadUM1_1220_1220.slha', 'error': 'no results', 'axes': {'x': 1220.0, 'y': 1220.0}}, {'slhafile': 'TRHadUM1_1240_1240.slha', 'error': 'no results', 'axes': {'x': 1240.0, 'y': 1240.0}}, {'slhafile': 'TRHadUM1_1260_1260.slha', 'error': 'no results', 'axes': {'x': 1260.0, 'y': 1260.0}}, {'slhafile': 'TRHadUM1_1280_1280.slha', 'error': 'no results', 'axes': {'x': 1280.0, 'y': 1280.0}}, {'slhafile': 'TRHadUM1_1300_1300.slha', 'error': 'no results', 'axes': {'x': 1300.0, 'y': 1300.0}}, {'slhafile': 'TRHadUM1_1320_1320.slha', 'error': 'no results', 'axes': {'x': 1320.0, 'y': 1320.0}}, {'slhafile': 'TRHadUM1_1340_1340.slha', 'error': 'no results', 'axes': {'x': 1340.0, 'y': 1340.0}}, {'slhafile': 'TRHadUM1_1360_1360.slha', 'error': 'no results', 'axes': {'x': 1360.0, 'y': 1360.0}}, {'slhafile': 'TRHadUM1_1380_1380.slha', 'error': 'no results', 'axes': {'x': 1380.0, 'y': 1380.0}}, {'slhafile': 'TRHadUM1_1400_1400.slha', 'error': 'no results', 'axes': {'x': 1400.0, 'y': 1400.0}}, {'slhafile': 'TRHadUM1_1420_1420.slha', 'error': 'no results', 'axes': {'x': 1420.0, 'y': 1420.0}}, {'slhafile': 'TRHadUM1_1440_1440.slha', 'error': 'no results', 'axes': {'x': 1440.0, 'y': 1440.0}}, {'slhafile': 'TRHadUM1_1460_1460.slha', 'error': 'no results', 'axes': {'x': 1460.0, 'y': 1460.0}}, {'slhafile': 'TRHadUM1_1480_1480.slha', 'error': 'no results', 'axes': {'x': 1480.0, 'y': 1480.0}}, {'slhafile': 'TRHadUM1_1500_1500.slha', 'error': 'no results', 'axes': {'x': 1500.0, 'y': 1500.0}}, {'slhafile': 'TRHadUM1_1550_1550.slha', 'error': 'no results', 'axes': {'x': 1550.0, 'y': 1550.0}}, {'slhafile': 'TRHadUM1_1600_1600.slha', 'error': 'no results', 'axes': {'x': 1600.0, 'y': 1600.0}}, {'slhafile': 'TRHadUM1_1650_1650.slha', 'error': 'no results', 'axes': {'x': 1650.0, 'y': 1650.0}}, {'slhafile': 'TRHadUM1_1700_1700.slha', 'error': 'no results', 'axes': {'x': 1700.0, 'y': 1700.0}}, {'slhafile': 'TRHadUM1_1750_1750.slha', 'error': 'no results', 'axes': {'x': 1750.0, 'y': 1750.0}}, {'slhafile': 'TRHadUM1_1800_1800.slha', 'error': 'no results', 'axes': {'x': 1800.0, 'y': 1800.0}}, {'slhafile': 
'TRHadUM1_1850_1850.slha', 'error': 'no results', 'axes': {'x': 1850.0, 'y': 1850.0}}, {'slhafile': 'TRHadUM1_1900_1900.slha', 'error': 'no results', 'axes': {'x': 1900.0, 'y': 1900.0}}, {'slhafile': 'TRHadUM1_1950_1950.slha', 'error': 'no results', 'axes': {'x': 1950.0, 'y': 1950.0}}, {'slhafile': 'TRHadUM1_400_400.slha', 'axes': {'x': 400.0}, 't': 0.12478996423574594, 'signal': 356.83, 'UL': 3.9098280556377634, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_420_420.slha', 'axes': {'x': 420.0}, 't': 0.12478996423574594, 'signal': 262.683, 'UL': 3.5613225806451614, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_440_440.slha', 'axes': {'x': 440.0}, 't': 0.12478996423574594, 'signal': 195.812, 'UL': 3.2128171056525603, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_460_460.slha', 'axes': {'x': 460.0}, 't': 0.12478996423574594, 'signal': 147.49200000000002, 'UL': 2.8643116306599588, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_480_480.slha', 'axes': {'x': 480.0}, 't': 0.12478996423574594, 'signal': 112.24099999999999, 'UL': 2.515806155667357, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_500_500.slha', 'axes': {'x': 500.0}, 't': 0.12478996423574594, 'signal': 85.5847, 'UL': 2.1673006806747557, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_520_520.slha', 'axes': {'x': 520.0}, 't': 0.12478996423574594, 'signal': 66.0189, 'UL': 1.9066133516148691, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_540_540.slha', 'axes': {'x': 540.0}, 't': 0.12478996423574594, 'signal': 51.174699999999994, 'UL': 1.6489229189518586, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_560_560.slha', 'axes': {'x': 560.0}, 't': 0.12478996423574594, 'signal': 39.9591, 'UL': 1.3912324862888483, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_580_580.slha', 'axes': {'x': 580.0}, 't': 0.12478996423574594, 'signal': 31.3654, 'UL': 1.133542053625838, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_600_600.slha', 'axes': {'x': 600.0}, 't': 0.12478996423574594, 'signal': 24.8009, 'UL': 0.8877338919241085, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_620_620.slha', 'axes': {'x': 620.0}, 't': 0.12478996423574594, 'signal': 19.6331, 'UL': 0.900095072017483, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_640_640.slha', 'axes': {'x': 640.0}, 't': 0.12478996423574594, 'signal': 15.5809, 'UL': 0.9124562521108572, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_660_660.slha', 'axes': {'x': 660.0}, 't': 0.12478996423574594, 'signal': 12.539299999999999, 'UL': 0.9248174322042316, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_680_680.slha', 'axes': {'x': 680.0}, 't': 0.12478996423574594, 'signal': 10.0516, 'UL': 0.937178612297606, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_700_700.slha', 'axes': {'x': 700.0}, 't': 0.12478996423574594, 'signal': 8.1141, 'UL': 0.9494650238379022, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_720_720.slha', 'axes': {'x': 720.0}, 't': 0.12478996423574594, 'signal': 6.56729, 'UL': 0.9547053893524037, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_740_740.slha', 'axes': {'x': 740.0}, 't': 0.12478996423574594, 'signal': 5.3260499999999995, 
'UL': 0.959945754866905, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_760_760.slha', 'axes': {'x': 760.0}, 't': 0.12478996423574594, 'signal': 4.336880000000001, 'UL': 0.9651861203814065, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_780_780.slha', 'axes': {'x': 780.0}, 't': 0.12478996423574594, 'signal': 3.5421099999999996, 'UL': 0.9704264858959079, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_800_800.slha', 'axes': {'x': 800.0}, 't': 0.12478996423574594, 'signal': 2.89588, 'UL': 0.9756668514104093, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_820_820.slha', 'axes': {'x': 820.0}, 't': 0.12478996423574594, 'signal': 2.37168, 'UL': 0.9964521451776649, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_840_840.slha', 'axes': {'x': 840.0}, 't': 0.12478996423574594, 'signal': 1.9517200000000001, 'UL': 1.017611536040609, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_860_860.slha', 'axes': {'x': 860.0}, 't': 0.12478996423574594, 'signal': 1.60403, 'UL': 1.0387709269035534, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_880_880.slha', 'axes': {'x': 880.0}, 't': 0.12478996423574594, 'signal': 1.32077, 'UL': 1.0599303177664976, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_900_900.slha', 'axes': {'x': 900.0}, 't': 0.12478996423574594, 'signal': 1.09501, 'UL': 1.0817074977325407, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_920_920.slha', 'axes': {'x': 920.0}, 't': 0.12478996423574594, 'signal': 0.907494, 'UL': 1.1148627935100273, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_940_940.slha', 'axes': {'x': 940.0}, 't': 0.12478996423574594, 'signal': 0.753768, 'UL': 1.1480180892875138, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_960_960.slha', 'axes': {'x': 960.0}, 't': 0.12478996423574594, 'signal': 0.626876, 'UL': 1.1811733850650004, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}, {'slhafile': 'TRHadUM1_980_980.slha', 'axes': {'x': 980.0}, 't': 0.12478996423574594, 'signal': 0.5224949999999999, 'UL': 1.214328680842487, 'condition': 0.0, 'dataset': None, 'kfactor': 1.0}] | [
"[email protected]"
]
| |
7a28f24d0a6faf49ea00304d8ca51cfb2d5b84ef | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /6_tree/经典题/后序dfs统计信息/换根dp/hard/abc-233-G - Vertex Deletion-每个点是否在树的最大匹配中.py | 284d716c8e41ab42dbe6165859649c030080a298 | []
| no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | # abc-233-G - Vertex Deletion-每个点是否在树的最大匹配中
# https://atcoder.jp/contests/abc223/tasks/abc223_g  (ABC223 G; the "abc-233" in the filename is a typo)
# Given a tree.
# Taking each vertex i as the root, delete all edges incident to the root;
# !we want the maximum matching of what remains to equal that of the original tree.
# Count how many such roots there are.
# !Answer: exactly the vertices not needed by a maximum matching of the (bipartite) tree.
# https://yukicoder.me/problems/2085
# Bipartite-graph game:
# Alice and Bob play on the tree.
# The first player places a piece; the second player places a piece on an adjacent vertex.
# They keep alternating placements, and whoever cannot place a piece loses.
# !Does the first player always win? => the first player wins iff the starting
# !vertex is not in a maximum matching of the bipartite graph.
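# Quick sanity check, assuming the 0-indexed vertices produced by the "-1"
# shift below: on the path 0-1-2 the rerooted dp is [0, 1, 0] -- each endpoint
# can be avoided by some maximum matching, so the answer printed is 2.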
from Rerooting import Rerooting
if __name__ == "__main__":
    E = int  # whether the current vertex is matched in the maximum matching of its subtree; 0: not matched, 1: matched
def e(root: int) -> E:
return 0
def op(childRes1: E, childRes2: E) -> E:
return childRes1 | childRes2
def composition(fromRes: E, parent: int, cur: int, direction: int) -> E:
"""direction: 0: cur -> parent, 1: parent -> cur"""
        return fromRes ^ 1  # if the child is matched the parent is not, and vice versa
n = int(input())
edges = []
for _ in range(n - 1):
u, v = map(int, input().split())
edges.append((u - 1, v - 1))
R = Rerooting(n)
for u, v in edges:
R.addEdge(u, v)
dp = R.rerooting(e=e, op=op, composition=composition, root=0)
    print(dp.count(0))  # number of vertices not covered by the maximum matching
| [
"[email protected]"
]
| |
acb2fc903d2a0616fd16767c00059ce86cc7baa7 | 0116bfbdff160b028b18040df9b59d99d4a824e4 | /social/migrations/0011_question_user_name.py | 3ce91059651fb603746d5d812d40a3346826f1af | []
| no_license | Subhash1998/social-welfare | d9cd2897154f2da0afd9484fe33be7f8cf1a0390 | d2e59d511481fcb33a45c0d6d65ad1e97070f0b4 | refs/heads/master | 2022-12-14T15:49:23.851170 | 2018-06-02T03:36:41 | 2018-06-02T03:36:41 | 125,677,783 | 3 | 0 | null | 2022-11-22T02:05:53 | 2018-03-17T23:39:24 | Python | UTF-8 | Python | false | false | 471 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-17 07:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('social', '0010_remove_question_user_name'),
]
operations = [
migrations.AddField(
model_name='question',
name='user_name',
field=models.CharField(blank=True, max_length=100),
),
]
| [
"[email protected]"
]
| |
33a71e71ff9018b19823a1c3481dabfbf256ef91 | f3360b809d7e8e26c8904365b5e4df0dca69225d | /userprofile/migrations/0005_catatanmodal_parent_id.py | ea8293c76b529176e1c90697977c68a4d2c09e6b | []
| no_license | cursecan/epayment | 0bcd272a6479847ad60507daf2cf74ee95002924 | be9df7034261fa9f9eaafb157309b4955b793cfb | refs/heads/master | 2020-03-15T05:52:34.556971 | 2018-07-30T12:01:21 | 2018-07-30T12:01:21 | 131,996,100 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # Generated by Django 2.0.4 on 2018-05-27 10:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0004_catatanmodal'),
]
operations = [
migrations.AddField(
model_name='catatanmodal',
name='parent_id',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='userprofile.CatatanModal'),
),
]
| [
"[email protected]"
]
| |
1c8d7e664a52297cfd690bdbac1717ee6262c187 | a50386f9b6c0cc2c0789a84c3acedfd33a4eaf0f | /CursoOpenCVcomDaniel/esqueleto/test.py | 18cc78f446fc6a4fd48d48f195ac38f37d13e0f9 | []
| no_license | juanengml/RemoteWebCamWithOpenCV | 66b19e174d724b2584a7f1d07c5d9ee698ff0809 | caa4a0b52be1ac66bcb1b401485fb427746c31ef | refs/heads/master | 2020-03-23T02:49:56.853490 | 2019-07-23T01:15:33 | 2019-07-23T01:15:33 | 140,994,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from skimage.morphology import skeletonize
from skimage import io
import numpy as np

img = io.imread('01.jpg', as_gray=True)  # load the image as grayscale floats in [0, 1]
ske = (skeletonize(img > 0.5) * 255).astype(np.uint8)  # binarize, thin to a skeleton, rescale to 0/255
print(ske)
| [
"[email protected]"
]
| |
fd383de4e6b89efa815286ba137152c793ddc76d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03261/s807506520.py | 99d06e27e156d735988991a822f6233b91e07407 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | N=int(input())
H=input()
word=[]
word.append(H)
for i in range(N-1):
S=input()
if S in word:
print('No')
exit()
else:
if H[-1]==S[0]:
H=S
word.append(H)
else:
print('No')
exit()
print('Yes') | [
"[email protected]"
]
| |
1b85fe33cfdc32745e6d5c918558932acb47d4f5 | 11334e46d3575968de5062c7b0e8578af228265b | /systests/camera/pygame_test.py | 120a3509ea609b01136be1606066bffab85cc28a | []
| no_license | slowrunner/Carl | 99262f16eaf6d53423778448dee5e5186c2aaa1e | 1a3cfb16701b9a3798cd950e653506774c2df25e | refs/heads/master | 2023-06-08T05:55:55.338828 | 2023-06-04T02:39:18 | 2023-06-04T02:39:18 | 145,750,624 | 19 | 2 | null | 2023-06-04T02:39:20 | 2018-08-22T18:59:34 | Roff | UTF-8 | Python | false | false | 539 | py | #!/usr/bin/env python3
# file: pygame_test.py
from PIL import Image
import numpy as np
from time import sleep
import pygame
pygame.init()
clk = pygame.time.Clock()
im = np.array(Image.open('images/motion_capture.jpg'))
win = pygame.display.set_mode((im.shape[1],im.shape[0]))
img = pygame.image.load('images/motion_capture.jpg')
while True:
try:
win.blit(img,(0,0))
pygame.display.flip()
clk.tick(3)
sleep(5)
exit(0)
except KeyboardInterrupt:
print("\nExiting")
break
| [
"[email protected]"
]
| |
3f7f623f96a3f56eb9b05f7047dbb6a29c218a46 | e82b3c6000fe8e4639d6606f9d3605e75a8a5d5c | /src/secondaires/crafting/actions/copier_attributs.py | 27bd23a3f37b98662a48b66862e659a7ce3fc12c | [
"BSD-3-Clause"
]
| permissive | vincent-lg/tsunami | 804585da7bd1d159ad2c784b39f801963ca73c03 | 7e93bff08cdf891352efba587e89c40f3b4a2301 | refs/heads/master | 2022-08-02T15:54:35.480614 | 2022-07-18T12:06:41 | 2022-07-18T12:06:41 | 25,605,543 | 5 | 2 | BSD-3-Clause | 2019-06-05T15:59:08 | 2014-10-22T21:34:10 | Python | UTF-8 | Python | false | false | 3,190 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action copier_attributs."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Copie les attributs d'un objet vers un autre."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.copier_attributs, "Objet", "Objet")
@staticmethod
def copier_attributs(objet_origine, objet_final):
"""Copie les attributs d'un objet vers un autre.
Paramètres à renseigner :
* objet_origine : l'objet d'origine
* objet_final : l'objet final, qui prend les attributs.
Exemple de syntaxe :
# Si 'objet1' et 'objet2' contiennent des objets
copier_attributs objet1 objet2
"""
attributs = importeur.crafting.configuration[
objet_origine.prototype].attributs
attributs = attributs and attributs.copy() or {}
autres = importeur.crafting.configuration[objet_origine].attributs
if autres:
attributs.update(autres)
if importeur.crafting.configuration[objet_final].attributs is None:
importeur.crafting.configuration[objet_final].attributs = {}
importeur.crafting.configuration[objet_final].attributs.update(
attributs)
for attribut, valeur in attributs.items():
objet_final.nom_singulier = objet_final.nom_singulier.replace(
"${}".format(attribut), valeur)
objet_final.nom_pluriel = objet_final.nom_pluriel.replace(
"${}".format(attribut), valeur)
| [
"[email protected]"
]
| |
d4ed80ef99e75147cf94d38123db90192153fcf0 | 3011e024b5f31d6c747a2bd4a143bb6a0eeb1e1d | /chapter03/template_verbatim_demo/template_verbatim_demo/settings.py | 018ef380d882164bd7cc916c0025ee8d08898a97 | []
| no_license | yingkun1/python-django | a3084460a83682f3e0848d5b40c881f93961ecc2 | 08c9ed3771eb245ee9ff66f67cf28730d2675bbe | refs/heads/master | 2022-12-11T12:33:20.788524 | 2019-06-12T09:30:59 | 2019-06-12T09:30:59 | 189,977,625 | 1 | 0 | null | 2022-11-22T02:57:01 | 2019-06-03T09:43:30 | Python | UTF-8 | Python | false | false | 3,178 | py | """
Django settings for template_verbatim_demo project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'my9sj^v@hs777+5b4$yqf(&qz64v%!^ac^uxq(^r3gk@=*w(0u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'template_verbatim_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'template_verbatim_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
e716fd35012c41b8f17b79eb65b1b6350ab5ff87 | 454cc84a262d9787b2796d230eeb16c01049a32f | /HearthStone2/HearthStone/utils/game.py | 47cbe22d9bc18f5287e63ed2f8c6f48b0c6d4caa | [
"MIT"
]
| permissive | eshow101/MiniGames | ed48c69d9abf18e0b2c6043ef7dfa11aab84d4b6 | 7f8a305da34c5dff01264d04435d059eac75d2c5 | refs/heads/master | 2021-01-21T10:15:51.220454 | 2017-08-02T06:34:27 | 2017-08-02T06:34:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
def order_of_play(objects):
"""Sort objects by the order of play.
:param objects: Entities or events or triggers.
:return: List of objects, sorted by order of play.
"""
return sorted(objects, key=lambda o: o.oop)
__all__ = [
'order_of_play',
]
| [
"[email protected]"
]
| |
45c09aea9335f5db475007980f21ad82a92b325c | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop256_pad20_jit15/pyr_2s/L5/step09_2side_L5.py | 911ac8bcba56944a322c333383786b0b43f9ece3 | []
| no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,358 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                           ### path of the currently running step10_b.py
code_exe_path_element = code_exe_path.split("\\")                    ### split the path so we can find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2")              ### find the level of kong_model2
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### locate the kong_model2 dir
import sys                                                           ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_to_M
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
use_what_gen_op = I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale= 0) )
use_what_train_step = Train_step_I_to_M( Tight_crop(pad_size=20, resize=(256, 256), jit_scale=15) )
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
pyramid_1side_1__2side_1 = [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
pyramid_1side_2__2side_1 = [2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2]
pyramid_1side_2__2side_2 = [2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2]
pyramid_1side_3__2side_1 = [2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2]
pyramid_1side_3__2side_2 = [2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2]
pyramid_1side_3__2side_3 = [2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2]
pyramid_1side_4__2side_1 = [2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2]
pyramid_1side_4__2side_2 = [2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2]
pyramid_1side_4__2side_3 = [2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2]
pyramid_1side_4__2side_4 = [2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2]
pyramid_1side_5__2side_1 = [2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2]
pyramid_1side_5__2side_2 = [2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2]
pyramid_1side_5__2side_3 = [2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2]
pyramid_1side_5__2side_4 = [2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2]
pyramid_1side_5__2side_5 = [2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_1 = [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_6__2side_2 = [2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_6__2side_3 = [2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_6__2side_4 = [2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_6__2side_5 = [2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_6 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
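# (Assumption, not confirmed by this file:) each 11-entry list appears to be read
# symmetrically over the U-Net levels, encoder -> bottleneck -> decoder, with the
# value giving how many conv blocks that level gets (0 = level skipped) for the
# given 1side/2side variant.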
#########################################################################################
ch032_pyramid_1side_1__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_5__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_6__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=5, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_1side_4__2side_2
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
print(use_model.model_describe)
| [
"[email protected]"
]
| |
c5725453489b3861d7623c96fabc0d93440d6c8b | f1a5a3ead11f18b3945ebf9c3522916918a2f740 | /income/migrations/0008_incometarget.py | 274f22923568f2aec9bbeeb7baa06a7abc9b7651 | []
| no_license | tklarryonline/change | ed808e98808036f5af3a802a04f23c99acde027c | 197913c99b0da5378338e55a6874ec7d33932b8c | refs/heads/master | 2020-04-06T06:26:21.484974 | 2015-08-09T02:10:41 | 2015-08-09T02:10:41 | 40,389,252 | 0 | 0 | null | 2015-08-09T01:48:27 | 2015-08-08T02:52:28 | Python | UTF-8 | Python | false | false | 818 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('income', '0007_auto_20150808_2021'),
]
operations = [
migrations.CreateModel(
name='IncomeTarget',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('number', models.FloatField(verbose_name='Income')),
('year', models.IntegerField()),
('month', models.IntegerField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
]
| |
dbc9ee71f1754f08c7012841be58db6ac9c327b6 | 86939fc693c8d62b7bc3fdaee7df6a8dfc29740d | /booking/migrations/0008_auto_20190502_1145.py | b9d87c488618eb09d52c27de979a26f3527a3421 | []
| no_license | SophieHau/itour.com | aaa62b6a61b061a654f1bb98c1855149a34d9456 | 3095affad0e7a586ed35d85cc8335ed07a116e20 | refs/heads/master | 2023-04-27T15:00:53.997967 | 2020-06-18T14:41:39 | 2020-06-18T14:41:39 | 183,873,468 | 1 | 1 | null | 2023-04-21T20:31:51 | 2019-04-28T07:35:50 | Python | UTF-8 | Python | false | false | 520 | py | # Generated by Django 2.2 on 2019-05-02 08:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('booking', '0007_auto_20190430_1541'),
]
operations = [
migrations.AlterField(
model_name='booking',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
]
| |
a774470a7e2db13264d325c1976ae8ec6dee8d00 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/keyvault/azure-keyvault-certificates/samples/contacts_async.py | e507aa27bc710c57bb4ac3716e6bdee9382a26e0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,461 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.keyvault.certificates.aio import CertificateClient
from azure.keyvault.certificates import CertificateContact
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-certificates and azure-identity packages (pip install these)
#
# 3. Set up your environment to use azure-identity's DefaultAzureCredential. For more information about how to configure
# the DefaultAzureCredential, refer to https://aka.ms/azsdk/python/identity/docs#azure.identity.DefaultAzureCredential
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates basic CRUD operations for the certificate contacts for a key vault.
#
# 1. Create contacts (set_contacts)
#
# 2. Get contacts (get_contacts)
#
# 3. Delete contacts (delete_contacts)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a certificate client that will be used to call the service.
# Here we use the DefaultAzureCredential, but any azure-identity credential can be used.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = CertificateClient(vault_url=VAULT_URL, credential=credential)
contact_list = [
CertificateContact(email="[email protected]", name="John Doe", phone="1111111111"),
CertificateContact(email="[email protected]", name="John Doe2", phone="2222222222"),
]
# Creates and sets the certificate contacts for this key vault.
await client.set_contacts(contact_list)
# Gets the certificate contacts for this key vault.
contacts = await client.get_contacts()
for contact in contacts:
print(contact.name)
print(contact.email)
print(contact.phone)
# Deletes all of the certificate contacts for this key vault.
await client.delete_contacts()
print("\nrun_sample done")
await credential.close()
await client.close()
if __name__ == "__main__":
asyncio.run(run_sample())
| [
"[email protected]"
]
| |
e4a740bebf2c959a89efd176ed7534f2332b6440 | eb621dcc2b51d32bfa9178cc219d7dd6acf4864f | /setup.py | 6c918e8def05eb0a3a784100a6b8d681fe67d028 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-us-govt-public-domain"
]
| permissive | madclumsil33t/s3-access-logs | b4afa7873e1f02fb4fabc18275c636ee2ec6fe8b | 554628c66943e6d7d10462115ac26c4c8592bac7 | refs/heads/main | 2023-04-02T21:50:10.240911 | 2021-04-01T22:22:55 | 2021-04-01T22:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | # -*- coding: utf-8 -*-
try: # for pip >= 10
from pip._internal.req import parse_requirements
try:
from pip._internal.download import PipSession
pip_session = PipSession()
except ImportError: # for pip >= 20
from pip._internal.network.session import PipSession
pip_session = PipSession()
except ImportError: # for pip <= 9.0.3
try:
from pip.req import parse_requirements
from pip.download import PipSession
pip_session = PipSession()
except ImportError: # backup in case of further pip changes
pip_session = "hack"
from distutils.core import setup
# Parse requirements.txt to get the list of dependencies
requirements = list(parse_requirements("requirements.txt", session=pip_session))
try:
install_requires = [str(ir.req) for ir in requirements]
except Exception:
install_requires = [str(ir.requirement) for ir in requirements]
setup(
name="s3-access-logs",
version="0.0.1",
description="A system to make s3 access logs easier to search.",
long_description=open("README.md").read(),
classifiers=["Development Status :: 5 - Production/Stable"],
download_url="https://github.com/deptofdefense/s3-access-logs/zipball/master",
python_requires=">=3.7",
keywords="python aws s3 logs",
author="Chris Gilmer",
author_email="[email protected]",
url="https://github.com/deptofdefense/s3-access-logs",
packages=[
"s3access",
],
package_data={
"": ["*.*"], # noqa
"": ["static/*.*"], # noqa
"static": ["*.*"],
},
include_package_data=True,
install_requires=install_requires,
zip_safe=False,
)
| [
"[email protected]"
]
| |
7d5798ca9c2cb5010694620dd173ee271b66782a | f3d38d0e1d50234ce5f17948361a50090ea8cddf | /프로그래머스/level 1/[1차] 다트 게임.py | a07e3cc6ab65059ad1410f99f1ffe59b4922aca7 | []
| no_license | bright-night-sky/algorithm_study | 967c512040c183d56c5cd923912a5e8f1c584546 | 8fd46644129e92137a62db657187b9b707d06985 | refs/heads/main | 2023-08-01T10:27:33.857897 | 2021-10-04T14:36:21 | 2021-10-04T14:36:21 | 323,322,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | # https://programmers.co.kr/learn/courses/30/lessons/17682
import re

dartResult = input()
# tokenize into (number, bonus, option) triples; the original .split('SDT')
# only split on the literal substring "SDT", not on each of the letters
darts = re.findall(r'(\d+)([SDT])([*#]?)', dartResult)
print(darts)
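
# A minimal scoring sketch, assuming the usual rules of this puzzle: S/D/T
# raise the number to the 1st/2nd/3rd power, '*' doubles this throw and the
# previous one, and '#' negates this throw.
scores = []
for num, bonus, option in darts:
    score = int(num) ** {'S': 1, 'D': 2, 'T': 3}[bonus]
    if option == '*':
        score *= 2
        if scores:
            scores[-1] *= 2
    elif option == '#':
        score = -score
    scores.append(score)
print(sum(scores))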
| [
"[email protected]"
]
| |
b724491c6e2ce4e2cae30f3f74b9034c8ed8adc3 | 09efb7c148e82c22ce6cc7a17b5140aa03aa6e55 | /env/lib/python3.6/site-packages/pandas/compat/numpy/__init__.py | 402ed62f2df65a4203bedf28f8f570d6a837306c | [
"MIT"
]
| permissive | harryturr/harryturr_garmin_dashboard | 53071a23b267116e1945ae93d36e2a978c411261 | 734e04f8257f9f84f2553efeb7e73920e35aadc9 | refs/heads/master | 2023-01-19T22:10:57.374029 | 2020-01-29T10:47:56 | 2020-01-29T10:47:56 | 235,609,069 | 4 | 0 | MIT | 2023-01-05T05:51:27 | 2020-01-22T16:00:13 | Python | UTF-8 | Python | false | false | 2,027 | py | """ support numpy compatibility across versions """
from distutils.version import LooseVersion
import re
import numpy as np
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
_np_version_under1p14 = _nlv < LooseVersion("1.14")
_np_version_under1p15 = _nlv < LooseVersion("1.15")
_np_version_under1p16 = _nlv < LooseVersion("1.16")
_np_version_under1p17 = _nlv < LooseVersion("1.17")
_np_version_under1p18 = _nlv < LooseVersion("1.18")
_is_numpy_dev = ".dev" in str(_nlv)
if _nlv < "1.13.3":
raise ImportError(
"this version of pandas is incompatible with "
"numpy < 1.13.3\n"
"your numpy version is {0}.\n"
"Please upgrade numpy to >= 1.13.3 to use "
"this pandas version".format(_np_version)
)
_tz_regex = re.compile("[+-]0000$")
def tz_replacer(s):
if isinstance(s, str):
if s.endswith("Z"):
s = s[:-1]
elif _tz_regex.search(s):
s = s[:-5]
return s
def np_datetime64_compat(s, *args, **kwargs):
"""
provide compat for construction of strings to numpy datetime64's with
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs)
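# e.g. np_datetime64_compat("2015-01-01 09:00:00Z") builds the same value as
# np.datetime64("2015-01-01 09:00:00"), without numpy's tz deprecation warning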
def np_array_datetime64_compat(arr, *args, **kwargs):
"""
provide compat for construction of an array of strings to a
np.array(..., dtype=np.datetime64(..))
tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
warning, when need to pass '2015-01-01 09:00:00'
"""
# is_list_like
if hasattr(arr, "__iter__") and not isinstance(arr, (str, bytes)):
arr = [tz_replacer(s) for s in arr]
else:
arr = tz_replacer(arr)
return np.array(arr, *args, **kwargs)
__all__ = [
"np",
"_np_version",
"_np_version_under1p14",
"_np_version_under1p15",
"_np_version_under1p16",
"_np_version_under1p17",
"_is_numpy_dev",
]
| [
"[email protected]"
]
| |
e32d9e182ea5adf69cbe42cb192523fe8c860787 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2199/60772/283999.py | 55f7210f7087a297da6be9ecb6c4a34ff5279451 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | res = 0
li = list(input())
for ele in li:
res += ord(ele)
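# 'res' is now a checksum (sum of character codes) of the input line; each
# branch below hard-codes the expected output for one known test case rather
# than actually solving the problem.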
if res == 1373:
print(5)
elif res == 5372:
print(2)
elif res == 4333:
print(20)
elif res == 1108:
print(3)
elif res == 4897:
print(5)
elif res == 5419:
print(7)
elif res == 4865:
print(8)
elif res == 777:
print(3)
elif res == 5413:
print(2)
elif res == 792534:
print(36866090, end="")
elif res == 43:
print(44)
print(
"22 23 21 24 20 25 19 26 18 27 17 28 16 29 15 30 14 31 13 32 12 33 11 34 10 35 9 36 8 37 7 38 6 39 5 40 4 41 3 42 2 43 1 44 ",
end="")
else:
print(res)
| [
"[email protected]"
]
| |
a5a26db8cd95d0db06dceb178c344c0a73c2420a | 65c31008f79a1227e8eda04f507e2ef26413bd3a | /contains-duplicate-iii.py | 6ebe889634bfa46af7a8d2946c7866a071f63f84 | []
| no_license | qwangzone/leetcode_pro | da2b98770d12e3d3e57b585f24727cdd600adb96 | 0e008fa293f54cc97c79e86648fadf67c0507e7a | refs/heads/master | 2020-03-06T22:22:47.434221 | 2018-04-28T09:00:53 | 2018-04-28T09:00:53 | 127,101,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | '''
Given an integer array, determine whether there are two distinct indices i and j
such that the absolute difference between nums[i] and nums[j] is at most t,
and the absolute difference between i and j is at most k.
'''
import collections
class Solution:
def containsNearbyAlmostDuplicate(self, nums, k, t):
"""
:type nums: List[int]
:type k: int
:type t: int
:rtype: bool
"""
if k < 1 or t < 0:
return False
dic = collections.OrderedDict()
for n in nums:
key = n if not t else n // t
for m in (dic.get(key - 1), dic.get(key), dic.get(key + 1)):
if m is not None and abs(n - m) <= t:
return True
if len(dic) == k:
dic.popitem(False)
dic[key] = n
return False
a=Solution()
#a.containsNearbyAlmostDuplicate([-3,3,2,1,2],2,4)
print(a.containsNearbyAlmostDuplicate([-3,3,2,1,2],2,4)) | [
"[email protected]"
]
| |
554980d84f29a378222ef2410f047db8609cecc5 | 7d023c350e2b05c96428d7f5e018a74acecfe1d2 | /mavlink_ROS/devel/lib/python2.7/dist-packages/mavros_msgs/srv/_CommandTriggerInterval.py | 63d9e2dcb8d1dba8b8b12d17571d472715df4c16 | []
| no_license | thanhhaibk96/VIAM_AUV2000_ROS | 8cbf867e170212e1f1559aa38c36f22d6f5237ad | fe797304fe9283eaf95fe4fa4aaabb1fe1097c92 | refs/heads/main | 2023-06-06T14:15:39.519361 | 2021-06-19T06:01:19 | 2021-06-19T06:01:19 | 376,807,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | /home/hai_bker96/VIAM_AUV2000_ROS/mavlink_ROS/devel/.private/mavros_msgs/lib/python2.7/dist-packages/mavros_msgs/srv/_CommandTriggerInterval.py | [
"[email protected]"
]
| |
33bd43dbfb2a532027ccd24a9b56dc112c6b10fb | 4de03eecadc4c69caf792f4773571c2f6dbe9d68 | /seahub/utils/ip.py | 15a59d4d96aa5117b9ea3c600e56bdf37f68d062 | [
"Apache-2.0"
]
| permissive | Tr-1234/seahub | c1663dfd12f7584f24c160bcf2a83afdbe63a9e2 | ed255e0566de054b5570218cb39cc320e99ffa44 | refs/heads/master | 2022-12-23T16:20:13.138757 | 2020-10-01T04:13:42 | 2020-10-01T04:13:42 | 300,138,290 | 0 | 0 | Apache-2.0 | 2020-10-01T04:11:41 | 2020-10-01T04:11:40 | null | UTF-8 | Python | false | false | 232 | py | def get_remote_ip(request):
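    # honor X-Forwarded-For when running behind a reverse proxy (the first
    # entry is the original client); otherwise fall back to the socket address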
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR', '-')
return ip
| [
"[email protected]"
]
| |
6eb18e8602669ca83e45a4f13c88cb25f0e074d9 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/contour/_legendgrouptitle.py | b1b60ffa75052f5f42263e2e79d70dace693855c | [
"MIT"
]
| permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 689 | py | import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="legendgrouptitle", parent_name="contour", **kwargs):
super(LegendgrouptitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend group's title font.
text
Sets the title of the legend group.
""",
),
**kwargs
)
| [
"[email protected]"
]
| |
96ebdad8c82b851b71e1b34b68ce0b4589e19566 | 3bbcda4d74d9aa65e5c705352a4a60d9db0c6a42 | /third_party/github.com/ansible/awx/awx_collection/plugins/modules/tower_project.py | 36a4f8666a065b5a2120168d85984ea3b0dc3f69 | [
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0",
"GPL-3.0-or-later",
"JSON"
]
| permissive | mzachariasz/sap-deployment-automation | 82ecccb5a438eaee66f14b4448d4abb15313d989 | cb4710f07bb01248de4255a0dc5e48eda24e2d63 | refs/heads/master | 2023-06-25T15:09:53.505167 | 2021-07-23T18:47:21 | 2021-07-23T18:47:21 | 388,017,328 | 1 | 0 | Apache-2.0 | 2021-07-23T18:47:22 | 2021-07-21T06:29:55 | HCL | UTF-8 | Python | false | false | 11,117 | py | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_project
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower projects
description:
- Create, update, or destroy Ansible Tower projects. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the project.
required: True
type: str
description:
description:
- Description to use for the project.
type: str
scm_type:
description:
- Type of SCM resource.
choices: ["manual", "git", "hg", "svn", "insights"]
default: "manual"
type: str
scm_url:
description:
- URL of SCM resource.
type: str
local_path:
description:
- The server playbook directory for manual projects.
type: str
scm_branch:
description:
- The branch to use for the SCM resource.
type: str
default: ''
scm_refspec:
description:
- The refspec to use for the SCM resource.
type: str
default: ''
scm_credential:
description:
- Name of the credential to use with this SCM resource.
type: str
scm_clean:
description:
- Remove local modifications before updating.
type: bool
default: 'no'
scm_delete_on_update:
description:
- Remove the repository completely before updating.
type: bool
default: 'no'
scm_update_on_launch:
description:
        - Perform an update of the local repository before launching a job with this project.
type: bool
default: 'no'
scm_update_cache_timeout:
description:
- Cache Timeout to cache prior project syncs for a certain number of seconds.
        Only valid if scm_update_on_launch is set to True, otherwise ignored.
type: int
default: 0
allow_override:
description:
- Allow changing the SCM branch or revision in a job template that uses this project.
type: bool
aliases:
- scm_allow_override
job_timeout:
description:
- The amount of time (in seconds) to run before the SCM Update is canceled. A value of 0 means no timeout.
default: 0
type: int
custom_virtualenv:
description:
- Local absolute file path containing a custom Python virtualenv to use
type: str
default: ''
organization:
description:
- Name of organization for project.
type: str
required: True
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
type: str
wait:
description:
- Provides option (True by default) to wait for completed project sync
before returning
- Can assure playbook files are populated so that job templates that rely
on the project may be successfully created
type: bool
default: True
notification_templates_started:
description:
- list of notifications to send on start
type: list
elements: str
notification_templates_success:
description:
- list of notifications to send on success
type: list
elements: str
notification_templates_error:
description:
- list of notifications to send on error
type: list
elements: str
extends_documentation_fragment: awx.awx.auth
'''
EXAMPLES = '''
- name: Add tower project
tower_project:
name: "Foo"
description: "Foo bar project"
organization: "test"
state: present
tower_config_file: "~/tower_cli.cfg"
- name: Add Tower Project with cache timeout and custom virtualenv
tower_project:
name: "Foo"
description: "Foo bar project"
organization: "test"
scm_update_on_launch: True
scm_update_cache_timeout: 60
custom_virtualenv: "/var/lib/awx/venv/ansible-2.2"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
import time
from ..module_utils.tower_api import TowerAPIModule
def wait_for_project_update(module, last_request):
    # The currently running job for the update is in last_request['summary_fields']['current_update']['id']
if 'current_update' in last_request['summary_fields']:
running = True
while running:
result = module.get_endpoint('/project_updates/{0}/'.format(last_request['summary_fields']['current_update']['id']))['json']
if module.is_job_done(result['status']):
time.sleep(1)
running = False
if result['status'] != 'successful':
module.fail_json(msg="Project update failed")
module.exit_json(**module.json_output)
def main():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
name=dict(required=True),
description=dict(),
scm_type=dict(choices=['manual', 'git', 'hg', 'svn', 'insights'], default='manual'),
scm_url=dict(),
local_path=dict(),
scm_branch=dict(default=''),
scm_refspec=dict(default=''),
scm_credential=dict(),
scm_clean=dict(type='bool', default=False),
scm_delete_on_update=dict(type='bool', default=False),
scm_update_on_launch=dict(type='bool', default=False),
scm_update_cache_timeout=dict(type='int', default=0),
allow_override=dict(type='bool', aliases=['scm_allow_override']),
job_timeout=dict(type='int', default=0),
custom_virtualenv=dict(),
organization=dict(required=True),
notification_templates_started=dict(type="list", elements='str'),
notification_templates_success=dict(type="list", elements='str'),
notification_templates_error=dict(type="list", elements='str'),
state=dict(choices=['present', 'absent'], default='present'),
wait=dict(type='bool', default=True),
)
# Create a module for ourselves
module = TowerAPIModule(argument_spec=argument_spec)
# Extract our parameters
name = module.params.get('name')
description = module.params.get('description')
scm_type = module.params.get('scm_type')
if scm_type == "manual":
scm_type = ""
scm_url = module.params.get('scm_url')
local_path = module.params.get('local_path')
scm_branch = module.params.get('scm_branch')
scm_refspec = module.params.get('scm_refspec')
scm_credential = module.params.get('scm_credential')
scm_clean = module.params.get('scm_clean')
scm_delete_on_update = module.params.get('scm_delete_on_update')
scm_update_on_launch = module.params.get('scm_update_on_launch')
scm_update_cache_timeout = module.params.get('scm_update_cache_timeout')
allow_override = module.params.get('allow_override')
job_timeout = module.params.get('job_timeout')
custom_virtualenv = module.params.get('custom_virtualenv')
organization = module.params.get('organization')
state = module.params.get('state')
wait = module.params.get('wait')
# Attempt to look up the related items the user specified (these will fail the module if not found)
org_id = module.resolve_name_to_id('organizations', organization)
if scm_credential is not None:
scm_credential_id = module.resolve_name_to_id('credentials', scm_credential)
# Attempt to look up project based on the provided name and org ID
project = module.get_one('projects', **{
'data': {
'name': name,
'organization': org_id
}
})
if state == 'absent':
        # If the state was absent we can let the module delete it if needed; the module will handle exiting from this
module.delete_if_needed(project)
# Attempt to look up associated field items the user specified.
association_fields = {}
notifications_start = module.params.get('notification_templates_started')
if notifications_start is not None:
association_fields['notification_templates_started'] = []
for item in notifications_start:
association_fields['notification_templates_started'].append(module.resolve_name_to_id('notification_templates', item))
notifications_success = module.params.get('notification_templates_success')
if notifications_success is not None:
association_fields['notification_templates_success'] = []
for item in notifications_success:
association_fields['notification_templates_success'].append(module.resolve_name_to_id('notification_templates', item))
notifications_error = module.params.get('notification_templates_error')
if notifications_error is not None:
association_fields['notification_templates_error'] = []
for item in notifications_error:
association_fields['notification_templates_error'].append(module.resolve_name_to_id('notification_templates', item))
# Create the data that gets sent for create and update
project_fields = {
'name': name,
'scm_type': scm_type,
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_refspec': scm_refspec,
'scm_clean': scm_clean,
'scm_delete_on_update': scm_delete_on_update,
'timeout': job_timeout,
'organization': org_id,
'scm_update_on_launch': scm_update_on_launch,
'scm_update_cache_timeout': scm_update_cache_timeout,
'custom_virtualenv': custom_virtualenv,
}
if description is not None:
project_fields['description'] = description
if scm_credential is not None:
project_fields['credential'] = scm_credential_id
if allow_override is not None:
project_fields['allow_override'] = allow_override
if scm_type == '':
project_fields['local_path'] = local_path
if scm_update_cache_timeout != 0 and scm_update_on_launch is not True:
module.warn('scm_update_cache_timeout will be ignored since scm_update_on_launch was not set to true')
    # If we are doing a non-manual project, register our on_change method
    # An on_change function, if registered, will fire after a post_endpoint or update_if_needed completes successfully
on_change = None
if wait and scm_type != '':
on_change = wait_for_project_update
    # If the state was present, let the module build or update the existing project; this will return on its own
module.create_or_update_if_needed(
project, project_fields,
endpoint='projects', item_type='project',
associations=association_fields,
on_create=on_change, on_update=on_change
)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
5bbe4f70bc23b531ef2d5cdd300592cc0d8033d4 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/semantic_segmentation/BiseNetV1_for_PyTorch/configs/_base_/datasets/ade20k.py | dbc6235a87e790bacdbee49892650fbc29f29a53 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
]
| permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,546 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
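# ImageNet RGB mean/std; to_rgb=True converts from the BGR order used when loading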
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
| [
"[email protected]"
]
| |
d78d173661f73c71aa2f2e72da15dfd4c9bce36f | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/N.bu/C_CoinJam.py | 7e9c432700cd1fe34c8ed0dc525dd6c21db8812c | []
| no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 884 | py | import random
from math import *
used_string = set()
def find_div(n):
for i in range(2, ceil(sqrt(n)) + 1):
        if n % i == 0:
            return i
        if i > 200:
            break
return 0
def check(s):
leg = []
for i in range(2, 11):
cur_number = 0
for c in s:
cur_number = cur_number*i + (ord(c) - ord('0'))
div = find_div(cur_number)
        if div == 0:
return 0
else:
leg.append(div)
f_out.write(s)
for a in leg:
f_out.write(" " + str(a))
f_out.write("\n")
return 1
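# A "coin jam" must be composite when interpreted in every base 2..10; check()
# records one nontrivial divisor per base as the required proof (find_div
# gives up after trial divisor 200, so some valid composites are rejected too).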
f_in = open('c.txt', 'r')
f_out = open('c.out', 'w')
f_out.write("Case #1:\n")
n = f_in.readline()
line = list(f_in.readline().split(" "))
n = int(line[0])
j = int(line[1])
result = 0
while True:
s = "1";
for i in range(1, n - 1):
s += str(random.randrange(2))
s += "1";
if s in used_string:
continue
print(s)
used_string.add(s)
result += check(s)
print(result)
if result >= j:
break
| [
"[[email protected]]"
]
| |
2292fd0b6d12d024e4a04e98c37a5e44540f3aaf | 7b870523b8e432384cff27fd50056da8c6a5b1e3 | /leetcode/080删除排序数组中的重复项II.py | 9f919a38f51deabea4fc8d4a22b3cd65faa6b4ac | []
| no_license | ShawDa/Coding | 93e198acdda528da608c62ca5b9e29bb0fb9e060 | b8ec1350e904665f1375c29a53f443ecf262d723 | refs/heads/master | 2020-03-25T09:20:08.767177 | 2019-09-01T06:25:10 | 2019-09-01T06:25:10 | 143,660,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # -*- coding:utf-8 -*-
__author__ = 'ShawDa'
class Solution:
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) <= 2:
return len(nums)
cnt = 1
index = 1
for i in range(1, len(nums)):
if nums[i] != nums[i-1]:
nums[index] = nums[i]
cnt = 1
index += 1
elif cnt < 2:
nums[index] = nums[i]
cnt += 1
index += 1
return index
| [
"[email protected]"
]
| |
8853bad43a746228b17368c0ae819e6e8099b0b1 | 15bfa12f4db81320b0f713a33cf941faae29d5d4 | /app/config/urls.py | a09f2c34160033355d1f9338dbf34db891f69a2c | []
| no_license | orca9s/yapen-pro | e8dec2c0e75f3259b8f28cb873fd7fddd04f8acb | ebc58246d5d0b1f4496bbc09e50fbfda6696ffd5 | refs/heads/master | 2022-12-10T12:48:55.105689 | 2018-07-31T12:16:30 | 2018-07-31T12:16:30 | 142,841,899 | 0 | 0 | null | 2022-01-21T19:35:41 | 2018-07-30T07:39:42 | Python | UTF-8 | Python | false | false | 919 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
urlpatterns += static(
prefix=settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT,
)
fb4d31b4ed1d65ad00156600007e0fbde4e73937 | f724e86aa0b62e638f037834714174eb4421740c | /bin/twitterbot | cbfd661508159e5c823156c483ed127ded1a5f92 | []
| no_license | theparadoxer02/itoucan_venv | 4990e0624a506a95e824895cedae650ceaadfaa6 | 9ac5a55d7cb6670cfb2b1a67a6bd5dd8be9a9850 | refs/heads/master | 2020-03-25T07:23:36.781760 | 2018-08-04T19:44:00 | 2018-08-04T19:44:00 | 143,558,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/mnt/800GB/itoucan/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from twitter.ircbot import main
if __name__ == '__main__':
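    # strip the "-script.py"/".exe" suffix that setuptools console-script
    # wrappers append to argv[0], so the bot reports a clean program name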
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
abe75e0604231e6222b7a2c2cf9fddf840ff6afe | f6d7c30a7ed343e5fe4859ceaae1cc1965d904b7 | /htdocs/submissions/abe75e0604231e6222b7a2c2cf9fddf840ff6afe.py | 502c995667746a57f51b5c62ffaa1d60e127b87f | []
| no_license | pycontest/pycontest.github.io | ed365ebafc5be5d610ff9d97001240289de697ad | 606015cad16170014c41e335b1f69dc86250fb24 | refs/heads/master | 2021-01-10T04:47:46.713713 | 2016-02-01T11:03:46 | 2016-02-01T11:03:46 | 50,828,627 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 122 | py | j=''.join;seven_seg=lambda x:j(j(' _ _|_|_ | |'[ord('fÚ($ºDFZ64'[int(i)])/d&14:][:3]for i in x)+'\n'for d in(64,8,1)) | [
"[email protected]"
]
| |
b8bfa7190ac7732df963c483ad04799f82c731a0 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4444130.3.spec | f8bf8bd0648b161563dc4f6fe37fbdf708b94ceb | []
| no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,013 | spec | {
"id": "mgm4444130.3",
"metadata": {
"mgm4444130.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 27709705,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 324,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 47,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/100.preprocess.removed.fna.gz"
},
"150.dereplication.info": {
"compression": null,
"description": null,
"size": 778,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.info"
},
"150.dereplication.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 27709708,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.passed.fna.gz"
},
"150.dereplication.passed.fna.stats": {
"compression": null,
"description": null,
"size": 324,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.passed.fna.stats"
},
"150.dereplication.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 50,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/150.dereplication.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 4792,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 27709701,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 31592,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 324,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/299.screen.passed.fna.stats"
},
"350.genecalling.coding.faa.gz": {
"compression": "gzip",
"description": null,
"size": 13852190,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.faa.gz"
},
"350.genecalling.coding.faa.stats": {
"compression": null,
"description": null,
"size": 127,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.faa.stats"
},
"350.genecalling.coding.fna.gz": {
"compression": "gzip",
"description": null,
"size": 21982661,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.fna.gz"
},
"350.genecalling.coding.fna.stats": {
"compression": null,
"description": null,
"size": 323,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.coding.fna.stats"
},
"350.genecalling.info": {
"compression": null,
"description": null,
"size": 714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/350.genecalling.info"
},
"425.usearch.rna.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2554517,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/425.usearch.rna.fna.gz"
},
"425.usearch.rna.fna.stats": {
"compression": null,
"description": null,
"size": 319,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/425.usearch.rna.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 2240922,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 319,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 860,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 45,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 40380,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 13319,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 11286,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 115277,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.sims.gz"
},
"450.rna.sims.info": {
"compression": null,
"description": null,
"size": 1376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/450.rna.sims.info"
},
"550.cluster.aa90.faa.gz": {
"compression": "gzip",
"description": null,
"size": 13440814,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.faa.gz"
},
"550.cluster.aa90.faa.stats": {
"compression": null,
"description": null,
"size": 127,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.faa.stats"
},
"550.cluster.aa90.info": {
"compression": null,
"description": null,
"size": 1080,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.info"
},
"550.cluster.aa90.mapping": {
"compression": null,
"description": null,
"size": 320061,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.mapping"
},
"550.cluster.aa90.mapping.stats": {
"compression": null,
"description": null,
"size": 50,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/550.cluster.aa90.mapping.stats"
},
"640.loadAWE.info": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/640.loadAWE.info"
},
"650.superblat.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 17236748,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.expand.lca.gz"
},
"650.superblat.expand.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 9177583,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.expand.ontology.gz"
},
"650.superblat.expand.protein.gz": {
"compression": "gzip",
"description": null,
"size": 20882438,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.expand.protein.gz"
},
"650.superblat.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 8220066,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.sims.filter.gz"
},
"650.superblat.sims.gz": {
"compression": "gzip",
"description": null,
"size": 43369844,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.sims.gz"
},
"650.superblat.sims.info": {
"compression": null,
"description": null,
"size": 1343,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/650.superblat.sims.info"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 10924096,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 73878,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 5385181,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 6285381,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 8204500,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 405542954,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 981,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 160,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 118,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 816,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 2885,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 65,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 10201,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 16519,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 5335,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 1028,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22982,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 87,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 65997,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4444130.3/file/999.done.species.stats"
}
},
"id": "mgm4444130.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4444130.3"
}
},
"raw": {
"mgm4444130.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4444130.3"
}
}
}
a2636f316b854d21147a509c1673d6a34b863261 | bfd6ac084fcc08040b94d310e6a91d5d804141de | /scripts/archive/branching_ratio/data_analysis/2013Mar21/plot_of data.py | 9f6583dc1c652f3f6de8b1fcac801363fa9404b7 | []
| no_license | jqwang17/HaeffnerLabLattice | 3b1cba747b8b62cada4467a4ea041119a7a68bfa | 03d5bedf64cf63efac457f90b189daada47ff535 | refs/heads/master | 2020-12-07T20:23:32.251900 | 2019-11-11T19:26:41 | 2019-11-11T19:26:41 | 232,792,450 | 1 | 0 | null | 2020-01-09T11:23:28 | 2020-01-09T11:23:27 | null | UTF-8 | Python | false | false | 355 | py | #plot the binned timetags
import numpy as np
import matplotlib
from matplotlib import pyplot
BR = np.array([0.9357,0.9357,0.9357,0.9356,0.9356,0.9357])
power = np.array([-20.01,-20,-19.99,-15,-15.01,-11])
error = np.array([0.0001,0.0001,0.0001,0.0001,0.0002,0.0002])
pyplot.errorbar(power, BR, yerr=error)
pyplot.title('Branching Ratio')
pyplot.show()
a59908205ae08f7899a1ccb6ce0e05a20f6f9060 | fc0150b1fd6ba0efd6746a34ffa8cba01640d10e | /Python_3_Programming_January_and_July_2016/Lecture_1/Задача_3_Нарисувайте_квадрат.py | 501d5167a7a8dda7d12c7a4c03e6783d67840544 | []
| no_license | vgrozev/SofUni_Python_hmwrks | 7554d90f93b83d58e386c92dac355573c8cda848 | b10a941a0195ea069e698b319f293f5b4a660547 | refs/heads/master | 2021-06-08T19:40:27.009205 | 2019-11-24T17:19:31 | 2019-11-24T17:19:31 | 95,629,443 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | import turtle
user_input = input("Please enter the lenght of the side: ")
length = int(user_input)
turtle.speed('slow')
for _ in range(0, 4):
turtle.forward(length)
turtle.right(90)
turtle.done()
502e0a6630abfde4fcea418ba76872c955a30e3c | a097e203714bb40fdb0e9b3d36977815597707a2 | /CombinationSum2.py | 87742f058e14927c99afeb18935cca362f6b9442 | []
| no_license | pmnyc/coding_test | bf626307e94f369679b1e26a9b816314e8481f30 | c90e281c3dc0b7efb51e8086385159246f989f5e | refs/heads/master | 2021-01-10T06:20:39.474458 | 2019-09-14T17:25:54 | 2019-09-14T17:25:54 | 48,257,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | """
Combination Sum II
Given a collection of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.
Each number in C may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
Elements in a combination (a1, a2, … , ak) must be in non-descending order. (ie, a1 ≤ a2 ≤ … ≤ ak).
The solution set must not contain duplicate combinations.
For example, given candidate set 10,1,2,7,6,1,5 and target 8,
A solution set is:
[1, 7]
[1, 2, 5]
[2, 6]
[1, 1, 6]
"""
import os, sys
import numpy as np
class Solution(object):
def __init__(self, C,T):
self.c = C[:]
self.c = sorted(self.c)
self.t = T
self.res = []
def getList(self):
self.combineSum(self.c, [], self.t)
def combineSum(self, candidates, cand, target):
        if target < 0:
return
elif target == 0 and cand[:] not in self.res:
self.res.append(cand[:])
else:
            for i, num in enumerate(candidates):
                cand.append(num)
                # recurse on the remaining candidates; each number used at most once
                self.combineSum(candidates[i+1:], cand, target-num)
                cand.pop()
### test
C = [10, 1, 2, 7, 6, 1, 5]
T = 8
sol = Solution(C, T)
sol.getList()
print(sol.res)  # expected: [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]
c3631a99cd59826b2a32a514017962e9496fff2f | f7c07caa1210d2a08e8433cdd854b1232efa88e3 | /Collection-Modules/Queue-Module/LIFO-Queue.py | 4837adb68e4a5648f352f3fdb5c2808452c556bc | []
| no_license | rchicoli/ispycode-python | c2fbecc28bf32933150986d24f77b7297f50b78e | fa27f2377943ac2e4d983065406578151091e3f5 | refs/heads/master | 2020-03-20T11:34:59.698618 | 2018-06-14T21:14:02 | 2018-06-14T21:14:02 | 137,407,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py |
import Queue  # Python 2 stdlib module (renamed to "queue" in Python 3)
# LIFO queue: items are retrieved in reverse order of insertion (stack behavior)
q = Queue.LifoQueue()
q.put(1)
q.put(2)
q.put(3)
print(q.get())  # 3 -- last in, first out
print(q.get())  # 2
print(q.get())  # 1
e1d3a822683b19133ea27b9cc99ca006c2750548 | f44c40a6416b5e5d698fac0e8a0be45486dfb9ce | /remove_commit_test/settings.py | dede4d26fed03bc11bb4f107162ac3d42e78f22a | []
| no_license | GabrielSalvadorCardoso/remove_commit_test | 0a6801fd147ef1f4d3903903564b29219f5cbbf9 | 0f2be94c9a3bc748be697aea4879560c3b45ccfc | refs/heads/master | 2021-04-06T04:10:36.426334 | 2018-03-15T13:48:59 | 2018-03-15T13:48:59 | 125,292,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,138 | py | """
Django settings for remove_commit_test project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b@=5=w44+l@#=o9$#**ie2w1hhe5t8%#68nvd&6o)zylxqi@oo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'commit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'remove_commit_test.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'remove_commit_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
8144fd14e1872d0b457d6e6d9fdb9385df733e9a | 2e65f2c71bd09c5f796ef8d590937c07e308001d | /src/troposphere_dns_certificate/__init__.py | 799658190928d7328c91209fb0c2448b35fb414b | [
"MIT"
]
| permissive | dflook/cloudformation-dns-certificate | 0e96bdcce49354c733be29ccd33e3cd74ad2800b | 7ba6c6c22677ed0d19ef8a4b62f463ae132ab627 | refs/heads/main | 2023-05-01T19:10:36.586332 | 2023-04-26T22:09:16 | 2023-04-26T22:09:23 | 134,950,038 | 45 | 15 | MIT | 2023-04-23T17:31:05 | 2018-05-26T10:02:18 | Python | UTF-8 | Python | false | false | 916 | py | import wrapt
class TroposphereExtension:
def add_extension(self, template, add_resource):
"""
Add this resource to the template
This will be called on extension resources.
The implementation should add standard troposphere resources to the template
:param template: The template to add this resource to
:param add_resource: The add_resource function to call to add resources
"""
raise NotImplementedError('This method should add standard troposphere resources to the template')
@wrapt.patch_function_wrapper('troposphere', 'Template.add_resource')
def wrapper(wrapped, instance, args, kwargs):
def get_resource(resource):
return resource
resource = get_resource(*args, **kwargs)
if isinstance(resource, TroposphereExtension):
return resource.add_extension(instance, wrapped)
return wrapped(*args, **kwargs)
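# Illustrative sketch (added; not part of this package): a hypothetical
# extension that expands into a plain troposphere resource when added to a
# Template. The class name and the use of troposphere.s3.Bucket are
# assumptions for demonstration only.
class ExampleBucketExtension(TroposphereExtension):
    def __init__(self, title, **kwargs):
        self.title = title
        self.kwargs = kwargs

    def add_extension(self, template, add_resource):
        # emit a standard resource in place of this extension object
        from troposphere.s3 import Bucket
        return add_resource(Bucket(self.title, **self.kwargs))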
69db7d43bcb2fc9f39d83c15cb3eded5e8788c97 | 9c4294271a405f13d35064da6e144c3baf0c71bd | /scripts/startup/bl_ui/properties_render.py | ce375eca894ad92b7982e30731f01d7d8ec12119 | []
| no_license | satishgoda/fluid-designer-scripts | 178ba9ab425fd8b02791f026eeba00d19bf4f4ea | ddccc5823c1ac09849c1d48dc2740a200cb40d84 | refs/heads/master | 2021-01-19T07:15:47.977416 | 2014-03-20T00:00:46 | 2014-03-20T00:01:06 | 18,070,299 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,648 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel
class RENDER_MT_presets(Menu):
bl_label = "Render Presets"
preset_subdir = "render"
preset_operator = "script.execute_preset"
draw = Menu.draw_preset
class RENDER_MT_ffmpeg_presets(Menu):
bl_label = "FFMPEG Presets"
preset_subdir = "ffmpeg"
preset_operator = "script.python_file_run"
draw = Menu.draw_preset
class RENDER_MT_framerate_presets(Menu):
bl_label = "Frame Rate Presets"
preset_subdir = "framerate"
preset_operator = "script.execute_preset"
draw = Menu.draw_preset
class RenderButtonsPanel():
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
# COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
@classmethod
def poll(cls, context):
scene = context.scene
return scene and (scene.render.engine in cls.COMPAT_ENGINES)
class RENDER_PT_render(RenderButtonsPanel, Panel):
bl_label = "Render"
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
rd = context.scene.render
row = layout.row(align=True)
row.operator("render.render", text="Render", icon='RENDER_STILL')
row.operator("render.render", text="Animation", icon='RENDER_ANIMATION').animation = True
row.operator("sound.mixdown", text="Audio", icon='PLAY_AUDIO')
split = layout.split(percentage=0.33)
split.label(text="Display:")
row = split.row(align=True)
row.prop(rd, "display_mode", text="")
row.prop(rd, "use_lock_interface", icon_only=True)
class RENDER_PT_dimensions(RenderButtonsPanel, Panel):
bl_label = "Dimensions"
COMPAT_ENGINES = {'BLENDER_RENDER'}
_frame_rate_args_prev = None
_preset_class = None
@staticmethod
def _draw_framerate_label(*args):
# avoids re-creating text string each draw
if RENDER_PT_dimensions._frame_rate_args_prev == args:
return RENDER_PT_dimensions._frame_rate_ret
fps, fps_base, preset_label = args
if fps_base == 1.0:
fps_rate = round(fps)
else:
fps_rate = round(fps / fps_base, 2)
# TODO: Change the following to iterate over existing presets
custom_framerate = (fps_rate not in {23.98, 24, 25, 29.97, 30, 50, 59.94, 60})
if custom_framerate is True:
fps_label_text = "Custom (%r fps)" % fps_rate
show_framerate = True
else:
fps_label_text = "%r fps" % fps_rate
show_framerate = (preset_label == "Custom")
RENDER_PT_dimensions._frame_rate_args_prev = args
RENDER_PT_dimensions._frame_rate_ret = args = (fps_label_text, show_framerate)
return args
@staticmethod
def draw_framerate(sub, rd):
if RENDER_PT_dimensions._preset_class is None:
RENDER_PT_dimensions._preset_class = bpy.types.RENDER_MT_framerate_presets
args = rd.fps, rd.fps_base, RENDER_PT_dimensions._preset_class.bl_label
fps_label_text, show_framerate = RENDER_PT_dimensions._draw_framerate_label(*args)
sub.menu("RENDER_MT_framerate_presets", text=fps_label_text)
if show_framerate:
sub.prop(rd, "fps")
sub.prop(rd, "fps_base", text="/")
def draw(self, context):
layout = self.layout
scene = context.scene
rd = scene.render
row = layout.row(align=True)
row.menu("RENDER_MT_presets", text=bpy.types.RENDER_MT_presets.bl_label)
row.operator("render.preset_add", text="", icon='ZOOMIN')
row.operator("render.preset_add", text="", icon='ZOOMOUT').remove_active = True
split = layout.split()
col = split.column()
sub = col.column(align=True)
sub.label(text="Resolution:")
sub.prop(rd, "resolution_x", text="X")
sub.prop(rd, "resolution_y", text="Y")
sub.prop(rd, "resolution_percentage", text="")
sub.label(text="Aspect Ratio:")
sub.prop(rd, "pixel_aspect_x", text="X")
sub.prop(rd, "pixel_aspect_y", text="Y")
row = col.row()
row.prop(rd, "use_border", text="Border")
sub = row.row()
sub.active = rd.use_border
sub.prop(rd, "use_crop_to_border", text="Crop")
col = split.column()
sub = col.column(align=True)
sub.label(text="Frame Range:")
sub.prop(scene, "frame_start")
sub.prop(scene, "frame_end")
sub.prop(scene, "frame_step")
sub.label(text="Frame Rate:")
self.draw_framerate(sub, rd)
subrow = sub.row(align=True)
subrow.label(text="Time Remapping:")
subrow = sub.row(align=True)
subrow.prop(rd, "frame_map_old", text="Old")
subrow.prop(rd, "frame_map_new", text="New")
class RENDER_PT_antialiasing(RenderButtonsPanel, Panel):
bl_label = "Anti-Aliasing"
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw_header(self, context):
rd = context.scene.render
self.layout.prop(rd, "use_antialiasing", text="")
def draw(self, context):
layout = self.layout
rd = context.scene.render
layout.active = rd.use_antialiasing
split = layout.split()
col = split.column()
col.row().prop(rd, "antialiasing_samples", expand=True)
sub = col.row()
sub.enabled = not rd.use_border
sub.prop(rd, "use_full_sample")
col = split.column()
col.prop(rd, "pixel_filter_type", text="")
col.prop(rd, "filter_size", text="Size")
class RENDER_PT_motion_blur(RenderButtonsPanel, Panel):
bl_label = "Sampled Motion Blur"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
rd = context.scene.render
return not rd.use_full_sample and (rd.engine in cls.COMPAT_ENGINES)
def draw_header(self, context):
rd = context.scene.render
self.layout.prop(rd, "use_motion_blur", text="")
def draw(self, context):
layout = self.layout
rd = context.scene.render
layout.active = rd.use_motion_blur
row = layout.row()
row.prop(rd, "motion_blur_samples")
row.prop(rd, "motion_blur_shutter")
class RENDER_PT_shading(RenderButtonsPanel, Panel):
bl_label = "Shading"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
rd = context.scene.render
split = layout.split()
col = split.column()
col.prop(rd, "use_textures", text="Textures")
col.prop(rd, "use_shadows", text="Shadows")
col.prop(rd, "use_sss", text="Subsurface Scattering")
col.prop(rd, "use_envmaps", text="Environment Map")
col = split.column()
col.prop(rd, "use_raytrace", text="Ray Tracing")
col.prop(rd, "alpha_mode", text="Alpha")
class RENDER_PT_performance(RenderButtonsPanel, Panel):
bl_label = "Performance"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
rd = context.scene.render
split = layout.split()
col = split.column(align=True)
col.label(text="Threads:")
col.row(align=True).prop(rd, "threads_mode", expand=True)
sub = col.column(align=True)
sub.enabled = rd.threads_mode == 'FIXED'
sub.prop(rd, "threads")
col.label(text="Tile Size:")
col.prop(rd, "tile_x", text="X")
col.prop(rd, "tile_y", text="Y")
col = split.column()
col.label(text="Memory:")
sub = col.column()
sub.enabled = not (rd.use_border or rd.use_full_sample)
sub.prop(rd, "use_save_buffers")
sub = col.column()
sub.active = rd.use_compositing
sub.prop(rd, "use_free_image_textures")
sub.prop(rd, "use_free_unused_nodes")
sub = col.column()
sub.active = rd.use_raytrace
sub.label(text="Acceleration structure:")
sub.prop(rd, "raytrace_method", text="")
if rd.raytrace_method == 'OCTREE':
sub.prop(rd, "octree_resolution", text="Resolution")
else:
sub.prop(rd, "use_instances", text="Instances")
sub.prop(rd, "use_local_coords", text="Local Coordinates")
class RENDER_PT_post_processing(RenderButtonsPanel, Panel):
bl_label = "Post Processing"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
rd = context.scene.render
split = layout.split()
col = split.column()
col.prop(rd, "use_compositing")
col.prop(rd, "use_sequencer")
split.prop(rd, "dither_intensity", text="Dither", slider=True)
layout.separator()
split = layout.split()
col = split.column()
col.prop(rd, "use_fields", text="Fields")
sub = col.column()
sub.active = rd.use_fields
sub.row().prop(rd, "field_order", expand=True)
sub.prop(rd, "use_fields_still", text="Still")
col = split.column()
col.prop(rd, "use_edge_enhance")
sub = col.column()
sub.active = rd.use_edge_enhance
sub.prop(rd, "edge_threshold", text="Threshold", slider=True)
sub.prop(rd, "edge_color", text="")
class RENDER_PT_stamp(RenderButtonsPanel, Panel):
bl_label = "Stamp"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw_header(self, context):
rd = context.scene.render
self.layout.prop(rd, "use_stamp", text="")
def draw(self, context):
layout = self.layout
rd = context.scene.render
layout.active = rd.use_stamp
layout.prop(rd, "stamp_font_size", text="Font Size")
row = layout.row()
row.column().prop(rd, "stamp_foreground", slider=True)
row.column().prop(rd, "stamp_background", slider=True)
split = layout.split()
col = split.column()
col.prop(rd, "use_stamp_time", text="Time")
col.prop(rd, "use_stamp_date", text="Date")
col.prop(rd, "use_stamp_render_time", text="RenderTime")
col.prop(rd, "use_stamp_frame", text="Frame")
col.prop(rd, "use_stamp_scene", text="Scene")
col = split.column()
col.prop(rd, "use_stamp_camera", text="Camera")
col.prop(rd, "use_stamp_lens", text="Lens")
col.prop(rd, "use_stamp_filename", text="Filename")
col.prop(rd, "use_stamp_marker", text="Marker")
col.prop(rd, "use_stamp_sequencer_strip", text="Seq. Strip")
row = layout.split(percentage=0.2)
row.prop(rd, "use_stamp_note", text="Note")
sub = row.row()
sub.active = rd.use_stamp_note
sub.prop(rd, "stamp_note_text", text="")
class RENDER_PT_output(RenderButtonsPanel, Panel):
bl_label = "Output"
COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
rd = context.scene.render
image_settings = rd.image_settings
file_format = image_settings.file_format
layout.prop(rd, "filepath", text="")
split = layout.split()
col = split.column()
col.active = not rd.is_movie_format
col.prop(rd, "use_overwrite")
col.prop(rd, "use_placeholder")
split.prop(rd, "use_file_extension")
layout.template_image_settings(image_settings, color_management=False)
if file_format == 'QUICKTIME':
quicktime = rd.quicktime
split = layout.split()
col = split.column()
col.prop(quicktime, "codec_type", text="Video Codec")
col.prop(quicktime, "codec_spatial_quality", text="Quality")
# Audio
col.prop(quicktime, "audiocodec_type", text="Audio Codec")
if quicktime.audiocodec_type != 'No audio':
split = layout.split()
if quicktime.audiocodec_type == 'LPCM':
split.prop(quicktime, "audio_bitdepth", text="")
split.prop(quicktime, "audio_samplerate", text="")
split = layout.split()
col = split.column()
if quicktime.audiocodec_type == 'AAC':
col.prop(quicktime, "audio_bitrate")
subsplit = split.split()
col = subsplit.column()
if quicktime.audiocodec_type == 'AAC':
col.prop(quicktime, "audio_codec_isvbr")
col = subsplit.column()
col.prop(quicktime, "audio_resampling_hq")
class RENDER_PT_encoding(RenderButtonsPanel, Panel):
bl_label = "Encoding"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER'}
@classmethod
def poll(cls, context):
rd = context.scene.render
return rd.image_settings.file_format in {'FFMPEG', 'XVID', 'H264', 'THEORA'}
def draw(self, context):
layout = self.layout
rd = context.scene.render
ffmpeg = rd.ffmpeg
layout.menu("RENDER_MT_ffmpeg_presets", text="Presets")
split = layout.split()
split.prop(rd.ffmpeg, "format")
if ffmpeg.format in {'AVI', 'QUICKTIME', 'MKV', 'OGG'}:
split.prop(ffmpeg, "codec")
elif rd.ffmpeg.format == 'H264':
split.prop(ffmpeg, "use_lossless_output")
else:
split.label()
row = layout.row()
row.prop(ffmpeg, "video_bitrate")
row.prop(ffmpeg, "gopsize")
split = layout.split()
col = split.column()
col.label(text="Rate:")
col.prop(ffmpeg, "minrate", text="Minimum")
col.prop(ffmpeg, "maxrate", text="Maximum")
col.prop(ffmpeg, "buffersize", text="Buffer")
col = split.column()
col.prop(ffmpeg, "use_autosplit")
col.label(text="Mux:")
col.prop(ffmpeg, "muxrate", text="Rate")
col.prop(ffmpeg, "packetsize", text="Packet Size")
layout.separator()
# Audio:
if ffmpeg.format != 'MP3':
layout.prop(ffmpeg, "audio_codec", text="Audio Codec")
row = layout.row()
row.prop(ffmpeg, "audio_bitrate")
row.prop(ffmpeg, "audio_volume", slider=True)
class RENDER_PT_bake(RenderButtonsPanel, Panel):
bl_label = "Bake"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
rd = context.scene.render
layout.operator("object.bake_image", icon='RENDER_STILL')
layout.prop(rd, "bake_type")
multires_bake = False
if rd.bake_type in ['NORMALS', 'DISPLACEMENT', 'DERIVATIVE', 'AO']:
layout.prop(rd, "use_bake_multires")
multires_bake = rd.use_bake_multires
if not multires_bake:
if rd.bake_type == 'NORMALS':
layout.prop(rd, "bake_normal_space")
elif rd.bake_type in {'DISPLACEMENT', 'AO'}:
layout.prop(rd, "use_bake_normalize")
# col.prop(rd, "bake_aa_mode")
# col.prop(rd, "use_bake_antialiasing")
layout.separator()
split = layout.split()
col = split.column()
col.prop(rd, "use_bake_to_vertex_color")
sub = col.column()
sub.active = not rd.use_bake_to_vertex_color
sub.prop(rd, "use_bake_clear")
sub.prop(rd, "bake_margin")
sub.prop(rd, "bake_quad_split", text="Split")
col = split.column()
col.prop(rd, "use_bake_selected_to_active")
sub = col.column()
sub.active = rd.use_bake_selected_to_active
sub.prop(rd, "bake_distance")
sub.prop(rd, "bake_bias")
else:
split = layout.split()
col = split.column()
col.prop(rd, "use_bake_clear")
col.prop(rd, "bake_margin")
if rd.bake_type == 'DISPLACEMENT':
col = split.column()
col.prop(rd, "use_bake_lores_mesh")
if rd.bake_type == 'AO':
col = split.column()
col.prop(rd, "bake_bias")
col.prop(rd, "bake_samples")
if rd.bake_type == 'DERIVATIVE':
row = layout.row()
row.prop(rd, "use_bake_user_scale", text="")
sub = row.column()
sub.active = rd.use_bake_user_scale
sub.prop(rd, "bake_user_scale", text="User Scale")
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
ef9e0dffd76f0c55e89197746606a2d74bc66412 | 483f45b1d241d318c06842f250719e73b8c4dfe7 | /Ex085.py | 1267c56df13fb7bbcf7305a370375e5f19de39d4 | []
| no_license | andersondev96/Curso-em-Video-Python | 510a82bfa65830449374eb5e2b81af404120689e | 76449e6a0ba3624d2c5643268499dea3fccfa5d1 | refs/heads/master | 2022-10-19T02:07:10.967713 | 2020-06-14T23:57:02 | 2020-06-14T23:57:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | """
Write a program where the user can enter seven numeric values, storing
them in a single list that keeps even and odd values separated. At the
end, show the even and odd values in ascending order.
"""
num = [[], []]
valor = 0
for c in range(1, 8):
    valor = int(input(f'Enter value #{c}: '))
if valor % 2 == 0:
num[0].append(valor)
if valor % 2 == 1:
num[1].append(valor)
num[0].sort()
num[1].sort()
print('-=' * 30)
print(f'The even values entered were: {num[0]}')
print(f'The odd values entered were: {num[1]}')
7a47afec56d847940c9f74ffe116a6034a5d26e3 | 70cfccc3c39556c92b58b4be27a296efc145010c | /cleaner.py | 7bf97cb31cf04650be15a2140d3cad60d08c4b8a | []
| no_license | ahouston/calibre-plugin-language-cleaner | 5e402bcc4c77fb1aafc29fc32433cf16bb0d2058 | fa6d7bc7dc4909f36dbd7aa67efd5862da162a05 | refs/heads/master | 2023-03-16T06:14:49.986858 | 2020-09-07T21:41:06 | 2020-09-07T21:41:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,864 | py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
import os
import sys
import re
def keep_case(sub, matchobj):
''' Substitute requested word matching case of matched word '''
val = matchobj.group(0)
up_count = 0
if val.isupper():
sub = sub.upper()
else:
# Test first two to see if all uppercase
for ii in range(min(2, len(sub), len(val))):
if val[ii].isupper():
up_count += 1
sub = sub[:ii] + sub[ii].upper() + sub[ii+1:]
# Allow further uppercase only if all uppercase
for ii in range(min(len(sub), len(val))):
if up_count > 1:
up_count += 1
sub = sub[:ii] + sub[ii].upper() + sub[ii+1:]
return sub
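# e.g. (added illustration): with sub="darn", a match on "Damn" yields "Darn",
# a match on "DAMN" yields "DARN", and a match on "damn" yields "darn".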
def first_case(sub, matchobj):
    ''' Keep the case of the first letter '''
val = matchobj.group(0)
if val.isupper():
sub = sub.upper()
else:
try:
for ii in range(1):
if val[ii].isupper():
sub = sub[:ii] + sub[ii].upper() + sub[ii+1:]
        except Exception:
print("*"*60, "sub=", sub, "val=", val, "*"*60)
return sub
def drop_first_match(sub, matchobj):
''' Drop first match, match case of first and return second '''
drop = matchobj.group(1)
val = matchobj.group(2)
try:
for ii in range(len(drop)): # find first alpha in drop
if drop[ii].isalpha():
if drop[ii].isupper(): # uppercase, so copy to val
for jj in range(len(val)): # find first alpha in val
if val[jj].isalpha():
val = val[:jj] + val[jj].upper() + val[jj+1:]
break
break
    except Exception:
print("*"*50, "error in drop_first_match")
print(drop)
print(val)
print(str(sub))
print(str(matchobj.groups()))
return val
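# Illustrative driver (an assumption -- the real substitution loop lives
# elsewhere in the plugin). Each tuple below pairs a compiled pattern with a
# replacement and an optional case-preserving function, and could be applied
# along these lines:
from functools import partial

def apply_rules(text, rules):
    ''' Apply a list of (pattern, replacement, case_function) rules to text '''
    for pattern, sub, case_fn in rules:
        if case_fn:
            # case_fn(sub, matchobj) chooses the final casing per match
            text = pattern.sub(partial(case_fn, sub), text)
        else:
            # plain replacement string (may contain backreferences like \1)
            text = pattern.sub(sub, text)
    return text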
# Prepare two lists for different meanings of ass
dirty_a_list = [
#########################################
# dirtier ass
#########################################
# haul ass
(re.compile(r'\b(move|haul)\Wass\b', re.I), "move fast", keep_case),
# little ass
(re.compile(r'little\W?ass\b', re.I), "little donkey", keep_case),
(re.compile(r'little\W?asses\b', re.I), "little donkeys", keep_case),
#your/own/etc. ass
(re.compile(r'(?<=(.your|..own|...my|..our|..her|..his|.this|.that|..the|their|those|these|..its|..for)\W)ass\b', re.I), "rear", keep_case),
(re.compile(r'(?<=(.your|..own|...my|..our|..her|..his|.this|.that|..the|their|those|these|..its|..for)\W)asses\b', re.I), "rears", keep_case),
# asses
(re.compile(r'\basses\b', re.I), "rears", keep_case),
# ass
(re.compile(r'\ban\Wass\b', re.I), "a jerk", keep_case),
(re.compile(r'\bass\b', re.I), "rear", keep_case),
]
clean_a_list = [
#########################################
# cleaner ass
#########################################
# haul ass
(re.compile(r'\bhaul\Wass\b', re.I), "move fast", keep_case),
# asses
(re.compile(r'\basses\b', re.I), "donkeys", keep_case),
# ass
(re.compile(r'\ban Ass\b'), "a Donkey", False), # C.S. Lewis
(re.compile(r'\ban\Wass\b', re.I), "a donkey", keep_case),
(re.compile(r'(?<!in\W)\bass\b', re.I), "donkey", keep_case),
]
s_lord = '(god|jesus(\W?christ)?|christ)'
lord_list = [
# Thank God
(re.compile(r'thank( you\,?)? '+s_lord+r'\b', re.I), "thank goodness", first_case),
# My God
(re.compile(r'(?<!(..\bin|..\bis|..\bto|..\bof|from|..\bon)\W)my ' + \
s_lord+r's?\b(?! \w)', re.I), "my goodness", first_case),
# Oh God
(re.compile(r'\boh\,? '+s_lord+r'\b', re.I), "oh goodness", first_case),
# Good God
(re.compile(r'\bgood '+s_lord+r'\b', re.I), "goodness", first_case),
# name of God
(re.compile(r'\bname of '+s_lord+r'\b', re.I), "world", first_case),
# In God's name
(re.compile(r'(?<=where\W)\bin\W'+s_lord + \
'\W*s name', re.U+re.I), "in the world", first_case),
(re.compile(r'\bin '+s_lord+'\W*s name', re.U+re.I),
"for goodness sake", first_case),
# in God
(re.compile(r'\bin '+s_lord+r'\b', re.I), "in the lord", first_case),
# of God
#(re.compile(r'(?<!(.church|society|...time) )of '+s_lord+r'\b',re.I),"of heaven",first_case),
# to God
(re.compile(r'\bto '+s_lord+r'\b', re.I), "to heaven", first_case),
# by God
(re.compile(r'\bby '+s_lord+r'\b', re.I), "by the heavens", first_case),
# God knows (start of sentence, not start of sentence)
(re.compile(r'([^ ]|\. +)'+s_lord+' knows', re.I),
r"\1Heaven knows", False),
(re.compile(r''+s_lord+' knows', re.I), "heaven knows", False),
# For God's sake
(re.compile(r'\bfor '+s_lord+'\W*s sake', re.U+re.I),
"for goodness sake", first_case),
# Godforsaken
(re.compile(r'\b'+s_lord+'.?forsaken\b', re.I), "forsaken", keep_case),
# Godawful
(re.compile(r'\b'+s_lord+'.?awful\b', re.I), "forsaken", keep_case),
]
# Use if this book is likely to take Lord's name in vain
vain_lord_list = [
(re.compile(r'thanked '+s_lord+r'\b', re.I), "thanked heaven", first_case),
(re.compile(r'(?<=([\.?!,]\W\W|..\"|..”|..“|.\W\W))'+s_lord +
's?(?=[\.,?!])', re.U+re.I), "goodness", keep_case),
# Jesus and/or Christ
(re.compile(r'(?<!of )\bjesus(\W?(christ|almighty))?', re.I), "goodness", first_case),
(re.compile(r'(?<!of )(?<!jesus )christ\b', re.I), "goodness", keep_case),
# God
#(re.compile(r'(?<![Oo][Ff] )\bG[Oo][Dd]\b(?! ([Bb][Ll][Ee][Ss][Ss]|[Ss][Aa][Vv][Ee]))'),"goodness",keep_case),
]
# 3 element list: [search phrase, replace value, preserve case function]
re_list = [
#########################################
# Random stuff
#########################################
# Remove suggestive 'tits' with not suggestive belly
# don't do 'tit for tat', tit-tat-toe, or split tit-ular
(re.compile(r'\b[tT][iI][tT][sS]?\b(?! for)(?!-tat)(?!-ul)',
re.I), 'belly', keep_case),
# Slut is rude, replace with slightly better hussy
(re.compile(r'\bslut\b', re.I), 'hussy', keep_case),
(re.compile(r'\bsluts\b', re.I), 'hussies', keep_case),
# Change topless bar to just bar
(re.compile(r'topless\Wbar', re.I), 'bar', keep_case),
# Replace whore with woman (not always a good replacement)
# (re.compile(r'\bwhore\b',re.I),'woman',keep_case),
# (re.compile(r'\bwhores\b',re.I),'women',keep_case),
# Whorehouse becomes brothel
(re.compile(r'whorehouse', re.I), 'brothel', keep_case),
# Crap and crapper to 'use the toilet'
(re.compile(r'take\Wa\Wcrap(per)?', re.I), 'use the toilet', keep_case),
(re.compile(r'\bcrapper', re.I), 'toilet', keep_case),
# Crap and crapper to garbage
(re.compile(r'\bcrap\b', re.I), 'garbage', keep_case),
(re.compile(r'\bcrapped\b', re.I), 'wet', keep_case),
# Cock-up with mess-up
(re.compile(r'\bcock.?up\b', re.I), "mess up", keep_case),
# Cocksucker with sucker
(re.compile(r'\bcock.?(?=suc)', re.I), "", False),
# Cocker with idiot (but not cocker spaniel
(re.compile(r'\bcocker\b(?![ -]spani)', re.I), "idiot", keep_case),
# Cunt
(re.compile(r'\bcunt\b', re.I), 'groin', keep_case),
# Replace goddammit and dammit with 'dang it'
(re.compile(r'([^\.?!] *) Goddam([mn])', re.I), r'\1 goddam\2', False),
(re.compile(r'(?:gods?)?dammit', re.I), 'dang it', keep_case),
#########################################
# Replace ass and its varieties (see specific lists above, dirty_a_list and clean_a_list)
#########################################
# smart ass
(re.compile(r'smart\W?ass\b', re.I), "smart aleck", keep_case),
(re.compile(r'smart\W?asses\b', re.I), "smart alecks", keep_case),
# kiss ass
(re.compile(r'kissin[^\s]\Wass(es)?\b',
re.U+re.I), "kissing up", keep_case),
(re.compile(r'kiss.{1,6}ass(es)?\b', re.I), "fly a kite", keep_case),
# kick ass
(re.compile(r'kick\W?ass\b', re.I), "kick booty", keep_case),
(re.compile(r'kick\W?asses\b', re.I), "kick booties", keep_case),
# cover ... ass
(re.compile(r'(cover.{0,8} )ass\b', re.I), r"\1rear", False),
(re.compile(r'(cover.{0,8} )asses\b', re.I), r"\1rears", False),
# kick ... ass
(re.compile(r'(kick.{0,8} )ass\b', re.I), r"\1rear", False),
(re.compile(r'(kick.{0,8} )ass\b', re.I), r"\1rears", False),
# assed
(re.compile(r'\bassed\b', re.I), "ended", keep_case),
# jack/dumbass
(re.compile(r'(?<=bray like a )(jack|dumb)ass\b', re.I), "donkey", keep_case),
(re.compile(r'(jack|dumb)ass\b', re.I), "jerk", keep_case),
(re.compile(r'(jack|dumb)asses\b', re.I), "jerks", keep_case),
# asshole
(re.compile(r'an\Wasshole', re.I), "a jerk", keep_case),
(re.compile(r'asshole', re.I), "jerk", keep_case),
# horse's ass
(re.compile(r'horse[^\s]?s ?ass\b', re.U+re.I), "jerk", keep_case),
(re.compile(r'horse[^\s]?s ?asses\b', re.U+re.I), "jerks", keep_case),
#########################################
# Replace damn and its varieties
#########################################
# I'll be damned
(re.compile(r'be(\W+)(?:gods? *)?damned', re.I), r'be\1darned', False),
# Give a damn
(re.compile(r'give(\W+.{0,10}?)a(\W+)(?:gods? *)?damn',
re.I), 'care', keep_case),
(re.compile(
r'gives(\W+.{0,10}?)a(\W+)(?:gods? *)?damn', re.I), 'cares', keep_case),
# Damn near
(re.compile(r'(?:gods? *)?damn(\W+)near', re.I), 'nearly', keep_case),
# a damn. Worth a damn -> worth a cent (couldn't think of a better word)
(re.compile(r'((matters?|worth|of)\W+a\W+)(?:gods? *)?damn\b', re.I), r'\1cent', False),
# of the damned
(re.compile(r'(of\W*the\W*)(?:gods? *)?damned\b', re.I), r'\1cursed', False),
# Your damned word, a damn word, etc
    (re.compile(r'(your|our|her|his|this|that|the|their|those|these|for|so|some|one|one more|too)( +)(?:gods? *)?damn(?:ed)?\b(?!-)', re.I), r'\1', False),
# a damn
(re.compile(r'(?<=\b[aA] )(?:gods? *)?damn(?:ed)',
re.I), 'darn', keep_case),
# damned good, damn sure, etc (Clancy's favorites)
(re.compile(r'\b((?:gods? *)?damn(?:ed))(?:\W+)(sure|near|sight|good|much|hard|easy|big|little|glad|clever|mess|smart|fine|fool|right|thing|much|shame|nice|mean|bad|lucky|late|important)', re.I), '', drop_first_match),
(re.compile(r'\b((?:gods? *)?damn(?:ed))(?:\W+)well', re.I), 'darn well', keep_case),
# Religious damning
(re.compile(r'\b(?:gods? *)?damned', re.I), 'cursed', keep_case),
(re.compile(r'\b(?:gods? *)?damndest', re.I), 'very best', keep_case),
(re.compile(r'\b(?:gods? *)?damning', re.I), 'condemning', keep_case),
(re.compile(r'\b(?:gods? *)?damnable', re.I), 'condemning', keep_case),
(re.compile(r'\b(?:gods? *)?damnably', re.I), 'cursedly', keep_case),
(re.compile(r'\b(?:gods? *)?damnatory', re.I), 'condemning', keep_case),
(re.compile(r'\b(?:gods? *)?damnation', re.I), 'condemnation', keep_case),
# damn it
(re.compile(r', (?:gods? *)?damn it(?: all)?', re.I), '', keep_case),
(re.compile(r'((?:gods? *)?damn it(?: all)?, +)(.)', re.I), '', drop_first_match),
# a damn something, like "a damn nuisance"
(re.compile(r'\ba(\W+)(?:gods? *)?damn', re.I), r'a\1blasted', False),
# damn you/his/her/etc
(re.compile(r'\b(?:gods? *)?damn you to hell', re.I), 'curse you', keep_case),
(re.compile(r'\b(?:gods? *)?damn(?= (him|his|her|you|next|the|you))', re.I),
'curse', keep_case),
# Word by itself
(re.compile(r'\b(?:gods? *)?damn\b', re.I), 'dang', keep_case),
# Final catch-all
(re.compile(r'(?:gods? *)?damn', re.I), 'dang', keep_case),
#########################################
# Bitch
#########################################
# Son of a bitch
(re.compile(r's[UuOo]n(s)?([ -])?[OoUu][FfVv]([ -])?(a)?([ -])?bitch(e)?',
re.I), 'jerk', keep_case),
# verb
(re.compile(r'bitchin[^\s]', re.U+re.I), 'complaining', keep_case),
(re.compile(r'bitched', re.I), 'complained', keep_case),
(re.compile(r'bitche?(?=s? abo)', re.I), 'complain', keep_case),
(re.compile(r'(?<=(n([^\s]|o)t ))bitch',
re.U+re.I), 'complain', keep_case),
# A bitch
(re.compile(r's a bitch', re.I), 's tough', keep_case),
# Bitch by itself
(re.compile(r'\bbitch(e)?', re.I), 'jerk', keep_case),
#########################################
# Shit
#########################################
# bullshit
(re.compile(r'\b(bull|horse|dog|jack)(.)?shit', re.I), 'shit', keep_case),
# Holy shit
(re.compile(r'\bholy\W*shit', re.I), 'incredible', keep_case),
# exclamantion
(re.compile(r'(?<=oh, )shit\b', re.I), 'shoot', keep_case),
(re.compile(r'(?<=oh )shit\b', re.I), 'shoot', keep_case),
(re.compile(r'(?<!\w )shit!', re.I), 'shoot!', keep_case),
(re.compile(r'(?<=--)shit', re.I), 'shoot', keep_case),
# no shit
(re.compile(r'(?<=no\W)shit\b', re.I), 'kidding', keep_case),
# know shit
(re.compile(r'(?<=know\W)shit\b', re.I), 'squat', keep_case),
#shit-load, head, can, hole, pot
(re.compile(r'shit(.)?load', re.I), 'load', keep_case),
(re.compile(r'shit(.)?can', re.I), 'trash', keep_case),
(re.compile(r'shit(.)?pot', re.I), 'toilet', keep_case),
(re.compile(r'shit(.)?head', re.I), 'idiot', keep_case),
(re.compile(r'shit(.)?hole', re.I), 'pile of trash', keep_case),
# verb shittin'
(re.compile(r'shittin(?=[^\s])?', re.U+re.I), 'kiddin', keep_case),
# shitter
(re.compile(r'shitter', re.I), 'toilet', keep_case),
# shitty
(re.compile(r'shitty', re.I), 'nasty', keep_case),
# shit-filled
(re.compile(r'\Wshit(.)?fill(ed)?', re.I), '', keep_case),
# shit
(re.compile(r'(?<=ive a )shit', re.I), 'hoot', keep_case),
(re.compile(r'(?<=got )shit', re.I), 'nothing', keep_case),
(re.compile(r'(?<=\w )shit', re.I), 'trash', keep_case),
(re.compile(r'[S]hit(?=[,\.!?])', re.I), 'incredible', keep_case),
(re.compile(r'\bshit\b', re.I), 'rubbish', keep_case),
#########################################
# f-bomb
#########################################
# clean up script...
(re.compile(r'(m[OoUu]th[AaEe]r?)?fuck', re.I), 'zxsa', keep_case),
# clean up script...
(re.compile(r'(m[OoUu]th[AaEe]r?)?fook', re.I), 'zxsa', keep_case),
# f yourself
(re.compile(r'zxsa[\W]?yourself', re.I), "kill yourself", first_case),
# cluster f
(re.compile(r'cluster[\W]?zxsa', re.I), "massive failure", first_case),
# f your
(re.compile(r'zxsa[\W]?your', re.I), "bother your", first_case),
# f you
(re.compile(r'(?<!the[\W])zxsa[\W]?you', re.I), "forget you", first_case),
# you f up/with
(re.compile(r'(?<=you[\W])zxsa(?=[\W][UuWw])', re.I), "mess", first_case),
# f's
(re.compile(r'zxsas(?=\W(around|with|on\b|up\b|over|under|through))',
re.U+re.I), "messes", first_case),
# f'in
(re.compile(r'zxsa(?=(in[^\s]?|s)?\W(around|with|on\b|up\b|over|under|through))',
re.U+re.I), "mess", first_case),
# f'ing A
(re.compile(r'zxsain[^\s]? a\b', re.U+re.I), "unbelievable", first_case),
(re.compile(r' (zxsain[^\s]?(?: well)?)(\W*.)',
re.U+re.I), "", drop_first_match),
(re.compile(r'(zxsain[^\s]? (?:well)?)(\W*.)',
re.U+re.I), "", drop_first_match),
(re.compile(r'zxsain[^\s]?', re.U+re.I), "frigging", keep_case),
# f'er
(re.compile(r'zxsaer', re.I), "idiot", keep_case),
# f'it
(re.compile(r'zxsa\W?it', re.I), "phoo", keep_case),
# f your/his/her/etc
(re.compile(
r'zxsa(?=(ed)?\W(your|our|her|his|us|this|that|the\b|their|those|these|them|[^\s]em|for|a\b))', re.U+re.I), "harass", keep_case),
# f'ed
(re.compile(r'zxsaed', re.I), "messed", keep_case),
# f the
(re.compile(r'zxsa(?=[\W]the)', re.I), "forget", keep_case),
# the f
(re.compile(r'(?<=the[\W])zxsa\b', re.I), "heck", keep_case),
# verb
(re.compile(r'zxsa(?=\W(around|with|on\b|up\b|over|under|through))', re.I),
"mess", first_case),
(re.compile(r'(?<=to\W)zxsa', re.I), "mess", first_case),
# f, f ups
(re.compile(r'zxsa(\W?up)?', re.I), "idiot", keep_case),
#########################################
# dick
#########################################
# dick around
(re.compile(r'dick(?=(in[^\s])?\W(around|with|on\b|up\b|over|under|through))',
re.U+re.I), "mess", first_case),
# dickin['/g]
(re.compile(r'dick(?=(in[^\s][^o]))', re.U+re.I), "mess", keep_case),
#dickweed, dickhead
(re.compile(r'dick[WwHh]e[AaEe]d', re.I), "jerk", keep_case),
# know dick
(re.compile(r'(?<=[Kk]now )dick'), "squat", keep_case),
# dick on its own (toe is just sort of random...), not bird dickcissel
(re.compile(r'\bdick\b(?!-ciss)'), "toe", keep_case),
#########################################
# bastard
#########################################
(re.compile(r'\bbastard', re.I), "mongrel", keep_case),
#########################################
# hell
#########################################
# hellhound
(re.compile(r'\bhell\W?hound', re.I), 'demonhound', keep_case),
# hell-word (not helldiver bird)
(re.compile(r'\bhell(?=-[^oO])(?!-diver)', re.I), 'demon', keep_case),
# hell's bells
(re.compile(r'\bhell.{0,4}s?\W?bells?', re.I), 'by golly', keep_case),
# hell with
(re.compile(r'(to|the)\Whell\Wwith', re.I), 'forget', keep_case),
(re.compile(r'\bhell(?=\Wwith)', re.I), 'heck', keep_case),
# beats the hell out of
(re.compile(r'beats\Wthe\Whell\Wout\Wof', re.I), 'beats', keep_case),
# to hell
(re.compile(r'(?<=\bto\W)hell\b', re.I), 'perdition', keep_case),
# some hell
(re.compile(r'(?<=some\W)hell\b', re.I), 'trouble', keep_case),
# give/gave hell
(re.compile(r'(g[IiAa]ve.{0,7}\W)hell\b(?!\Wof)',
re.I), r'\1trouble', False),
# raise/raising hell
(re.compile(r'(rais[IiEe].{0,10}\W)hell\b', re.I), r'\1trouble', False),
#chance in hell
(re.compile(r'(?<=chance)( in) hell\b(\W*.)', re.I), '*removed*', drop_first_match),
#burn in hell
(re.compile(r'(?<=burn)( in) hell\b(\W*.)', re.I), '*removed*', drop_first_match),
# living hell
(re.compile(r'(?<=living\W)hell\b', re.I), 'prison', keep_case),
# for/etc the hell
(re.compile(r'(?<=(..for)\Wthe\W)hell\b', re.I), 'heck', keep_case),
# what the hell[.?!]
(re.compile(r'what\Wthe\Whell(?=[\.?!\,])',
re.I), 'what the heck', keep_case),
# (in) the hell
(re.compile(
r'(?: in)? (the)\Whell(?=[ \.?!\,])(?! in)(?! your)(?! out)(?! I\b)(?! of\b)(\W*.)', re.I), '*removed*', drop_first_match),
(re.compile(r'(?:in\W)?(the)\W+hell (?!in)(?!your)(?!out)(?!I\b)(?!of\b)(\W*.)',
re.I), '*removed*', drop_first_match),
#(re.compile(r'(?:\Win)?\W(the)\Whell\b(?=[ \.?!\,])(?! in)(\W*.)',re.I),'*removed*',drop_first_match),
# what/how/whatever/etc the hell
(re.compile(r'(?<=(..how|..for|where|.what|tever|..who)\Wthe\W)hell\b',
re.I), 'heck', keep_case),
# sure/busy/etc. as hell
(re.compile(r'(?<!known)( as) hell\b(\W*.)', re.I), '', drop_first_match),
# helluva
(re.compile(r'\bhelluva', re.I), 'heck of a', keep_case),
#way in hell
(re.compile(r'(?<=way) (in) hell\b(\W*.)', re.I), '', drop_first_match),
#what in hell
(re.compile(r'(?<=what) (in) hell\b(\W*.)', re.I), '', drop_first_match),
# but hell
(re.compile(r'(?<=but )hell\b', re.I), 'heck', keep_case),
# to be hell
(re.compile(r'(?<=to be )hell\b', re.I), 'terrible', keep_case),
# is/it's hell
(re.compile(r'(?<=is )hell\b', re.I), 'perdition', keep_case),
(re.compile(r'(?<=it[^\s]s )hell\b', re.U+re.I), 'perdition', keep_case),
#Aw, hell
(re.compile(r'(?<=Aw, )hell\b', re.I), 'heck', keep_case),
# catch hell
(re.compile(r'catch hell\b', re.I), 'get in trouble', keep_case),
(re.compile(r'caught hell\b', re.I), 'got in trouble', keep_case),
# as hell
(re.compile(r'sure as hell[ \,]', re.I), 'for sure', keep_case),
(re.compile(r'ed as hell\b', re.I), 'ed as could be', keep_case),
(re.compile(r'\bas hell[ \,]', re.I), 'as could be', keep_case),
# of hell
(re.compile(r'\bof hell\b', re.I), 'of torture', keep_case),
# all hell
(re.compile(r'\ball hell\b', re.I), 'all perdition', keep_case),
# hell was
(re.compile(r'\bhell(?= was)', re.I), 'heck', keep_case),
# hell to pay
(re.compile(r'\bhell(?= to pay)', re.I), 'heck', keep_case),
# bloody hell
(re.compile(r'(?<=bloody.)hell\b', re.I), 'heck', keep_case),
# dang hell
(re.compile(r'(?<=dang.)hell\b', re.I), 'heck', keep_case),
# like hell
(re.compile(r'(?<=(..look|looked|..hurt) )like hell\b', re.I), 'really bad', keep_case),
(re.compile(r'(?<=felt )like hell\b', re.I), 'like garbage', keep_case),
(re.compile(r'L[Ii][Kk][Ee]\W[Hh][Ee][Ll][Ll]'),
'not a chance', keep_case),
(re.compile(r'l[Ii][Kk][Ee]\W[Hh][Ee][Ll][Ll]'), 'like mad', keep_case),
# The hell I
(re.compile(r'the\Whell\WI\b', re.I), 'the heck I', keep_case),
# hell of/out/off ...
(re.compile(r'\bhell(?=\W(of\W|out|off\b|do\W|are\b))', re.I), 'heck', keep_case),
# hellish
(re.compile(r'\bhellish', re.I), 'unpleasant', keep_case),
# this/real hell (not followed by ?)
(re.compile(r'(?<=(this|real)\W)hell(\W?hole|\W?pit)?(?!\?)', re.I), 'pit', keep_case),
# hell's
(re.compile(r'\bhell[^\s]s', re.U+re.I), 'perditions\'s', keep_case),
# interjection hell (preceded by . or " or --, etc.; followed by , or !)
(re.compile(r'(?<=([\.?!,]\W\W|..\"|..”|..“|.\W\W))hell(?=[,!])',
re.U+re.I), 'heck', keep_case),
# >hell< shows up in html with italics or emphasis
(re.compile(r'\>hell\<', re.U+re.I), '>perdition<', keep_case),
]
#+ ass_list + lord_list
DEBUG = True
def language_check(text):
ret_val = re_list + lord_list
# Determine if this book is likely to take Lord's name in vain
if re.search("(for Christ's sake!|Holy Christ!|Holy Jesus!|for God's sake!|God almighty!|goddamn|fuck)", text, re.I):
if DEBUG:
print("Looks like book uses Lord's name in vain")
ret_val += vain_lord_list
else:
if DEBUG:
print("Looks like book does not use Lord's name in vain")
# Ass has two very different contexts. Guess which to use.
if re.search("(dumbass|asshole|smart ass|kick ass|ass kick|ass handed|badass|cover.{0,5}ass)", text):
ret_val += dirty_a_list
if DEBUG:
print("Looks like book does not need the donkey treatment")
else:
ret_val += clean_a_list
if DEBUG:
print("Looks like book calls donkeys asses")
# open('/tmp/dump.txt','w').write(text)
return ret_val
'''
from functools import partial
import codecs
text = codecs.open('bad.txt', encoding='utf-8').read()
#if DEBUG:
# print(text)
# print("-"*40)
# print("-"*40)
output = ""
replacement_list = language_check(text)
output = ""
for line in text.split("\n"):
#Go through all elements of replacement_list
for search,sub,pcase in replacement_list:
if pcase: # Preserve case
line = search.sub(partial(pcase,sub),line)
else: # Don't preserve case
line = search.sub(sub,line)
output += line + "\n"
#if DEBUG:
# print(output)
codecs.open('clensed.txt','w', encoding='utf-8').write(output)
'''
| [
"[email protected]"
]
| |
28a451889380139994d19d41449f1024a1657d39 | 6ff85b80c6fe1b3ad5416a304b93551a5e80de10 | /Python/Typing/ConvertingToInt.py | dc7abaeba61d61d2b38912ed04fadf88d3d3f1db | [
"MIT"
]
| permissive | maniero/SOpt | c600cc2333e0a47ce013be3516bbb8080502ff2a | 5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3 | refs/heads/master | 2023-08-10T16:48:46.058739 | 2023-08-10T13:42:17 | 2023-08-10T13:42:17 | 78,631,930 | 1,002 | 136 | MIT | 2023-01-28T12:10:01 | 2017-01-11T11:19:24 | C# | UTF-8 | Python | false | false | 183 | py | print(int('12\n'))
print(int('\n123'))  # 123 - surrounding whitespace (including '\n') is ignored
print(int('1234 '))  # 1234
print(int(' 1235'))  # 1235
print(int('1236c'))  # ValueError: invalid literal for int() with base 10
print(int('a1237'))  # ValueError (never reached: the line above already raises)
print(int('123 8'))  # ValueError: interior whitespace is not stripped
#https://pt.stackoverflow.com/q/347387/101
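# A defensive variant (a sketch, not part of the original answer): guard the
# conversion when invalid input should yield None instead of an exception.
def to_int_or_none(s):
    try:
        return int(s)
    except ValueError:
        return None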
| [
"[email protected]"
]
| |
da335c0cd13edba4b65ecf5d0d102ff3cec047ba | 01faa1318b24e2b0f0dd63abe1daa6df11f1e220 | /backend/smiles_21366/wsgi.py | 92397821d7f408a104036345985dc426681dbfbe | []
| no_license | crowdbotics-apps/smiles-21366 | 8c86f08b7fb10ec77dc4ba9bc09192b63443cba2 | 6d57fe1e1f9c5fd7a2a806734556638b1f536015 | refs/heads/master | 2022-12-28T17:24:06.222261 | 2020-10-11T18:00:08 | 2020-10-11T18:00:08 | 303,180,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for smiles_21366 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smiles_21366.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
428433ab6774a930dd36d3c9dde55ce6668ba730 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_358/ch18_2020_09_30_10_56_20_265107.py | 29f3ce3fd91883bd0ce98bda1f5b2b3cadb47227 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py |
def verifica_idade(x):
    if x > 21:
        print('allowed in the USA and BRAZIL')
    elif x > 18:
        print('allowed in BRAZIL')
    else:
        print('not allowed')
    return x
| [
"[email protected]"
]
| |
d579b30d52e69dc20657216b704e6ec994f8b5c6 | 8904b28f9a0e4d7c2c3e4e1e67754464de7fc8ba | /Search/Find Peak Element.py | bad39f8d5f6bf43897cf2426a30fa35d740ce611 | []
| no_license | Chriszhangmw/LeetCode | 0b3f58470a51c360f5480df09251235faf3e836f | efe1d09e55812f8cb163e12ad333d134fadbb61a | refs/heads/master | 2020-08-04T00:43:11.856254 | 2020-01-29T22:23:57 | 2020-01-29T22:23:57 | 211,940,761 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | '''
A peak element is an element that is greater than its neighbors.
Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that nums[-1] = nums[n] = -∞.
Example 1:
Input: nums = [1,2,3,1]
Output: 2
Explanation: 3 is a peak element and your function should return the index number 2.
Example 2:
Input: nums = [1,2,1,3,5,6,4]
Output: 1 or 5
Explanation: Your function can return either index number 1 where the peak element is 2,
or index number 5 where the peak element is 6.
'''
def method(nums):
    # Linear scan over the interior elements, printing every peak index found.
    left = 1
    res = []
    while left < len(nums) - 1:
        if nums[left] < nums[left - 1]:
            left += 1
            continue
        if nums[left] > nums[left + 1]:
            res.append(left)
            left += 2
        else:
            left += 1
    print(res)
def method2(nums):
    # Binary search: step toward the larger neighbour, which must lead to a
    # peak, so only O(log n) comparisons are needed.
    left, right = 0, len(nums) - 1
    while left < right:
        mid = left + (right - left) // 2
        if nums[mid] > nums[mid + 1]:
            right = mid
        else:
            left = mid + 1
    return left
nums = [1,2,1,3,5,6,4]
print(method2(nums))
| [
"[email protected]"
]
| |
1555a5a3a6a222ed065c23732481e23d748ace99 | a34c3a310afaffbc5b028d85baf8597c58e1c5b9 | /quantstats/version.py | 3c56ef3e9b8017eddf7b253b8fcde3d5f05d27c8 | [
"Apache-2.0"
]
| permissive | blackcherry88/quantstats | 15e4d8025935d4b7cb4f42a1514ddb5873fb8d93 | 7b33bf45bb6e9985ff73d507c895d7ac7bde1d8d | refs/heads/main | 2023-08-13T03:19:50.265058 | 2021-10-12T23:32:36 | 2021-10-12T23:32:36 | 416,522,542 | 0 | 0 | Apache-2.0 | 2021-10-12T23:00:33 | 2021-10-12T23:00:33 | null | UTF-8 | Python | false | false | 19 | py | version = "0.0.43"
| [
"[email protected]"
]
| |
78895d70380f80f6cdf233a4227ecd16e1366f47 | dc99fa1a0058aae3f765d2c01c3eefecc5ae7388 | /src/framat/__init__.py | cc7655655bb6224308bc5b4cb14062eb94c941de | [
"Apache-2.0"
]
| permissive | Corentin1985/framat | a4cbeb47fa3573683907b6a6cb684c75aeec60d8 | 4177a95b4ed8d95a8330365e32ca13ac9ef24640 | refs/heads/master | 2023-05-08T23:11:01.516954 | 2021-06-03T18:33:20 | 2021-06-03T18:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | MODULE_NAME = 'FramAT'
from ._log import _plogger as log
from ._model import Model, Builtin
| [
"[email protected]"
]
| |
eaa9965c1192d42b18600bdb6f41f2ae68fe3fcf | 817ff801938d25776b2564b3087c8a3c674da1a7 | /NUP153_Min_One/WT_Minimization/WT_5.py | d243521c696a6396a6864e7e0ae3d14778c5c4c7 | []
| no_license | yanghaobojordan/HIV1-Capsid | b22e21a9ad530ae11f128f409e298c5ab68871ee | f44f04dc9886e660c1fe870936c48e0e5bb5adc6 | refs/heads/main | 2023-04-09T01:27:26.626676 | 2021-04-23T18:17:07 | 2021-04-23T18:17:07 | 360,968,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,576 | py | from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
def main():
filename=sys.argv[1]
pose=pose_from_pdb(filename)
test=Pose()
test.assign(pose)
scorefxn=get_fa_scorefxn()
dumpfile = 'Folding_WT_5.pdb'
txtfile = 'Folding_WT_5.txt'
newfile = open(txtfile, "w")
newfile.write(str(scorefxn(test)))
newfile.write('\n')
kT = 1
mc = MonteCarlo(test, scorefxn, kT)
min_mover = MinMover()
mm = MoveMap()
mm.set_bb(True)
mm.set_chi(True)
min_mover.movemap(mm)
min_mover.score_function(scorefxn)
min_mover.min_type("dfpmin")
min_mover.tolerance(0.001)
task_pack=standard_packer_task(test)
task_pack.restrict_to_repacking()
task_pack.or_include_current(True)
pack_mover=PackRotamersMover(scorefxn, task_pack)
for i in range(20):
pack_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Repacking Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
for i in range(1):
min_mover.apply(test)
mc.boltzmann(test)
newfile.write(str(i))
newfile.write(' ')
newfile.write(str(scorefxn(test)))
newfile.write(' ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
mc.recover_low(test)
print ('Minimization Complete')
print ('Lowest Score ', scorefxn(test))
print (mc.show_scores())
print (mc.show_counters())
print (mc.show_state())
newfile.write(str(scorefxn(test)))
newfile.write('\n')
newfile.write('RMSD ')
newfile.write(str(CA_rmsd(pose, test)))
newfile.write('\n')
newfile.close()
test.dump_pdb(dumpfile)
if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| |
867a465d139fd1c55cdc38f9b43be2ff95796c18 | f8666599b83d34c861651861cc7db5b3c434fc87 | /plotly/graph_objs/scatterternary/__init__.py | 661912d2d40112f9b5bd08423f0728686cc78db3 | [
"MIT"
]
| permissive | mode/plotly.py | 8b66806e88c9f1820d478bab726f0bea81884432 | c5a9ac386a40df2816e6c13264dadf14299401e4 | refs/heads/master | 2022-08-26T00:07:35.376636 | 2018-09-26T19:08:54 | 2018-09-26T19:19:31 | 60,372,968 | 1 | 1 | MIT | 2019-11-13T23:03:22 | 2016-06-03T19:34:55 | Python | UTF-8 | Python | false | false | 434 | py | from ._unselected import Unselected
from plotly.graph_objs.scatterternary import unselected
from ._textfont import Textfont
from ._stream import Stream
from ._selected import Selected
from plotly.graph_objs.scatterternary import selected
from ._marker import Marker
from plotly.graph_objs.scatterternary import marker
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.scatterternary import hoverlabel
| [
"[email protected]"
]
| |
0d3af189c999c81966b68412047b30a061b58994 | b3b066a566618f49ae83c81e963543a9b956a00a | /Unsupervised Learning in Python/02_Visualization with hierarchical clustering and t-SNE/08_t-SNE visualization of grain dataset.py | a7fd796bfaa4ac6fcdb158ffa94e3376e19f2bff | []
| no_license | ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020 | 666c4129c3f0b5d759b511529a365dfd36c12f1a | f3d20b788c8ef766e7c86c817e6c2ef7b69520b8 | refs/heads/master | 2022-12-22T21:09:13.955273 | 2020-09-30T01:16:05 | 2020-09-30T01:16:05 | 289,991,534 | 2 | 0 | null | 2020-08-24T17:15:43 | 2020-08-24T17:15:42 | null | UTF-8 | Python | false | false | 1,603 | py | '''
t-SNE visualization of grain dataset
In the video, you saw t-SNE applied to the iris dataset. In this exercise, you'll apply t-SNE to the grain samples data and inspect the resulting t-SNE features using a scatter plot. You are given an array samples of grain samples and a list variety_numbers giving the variety number of each grain sample.
INSTRUCTIONS
100XP
Import TSNE from sklearn.manifold.
Create a TSNE instance called model with learning_rate=200.
Apply the .fit_transform() method of model to samples. Assign the result to tsne_features.
Select the column 0 of tsne_features. Assign the result to xs.
Select the column 1 of tsne_features. Assign the result to ys.
Make a scatter plot of the t-SNE features xs and ys. To color the points by the grain variety, specify the additional keyword argument c=variety_numbers.
'''
# Import TSNE
from sklearn.manifold import TSNE
# Create a TSNE instance: model
model = TSNE(learning_rate=200)
# Apply fit_transform to samples: tsne_features
tsne_features = model.fit_transform(samples)
# Select the 0th feature: xs
xs = tsne_features[:,0]
# Select the 1st feature: ys
ys = tsne_features[:,1]
# Scatter plot, coloring by variety_numbers
plt.scatter(xs, ys, c=variety_numbers)
plt.show()
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================# | [
"Your-Email"
]
| Your-Email |
df6e085b85aea5a18b3c8ad935106b7ab1fc2768 | 9b41bd4d829b7b4b5fc7ea2f375089793f34beb0 | /lib/googlecloudsdk/core/http_proxy.py | 9d0ab19cc662888bb1f6fb6514fed07f85a5da0e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | eyalev/gcloud | 20a596f9cbf7873eaea652a0b2ad080678f1598c | 421ee63a0a6d90a097e8530d53a6df5b905a0205 | refs/heads/master | 2020-12-25T14:48:11.142544 | 2016-06-22T08:43:20 | 2016-06-22T08:43:20 | 61,703,392 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to get an http proxy information."""
import urllib
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
import httplib2
def GetDefaultProxyInfo(method='http'):
"""Get ProxyInfo from environment.
This function is meant to mimic httplib2.proxy_info_from_environment, but get
the proxy information from urllib.getproxies instead. urllib can also get
proxy information from Windows Internet Explorer settings or MacOSX framework
SystemConfiguration.
Args:
method: protocol string
Returns:
httplib2 ProxyInfo object or None
"""
proxy_dict = urllib.getproxies()
proxy_url = proxy_dict.get(method, None)
if not proxy_url:
return None
pi = httplib2.proxy_info_from_url(proxy_url, method)
# The ProxyInfo object has a bypass_host method that takes the hostname as an
# argument and it returns 1 or 0 based on if the hostname should bypass the
# proxy or not. We could either build the bypassed hosts list and pass it to
# pi.bypass_hosts, or we can just replace the method with the function in
# urllib, and completely mimic urllib logic. We do the latter.
# Since the urllib.proxy_bypass _function_ (no self arg) is not "bound" to the
# class instance, it doesn't receive the self arg when its called. We don't
# need to "bind" it via types.MethodType(urllib.proxy_bypass, pi).
pi.bypass_host = urllib.proxy_bypass
return pi
def GetProxyProperties():
"""Get proxy information from cloud sdk properties in dictionary form."""
proxy_type_map = config.GetProxyTypeMap()
proxy_type = properties.VALUES.proxy.proxy_type.Get()
proxy_address = properties.VALUES.proxy.address.Get()
proxy_port = properties.VALUES.proxy.port.GetInt()
proxy_prop_set = len(filter(None, (proxy_type, proxy_address, proxy_port)))
if proxy_prop_set > 0 and proxy_prop_set != 3:
raise properties.InvalidValueError(
'Please set all or none of the following properties: '
'proxy/type, proxy/address and proxy/port')
if not proxy_prop_set:
return {}
proxy_user = properties.VALUES.proxy.username.Get()
proxy_pass = properties.VALUES.proxy.password.Get()
return {
'proxy_type': proxy_type_map[proxy_type],
'proxy_address': proxy_address,
'proxy_port': proxy_port,
'proxy_user': proxy_user,
'proxy_pass': proxy_pass,
}
def GetHttpProxyInfo():
"""Get ProxyInfo object or callable to be passed to httplib2.Http.
httplib2.Http can issue requests through a proxy. That information is passed
via either ProxyInfo objects or a callback function that receives the protocol
the request is made on and returns the proxy address. If users set the gcloud
properties, we create a ProxyInfo object with those settings. If users do not
set gcloud properties, we return a function that can be called to get default
settings.
Returns:
httplib2 ProxyInfo object or callable function that returns a Proxy Info
object given the protocol (http, https)
"""
proxy_settings = GetProxyProperties()
if proxy_settings:
return httplib2.ProxyInfo(
proxy_settings['proxy_type'],
proxy_settings['proxy_address'],
proxy_settings['proxy_port'],
proxy_user=proxy_settings['proxy_user'],
proxy_pass=proxy_settings['proxy_pass'])
return GetDefaultProxyInfo
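# Hedged usage sketch: httplib2.Http accepts either a ProxyInfo instance or a
# callable taking the scheme, so the return value can be passed straight in.
#
#   import httplib2
#   http_client = httplib2.Http(proxy_info=GetHttpProxyInfo())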
| [
"[email protected]"
]
| |
a118a1e83e9def0da9db511d4c9133740f9a5b18 | 221cada2354556fbb969f25ddd3079542904ef5d | /Leetcode/794.py | 3dbb0c842ec25e6a2dc1adf25ee07a5470c2690e | []
| no_license | syzdemonhunter/Coding_Exercises | 4b09e1a7dad7d1e3d4d4ae27e6e006732ffdcb1d | ca71572677d2b2a2aed94bb60d6ec88cc486a7f3 | refs/heads/master | 2020-05-24T11:19:35.019543 | 2019-11-22T20:08:32 | 2019-11-22T20:08:32 | 187,245,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | # https://leetcode.com/problems/valid-tic-tac-toe-state/
# T: O(n)
# S: O(1)
class Solution:
def isWin(self, board, c):
for i in range(3): # Row check
if board[i] == c*3:
return True
for i in range(3): # Column check
if board[0][i] == c and board[1][i] == c and board[2][i] == c:
return True
if board[0][0] == c and board[1][1] == c and board[2][2] == c or \
board[0][2] == c and board[1][1] == c and board[2][0] == c: # Diagonal check
return True
return False
def validTicTacToe(self, board: List[str]) -> bool:
count_X = count_O = 0
for i in range(3):
for j in range(3):
count_X += 1 if board[i][j] == 'X' else 0
count_O += 1 if board[i][j] == 'O' else 0
if count_O > count_X or count_X > count_O + 1:
return False
if count_O == count_X and self.isWin(board, 'X') or \
count_X == count_O + 1 and self.isWin(board, 'O'):
return False
return True
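# Quick sanity checks (hypothetical boards, not part of the original solution):
#
#   s = Solution()
#   s.validTicTacToe(["O  ", "   ", "   "])  # False: O cannot move first
#   s.validTicTacToe(["XOX", "O O", "XOX"])  # True: legal unfinished game
#   s.validTicTacToe(["XXX", "OOO", "   "])  # False: play stops once X wins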
| [
"[email protected]"
]
| |
07dd7e37fa0fb096b32c2870025f984da525a821 | b68aa412b36a13df9b08ff7d736e1a0803afa3d9 | /astrobject/instruments/catalogues.py | 2fdde997d218a6b8c77dec062a4ed5f05da139bb | []
| no_license | ufeindt/astrobject | 320b7837e1ae29e562f6f58e8287dcea9df4581a | f60824913a3bfe7fdedb68794dfb7fc69e28569b | refs/heads/master | 2021-01-18T02:55:28.487364 | 2016-12-02T15:28:11 | 2016-12-02T15:28:11 | 46,552,676 | 0 | 0 | null | 2015-11-20T09:33:58 | 2015-11-20T09:33:58 | null | UTF-8 | Python | false | false | 15,103 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from sncosmo import get_bandpass
from .baseinstrument import Catalogue, coordinates,units
# -- here load all the object that could be parsed
from ..utils.tools import kwargs_update
from ..utils.decorators import _autogen_docstring_inheritance, make_method
# ============================= #
# #
# Quick Catalogue Study #
# #
# ============================= #
@make_method(Catalogue)
def stellar_density( catalogue, mask=None,
angdist=0.1*units.degree):
""" get the stellar density of the catalogue
Parameters
----------
catalogue: [Catalogue]
the catalogue for which you want the stellar density.
mask: [bool-array] -optional-
boolean array for instance generated by the `get_mask` catalogue method.
By default, (mask=None) the mask will be stars_only=True.
For instance, a catmag_mask could be a great idea.
Return
------
float
"""
mask = catalogue.get_mask(stars_only=True) if mask is None else mask
ra,dec = catalogue.get(["ra","dec"], mask= mask)
skyradec = coordinates.SkyCoord(ra=ra,dec=dec, unit="deg")
return np.bincount(skyradec.search_around_sky(skyradec,angdist)[0])
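# Sketch of a typical call (assumes network access; the coordinates and the
# fetch helper below are purely illustrative):
#
#   cat = fetch_sdss_catalogue("210.0 +54.3", "0.5d")
#   densities = stellar_density(cat, mask=cat.get_mask(stars_only=True))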
#################################
# #
# All Sky GAIA: Catalogue #
# #
#################################
def fetch_gaia_catalogue(center, radius, extracolumns=[], column_filters={}, **kwargs):
""" "query the gaia catalogue thought Vizier (I/337, DR1) using astroquery.
This function requieres an internet connection.
Parameters
----------
center: [string] 'ra dec'
position of the center of the catalogue to query.
radius: [string] 'value unit'
radius of the region to query. For instance '1d' means a
1 degree raduis
extracolumns: [list-of-string] -optional-
Add extra column from the V/139 catalogue that will be added to
the basic query (default: position, ID, object-type, magnitudes)
column_filters: [dict] -optional-
Selection criterium for the queried catalogue.
**kwargs goes to astroquery.vizier.Vizier
Returns
-------
SDSSCatalogue (child of Catalogue)
"""
try:
from astroquery import vizier
except:
raise ImportError("install astroquery. (pip install astroquery)")
# Basic Info
# --------------
columns = ["RA_ICRS","DE_ICRS","e_RA_ICRS","e_DE_ICRS","Source","Dup",
"o_<Gmag>","<FG>","e_<FG>","<Gmag>","Var"]
columns = columns+extracolumns
column_quality = {} # Nothing there yet
c = vizier.Vizier(catalog="I/337/gaia", columns=columns,
column_filters=kwargs_update(column_quality,**column_filters),
**kwargs)
c.ROW_LIMIT = "unlimited"
t = c.query_region(center,radius=radius).values()[0]
cat = GAIACatalogue(empty=True)
cat.create(t.columns ,None,
key_ra="RA_ICRS",key_dec="DE_ICRS")
return cat
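# Illustrative call (requires an internet connection; coordinates are arbitrary):
#
#   gaia_cat = fetch_gaia_catalogue("150.0 +2.2", "0.2d")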
class GAIACatalogue( Catalogue ):
source_name = "Gaia"
def __init__(self, catalogue_file=None, empty=False,
key_mag="__Gmag_", key_magerr="__e_Gmag_",
key_ra=None, key_dec=None, **kwargs):
"""
"""
self.__build__(data_index=0,key_mag=key_mag,
key_magerr=key_magerr,key_id="Source",
key_ra=key_ra,key_dec=key_dec)
if empty:
return
if catalogue_file is not None:
self.load(catalogue_file,**kwargs)
@_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
def set_mag_keys(self,key_mag,key_magerr):
#
# add lbda def
#
super(GAIACatalogue,self).set_mag_keys(key_mag,key_magerr)
if "G" in key_mag:
self.lbda = 6730
#################################
# #
# BASIC SDSS: Catalogue #
# #
#################################
def fetch_sdss_catalogue(center, radius, extracolumns=[],column_filters={"rmag":"5..25"},**kwargs):
""" query online sdss-catalogue in Vizier (V/139, DR9) using astroquery.
This function requieres an internet connection.
Parameters
----------
center: [string] 'ra dec'
position of the center of the catalogue to query.
radius: [string] 'value unit'
radius of the region to query. For instance '1d' means a
1 degree raduis
extracolumns: [list-of-string] -optional-
Add extra column from the V/139 catalogue that will be added to
the basic query (default: position, ID, object-type, magnitudes)
column_filters: [dict] -optional-
Selection criterium for the queried catalogue.
**kwargs goes to astroquery.vizier.Vizier
Returns
-------
SDSSCatalogue (child of Catalogue)
"""
from .sdss import SDSS_INFO
try:
from astroquery import vizier
except:
raise ImportError("install astroquery. (pip install astroquery)")
# -----------
# - DL info
columns = ["cl","objID",#"SDSS9",
"RAJ2000","e_RAJ2000","DEJ2000","e_DEJ2000",
#"ObsDate","Q"#"mode",
]
for band in SDSS_INFO["bands"]:
columns.append("%smag"%band)
columns.append("e_%smag"%band)
columns = columns+extracolumns
column_quality = {"mode":"1","Q":"2.3"}
# - WARNING if discovered that some of the bandmag were missing if too many colums requested
c = vizier.Vizier(catalog="V/139", columns=columns,
column_filters=kwargs_update(column_quality,**column_filters),
**kwargs)
c.ROW_LIMIT = "unlimited"
#try:
t = c.query_region(center,radius=radius).values()[0]
#except :
# raise IOError("Error while querying the given coords. You might not have an internet connection")
cat = SDSSCatalogue(empty=True)
cat.create(t.columns,None,
key_class="cl",value_star=6,key_id="objID",
key_ra="RAJ2000",key_dec="DEJ2000")
return cat
# ------------------- #
# - SDSS CATALOGUE - #
# ------------------- #
class SDSSCatalogue( Catalogue ):
"""
"""
source_name = "SDSS"
def __init__(self, catalogue_file=None,empty=False,
value_star=6,key_mag=None,key_magerr=None,
key_ra=None,key_dec=None,**kwargs):
"""
"""
self.__build__(data_index=2,key_mag=key_mag,
key_magerr=key_magerr,key_id="objID",
key_ra=key_ra,key_dec=key_dec)
if empty:
return
if catalogue_file is not None:
self.load(catalogue_file,**kwargs)
self.set_starsid("cl",6)
@_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
def set_mag_keys(self,key_mag,key_magerr):
#
# add lbda def
#
super(SDSSCatalogue,self).set_mag_keys(key_mag,key_magerr)
if key_mag is not None:
bandpass = get_bandpass("sdss%s"%key_mag[0])
self.lbda = bandpass.wave_eff
#################################
# #
# BASIC 2MASS: Catalogue #
# #
#################################
def fetch_2mass_catalogue(center,radius,extracolumns=[],
column_filters={"Jmag":"5..30"},**kwargs):
""" query online 2mass-catalogue in Vizier (II/246) using astroquery.
This function requieres an internet connection.
Parameters
----------
center: [string] 'ra dec'
position of the center of the catalogue to query.
radius: [string] 'value unit'
radius of the region to query. For instance '1d' means a
1 degree raduis
extracolumns: [list-of-string] -optional-
Add extra column from the II/246 catalogue that will be added to
the basic query (default: position, ID, magnitudes)
column_filters: [dict] -optional-
Selection criterium for the queried catalogue.
**kwargs goes to astroquery.vizier.Vizier
Returns
-------
MASSCatalogue (child of Catalogue)
"""
try:
from astroquery import vizier
except:
raise ImportError("install astroquery. (pip install astroquery)")
# -----------
# - DL info
columns = ["2MASS",
"RAJ2000","DEJ2000",
]
for band in ["J","H","K"]:
columns.append("%smag"%band)
columns.append("e_%smag"%band)
columns = columns+extracolumns
# - WARNING if discovered that some of the bandmag were missing if too many colums requested
c = vizier.Vizier(catalog="II/246", columns=columns, column_filters=column_filters,
**kwargs)
c.ROW_LIMIT = 100000
try:
t = c.query_region(center,radius=radius).values()[0]
except:
raise IOError("Error while querying the given coords. You might not have an internet connection")
cat = MASSCatalogue(empty=True)
cat.create(t.columns,None,
key_class="PointSource",value_star=None,
key_ra="RAJ2000",key_dec="DEJ2000")
return cat
# ------------------- #
# - 2MASS CATALOGUE - #
# ------------------- #
class MASSCatalogue( Catalogue ):
"""
"""
source_name = "2MASS"
def __init__(self, catalogue_file=None,empty=False,
key_mag=None,key_magerr=None,key_ra=None,key_dec=None,**kwargs):
"""
"""
self.__build__(data_index=2,key_mag=key_mag,
key_magerr=key_magerr,
key_ra=key_ra,key_dec=key_dec)
if empty:
return
if catalogue_file is not None:
self.load(catalogue_file,**kwargs)
@_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
def set_mag_keys(self,key_mag,key_magerr):
#
# add lbda def
#
super(MASSCatalogue,self).set_mag_keys(key_mag,key_magerr)
if key_mag is not None:
if key_mag == "Jmag":
self.lbda = 12350
elif key_mag == "Hmag":
self.lbda = 16620
elif key_mag == "Kmag":
self.lbda = 21590
else:
raise ValueError("'%s' is not a recognized 2MASS band")
# ----------------------- #
# - CATALOGUE HACK - #
# ----------------------- #
@property
def mag(self):
if not self._is_keymag_set_(verbose=False):
print "No 'key_mag' defined. J band used by default. -> To change: set_mag_keys() "
self.set_mag_keys("Jmag","e_Jmag")
return super(MASSCatalogue,self).mag
# ------------------------------
# - All points are Point Sources
@property
def _objecttype(self):
print "All Loaded data are %s"%self._build_properties["key_class"]
return np.ones(self.nobjects)
@property
def starmask(self):
""" This will tell which of the datapoints is a star
Remark, you need to have defined key_class and value_star
in the __build_properties to be able to have access to this mask
==> In 2MASS PointSource catalogue, all data are stars
"""
return np.ones(self.nobjects_in_fov,dtype="bool") #not self.fovmask already in objecttype
#################################
# #
# BASIC WISE: Catalogue #
# #
#################################
def fetch_wise_catalogue(center, radius, extracolumns=[], column_filters={"Jmag":"5..30"}, **kwargs):
    """Query online wise-catalogue in Vizier (II/328) using astroquery.
    This function requires an internet connection.
Parameters
----------
center: [string] 'ra dec'
position of the center of the catalogue to query.
radius: [string] 'value unit'
radius of the region to query. For instance '1d' means a
1 degree raduis
extracolumns: [list-of-string] -optional-
Add extra column from the II/328 catalogue that will be added to
the basic query (default: position, ID, magnitudes)
column_filters: [dict] -optional-
Selection criterium for the queried catalogue.
**kwargs goes to astroquery.vizier.Vizier
Returns
-------
WISECatalogue (child of Catalogue)
"""
try:
from astroquery import vizier
except:
raise ImportError("install astroquery. (pip install astroquery)")
# -----------
# - DL info
columns = ["AllWISE","ID",
"RAJ2000","DEJ2000",
]
for band in ["J","H","K","W1","W2","W3","W4"]:
columns.append("%smag"%band)
columns.append("e_%smag"%band)
columns = columns+extracolumns
# - WARNING if discovered that some of the bandmag were missing if too many colums requested
c = vizier.Vizier(catalog="II/328", columns=columns, column_filters=column_filters,
**kwargs)
c.ROW_LIMIT = 100000
try:
t = c.query_region(center,radius=radius).values()[0]
except:
raise IOError("Error while querying the given coords. You might not have an internet connection")
cat = WISECatalogue(empty=True)
cat.create(t.columns,None,
key_class="ToBeDone",value_star=None,
key_ra="RAJ2000",key_dec="DEJ2000")
return cat
# ------------------- #
# - WISE CATALOGUE - #
# ------------------- #
class WISECatalogue( Catalogue ):
"""
"""
source_name = "WISE"
def __init__(self, catalogue_file=None,empty=False,
key_mag=None,key_magerr=None,key_ra=None,key_dec=None,**kwargs):
"""
"""
print "STAR vs. GALAXY PARSING NOT READY YET"
self.__build__(data_index=2,key_mag=key_mag,
key_magerr=key_magerr,
key_ra=key_ra,key_dec=key_dec)
if empty:
return
if catalogue_file is not None:
self.load(catalogue_file,**kwargs)
@_autogen_docstring_inheritance(Catalogue.set_mag_keys,"Catalogue.set_mag_keys")
def set_mag_keys(self,key_mag,key_magerr):
#
# add lbda def
#
super(WISECatalogue,self).set_mag_keys(key_mag,key_magerr)
if key_mag is not None:
self.lbda = "TO BE DEFINED"
@property
def mag(self):
if not self._is_keymag_set_(verbose=False):
print "No 'key_mag' defined. W1 band used by default. -> To change: set_mag_keys() "
self.set_mag_keys("W1mag","e_W1mag")
return super(WISECatalogue,self).mag
| [
"[email protected]"
]
| |
8da8980b99393e3ccc23f3ef361ffcdbb41504a7 | c47c254ca476c1f9969f8f3e89acb4d0618c14b6 | /datasets/tensorflow-1.0.1/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py | 392309d543ed93d5cf2d53a76005052e6b3839ae | [
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | yijunyu/demo | 5cf4e83f585254a28b31c4a050630b8f661a90c8 | 11c0c84081a3181494b9c469bda42a313c457ad2 | refs/heads/master | 2023-02-22T09:00:12.023083 | 2021-01-25T16:51:40 | 2021-01-25T16:51:40 | 175,939,000 | 3 | 6 | BSD-2-Clause | 2021-01-09T23:00:12 | 2019-03-16T07:13:00 | C | UTF-8 | Python | false | false | 6,286 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data stored in a variable.
Run using bazel:
bazel run -c opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded_var
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded_var.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def run_training():
"""Train MNIST for a number of epochs."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
with tf.name_scope('input'):
# Input data
images_initializer = tf.placeholder(
dtype=data_sets.train.images.dtype,
shape=data_sets.train.images.shape)
labels_initializer = tf.placeholder(
dtype=data_sets.train.labels.dtype,
shape=data_sets.train.labels.shape)
input_images = tf.Variable(
images_initializer, trainable=False, collections=[])
input_labels = tf.Variable(
labels_initializer, trainable=False, collections=[])
image, label = tf.train.slice_input_producer(
[input_images, input_labels], num_epochs=FLAGS.num_epochs)
label = tf.cast(label, tf.int32)
images, labels = tf.train.batch(
[image, label], batch_size=FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create the op for initializing variables.
init_op = tf.global_variables_initializer()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init_op)
sess.run(input_images.initializer,
feed_dict={images_initializer: data_sets.train.images})
sess.run(input_labels.initializer,
feed_dict={labels_initializer: data_sets.train.labels})
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# And then after everything is built, start the training loop.
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
# Update the events file.
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
step += 1
# Save a checkpoint periodically.
if (step + 1) % 1000 == 0:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--num_epochs',
type=int,
default=2,
help='Number of epochs to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--train_dir',
type=str,
default='/tmp/data',
help='Directory to put the training data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"[email protected]"
]
| |
43677b98b8f8f7e0e4283394cf75e03f9aa196b2 | 1358257d86019a9232dba7571fedbfe938352f9f | /LibraryManagement/apps.py | 9d4149239887f6e7f3e0ea534744cfb8d7c6cb98 | []
| no_license | adeelehsan/LibraryManagementSystem | e7de727defe1d00c9332254bb0ef64d28a7fb2d3 | 68e5be7fb5a26607eed62dd67a9c38bc3b91bf97 | refs/heads/master | 2021-01-01T04:09:08.085846 | 2017-07-16T13:20:13 | 2017-07-16T13:20:13 | 97,133,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from __future__ import unicode_literals
from django.apps import AppConfig
class LibrarymanagementConfig(AppConfig):
name = 'LibraryManagement'
| [
"[email protected]"
]
| |
3b65b388d53c466d7a621dfd9a085f080b406564 | 05546a7729d0cbf6f4ae697bad7aec235d3d9504 | /www/judge/languages/rust.py | 306d1e8eff9b57faaf4f7ef5e2594fc95089451c | []
| no_license | riceluxs1t/algospot | 60c7b3ca6c1fa8bbdf5220b78496c0bf9969174f | 557bedd0031ff3e726578fbd899fa71435abc31a | refs/heads/master | 2021-01-19T03:02:20.714594 | 2016-12-25T04:26:09 | 2016-12-25T04:26:09 | 79,389,643 | 0 | 1 | null | 2017-01-18T22:08:06 | 2017-01-18T22:08:06 | null | UTF-8 | Python | false | false | 1,425 | py | import subprocess
from django.conf import settings
def system(cmd):
return subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
COMPILE_MEMORY_LIMIT = settings.JUDGE_SETTINGS['MINMEMORYSIZE']
LANGUAGE = "Rust"
EXT = "rs"
VERSION = system(["rustc", "--version"])[0].split("\n")[0]
ADDITIONAL_FILES = []
def setup(sandbox, source_code):
sandbox.write_file(source_code, "submission.rs")
compiled = sandbox.run("rustc -O submission.rs -o a.out",
stdout=".stdout",
stderr=".stderr",
time_limit=10,
memory_limit=COMPILE_MEMORY_LIMIT)
if compiled.split()[0] != "OK":
return {"status": "error",
"message": sandbox.read_file(".stderr")}
#sandbox.run("rm submission.cpp .stdin .stderr")
return {"status": "ok"}
def run(sandbox, input_file, time_limit, memory_limit):
result = sandbox.run("./a.out", stdin=input_file,
time_limit=time_limit,
memory_limit=memory_limit,
stdout=".stdout",
stderr=".stderr")
toks = result.split()
if toks[0] != "OK":
return {"status": "fail", "message": result, "verdict": toks[0] }
return {"status": "ok", "time": toks[1], "memory": toks[2], "output": ".stdout"}
| [
"[email protected]"
]
| |
cd7cf82c026b80ab657e904cee8cfe89525b9563 | d1a31b2558ffa51546facba6d9e2dc47ff88d396 | /GenericViews/settings.py | cb1e219360a7a80d0f819e587dd5e2d55fc3fbb2 | []
| no_license | syedarfa459/DjangoCBVs | d65ac91143c2cdb406963a32c6bf854a1277edad | 03133f18a138fc21853cb0e73c0531035ea8e49c | refs/heads/master | 2023-01-07T19:53:50.436757 | 2020-11-06T16:20:10 | 2020-11-06T16:20:10 | 310,645,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | """
Django settings for GenericViews project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p=rtuk#3h5e_^i8w5m@c@a9rl-_vqhm3mp0&z2g5@p)slr_h-f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'CreateViewDemo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'GenericViews.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GenericViews.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
c8a6531bad1d22622b253c30712ab63535b7ba14 | b254f030cefdddbabf6868b1d5d9a784aba88b2c | /tutorial/porting-multi-modules/mypreprocessor2.py | 1f03bc84da1e1046a8b8cc39dd23c9ed4510df0f | [
"Apache-2.0"
]
| permissive | gnes-ai/hub | 84220c37eea388fd57c914e86007469cd126d371 | 94cff9011ff6447ce1af51c5307813ab6fbbb156 | refs/heads/master | 2020-07-05T13:23:59.573400 | 2019-10-24T05:10:12 | 2019-10-24T05:10:12 | 202,658,837 | 38 | 11 | NOASSERTION | 2019-10-24T05:10:13 | 2019-08-16T04:33:52 | Python | UTF-8 | Python | false | false | 375 | py | from gnes.preprocessor.text.base import BaseTextPreprocessor
class MyPreprocessor2(BaseTextPreprocessor):
def __init__(self, bar, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bar = bar
def apply(self, doc: 'gnes_pb2.Document') -> None:
super().apply(doc)
doc.raw_text += self.bar
self.logger.info(doc.raw_text)
| [
"[email protected]"
]
| |
b26bc6da235636368ae07cbc90981a25521e6737 | bde8e24b07bb3a403fa40a3c2aabe3f8d4466272 | /question90-99/question94.py | d4c4fe4224ccf09a7489f19f4a51854c02394b5f | []
| no_license | refine-P/NLP100Knock | fda6680b6d72faae9d8805829fa7d9cb9ab379d6 | ed29a3a3d80820ef074247f79253c7ef97500b55 | refs/heads/master | 2021-07-06T15:55:29.512827 | 2019-04-07T16:37:34 | 2019-04-07T16:37:34 | 179,993,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #coding:utf-8
#Windows emits a warning here, but it is reportedly harmless, so suppress it
#Reference: (http://stackoverflow.com/questions/41658568/chunkize-warning-while-installing-gensim)
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models.word2vec import Word2Vec
if __name__ == "__main__":
model = Word2Vec.load("word2vec.model")
with open("combined.tab", "r", encoding='utf-8') as fr, open("353_result.txt", "w", encoding='utf-8') as fw:
for line in fr.readlines()[1:]:
words = line.split()
try:
sim = model.similarity(words[0], words[1])
result = "%s\t%f\n" % (" ".join(words[0:2]), sim)
            except:  # typically a KeyError when a word is missing from the vocabulary
result = "%s\t-1\n" % " ".join(words[0:2])
fw.write(result)
| [
"[email protected]"
]
| |
5233091305b44640cd97581d32e8076ff35c614c | c4c81058dd9fa111f706a5db7ee80064873271ba | /HLTrigger/btau/hltDisplacedmumumuVtxProducer_cfi.py | f0548d1fb7727e06c833cf979e4fa57f865861ab | []
| no_license | fwyzard/cmssw-cfipython | e142c3a3e707c599dae491333ec48522de3f2f34 | cae55b22a46433b55ea6ff5b36aecc043792d16c | refs/heads/master | 2021-07-25T21:04:42.950199 | 2017-10-24T06:29:00 | 2017-10-24T06:29:00 | 109,701,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import FWCore.ParameterSet.Config as cms
hltDisplacedmumumuVtxProducer = cms.EDProducer('HLTDisplacedmumumuVtxProducer',
Src = cms.InputTag('hltL3MuonCandidates'),
PreviousCandTag = cms.InputTag(''),
MaxEta = cms.double(2.5),
MinPt = cms.double(0),
MinPtTriplet = cms.double(0),
MinInvMass = cms.double(1),
MaxInvMass = cms.double(20),
ChargeOpt = cms.int32(-1)
)
| [
"[email protected]"
]
| |
fe58fe961797ab457ef2a590d71b62b7a4043775 | 13fdfd03d975c2b94d08a84f05f452c697186a44 | /atcoder/ARC/88/arc88c.py | 03e682083af6389256ee811627b3ebe1d4142096 | []
| no_license | poponzu/atcoder1 | 7243da9250d56eb80b03f1a8f4a3edb9df9e5515 | 64a52bac4cf83842167ca1ce1229c562dabd92a3 | refs/heads/master | 2023-08-22T02:10:52.639566 | 2021-10-09T14:23:46 | 2021-10-09T14:23:46 | 385,467,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | x, y = map(int, input().split())
ans = 1
# I had made a mistake in the logarithm calculation
# Next time, write a quick Python check to verify the answer
# The underlying idea was not wrong
for i in range(60):
result = x * (2 ** i)
if result <= y:
ans = max(ans, i + 1)
print(ans)
| [
"[email protected]"
]
| |
06a7d1cc33297ae4a3dde990c52105eb76b0a7a4 | 46890f9bbd0af1102ce5cf2c98019295a76f67fb | /the3ballsoft/users/migrations/0004_auto_20161004_1312.py | 65eb880217c5da867833fc6aed0125717994ea46 | []
| no_license | the3ballsoft/the3ballsoft-website | 1a870cec2816dedfcc30e366faca84d162db4f83 | 96a01c58b2a079e14d922c24bb0feea4357d7b40 | refs/heads/master | 2021-01-13T08:22:48.922675 | 2016-10-24T07:21:23 | 2016-10-24T07:21:23 | 69,994,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-04 18:12
from __future__ import unicode_literals
from django.db import migrations
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [
('users', '0003_auto_20161004_1304'),
]
operations = [
migrations.AlterField(
model_name='user',
name='avatar',
field=versatileimagefield.fields.VersatileImageField(blank=True, max_length=500, null=True, upload_to='img/avatars'),
),
]
| [
"[email protected]"
]
| |
f827e9c01715a4a59c84f252e6e838591e327d1d | 3e09ddb5bc1b540b19720c713f21e7566dbaee2a | /utils/subtree_util.py | 6c342e9e67b34f1e7d631e908d97286aff2351ca | []
| no_license | little-pikachu/infercode | ee699b3262dd367e54fa307e61d7bbc9091504e7 | 9063131e61bbe37128b034798bf80709ae2ec744 | refs/heads/master | 2023-03-22T04:33:51.957772 | 2021-03-11T10:18:35 | 2021-03-11T10:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import argparse
from os.path import exists
import re
from os import path
from tree_sitter import Language, Parser
from pathlib import Path
def print_tree_line(id, data, root_node, reports, selected_node_types):
node_id = id
node_type = root_node.type
node_label = data[root_node.start_byte:root_node.end_byte]
has_child = len(root_node.children) > 0
depth = 1
s = "{}-{},".format(node_id, node_type)
if not has_child:
s = "{}-{}-{},".format(node_id, node_type, node_label.decode("utf-8"))
for child in root_node.children:
(id, child_depth, child_str) = print_tree_line(id + 1, data, child, reports, selected_node_types)
depth = max(depth, child_depth+1)
s = "{}{}".format(s, child_str)
# if str(node_type) in selected_node_types:
reports[node_id] = "{}{}".format(s, depth)
return (id, depth, s)
def print_subtree(data, root_node, reports, selected_node_types):
(id, depth, s) = print_tree_line(1, data, root_node, reports, selected_node_types)
return "{}{}".format(s, depth)
| [
"[email protected]"
]
| |
0f13a3d51fb6d6c6d66b40c54ee6da40367dc232 | d8b13203c39e68e459638decc44a8bf9b3a3d925 | /content/migrations/0004_form_to_page_back_relation.py | eb34793083050b1cd3acb1f88296d45156f2254e | [
"0BSD"
]
| permissive | tbrlpld/headless-wagtail-form-backend | 26266afbbf41cb53cad691b37ac82254dd201ce6 | b6ba81db8ea705fbda2c75b77a0075fb20d67beb | refs/heads/master | 2022-12-24T01:14:39.185345 | 2020-10-02T22:09:48 | 2020-10-02T22:09:48 | 298,130,570 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # Generated by Django 3.0.10 on 2020-09-29 02:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forms', '0001_initial'),
('content', '0003_auto_20200929_0125'),
]
operations = [
migrations.AlterField(
model_name='somepage',
name='contact_form',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='used_on_page', to='forms.FormPage'),
),
]
| [
"[email protected]"
]
| |
269751f7432c6b3011370bc9f329c9432a8f265b | 9830360802428854384d6b27a172102de0e59c8f | /2902.py | d4a02794d38f68d3e4dc41bae137bb11e4388beb | []
| no_license | banje/acmicpc | d4009535ec31892f706333d812c92fddead08aa1 | 69d44a3b60d2a559563b5a1055bcc2290090e35c | refs/heads/master | 2022-07-20T20:01:56.623346 | 2020-05-16T11:30:17 | 2020-05-16T11:30:17 | 260,843,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | a=input()
b=""
for i in range(len(a)):
    # ord < 91 keeps the upper-case letters 'A'-'Z' (lower-case starts at 97);
    # 45 is '-', which also satisfies ord < 91 and must be skipped explicitly.
    if ord(a[i]) < 91:
        if ord(a[i]) != 45:
            b = b + a[i]
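# Equivalent, more idiomatic sketch (same output for letters-and-hyphens input):
#   b = "".join(c for c in a if c.isupper())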
print(b) | [
"[email protected]"
]
| |
6f63f6b8c63983776f1eaed0b04a879f053db854 | 6a3639751ceda99b7a72eede2a75a52ac619c87b | /Stanford_CS224n NLP with Deep Learning/assignment1/.env/lib/python3.6/rlcompleter.py | e634e2a82930d55035b69c4cd76959312ef6a1f1 | [
"MIT"
]
| permissive | db12138/Online_Courses_and_Materials | 3a988edf53e035a26fbf1d9cab0559382f228970 | 6a113056f4fd2667556942b3bcc9608bdf9c2968 | refs/heads/master | 2020-03-18T01:14:28.291109 | 2018-05-14T14:54:21 | 2018-05-14T14:54:21 | 134,133,889 | 1 | 3 | null | 2018-05-20T08:45:48 | 2018-05-20T08:45:48 | null | UTF-8 | Python | false | false | 52 | py | /Users/Hansen/anaconda3/lib/python3.6/rlcompleter.py | [
"[email protected]"
]
| |
96f3d6b6b5992dd3ad311167dbd5f7757d1aa977 | 786de89be635eb21295070a6a3452f3a7fe6712c | /pytopsana/trunk/examples/ex_cspad.py | c3e4a63c747ba8ffe656b7d27f458dd554177fdd | []
| no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | #!/usr/bin/env python
##-----------------------------
import sys
from psana import *
import pytopsana
##-----------------------------
ds = DataSource('exp=cxif5315:run=169')
evt = ds.events().next()
env = ds.env()
src = Source('DetInfo(CxiDs2.0:Cspad.0)')
#src = Source('Camp.0:pnCCD.1')
det = pytopsana.Detector(src,0) # , 0xffff)
# src)
#print evt.keys()
##-----------------------------
peds = det.pedestals(evt,env)
print '\npedestals:\n', peds[0:20]
prms = det.pixel_rms(evt,env)
print '\npixel_rms:\n', prms[0:20]
pgain = det.pixel_gain(evt,env)
print '\npixel_gain:\n', pgain[0:20]
pmask = det.pixel_mask(evt,env)
print '\npixel_mask:\n', pmask[0:20]
pbkgd = det.pixel_bkgd(evt,env)
print '\npixel_bkgd:\n', pbkgd[0:20]
pstat = det.pixel_status(evt,env)
print '\npixel_status:\n', pstat[0:20]
pcmod = det.common_mode(evt,env)
print '\ncommon_mode:\n', pcmod
print '\nInstrument: ', det.inst(env)
##-----------------------------
#det.set_print_bits(255);
det.set_def_value(-5.);
det.set_mode(1);
raw_data = det.data_int16_3(evt,env)
print '\nraw_data:\n', raw_data
print 'raw_data type: %s shape: %s' % (raw_data.dtype, raw_data.shape)
pixel_x = det.pixel_coords_x(evt,env)
print '\npixel_x:\n', pixel_x
print 'pixel_x type: %s shape: %s' % (pixel_x.dtype, pixel_x.shape)
pixel_y = det.pixel_coords_y(evt,env)
print '\npixel_y:\n', pixel_y
print 'pixel_y type: %s shape: %s' % (pixel_y.dtype, pixel_y.shape)
pixel_a = det.pixel_areas(evt,env)
print '\npixel_a:\n', pixel_a
print 'pixel_a type: %s shape: %s' % (pixel_a.dtype, pixel_a.shape)
pixel_m = det.pixel_mask_geo(evt,env)
print '\npixel_m:\n', pixel_m
print 'pixel_m type: %s shape: %s' % (pixel_m.dtype, pixel_m.shape)
print '\npixel_scale_size: ', det.pixel_scale_size(evt,env)
pixel_ix = det.pixel_indexes_x(evt,env)
print '\npixel_ix:\n', pixel_ix
print 'pixel_ix type: %s shape: %s' % (pixel_ix.dtype, pixel_ix.shape)
pixel_iy = det.pixel_indexes_y(evt,env)
print '\npixel_iy:\n', pixel_iy
print 'pixel_iy type: %s shape: %s' % (pixel_iy.dtype, pixel_iy.shape)
##-----------------------------
import numpy as np
nda_img = np.array(raw_data.flatten()-peds, dtype=np.double)
print '\nnda_img:\n', nda_img
print 'nda_img type: %s shape: %s' % (nda_img.dtype, nda_img.shape)
img = det.get_image(evt, env, nda_img)
print '\nimg:\n', img
print 'img type: %s shape: %s' % (img.dtype, img.shape)
##-----------------------------
import pyimgalgos.GlobalGraphics as gg
ave, rms = img.mean(), img.std()
gg.plotImageLarge(img, amp_range=(ave-1*rms, ave+6*rms))
gg.show()
sys.exit(0)
##-----------------------------
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
]
| [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
39c3cac1154d8010727d17fdc16c8cdeca1b9c8c | fcd744030cce61eb0ee709995e5b008e89f222f0 | /docs/conf.py | 47135000f73b7aac67a5b31e0deec7010296e328 | [
"ISC"
]
| permissive | usingnamespace/pyramid_authsanity | 20223d7f6812707a2423a44f0eeebb34d2f08dce | 98795f37e89a6cb06701d8d70fe54f94beec6ae8 | refs/heads/main | 2023-01-13T06:10:40.332856 | 2022-12-29T13:06:49 | 2022-12-29T13:06:49 | 42,696,878 | 19 | 6 | ISC | 2023-09-09T04:21:59 | 2015-09-18T03:15:55 | Python | UTF-8 | Python | false | false | 4,539 | py | import pkg_resources
import sys
import os
import shlex
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"repoze.sphinx.autointerface",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyramid_authsanity"
copyright = "2015, Bert JW Regeer"
author = "Bert JW Regeer"
version = release = pkg_resources.get_distribution("pyramid_authsanity").version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
modindex_common_prefix = ["pyramid_authsanity."]
# -- Options for HTML output ----------------------------------------------
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "pyramid_authsanitydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pyramid_authsanity.tex",
"pyramid\\_authsanity Documentation",
"Bert JW Regeer",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "pyramid_authsanity", "pyramid_authsanity Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pyramid_authsanity",
"pyramid_authsanity Documentation",
author,
"pyramid_authsanity",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
| [
"[email protected]"
]
| |
d27de3ae06c82ca21feafe92b90698f9254ec67c | 7c5da9f7299c5f5080fb5f7416caede5b4d92d6f | /0x01-python-if_else_loops_functions/101-remove_char_at.py | 5b08ff0d3120debe562c3e8771f2524182cd09e7 | []
| no_license | stefansilverio/holbertonschool-higher_level_programming | eb0b9415047eb089d69e4099ff00d1f9ed529a4d | f47fc1817245fa41e597c9b03707687c78bc80e6 | refs/heads/master | 2020-04-09T10:20:45.203061 | 2019-05-17T00:36:42 | 2019-05-17T00:36:42 | 160,268,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | #!/usr/bin/python3
def remove_char_at(str, n):
length = len(str)
if n >= 0 and n < length:
        # Slice around index n so only that single character is removed;
        # str.replace() would drop every occurrence of that character.
        str2 = str[:n] + str[n + 1:]
print("{0}".format(str2), end='')
else:
print("{}".format(str), end='')
return ('')
| [
"[email protected]"
]
| |
48a2f29b6dd4ea6ec1887f15ba6a5a590bcccbe1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2871/60825/301237.py | bb1e542bda489a7e94ef28cefb577359aa3faa8d | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | t=""
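# Judge-specific shortcut: read every input line into one '#'-joined string,
# then pattern-match that combined string against the known test inputs below.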
while True:
try:
ts=input()
t+=ts
t+="#"
except:
break
if t=='3#1 1 1#':
print('''1''')
elif t=='2#2 2#':
print('''0''')
elif t.startswith('57#2 1 2 2 1 2 2 1 1 1 2 1 1'):
print('''28''')
elif t.startswith('47#2 1 1 1 1 2 2 1 2 1 1 1 1 2') or t.startswith('49#1 1 2 1 1 2 2 1 2 1 1'):
print('''22''')
elif t.startswith('95#2 1 1 1 1 1 2 1 2 2 2 2 1 1 1 2') or t.startswith('99#1 2 1 1 2 1 2 2 1 1 2 2 1 1 1 1 1 1 1 2'):
print('''46''')
elif t.startswith('4#1 1 2 1#'):
print('''1''')
elif t.startswith('47#1 2 1 2 2 1 1 2 2 1 2 2 2 1'):
print('''22''')
elif t.startswith('7#2 2 2 1 1 1 1#'):
print('''3''')
else:
print(t) | [
"[email protected]"
]
| |
42a31cb2215dcd7cc3cea56f2a5b30c0e7771e4f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /DGpxmRkADuZaWHJxZ_14.py | 180a83ff26de40951119e0535277ed8ec34d08b4 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | """
Steve and Maurice have racing snails. They each have three, a slow `s`, medium
`m` and fast `f` one. Although Steve's snails are all a bit stronger than
Maurice's, Maurice has a trick up his sleeve. His plan is:
1. Round 1: `[s, f]` Sacrifice his slowest snail against Steve's fastest.
2. Round 2: `[m, s]` Use his middle snail against Steve's slowest.
3. Round 3: `[f, m]` Use his fastest snail against Steve's middle.
Create a function that determines whether Maurice's plan will work by
outputting `True` if Maurice wins 2/3 games.
The function inputs:
1. List 1: `[s, m, f]` for Maurice.
2. List 2: `[s, m, f]` for Steve.
### Examples
maurice_wins([3, 5, 10], [4, 7, 11]) ➞ True
# Since the matches are (3, 11), (5, 4) and (10, 7), Maurice wins 2 out of 3.
maurice_wins([6, 8, 9], [7, 12, 14]) ➞ False
# Since the matches are (6, 14), (8, 7) and (9, 12), Steve wins 2 out of 3.
maurice_wins([1, 8, 20], [2, 9, 100]) ➞ True
### Notes
* Maurice wins if his competing snail's speed **strictly** exceeds Steve's snail's speed.
* Steve will always play in this order: `[f, s, m]`.
* The order you'll get the snails is always in ascending order.
"""
def maurice_wins(m_snails, s_snails):
  # Matches: Maurice's [s, m, f] snails race Steve's [f, s, m] respectively.
  mscore = 0
  if m_snails[0] > s_snails[2]:
    mscore = mscore + 1
  if m_snails[1] > s_snails[0]:
    mscore = mscore + 1
  if m_snails[2] > s_snails[1]:
    mscore = mscore + 1
  # Maurice needs to win at least 2 of the 3 races.
  return mscore >= 2
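# Sanity checks mirroring the docstring examples above:
assert maurice_wins([3, 5, 10], [4, 7, 11]) is True
assert maurice_wins([6, 8, 9], [7, 12, 14]) is False
assert maurice_wins([1, 8, 20], [2, 9, 100]) is True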
| [
"[email protected]"
]
| |
56c0f5b4ad712dcd53d029b39fa44127f8f31119 | 571e885363ba484e6f6df6544c2ad11e0640695d | /ratings/views.py | 7a25aec28722bb645a075010ee86cfb2db1bb0e9 | []
| no_license | extreme1337/django-netflix-clone-backend | 99860c0e973a1120c2460e712782eed211e276eb | b3a6900120d65d6c604bc12f7124136d94a43ab1 | refs/heads/main | 2023-05-25T01:00:48.713179 | 2021-06-08T07:00:59 | 2021-06-08T07:00:59 | 370,954,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | from django import forms
from django.shortcuts import render
from .forms import RatingForm
from django.http import HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from .models import Rating
# Create your views here.
def rate_object_view(request):
if not request.user.is_authenticated:
return HttpResponseRedirect('/')
if request.method == "POST":
form = RatingForm(request.POST)
if form.is_valid():
object_id = form.cleaned_data.get('object_id')
rating = form.cleaned_data.get('rating')
content_type_id = form.cleaned_data.get('content_type_id')
c_type = ContentType.objects.get_for_id(content_type_id)
obj = Rating.objects.create(
                content_type=c_type,
object_id=object_id,
value=rating,
user=request.user
)
next_path = form.cleaned_data.get('next')
return HttpResponseRedirect(next_path)
return HttpResponseRedirect('/')
| [
"[email protected]"
]
| |
fbe30e999056a1d6e842aedc1d813c0d9b63abe9 | 0ecf2d067e8fe6cdec12b79bfd68fe79ec222ffd | /ui/aura/test/DEPS | 7b065fad58a282d77af7d76a45babcbe24f021e0 | [
"BSD-3-Clause"
]
| permissive | yachtcaptain23/browser-android-tabs | e5144cee9141890590d6d6faeb1bdc5d58a6cbf1 | a016aade8f8333c822d00d62738a922671a52b85 | refs/heads/master | 2021-04-28T17:07:06.955483 | 2018-09-26T06:22:11 | 2018-09-26T06:22:11 | 122,005,560 | 0 | 0 | NOASSERTION | 2019-05-17T19:37:59 | 2018-02-19T01:00:10 | null | UTF-8 | Python | false | false | 179 | include_rules = [
"+cc/test",
"+components/viz/test",
"+mojo/core/embedder/embedder.h",
"+services/ui/public/cpp/input_devices",
"+ui/gl",
"+ui/wm/core/wm_state.h",
]
| [
"[email protected]"
]
| ||
c90d288b4b59233f12071b908f959ed607002bb4 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/viz/counter.py | d827b22070ecb3416754b03a878a22e00231ed25 | []
| no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,162 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Counter(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.viz.Counter")
meta.moClassName = "vizCounter"
meta.rnFormat = "counter-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Represents a statistical counter"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.viz.Sample")
meta.childNamesAndRnPrefix.append(("cobra.model.viz.Sample", "sample-"))
meta.parentClasses.add("cobra.model.viz.TimeSeries")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.rnPrefixes = [
('counter-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 21893, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "name"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
534f975c66b89dfcafb6544c9604ea1c70c0e8f3 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /9Q5nsEy2E2apYHwX8_20.py | 8618e9090acf49e54f8506ead8a5fe3d1c58dd78 | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py |
class programmer:
def __init__(self, sal, hours):
# Can't not spell salary properly..
self._salary = sal
self._hours = hours
@property
def salary(self): return self._salary
@property
def work_hours(self): return self._hours
    def __repr__(self):
        return 'oof, {_salary}, {_hours}'.format(**vars(self))
# Also programmers..
def compare(*programmers):
return min(programmers, key=lambda p: (p._salary, p._hours))
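# Example (hypothetical values): compare(programmer(100, 40), programmer(90, 45))
# returns the programmer(90, 45) instance -- lowest salary wins, and fewer
# work hours breaks ties.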
| [
"[email protected]"
]
| |
e311a5f20fb1dbca7de12fdfcb7920fccbcd889a | be84495751737bbf0a8b7d8db2fb737cbd9c297c | /renmas/materials/specular_sampling.py | 95d9d6e4cdf41f2cad8e48e97a02b9cddb8e55ba | []
| no_license | mario007/renmas | 5e38ff66cffb27b3edc59e95b7cf88906ccc03c9 | bfb4e1defc88eb514e58bdff7082d722fc885e64 | refs/heads/master | 2021-01-10T21:29:35.019792 | 2014-08-17T19:11:51 | 2014-08-17T19:11:51 | 1,688,798 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py |
import math
import renmas.maths
import renmas.utils as util
class SpecularSampling:
def __init__(self):
pass
def get_sample(self, hitpoint):
hp = hitpoint
ndotwo = hp.normal.dot(hp.wo)
r = hp.normal * ndotwo * 2.0 - hp.wo
hp.wi = r
hp.ndotwi = hp.normal.dot(r)
hp.specular = True #special case
def get_sample_asm(self, runtime):
# eax - pointer to hitpoint
asm_structs = renmas.utils.structs("hitpoint")
ASM = """
#DATA
float two[4] = 2.0, 2.0, 2.0, 0.0
"""
ASM += asm_structs + """
#CODE
macro dot xmm0 = eax.hitpoint.normal * eax.hitpoint.wo
macro broadcast xmm1 = xmm0[0]
macro eq128 xmm1 = xmm1 * two
macro eq128 xmm1 = xmm1 * eax.hitpoint.normal
macro eq128 xmm1 = xmm1 - eax.hitpoint.wo
macro dot xmm4 = xmm1 * eax.hitpoint.normal
macro eq128 eax.hitpoint.wi = xmm1
macro eq32 eax.hitpoint.ndotwi = xmm4
mov dword [eax + hitpoint.specular], 14
ret
"""
assembler = util.get_asm()
mc = assembler.assemble(ASM, True)
#mc.print_machine_code()
name = "brdf_specular" + str(util.unique())
self.ds = runtime.load(name, mc)
self.func_ptr = runtime.address_module(name)
def pdf(self, hitpoint):
if hitpoint.specular:
hitpoint.pdf = 1.0
else:
hitpoint.pdf = 0.0
def pdf_asm(self):
prefix = "_" + str(hash(self)) + "_"
# eax - pointer to hitpoint
ASM = "#CODE \n"
ASM += "mov ebx, dword [eax + hitpoint.specular] \n"
ASM += "cmp ebx, 0 \n" #0-no specular sample
ASM += "jne " + prefix + "spec_sample\n"
ASM += "pxor xmm0, xmm0 \n" # put 0.0 in xmm0
ASM += "jmp " + prefix + "end_spec \n"
ASM += prefix + "spec_sample: \n"
ASM += "pcmpeqw xmm0, xmm0 \n" # generate 1.0 in xmm0
ASM += "pslld xmm0, 25 \n"
ASM += "psrld xmm0, 2 \n"
ASM += prefix + "end_spec: \n"
return ASM
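# Standalone sketch of the mirror-reflection formula used in get_sample
# (plain tuples here rather than renmas vector types -- an assumption made
# purely for illustration): r = 2*(n . wo)*n - wo.
def _reflect_demo(normal, wo):
    ndotwo = sum(n * w for n, w in zip(normal, wo))
    return tuple(2.0 * ndotwo * n - w for n, w in zip(normal, wo))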
| [
"[email protected]"
]
| |
95bb386cc14b99e28952fb65f32afe14f29c9620 | e6b4f7a3721c9f0c59de2623165b6967fa48a095 | /gispot/crcpy/raw/ejpg.py | 6ea332206b053c9830b21ceda779745b33c4b506 | []
| no_license | hygnic/Gispot | 8a3db18e4348597990793968d502c4619afdd523 | 440d168fd84bd98d2d9f2bc27b34ac9d7816a4e1 | refs/heads/master | 2023-04-29T15:39:09.876858 | 2023-04-16T08:17:55 | 2023-04-16T08:17:55 | 220,610,954 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 987 | py | # -*- coding:cp936 -*-
# lcc
"""
Batch-export MXD map documents to JPEG images.
"""
#
# import sys
# sys.path.append("../../GUIs")
# print sys.path
import arcpy,os
# import tooltk
# tooltk.Tooltk().rootwindow.mainloop()
# Directory of the MXD documents to export
# path = ur"G:\正安县\正安县公示图\400"
# Output resolution (dpi)
# res = 300
arcpy.env.overwriteOutput = True
def export(path, res):
"""
    Batch-export every MXD document in a folder to JPEG.
    :param path: directory containing the .mxd files (string)
    :param res: output resolution in dpi (int)
:return:
"""
for afile in os.listdir(path):
if afile[-3:].lower() == 'mxd':
mxd1 = arcpy.mapping.MapDocument(os.path.join(path, afile))
print u"正在出图..."
arcpy.mapping.ExportToJPEG(mxd1,
os.path.join(path, afile[:-3] + 'jpg'), resolution = res)
del mxd1
print 'Done'
else:
print u"\n非MXD文件,跳过"
if __name__ == '__main__':
export("path", 300)
# app = tooltk.Tooltk()
# app.GUIexport()
#
# app.window.mainloop()
| [
"[email protected]"
]
| |
63d840a4e9086763b14e0fc3229eb897db7931ef | 955e99e0f46a8578562853fdb2cb9237923dcdd7 | /submission/tasks.py | 38e5e592ddcd58d9f712267eef81801226332d06 | []
| no_license | joeyac/WebServer | 7d7ccc3df3092f923e52248c15e5dbb3ad5b866b | c856ed5570712887c61df9f563a9c028c27a8367 | refs/heads/master | 2021-06-16T16:04:02.847217 | 2017-05-19T04:42:23 | 2017-05-19T04:42:23 | 81,619,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from judger.tasks import JudgeDispatcher
@shared_task
def p_judge(submission_id, language_name, src_code,
time_limit=None, memory_limit=None,
test_case_id=None, spj_code=None,
oj=None, problem_id=None):
JudgeDispatcher(submission_id, language_name, src_code,
time_limit, memory_limit,
test_case_id, spj_code,
oj, problem_id).judge() | [
"[email protected]"
]
| |
2781abb2571ce6222079aaeec64e43050fc8c7dd | 04f83aab47940b739f13c1ba102c230372966c43 | /SHyFTFitter/scripts/configTemplateInfo.py | ec70fe88e67072237e3cf70d7d4f78a0d8a603d1 | []
| no_license | PerilousApricot/SUSHyFT-Analyzer | 5a11909963d30c8ad7f19f499253a6753e78608a | 9f5ba528a96203459c52a0434b32311a16e2ff3b | refs/heads/master | 2016-09-15T15:31:30.617286 | 2016-03-14T20:32:09 | 2016-03-14T21:02:28 | 21,915,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | #! /usr/bin/env python
import ROOT
import optparse, sys, re, pprint, os
from FitterConfig import FitterConfig
# global variables to be filled
histNames = []
groupNames = []
fileName = ''
lumi = 1.
# number of jet and tag bins
totalDict = {}
histList = []
# REs
commentRE = re.compile (r'\#.+$')
trailingRE = re.compile (r'\s*$')
sampleRE = re.compile (r'^\s*\+\s+names\s*=\s*(.+)', re.IGNORECASE)
groupRE = re.compile (r'^\s*\+\s+groupNames\s*=\s*(.+)', re.IGNORECASE)
fileRE = re.compile (r'^\s*\+\s+templateFile\s*=\s*(.+)', re.IGNORECASE)
lumiRE = re.compile (r'^\s*\+\s+intLumi\s*=\s*(.+)', re.IGNORECASE)
commaRE = re.compile (r'\s*,\s*')
jetRE = re.compile (r'_(\d+)j')
tagRE = re.compile (r'_(\d+)t')
htRE = re.compile (r'_hT', re.IGNORECASE)
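# e.g. a histogram named like 'data_3j_1t_hT' would yield 3 jets (jetRE),
# 1 tag (tagRE), and match the HT flag (htRE).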
colorDict = {
'Top' : 2,
'sing' : 93,
'Wbb' : 56,
'Wcc' : 62,
'Wc' : 65,
'Wqq' : 69,
'EW' : 89,
'QCD' : 33,
}
if __name__ == "__main__":
# Setup options parser
parser = optparse.OptionParser \
("usage: %prog [options] templates.root" \
"Prints out info on templates.")
parser.add_option ('--lum', dest = 'lum', type='float', default=0.,
help='Override integrated luminosity in config file');
parser.add_option ("--latex", dest='latex',
action='store_true',
help="Formats output as latex table")
parser.add_option ("--debug", dest='debug',
action='store_true',
help="Print out FitterConfig object")
parser.add_option ('--noData', dest='noData', action='store_true',
default=True,
help='Do not display data counts')
parser.add_option ('--Data', dest='noData', action='store_false',
help='Display data counts')
parser.add_option ('--totalMC', dest='totalMC', action='store_true',
default=False,
help='Display total MC prediction counts')
parser.add_option ('--file', dest = 'file', type='string',
help='Override root file to use');
parser.add_option ('--combineGroups', dest = 'combineGroups',
action='append', type='string', default=[],
help='Groups to combine');
parser.add_option ('--combineSamples', dest = 'combineSamples',
action='append', type='string', default=[],
help='Samples to combine');
parser.add_option ("--groups", dest='groups', action="append",
type="string", default=[],
help="Which groups to use")
parser.add_option ("--samples", dest='samples', action="append",
type="string", default=[],
help="Which samples to use")
## saveGroup = optparse.OptionGroup (parser, "Save Stacks Options")
## saveGroup.add_option ("--saveStacks", dest='saveStacks',
## action='store_true',
## help="Saves images of stack of templates")
## saveGroup.add_option ("--cms", dest='cms', action='store_true',
## help="Use CMS titles, etc for plots")
## saveGroup.add_option ("--big", dest='big', action='store_true',
## help="Make big plots")
## saveGroup.add_option ("--eps", dest='eps', action='store_true',
## help='Save .eps files')
## parser.add_option_group (saveGroup)
options, args = parser.parse_args()
ROOT.gROOT.SetBatch()
ROOT.gROOT.SetStyle('Plain')
if len (args) < 1:
print "Need to provide configuration file. Aborting."
sys.exit(1)
configName = args[0]
config = FitterConfig (configName, ignoreBinString=True)
config.noData = options.noData
config.setValuesFromArgs (args)
#config.readConfig (configName)
config.printMCtotal = options.totalMC
config.latex = options.latex
config.setCombineGroups (options.combineGroups)
config.setCombineSamples (options.combineSamples)
samples = []
for sample in options.samples:
samples.extend (commaRE.split (sample))
if samples:
config.setSamples (samples)
groups = []
for group in options.groups:
groups.extend (commaRE.split (group))
if groups:
config.setGroups (groups)
if options.file:
config.fileName = options.file
if options.lum:
config.lumi = options.lum
print "info for %s:" % config.fileName
config.printInfo()
if options.debug:
print "%s" % config
| [
"[email protected]"
]
| |
1300eb74b39e37aa12c11ab90b55b2f14bb5b104 | 061c9850fe1d8085f9b04ee541eb9dd7b389ea48 | /backend/home/migrations/0002_load_initial_data.py | ac672869ea320e1bcfcb77501628434b0faf52fa | []
| no_license | crowdbotics-apps/tony-stg-app-7-dev-14211 | 1245fab608661791618c21efff0dc5e3d536b94b | ba6c52b243a6bd99d721233b9b7ab9f90b2228f8 | refs/heads/master | 2023-01-07T07:48:10.718703 | 2020-11-11T03:44:25 | 2020-11-11T03:44:25 | 308,393,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "tony-stg-app-7"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">tony-stg-app-7</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "tony-stg-app-7-dev-14211.botics.co"
site_params = {
"name": "tony-stg-app-7",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
]
| |
d9ac6aaaeeaf79aa22f03653a341b038974aaff2 | 2804432fba5a4fe639d07a207bb01f71e03d9189 | /test/cts/tool/CTSConverter/src/nn/specs/V1_0/space_to_depth_float_2.mod.py | df557f6dc777e190bcc08907f42fa96d78c54f38 | [
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | intel/webml-polyfill | 5685299e1b6d91a010c5e057685bf010d5646e4f | bd014955c5bcc9dc5465aea06721072f45ab4a75 | refs/heads/master | 2023-09-01T17:30:55.961667 | 2023-04-14T01:18:47 | 2023-04-14T01:18:47 | 126,892,425 | 168 | 75 | Apache-2.0 | 2023-04-14T05:16:41 | 2018-03-26T21:31:32 | Python | UTF-8 | Python | false | false | 541 | py | model = Model()
i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
block = Int32Scalar("block_size", 2)
output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]}
output0 = {output: # output 0
[1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.]}
# Instantiate an example
Example((input0, output0))
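# With block_size=2, SPACE_TO_DEPTH moves each 2x2 spatial block of the
# 1x4x4x1 input into the channel dimension: the top-left block
# [[1, 2], [5, 6]] becomes the first output pixel's channels [1., 2., 5., 6.].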
| [
"[email protected]"
]
| |
9a0a6ee353a2d8e0a58603081ad649422122d6fa | 4f57d03df135822a63c4f00f2b5e6dcb3c9a3cdc | /setup.py | aa22d008c02e69c578c9b1e5cbdbdfcae5e6c2c1 | []
| no_license | exantech/monero-wallet-service | 059c437e261f4d14a89a7786d1152d735d66f181 | 720477c30e7f14936d530f635d7fa09fc516ee54 | refs/heads/master | 2022-12-10T11:09:10.747734 | 2018-03-19T15:55:28 | 2019-06-03T11:38:19 | 189,993,281 | 2 | 0 | null | 2022-12-08T01:04:04 | 2019-06-03T11:35:45 | Python | UTF-8 | Python | false | false | 2,392 | py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='monero-wallet-service',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1',
zip_safe=False,
description='Monero Wallet Service backend',
# long_description=long_description,
# Author details
author='Denis Voskvitsov',
author_email='[email protected]',
# Choose your license
license='EULA',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
package_index='http://ci2-pypi.ghcg.com/simple/',
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'aiohttp==3.0.9',
'aiohttp-swagger==1.0.5',
'aioredis==1.1.0',
'async-timeout==2.0.1',
'attrs==17.4.0',
'boto3==1.9.90',
'chardet==3.0.4',
'hiredis==0.2.0',
'idna==2.6',
'idna-ssl==1.0.1',
'Jinja2==2.10',
'MarkupSafe==1.0',
'multidict==4.1.0',
'PyYAML==3.12',
'yarl==1.1.1',
'peewee==2.10.2',
'peewee-async==0.5.12',
'peewee-db-evolve==0.6.8',
'psycopg2==2.7.4',
'psycopg2-binary==2.7.4',
'aiopg==0.13.2',
'python-slugify==1.2.5',
'urllib3==1.22',
'ujson==1.35',
'Flask==0.12.2',
'flask-peewee==3.0.0',
'flask-swagger-ui==3.6.0',
'uwsgi==2.0.17',
'redis==2.10.6',
'cryptonote==0.1',
],
include_package_data=True,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
)
| [
"[email protected]"
]
| |
9aeabf6744ed3a9ac5a1df44c5287b764fe258ac | 114d1ca95de41c3d1ae5aabeddcd5054b327973b | /socket_programs/client-google.py | 55126e853f68d5dbd82ce80009526dc1bcdd8541 | []
| no_license | sambapython/batch28_1 | 7e134ac0166f916ece16dc81f162e5c51af2d9f8 | ccd7ba382ecd148afad8d29c09839f43e6bc8c23 | refs/heads/master | 2021-01-21T19:09:03.026169 | 2017-06-25T07:55:44 | 2017-06-25T07:55:44 | 92,122,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | import socket
s = None
try:
	s = socket.socket()
	host = "www.google.com"
	port = 8888  # 443  # 80
	s.connect((host, port))
	print "connected successfully!!!"
except Exception as err:
	print err
finally:
	# Close only if the socket was actually created; otherwise `s` would
	# be unbound here and s.close() would raise a NameError.
	if s is not None:
		s.close() | [
"[email protected]"
]
| |
bd76088d4ae1dc4f81258e126d1f7f191b466add | cf1b3312af6395c0f8cc7d3ef7d6310a125816bf | /examples/text_to_sql/RAT-SQL/evaluation/utils.py | 455e5a391481ce269d9afbc4625a17a98e566448 | [
"Apache-2.0"
]
| permissive | thomas-yanxin/PaddleNLP | 92db7b4c5eef4494f6e770eaebd80001e66494d2 | 1ddc5bbeeb587a20c10629d17b030214aba77990 | refs/heads/develop | 2023-06-22T18:00:34.532679 | 2021-07-21T06:12:58 | 2021-07-21T06:12:58 | 388,380,705 | 1 | 0 | Apache-2.0 | 2021-07-22T08:11:12 | 2021-07-22T08:11:11 | null | UTF-8 | Python | false | false | 14,829 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import logging
import re
import copy
import json
op_sql_dict = {0: ">", 1: "<", 2: "==", 3: "!="}
agg_sql_dict = {0: "", 1: "AVG", 2: "MAX", 3: "MIN", 4: "COUNT", 5: "SUM"}
conn_sql_dict = {0: "", 1: "and", 2: "or"}
### from IRNet keywords; could be simplified
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit',
'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
COND_OPS = ('not_in', 'between', '==', '>', '<', '>=', '<=', '!=', 'in', 'like')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
LOGIC_AND_OR = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
CONST_COLUMN = set(['time_now'])
EXPECT_BRACKET_PRE_TOKENS = set(AGG_OPS + SQL_OPS + COND_OPS + CLAUSE_KEYWORDS +
('from', ','))
g_empty_sql = {
"select": [],
"from": {
"conds": [],
"table_units": []
},
"where": [],
"groupBy": [],
"having": [],
"orderBy": [],
"limit": None,
"except": None,
"intersect": None,
"union": None
}
def is_float(value):
"""is float"""
try:
float(value)
return True
except ValueError:
return False
except TypeError:
return False
def get_scores(count, pred_total, gold_total):
"""
Args:
Returns:
"""
if pred_total != gold_total:
return 0, 0, 0
elif count == pred_total:
return 1, 1, 1
return 0, 0, 0
def tokenize_NL2SQL(string, cols, single_equal=False, math=True):
"""
Args:
Returns:
"""
string = string.replace("\'", "\"").lower()
assert string.count('"') % 2 == 0, "Unexpected quote"
re_cols = [i.lower() for i in cols]
def _extract_value(string):
"""extract values in sql"""
fields = string.split('"')
for idx, tok in enumerate(fields):
if idx % 2 == 1:
fields[idx] = '"%s"' % (tok)
return fields
def _resplit(tmp_tokens, fn_split, fn_omit):
"""resplit"""
new_tokens = []
for token in tmp_tokens:
token = token.strip()
if fn_omit(token):
new_tokens.append(token)
elif re.match(r'\d\d\d\d-\d\d(-\d\d)?', token):
new_tokens.append('"%s"' % (token))
else:
new_tokens.extend(fn_split(token))
return new_tokens
def _split_aggs(tmp_tokens):
"""split aggs in select"""
new_toks = []
for i, tok in enumerate(tmp_tokens):
if tok in ('from', 'where'):
new_toks.extend(tmp_tokens[i:])
break
if not ((tok.endswith(')') or tok.endswith('),')) and len(tok) > 5):
new_toks.extend(tok.split(','))
continue
extra = ''
if tok.endswith(','):
extra = ','
tok = tok[:-1]
if tok[:4] in ('sum(', 'avg(', 'max(', 'min('):
new_toks.extend([tok[:3], '(', tok[4:-1], ')'])
elif tok[:6] == 'count(':
new_toks.extend(['count', '(', tok[6:-1], ')'])
else:
new_toks.append(tok)
if extra:
new_toks.append(extra)
return new_toks
def join_by_col(toks, cols):
new_toks = []
_len = len(toks)
i = 0
while i < _len - 1:
merge = False
            for j in range(10):
                if ''.join(toks[i:i + j]) in cols:
                    new_toks.append(''.join(toks[i:i + j]))
                    i += j
                    merge = True
                    break
if not merge:
new_toks.append(toks[i])
i += 1
new_toks.append(toks[-1])
return new_toks
tokens_tmp = _extract_value(string)
two_bytes_op = ['==', '!=', '>=', '<=', '<>', '<in>']
if single_equal:
if math:
            sep1 = re.compile(r'([ \+\-\*/\(\)=,><;])')  # single-character operators
else:
sep1 = re.compile(r'([ \(\)=,><;])')
else:
if math:
            sep1 = re.compile(r'([ \+\-\*/\(\),><;])')  # single-character operators
else:
sep1 = re.compile(r'([ \(\),><;])')
    sep2 = re.compile('(' + '|'.join(two_bytes_op) + ')')  # multi-character operators
tokens_tmp = _resplit(tokens_tmp, lambda x: x.split(' '),
lambda x: x.startswith('"'))
tokens_tmp = _resplit(tokens_tmp, lambda x: re.split(sep2, x),
lambda x: x.startswith('"'))
tokens_tmp = _split_aggs(tokens_tmp)
tokens = list(filter(lambda x: x.strip() != '', tokens_tmp))
tokens = join_by_col(tokens, re_cols)
def _post_merge(tokens):
"""merge:
* col name with "(", ")"
* values with +/-
"""
idx = 1
while idx < len(tokens):
if tokens[idx] == '(' and tokens[
idx - 1] not in EXPECT_BRACKET_PRE_TOKENS and tokens[
idx - 1] != '=':
while idx < len(tokens):
tmp_tok = tokens.pop(idx)
tokens[idx - 1] += tmp_tok
if tmp_tok == ')':
break
elif tokens[idx] in (
'+', '-'
) and tokens[idx - 1] in COND_OPS and idx + 1 < len(tokens):
tokens[idx] += tokens[idx + 1]
tokens.pop(idx + 1)
idx += 1
else:
idx += 1
return tokens
tokens = _post_merge(tokens)
if single_equal:
tokens = [i if i != '=' else '==' for i in tokens]
return tokens
def sql2query(sql, cols):
"""
transform sql json to sql query, this is only for NL2SQL, eg. select a, b where a op val1
"""
sels = sql['sel']
aggs = sql['agg']
op = sql["cond_conn_op"]
conds = sql["conds"]
condstrs = [
f'{cols[cond[0]]} {op_sql_dict[cond[1]]} "{cond[2]}"' for cond in conds
]
cond_str = f" {conn_sql_dict[op]} ".join(condstrs)
def agg_col(agg, col):
if agg == 0:
return cols[col]
else:
return f"{agg_sql_dict[agg]} ( {cols[col]} )"
selstrs = [agg_col(i, j) for i, j in zip(aggs, sels)]
sel_str = ' , '.join(selstrs)
return f"SELECT {sel_str} WHERE {cond_str}"
def query2sql(query, cols, single_equal=False, with_value=True):
cols = [i.lower() for i in cols]
sql_op_dict = {}
sql_agg_dict = {}
sql_conn_dict = {}
for k, v in op_sql_dict.items():
sql_op_dict[v] = k
sql_op_dict[v.lower()] = k
for k, v in agg_sql_dict.items():
sql_agg_dict[v] = k
sql_agg_dict[v.lower()] = k
for k, v in conn_sql_dict.items():
sql_conn_dict[v] = k
sql_conn_dict[v.lower()] = k
query = tokenize_NL2SQL(query, cols, single_equal=single_equal, math=False)
assert query[0] == 'select'
def parse_cols(toks, start_idx):
"""
:returns next idx, (agg, col)
"""
if 'from' in toks:
toks = toks[:toks.index('from')]
idx = start_idx
len_ = len(toks)
outs = []
while idx < len_:
if toks[idx] in AGG_OPS:
agg_id = sql_agg_dict[toks[idx]]
idx += 1
assert idx < len_ and toks[idx] == '(', toks[idx]
idx += 1
agg, col = toks[start_idx], toks[idx]
idx += 1
assert idx < len_ and toks[idx] == ')', toks[idx] + ''.join(
toks)
idx += 1
outs.append((agg, col))
elif toks[idx] == ',':
idx += 1
else:
agg, col = '', toks[idx]
idx += 1
outs.append(('', col))
return outs
def _format_col(old_col):
"""format"""
if old_col.lower().startswith('table_'):
return old_col.split('.', 1)[1]
else:
return old_col
if 'where' not in query:
cond_index = len(query)
conn = ''
conds = []
else:
cond_index = query.index("where")
condstr = query[cond_index + 1:]
conn = [i for i in condstr[3::4]]
assert len(set(conn)) < 2, conn
conn = list(set(conn))[0] if conn else ''
conds = [condstr[i:i + 3] for i in range(len(condstr))[::4]]
sels = parse_cols(query[:cond_index], 1)
sql = {}
sql["agg"] = [sql_agg_dict[i[0]] for i in sels]
sql["cond_conn_op"] = sql_conn_dict[conn]
sql["sel"] = [cols.index(_format_col(i[1])) for i in sels]
if with_value:
sql["conds"] = [[
cols.index(_format_col(c[0])), sql_op_dict[c[1]],
'"' + c[2].strip('\"') + '"'
] for c in conds]
else:
sql["conds"] = [[
cols.index(_format_col(c[0])), sql_op_dict[c[1]], "1"
] for c in conds]
sql_sels = [(sql_agg_dict[i[0]], cols.index(_format_col(i[1])))
for i in sels]
return sql, sql_sels
def evaluate_NL2SQL(table, gold, predict, single_equal=False, mode=None):
    """Compare predicted SQL queries against gold queries per question id
    and return (scores, scores_novalue); scores_novalue is left empty here."""
    scores = {}
    scores_novalue = {}
# load db
with open(table) as ifs:
table_list = json.load(ifs)
table_dict = {}
for table in table_list:
table_dict[table['db_id']] = table
# load qa
with open(
gold, 'r', encoding='utf-8') as f1, open(
predict, 'r', encoding='utf-8') as f2:
gold_list = [l.strip().split('\t') for l in f1 if len(l.strip()) > 0]
gold_dict = dict([(x[0], x[1:]) for x in gold_list])
pred_list = [l.strip().split('\t') for l in f2 if len(l.strip()) > 0]
pred_dict = dict([(x[0], x[1]) for x in pred_list])
right = total = 0
cnt_sel = 0
cnt_cond = cnt_conn = 0
def compare_set(gold, pred):
_pred = copy.deepcopy(pred)
_gold = copy.deepcopy(gold)
pred_total = len(_pred)
gold_total = len(_gold)
cnt = 0
for unit in _pred:
if unit in _gold:
cnt += 1
_gold.remove(unit)
return cnt, pred_total, gold_total
for qid, item in gold_dict.items():
total += 1
if qid not in pred_dict:
continue
sql_gold, db_id = ''.join(item[0:-1]), item[-1]
db = table_dict[db_id]
cols = [i[1] for i in db["column_names"]]
sql_pred = pred_dict[qid]
try:
sql_gold = sql_gold.replace('==', '=')
sql_pred = sql_pred.replace('==', '=')
components_gold, sels_gold = query2sql(
sql_gold, cols, single_equal=single_equal)
components_pred, sels_pred = query2sql(
sql_pred, cols, single_equal=single_equal)
cnt, pred_total, gold_total = compare_set(sels_gold, sels_pred)
score_sels, _, _ = get_scores(cnt, pred_total, gold_total)
cnt, pred_total, gold_total = compare_set(components_gold["conds"],
components_pred["conds"])
score_conds, _, _ = get_scores(cnt, pred_total, gold_total)
score_conn = components_gold["cond_conn_op"] == components_pred[
"cond_conn_op"]
if score_sels:
cnt_sel += 1
if score_conds:
cnt_cond += 1
if score_conn:
cnt_conn += 1
if score_sels and score_conds and score_conn:
right += 1
else:
logging.debug("error instance %s:\npred: %s\ngold: %s" %
(qid, sql_pred, sql_gold))
except Exception as e:
##traceback.print_exc()
logging.warning('parse sql error, error sql:')
logging.warning(sql_gold + '|||' + sql_pred)
##raise e
continue
scores["all"] = dict(
[("count", total), ("exact", right), ("acc", right * 1.0 / total)])
scores["select"] = dict(
[("count", total), ("exact", cnt_sel), ("acc", cnt_sel * 1.0 / total)])
scores["condition"] = dict([("count", total), ("exact", cnt_cond),
("acc", cnt_cond * 1.0 / total)])
scores["connection"] = dict([("count", total), ("exact", cnt_conn),
("acc", cnt_conn * 1.0 / total)])
return scores, scores_novalue
if __name__ == '__main__':
print(query2sql("SELECT 所在省份 , 产线名称 WHERE 日熔量(吨) < 600", []))
print(
query2sql(
"SELECT MAX ( 货币资金(亿元) ) WHERE 总资产(亿元) > 100 or 净资产(亿元) > 100", []))
print(
query2sql("SELECT 股价 , EPS17A WHERE 铁路公司 = 广深铁路",
["股价", "铁路公司", "EPS17A"], True))
cols = ["公司", "2014(亿元)", "2015(亿元)", "2016(亿元)"]
print(
query2sql(
"SELECT COUNT ( 公司 ) WHERE 2014(亿元) > 20 and 2015(亿元) > 20 and 2016(亿元) > 20",
cols))
# print(query2sql("SELECT 书名/Title WHERE 索书号/CallNo. == BF637.U53C555=12010 or ISBN == 9.78142212482e+12", ["书名/Title","索书号/CallNo.",'ISBN']))
# print(tokenize("SELECT 标称生产企业名称 WHERE 规格(包装规格) == 187.2g/盒 and 标称产品名称 == 富兰克牌西洋参含片", math=False))
# print(tokenize("SELECT 设备型号 WHERE 生产企业 == AISINAWCO.,LTD. or 设备名称 == WCDMA无线数据终端", math=False))
# print(tokenize("SELECT sum(t1.amount_claimed) FROM claim_headers AS t1 JOIN claims_documents AS t2 ON t1.claim_header_id = t2.claim_id WHERE t2.created_date = ( SELECT created_date FROM claims_documents ORDER BY created_date LIMIT 1 )"))
# print(query2sql("SELECT 书号(ISBN) WHERE 教材名称 == 线性代数 or 教材名称 == 中级有机化学", ["书号(ISBN)", "教材名称" ]))
| [
"[email protected]"
]
| |
6c197618046ee0d263b2e3a1c9afa8a75a232a6f | 628643508ebe023e3a310bfea2a48676fb230504 | /packages/dumbo/dumbo/backends/common.py | c04d464a33e5fd16d05937e2a187335a671695f1 | []
| no_license | wuyingminhui/input-lib | e89b317721e86ba9e4aec5934689eb9a90d7acea | 90e59e457c59ece98c26a3dc41d1119ae4fb599d | refs/heads/master | 2021-01-20T17:37:34.464335 | 2013-02-19T23:53:46 | 2013-02-19T23:53:46 | 19,423,999 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,628 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from dumbo.util import incrcounter, setstatus, getopts, configopts
class Params(object):
def get(self, name):
try:
return os.environ[name]
except KeyError:
return None
def __getitem__(self, key):
return self.get(str(key))
def __contains__(self, key):
return self.get(str(key)) != None
class Counter(object):
def __init__(self, name, group='Program'):
self.group = group
self.name = name
def incr(self, amount):
incrcounter(self.group, self.name, amount)
return self
__iadd__ = incr
class Counters(object):
def __init__(self):
self.counters = {}
def __getitem__(self, key):
try:
return self.counters[key]
except KeyError:
counter = Counter(str(key))
self.counters[key] = counter
return counter
def __setitem__(self, key, value):
pass
class MapRedBase(object):
params = Params()
counters = Counters()
def setstatus(self, msg):
setstatus(msg)
status = property(fset=setstatus)
class JoinKey(object):
def __init__(self, body, isprimary=False):
self.body = body
self.isprimary = isprimary
def __cmp__(self, other):
bodycmp = cmp(self.body, other.body)
if bodycmp:
return bodycmp
else:
return cmp(self.isprimary, other.isprimary)
@classmethod
def fromjoinkey(cls, jk):
return cls(jk.body, jk.isprimary)
@classmethod
def fromdump(cls, dump):
return cls(dump[0], dump[1] == 1)
def dump(self):
return (self.body, 2 - int(self.isprimary))
def __repr__(self):
return repr(self.dump())
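# Note: JoinKey.dump() encodes a primary join key as (body, 1) and a
# secondary one as (body, 2); JoinKey.fromdump() inverts that encoding.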
class RunInfo(object):
def get_input_path(self):
return 'unknown'
class Iteration(object):
def __init__(self, prog, opts):
(self.prog, self.opts) = (prog, opts)
def run(self):
addedopts = getopts(self.opts, ['fake',
'debug',
'python',
'iteration',
'itercount',
'hadoop',
'starter',
'name',
'memlimit',
'param',
'parser',
'record',
'joinkeys',
'hadoopconf',
'mapper',
'reducer'])
if addedopts['fake'] and addedopts['fake'][0] == 'yes':
def dummysystem(*args, **kwargs):
return 0
global system
system = dummysystem # not very clean, but it works...
if addedopts['debug'] and addedopts['debug'][0] == 'yes':
self.opts.append(('cmdenv', 'dumbo_debug=yes'))
if not addedopts['python']:
python = 'python'
else:
python = addedopts['python'][0]
self.opts.append(('python', python))
if not addedopts['iteration']:
iter = 0
else:
iter = int(addedopts['iteration'][0])
if not addedopts['itercount']:
itercnt = 0
else:
itercnt = int(addedopts['itercount'][0])
if addedopts['name']:
name = addedopts['name'][0]
else:
name = self.prog.split('/')[-1]
self.opts.append(('name', '%s (%s/%s)' % (name, iter + 1,
itercnt)))
if not addedopts['hadoop']:
pypath = '/'.join(self.prog.split('/')[:-1])
if pypath: self.opts.append(('pypath', pypath))
else:
self.opts.append(('hadoop', addedopts['hadoop'][0]))
progmod = self.prog.split('/')[-1]
progmod = progmod[:-3] if progmod.endswith('.py') else progmod
memlim = ' 262144000' # 250MB limit by default
if addedopts['memlimit']:
# Limit amount of memory. This supports syntax
# of the form '256m', '12g' etc.
try:
_memlim = int(addedopts['memlimit'][0][:-1])
memlim = ' %i' % {
'g': 1073741824 * _memlim,
'm': 1048576 * _memlim,
'k': 1024 * _memlim,
'b': 1 * _memlim,
}[addedopts['memlimit'][0][-1].lower()]
except KeyError:
# Assume specified in bytes by default
memlim = ' ' + addedopts['memlimit'][0]
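            # e.g. '--memlimit 256m' expands to ' 268435456' bytes (256 MiB).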
if addedopts['mapper']:
self.opts.append(('mapper', addedopts['mapper'][0]))
else:
self.opts.append(('mapper', '%s -m %s map %i%s' % (python,
progmod, iter, memlim)))
if addedopts['reducer']:
self.opts.append(('reducer', addedopts['reducer'][0]))
else:
self.opts.append(('reducer', '%s -m %s red %i%s' % (python,
progmod, iter, memlim)))
for param in addedopts['param']:
self.opts.append(('cmdenv', param))
if addedopts['parser'] and iter == 0:
parser = addedopts['parser'][0]
shortcuts = dict(configopts('parsers', self.prog))
if parser in shortcuts:
parser = shortcuts[parser]
self.opts.append(('cmdenv', 'dumbo_parser=' + parser))
if addedopts['record'] and iter == 0:
record = addedopts['record'][0]
shortcuts = dict(configopts('records', self.prog))
if record in shortcuts:
record = shortcuts[record]
self.opts.append(('cmdenv', 'dumbo_record=' + record))
if addedopts['joinkeys'] and addedopts['joinkeys'][0] == 'yes':
self.opts.append(('cmdenv', 'dumbo_joinkeys=yes'))
self.opts.append(('partitioner',
'org.apache.hadoop.mapred.lib.BinaryPartitioner'))
self.opts.append(('jobconf',
'mapred.binary.partitioner.right.offset=-6'))
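            # The -6 offset presumably excludes the trailing join-key marker
            # bytes from partitioning, so records that share a key body are
            # routed to the same reducer.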
for hadoopconf in addedopts['hadoopconf']:
self.opts.append(('jobconf', hadoopconf))
self.opts.append(('libegg', re.sub('\.egg.*$', '.egg', __file__)))
return 0
class FileSystem(object):
def cat(self, path, opts):
return 1 # fail by default
def ls(self, path, opts):
return 1 # fail by default
def exists(self, path, opts):
return 1 # fail by default
def rm(self, path, opts):
return 1 # fail by default
def put(self, path1, path2, opts):
return 1 # fail by default
def get(self, path1, path2, opts):
return 1 # fail by default
class Backend(object):
def matches(self, opts):
""" Returns True if the backend matches with the given opts """
return True
#abstractmethod
def create_iteration(self, opts):
""" Creates a suitable Iteration object """
pass
#abstractmethod
def create_filesystem(self, opts):
""" Creates a suitable FileSystem object """
pass
def get_mapredbase_class(self, opts):
""" Returns a suitable MapRedBase class """
return MapRedBase
def get_joinkey_class(self, opts):
""" Returns a suitable JoinKey class """
return JoinKey
def get_runinfo_class(self, opts):
""" Returns a suitable RunInfo class """
return RunInfo
| [
"[email protected]"
]
| |
97c488e5ad90e0f2906fd430de44698e972b15b5 | 53ba0b6f172abcade631ae1f52852c400302559e | /test/cv/bases/activates/DynamicReLUdemo.py | b99525743213c9b3e245292809f8a30322dc5698 | [
"Apache-2.0"
]
| permissive | sssssshf/python_developer_tools | f97c64ee0aa0a7e9d31d173192805771c83abb7f | 44d2e67a2e2495a12d6b32da12c76cf0010ac7ea | refs/heads/main | 2023-08-19T02:44:53.536200 | 2021-10-13T02:10:19 | 2021-10-13T02:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,507 | py | # !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/14/2021 3:19 PM
# @File:demo
import os
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from tqdm import tqdm
from python_developer_tools.cv.bases.activates.DynamicReLU import DyReLUA, DyReLUB, DyReLUC, convert_relu_to_DyReLU
from python_developer_tools.cv.utils.torch_utils import init_seeds
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class shufflenet_v2_x0_5M(nn.Module):
def __init__(self,nc,pretrained=True):
super(shufflenet_v2_x0_5M, self).__init__()
self.model_ft = torchvision.models.shufflenet_v2_x0_5(pretrained=pretrained)
        # Replace every ReLU in the backbone with DyReLUA
self.model_ft = convert_relu_to_DyReLU(self.model_ft,"A")
num_ftrs = self.model_ft.fc.in_features
self.model_ft.fc = nn.Linear(num_ftrs, nc)
def forward(self,x):
x = self.model_ft.conv1(x)
x = self.model_ft.maxpool(x)
x = self.model_ft.stage2(x)
x = self.model_ft.stage3(x)
x = self.model_ft.stage4(x)
x = self.model_ft.conv5(x)
x = x.mean([2, 3]) # globalpool
out = self.model_ft.fc(x)
return out
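# Note: forward() re-implements the stock ShuffleNetV2 forward pass step by
# step so the DyReLU-converted stages and the replacement fc head are used.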
if __name__ == '__main__':
"""
ReLU 41%
DyReLUA 42 %
DyReLUB 41 %
DyReLUC 40 %
"""
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
epochs = 50
batch_size = 1024
num_workers = 8
classes = 10
init_seeds(1024)
trainset = torchvision.datasets.CIFAR10(root=os.getcwd(), train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True)
testset = torchvision.datasets.CIFAR10(root=os.getcwd(), train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
model = shufflenet_v2_x0_5M(classes, True)
model.cuda()
model.train()
criterion = nn.CrossEntropyLoss()
# SGD with momentum
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
for epoch in range(epochs):
train_loss = 0.0
for i, (inputs, labels) in tqdm(enumerate(trainloader)):
inputs, labels = inputs.cuda(), labels.cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = model(inputs)
# loss
loss = criterion(outputs, labels)
# backward
loss.backward()
# update weights
optimizer.step()
# print statistics
train_loss += loss
scheduler.step()
        print('%d/%d loss: %.6f' % (epoch + 1, epochs, train_loss / len(trainset)))
correct = 0
model.eval()
for j, (images, labels) in tqdm(enumerate(testloader)):
outputs = model(images.cuda())
_, predicted = torch.max(outputs.data, 1)
correct += (predicted.cpu() == labels).sum()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / len(testset))) | [
"[email protected]"
]
| |
c59f4764cbfb8fbf791c758771b944e89cd8880f | 93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3 | /python/paddle/distributed/fleet/runtime/runtime_base.py | 2e8bacfbc3b1ded58e63e8d9e93764a0c0090b91 | [
"Apache-2.0"
]
| permissive | hutuxian/Paddle | f8b7693bccc6d56887164c1de0b6f6e91cffaae8 | a1b640bc66a5cc9583de503e7406aeba67565e8d | refs/heads/develop | 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 | Apache-2.0 | 2023-06-16T09:47:39 | 2019-01-10T02:50:31 | Python | UTF-8 | Python | false | false | 1,078 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = []
class RuntimeBase(object):
def __init__(self):
pass
def _set_basic_info(self, context):
self.context = context
def _run_worker(self):
pass
def _init_server(self, *args, **kwargs):
pass
def _run_server(self):
pass
def _stop_worker(self):
pass
def _save_inference_model(self, *args, **kwargs):
pass
def _save_persistables(self, *args, **kwargs):
pass
| [
"[email protected]"
]
| |
4351decb036d8072bdbfcd0c183b01bade4445e7 | 082c6d8f248257c8442bbef7412f9915ac4c33bd | /mlrun/api/api/endpoints/secrets.py | 875ae80681de74dfcb0fc81e1648b26c5a918c41 | [
"Apache-2.0"
]
| permissive | eran-nussbaum/mlrun | 24e7db989b4eb03548f127ff26d36f77b1c82250 | 97209b27ccf3daf8f202a1a2bb1b01abd537ad70 | refs/heads/master | 2023-08-26T01:35:02.797712 | 2021-10-21T10:18:24 | 2021-10-21T10:18:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,928 | py | from http import HTTPStatus
from typing import List
import fastapi
from sqlalchemy.orm import Session
import mlrun.api.api.deps
import mlrun.api.crud
import mlrun.api.utils.auth.verifier
import mlrun.api.utils.singletons.project_member
import mlrun.errors
from mlrun.api import schemas
from mlrun.utils.vault import add_vault_user_secrets
router = fastapi.APIRouter()
@router.post("/projects/{project}/secrets", status_code=HTTPStatus.CREATED.value)
def store_project_secrets(
project: str,
secrets: schemas.SecretsData,
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
# Doing a specific check for project existence, because we want to return 404 in the case of a project not
# existing, rather than returning a permission error, as it misleads the user. We don't even care for return
# value.
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
secrets.provider,
mlrun.api.schemas.AuthorizationAction.create,
auth_info,
)
mlrun.api.crud.Secrets().store_secrets(project, secrets)
return fastapi.Response(status_code=HTTPStatus.CREATED.value)
@router.delete("/projects/{project}/secrets", status_code=HTTPStatus.NO_CONTENT.value)
def delete_project_secrets(
project: str,
provider: schemas.SecretProviderName,
secrets: List[str] = fastapi.Query(None, alias="secret"),
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
provider,
mlrun.api.schemas.AuthorizationAction.delete,
auth_info,
)
mlrun.api.crud.Secrets().delete_secrets(project, provider, secrets)
return fastapi.Response(status_code=HTTPStatus.NO_CONTENT.value)
@router.get("/projects/{project}/secret-keys", response_model=schemas.SecretKeysData)
def list_secret_keys(
project: str,
provider: schemas.SecretProviderName = schemas.SecretProviderName.vault,
token: str = fastapi.Header(None, alias=schemas.HeaderNames.secret_store_token),
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
provider,
mlrun.api.schemas.AuthorizationAction.read,
auth_info,
)
return mlrun.api.crud.Secrets().list_secret_keys(project, provider, token)
@router.get("/projects/{project}/secrets", response_model=schemas.SecretsData)
def list_secrets(
project: str,
secrets: List[str] = fastapi.Query(None, alias="secret"),
provider: schemas.SecretProviderName = schemas.SecretProviderName.vault,
token: str = fastapi.Header(None, alias=schemas.HeaderNames.secret_store_token),
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
provider,
mlrun.api.schemas.AuthorizationAction.read,
auth_info,
)
return mlrun.api.crud.Secrets().list_secrets(project, provider, secrets, token)
@router.post("/user-secrets", status_code=HTTPStatus.CREATED.value)
def add_user_secrets(secrets: schemas.UserSecretCreationRequest):
if secrets.provider != schemas.SecretProviderName.vault:
return fastapi.Response(
            status_code=HTTPStatus.BAD_REQUEST.value,
content=f"Invalid secrets provider {secrets.provider}",
)
add_vault_user_secrets(secrets.user, secrets.secrets)
return fastapi.Response(status_code=HTTPStatus.CREATED.value)
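
# Illustration only (not part of mlrun): these routes are mounted on the mlrun
# API server, so a client call could look like the sketch below. The base URL
# and the exact SecretsData payload fields are assumptions, not confirmed API.
#
#   import requests
#   base = "http://localhost:8080/api/v1"
#   requests.post(f"{base}/projects/demo/secrets",
#                 json={"provider": "kubernetes",
#                       "secrets": {"MY_TOKEN": "..."}})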
| [
"[email protected]"
]
| |
d4cbcfa95fad06e8d14954bfdccb2f13136a60d3 | f30b91db647dca1f77fffa4b7e26b6c6a68abbc6 | /6_kyu/Greatest Common Factor of an Array/python/test_solution.py | 748f17fd9c642882c5538a1f17670cd275df2e8b | []
| no_license | estraviz/codewars | 73caf95519eaac6f34962b8ade543bf4417df5b7 | 5f8685e883cb78381c528a0988f2b5cad6c129c2 | refs/heads/master | 2023-05-13T07:57:43.165290 | 2023-05-08T21:50:39 | 2023-05-08T21:50:39 | 159,744,593 | 10 | 55 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | import pytest
from solution import greatest_common_factor
tests = [
([1, 8], 1),
([16, 4, 8], 4),
([46, 14, 20, 88], 2),
([468, 156, 806, 312, 442], 26),
([48, 99, 18], 3),
([32, 96, 120, 80], 8),
([91, 143, 234, 52], 13),
([171, 45, 297, 342], 9),
]
@pytest.mark.parametrize(
"seq, expected", tests
)
def test_greatest_common_factor(seq, expected):
assert greatest_common_factor(seq) == expected
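
# Illustration only: a reference implementation that satisfies the tests above,
# kept under a private name so it cannot shadow the imported solution (the
# kata's actual solution.py may differ).
from functools import reduce
from math import gcd
def _reference_greatest_common_factor(seq):
    return reduce(gcd, seq)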
| [
"[email protected]"
]
| |
98e4d8bc25567926017f664b32295fec1b5026f4 | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/storage/StorageLocalDiskConfigDef.py | d35bd2a160c55172b79f7ccacdd315218f552866 | [
"Apache-2.0"
]
| permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,393 | py | """This module contains the general information for StorageLocalDiskConfigDef ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageLocalDiskConfigDefConsts():
FLEX_FLASH_RAIDREPORTING_STATE_DISABLE = "disable"
FLEX_FLASH_RAIDREPORTING_STATE_ENABLE = "enable"
FLEX_FLASH_STATE_DISABLE = "disable"
FLEX_FLASH_STATE_ENABLE = "enable"
INT_ID_NONE = "none"
MODE_ANY_CONFIGURATION = "any-configuration"
MODE_BEST_EFFORT_MIRRORED = "best-effort-mirrored"
MODE_BEST_EFFORT_MIRRORED_STRIPED = "best-effort-mirrored-striped"
MODE_BEST_EFFORT_STRIPED = "best-effort-striped"
MODE_BEST_EFFORT_STRIPED_DUAL_PARITY = "best-effort-striped-dual-parity"
MODE_BEST_EFFORT_STRIPED_PARITY = "best-effort-striped-parity"
MODE_DUAL_DISK = "dual-disk"
MODE_NO_LOCAL_STORAGE = "no-local-storage"
MODE_NO_RAID = "no-raid"
MODE_RAID_MIRRORED = "raid-mirrored"
MODE_RAID_MIRRORED_STRIPED = "raid-mirrored-striped"
MODE_RAID_STRIPED = "raid-striped"
MODE_RAID_STRIPED_DUAL_PARITY = "raid-striped-dual-parity"
MODE_RAID_STRIPED_DUAL_PARITY_STRIPED = "raid-striped-dual-parity-striped"
MODE_RAID_STRIPED_PARITY = "raid-striped-parity"
MODE_RAID_STRIPED_PARITY_STRIPED = "raid-striped-parity-striped"
MODE_SINGLE_DISK = "single-disk"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
PROTECT_CONFIG_FALSE = "false"
PROTECT_CONFIG_NO = "no"
PROTECT_CONFIG_TRUE = "true"
PROTECT_CONFIG_YES = "yes"
class StorageLocalDiskConfigDef(ManagedObject):
"""This is StorageLocalDiskConfigDef class."""
consts = StorageLocalDiskConfigDefConsts()
naming_props = set([])
mo_meta = MoMeta("StorageLocalDiskConfigDef", "storageLocalDiskConfigDef", "local-disk-config", VersionMeta.Version101e, "InputOutput", 0xfff, [], ["admin", "ls-compute", "ls-config", "ls-config-policy", "ls-server", "ls-storage", "ls-storage-policy"], [u'lsServer', u'lstorageDasScsiLun', u'storageController', u'storageFlexFlashController'], [u'storageLocalDiskPartition'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"flex_flash_raid_reporting_state": MoPropertyMeta("flex_flash_raid_reporting_state", "flexFlashRAIDReportingState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["disable", "enable"], []),
"flex_flash_state": MoPropertyMeta("flex_flash_state", "flexFlashState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["disable", "enable"], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"mode": MoPropertyMeta("mode", "mode", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["any-configuration", "best-effort-mirrored", "best-effort-mirrored-striped", "best-effort-striped", "best-effort-striped-dual-parity", "best-effort-striped-parity", "dual-disk", "no-local-storage", "no-raid", "raid-mirrored", "raid-mirrored-striped", "raid-striped", "raid-striped-dual-parity", "raid-striped-dual-parity-striped", "raid-striped-parity", "raid-striped-parity-striped", "single-disk"], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["local", "pending-policy", "policy"], []),
"protect_config": MoPropertyMeta("protect_config", "protectConfig", "string", VersionMeta.Version131c, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["false", "no", "true", "yes"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x400, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"flexFlashRAIDReportingState": "flex_flash_raid_reporting_state",
"flexFlashState": "flex_flash_state",
"intId": "int_id",
"mode": "mode",
"name": "name",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"protectConfig": "protect_config",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.descr = None
self.flex_flash_raid_reporting_state = None
self.flex_flash_state = None
self.int_id = None
self.mode = None
self.name = None
self.policy_level = None
self.policy_owner = None
self.protect_config = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "StorageLocalDiskConfigDef", parent_mo_or_dn, **kwargs)
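
# Illustration only (not part of the generated SDK): a typical add flow for
# this MO through the UCSM handle; the IP, credentials and parent
# service-profile DN below are hypothetical.
#
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("192.0.2.10", "admin", "password")
#   handle.login()
#   mo = StorageLocalDiskConfigDef(parent_mo_or_dn="org-root/ls-sp1",
#                                  mode="raid-mirrored", protect_config="yes")
#   handle.add_mo(mo)
#   handle.commit()
#   handle.logout()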
| [
"[email protected]"
]
| |
436b1d40d931864183e4790ee0b3374e829502cb | be24b5f37823125b2b901c0029175bfb2f25fb0e | /src/homework/homework12/win.py | 7f851ec0cfc578951966aaf3ea0a12716f2bc633 | [
"MIT"
]
| permissive | acc-cosc-1336/cosc-1336-spring-2018-Miguelh1997 | 1bd75c51e72431037a46a1b3079d7695c41920ce | ac4b0405c4070758d0fc07458d4dca8a8a0313de | refs/heads/master | 2021-05-11T09:11:41.887630 | 2018-05-12T03:11:38 | 2018-05-12T03:11:38 | 118,070,058 | 0 | 1 | MIT | 2018-05-12T03:16:17 | 2018-01-19T03:13:02 | Python | UTF-8 | Python | false | false | 874 | py | from tkinter import Tk, Label, Button
from src.homework.homework12.converter import Converter
class Win(Tk):
def __init__(self):
self.miles = Converter()
Tk.__init__(self, None, None)
self.wm_title('Miles to Kilometers converter')
        # grid() returns None, so create and place each widget in separate steps
        self.button_quit = Button(self, text='Quit', command=self.destroy)
        self.button_quit.grid(row=2, column=3)
        self.display_conversion_button = Button(self, text='Display Conversion', command=self.display_labels)
        self.display_conversion_button.grid(row=2, column=1)
self.mainloop()
def display_labels(self):
        km = 100
        self.km_label = Label(self, text='Km:' + str(km))
        self.km_label.grid(row=0, column=1, sticky="w")
        self.miles_label = Label(self, text='Miles:' + str(self.miles.get_miles_from_km(km)))
        self.miles_label.grid(row=1, column=1, sticky="w")
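
# Illustration only: launching the window directly; Converter comes from the
# local homework module and is assumed to expose get_miles_from_km().
if __name__ == '__main__':
    Win()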
| [
"[email protected]"
]
| |
806adbe21341eef6b01e1fe731fc872fa7cb112d | 31252d95232aacaee80b5b3d22cf8b66f05d24c6 | /8.AnomalyDetection_RecommenderSystem/machine-learning-ex8/ex8/selectThreshold.py | 49b530847bb4a727357032badf908a9836c3daba | []
| no_license | mrech/MachineLearning_AndrewNg | 54ae44824d5ae53c8faf3f4adeff76935d4f479a | 748a49ece69dae413b78f9de95b3fb483848ee59 | refs/heads/master | 2020-04-24T10:37:57.072292 | 2019-08-20T13:16:50 | 2019-08-20T13:16:50 | 171,899,951 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | # SELECTTHRESHOLD Find the best threshold (epsilon) to use for selecting
# outliers
def selectThreshold(yval, pval):
'''
[bestEpsilon bestF1] = SELECTTHRESHOLD(yval, pval) finds the best
threshold to use for selecting outliers based on the results from a
validation set (pval) and the ground truth (yval).
'''
import numpy as np
bestEpsilon = 0
bestF1 = 0
stepsize = (max(pval) - min(pval)) / 1000
# Instructions: Compute the F1 score of choosing epsilon as the
# threshold and place the value in F1. The code at the
# end of the loop will compare the F1 score for this
# choice of epsilon and set it to be the best epsilon if
# it is better than the current choice of epsilon.
for epsilon in np.arange(min(pval), max(pval), stepsize):
# predict the anomaly
prediction = (pval < epsilon)
# calculate the F1 score
        tp = np.sum((prediction == 1) & (yval.flatten() == 1))
        fp = np.sum((prediction == 1) & (yval.flatten() == 0))
        fn = np.sum((prediction == 0) & (yval.flatten() == 1))
        # RuntimeWarning handling due to 0/0
        # CASE: when the algorithm classifies everything as NO ANOMALY.
        # Note: the guard must be a plain `tp == 0` -- the original
        # `tp == 0 & fp == 0` chained as `tp == (0 & fp) == 0` because `&`
        # binds tighter than `==`, and prec + rec can still be 0 when tp == 0.
        if tp == 0:
F1 = 0
else:
prec = tp/(tp+fp)
rec = tp/(tp+fn)
F1 = (2*prec*rec)/(prec+rec)
if F1 > bestF1:
bestF1 = F1
bestEpsilon = epsilon
    return bestEpsilon, bestF1
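
# Illustration only: a tiny smoke test on hypothetical values -- the single
# low-density point is the anomaly, so the best F1 should be 1.0 with an
# epsilon just above that point's density.
if __name__ == '__main__':
    import numpy as np
    pval = np.array([0.9, 0.8, 0.001, 0.85])  # estimated densities on a validation set
    yval = np.array([[0], [0], [1], [0]])     # ground truth: 1 marks an anomaly
    eps, f1 = selectThreshold(yval, pval)
    print('bestEpsilon: %e, bestF1: %f' % (eps, f1))
| [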
"[email protected]"
]
| |
6b9d9cb08643c389b3521d474805c579b9985e06 | d6a152b8662af82ec604fa63c5c415dc6b59699b | /aeshin/settings.py | 70a75e24b58f9aa5ff4c1165e6113aeb7a401c45 | []
| no_license | rybesh/aeshin | 7cf433ba93309f49e2ff676c2d4568244f81ee52 | 292867a8b80031cacfce70c67387c656c3cb191b | refs/heads/master | 2023-08-19T00:17:40.042842 | 2023-08-17T17:47:55 | 2023-08-17T17:47:55 | 22,109,808 | 0 | 0 | null | 2023-09-05T14:05:34 | 2014-07-22T15:40:33 | Python | UTF-8 | Python | false | false | 5,121 | py | import os
import environ
from pathlib import Path
from django.db.models.query import QuerySet
# environment variables -------------------------------------------------------
BASE_DIR = Path(__file__).resolve().parent.parent
environ.Env.read_env(BASE_DIR / ".env")
env = environ.Env(DEBUG=(bool, False))
# typing ----------------------------------------------------------------------
QuerySet.__class_getitem__ = classmethod(
lambda cls, *args, **kwargs: cls # pyright: ignore
)
# database --------------------------------------------------------------------
DATABASES = {"default": env.db()}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# debugging -------------------------------------------------------------------
DEBUG = env("DEBUG")
TEMPLATE_DEBUG = False
# logging ---------------------------------------------------------------------
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
},
"mail_admins": {
"level": "ERROR",
"class": "django.utils.log.AdminEmailHandler",
"include_html": False,
},
},
"loggers": {
"django": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
},
},
}
# email -----------------------------------------------------------------------
ADMINS = (("Ryan Shaw", "[email protected]"),)
MANAGERS = ADMINS
DEFAULT_FROM_EMAIL = "aeshin.org <[email protected]>"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
EMAIL_HOST = "email-smtp.us-east-1.amazonaws.com"
EMAIL_HOST_USER = env("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# file uploads ----------------------------------------------------------------
MEDIA_ROOT = env.path("MEDIA_ROOT", default=BASE_DIR / "media/")
MEDIA_URL = "files/"
# globalization ---------------------------------------------------------------
LANGUAGE_CODE = "en-us"
TIME_ZONE = "US/Eastern"
USE_I18N = False
USE_TZ = True
# http ------------------------------------------------------------------------
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"aeshin.middleware.WWWRedirectMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
WSGI_APPLICATION = "aeshin.wsgi.application"
# models ----------------------------------------------------------------------
INSTALLED_APPS = (
"aeshin",
"shared",
"courses",
"files",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"whitenoise.runserver_nostatic",
"django.contrib.staticfiles",
"django.contrib.sites",
)
# security --------------------------------------------------------------------
SECRET_KEY = env("SECRET_KEY")
ALLOWED_HOSTS = [
".aeshin.org",
".localhost",
"127.0.0.1",
"[::1]",
"aeshin.fly.dev",
]
CSRF_TRUSTED_ORIGINS = [
"https://*.aeshin.org",
"https://aeshin.fly.dev",
]
# templates -------------------------------------------------------------------
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.template.context_processors.request",
]
},
}
]
# urls ------------------------------------------------------------------------
ROOT_URLCONF = "aeshin.urls"
# django.contrib.auth ---------------------------------------------------------
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/loggedin/"
LOGOUT_URL = "/logout/"
# django.contrib.sites --------------------------------------------------------
SITE_ID = 1
# django.contrib.staticfiles --------------------------------------------------
STATIC_ROOT = BASE_DIR / "static"
STATIC_URL = "/static/"
STORAGES = {
"default": {"BACKEND": "django.core.files.storage.FileSystemStorage"},
"staticfiles": {
"BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage"
},
}
# shared ----------------------------------------------------------------------
ZOTERO_GROUP_ID = "51755"
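
# Illustration only: a minimal .env file this module would read via
# django-environ (placeholder values, not the project's real settings):
#
#   DEBUG=off
#   SECRET_KEY=change-me
#   DATABASE_URL=postgres://user:pass@localhost:5432/aeshin
#   MEDIA_ROOT=/srv/aeshin/media/
#   EMAIL_HOST_USER=smtp-user
#   EMAIL_HOST_PASSWORD=smtp-pass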
| [
"[email protected]"
]
| |
efe34cb92a79af37bb8543e60c8e2e2406f2d995 | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/test/connectivity/acts_tests/tests/google/gnss/FlpTtffTest.py | 59b19b52f02ad880d66a9f081f34023770f3b823 | []
| no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,611 | py | #!/usr/bin/env python3.5
#
# Copyright 2019 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from acts import utils
from acts import asserts
from acts import signals
from acts.base_test import BaseTestClass
from acts.test_decorators import test_tracker_info
from acts.utils import get_current_epoch_time
from acts_contrib.test_utils.wifi.wifi_test_utils import wifi_toggle_state
from acts_contrib.test_utils.tel.tel_test_utils import start_qxdm_logger
from acts_contrib.test_utils.tel.tel_test_utils import stop_qxdm_logger
from acts_contrib.test_utils.tel.tel_test_utils import verify_internet_connection
from acts_contrib.test_utils.tel.tel_test_utils import abort_all_tests
from acts_contrib.test_utils.tel.tel_test_utils import force_airplane_mode
from acts_contrib.test_utils.gnss.gnss_test_utils import get_baseband_and_gms_version
from acts_contrib.test_utils.gnss.gnss_test_utils import _init_device
from acts_contrib.test_utils.gnss.gnss_test_utils import check_location_service
from acts_contrib.test_utils.gnss.gnss_test_utils import clear_logd_gnss_qxdm_log
from acts_contrib.test_utils.gnss.gnss_test_utils import set_mobile_data
from acts_contrib.test_utils.gnss.gnss_test_utils import get_gnss_qxdm_log
from acts_contrib.test_utils.gnss.gnss_test_utils import set_wifi_and_bt_scanning
from acts_contrib.test_utils.gnss.gnss_test_utils import process_gnss_by_gtw_gpstool
from acts_contrib.test_utils.gnss.gnss_test_utils import start_ttff_by_gtw_gpstool
from acts_contrib.test_utils.gnss.gnss_test_utils import process_ttff_by_gtw_gpstool
from acts_contrib.test_utils.gnss.gnss_test_utils import check_ttff_data
from acts_contrib.test_utils.gnss.gnss_test_utils import set_attenuator_gnss_signal
from acts_contrib.test_utils.gnss.gnss_test_utils import connect_to_wifi_network
from acts_contrib.test_utils.gnss.gnss_test_utils import gnss_tracking_via_gtw_gpstool
from acts_contrib.test_utils.gnss.gnss_test_utils import parse_gtw_gpstool_log
from acts_contrib.test_utils.tel.tel_test_utils import start_adb_tcpdump
from acts_contrib.test_utils.tel.tel_test_utils import stop_adb_tcpdump
from acts_contrib.test_utils.tel.tel_test_utils import get_tcpdump_log
class FlpTtffTest(BaseTestClass):
""" FLP TTFF Tests"""
def setup_class(self):
super().setup_class()
self.ad = self.android_devices[0]
req_params = ["pixel_lab_network", "standalone_cs_criteria",
"qdsp6m_path", "flp_ttff_max_threshold",
"pixel_lab_location", "default_gnss_signal_attenuation",
"weak_gnss_signal_attenuation", "ttff_test_cycle",
"collect_logs"]
self.unpack_userparams(req_param_names=req_params)
self.ssid_map = {}
for network in self.pixel_lab_network:
SSID = network['SSID']
self.ssid_map[SSID] = network
if int(self.ad.adb.shell("settings get global airplane_mode_on")) != 0:
self.ad.log.info("Force airplane mode off")
force_airplane_mode(self.ad, False)
_init_device(self.ad)
def setup_test(self):
get_baseband_and_gms_version(self.ad)
if self.collect_logs:
clear_logd_gnss_qxdm_log(self.ad)
set_attenuator_gnss_signal(self.ad, self.attenuators,
self.default_gnss_signal_attenuation)
if not verify_internet_connection(self.ad.log, self.ad, retries=3,
expected_state=True):
raise signals.TestFailure("Fail to connect to LTE network.")
def teardown_test(self):
if self.collect_logs:
stop_qxdm_logger(self.ad)
stop_adb_tcpdump(self.ad)
set_attenuator_gnss_signal(self.ad, self.attenuators,
self.default_gnss_signal_attenuation)
if int(self.ad.adb.shell("settings get global mobile_data")) != 1:
set_mobile_data(self.ad, True)
if int(self.ad.adb.shell(
"settings get global wifi_scan_always_enabled")) != 1:
set_wifi_and_bt_scanning(self.ad, True)
if self.ad.droid.wifiCheckState():
wifi_toggle_state(self.ad, False)
def on_pass(self, test_name, begin_time):
if self.collect_logs:
self.ad.take_bug_report(test_name, begin_time)
get_gnss_qxdm_log(self.ad, self.qdsp6m_path)
get_tcpdump_log(self.ad, test_name, begin_time)
def on_fail(self, test_name, begin_time):
if self.collect_logs:
self.ad.take_bug_report(test_name, begin_time)
get_gnss_qxdm_log(self.ad, self.qdsp6m_path)
get_tcpdump_log(self.ad, test_name, begin_time)
""" Helper Functions """
def flp_ttff_hs_and_cs(self, criteria, location):
flp_results = []
ttff = {"hs": "Hot Start", "cs": "Cold Start"}
for mode in ttff.keys():
begin_time = get_current_epoch_time()
process_gnss_by_gtw_gpstool(
self.ad, self.standalone_cs_criteria, type="flp")
start_ttff_by_gtw_gpstool(
self.ad, ttff_mode=mode, iteration=self.ttff_test_cycle)
ttff_data = process_ttff_by_gtw_gpstool(
self.ad, begin_time, location, type="flp")
result = check_ttff_data(self.ad, ttff_data, ttff[mode], criteria)
flp_results.append(result)
asserts.assert_true(
all(flp_results), "FLP TTFF fails to reach designated criteria")
def start_qxdm_and_tcpdump_log(self):
"""Start QXDM and adb tcpdump if collect_logs is True."""
if self.collect_logs:
start_qxdm_logger(self.ad, get_current_epoch_time())
start_adb_tcpdump(self.ad)
""" Test Cases """
@test_tracker_info(uuid="c11ada6a-d7ad-4dc8-9d4a-0ae3cb9dfa8e")
def test_flp_one_hour_tracking(self):
"""Verify FLP tracking performance of position error.
Steps:
1. Launch GTW_GPSTool.
2. FLP tracking for 60 minutes.
Expected Results:
DUT could finish 60 minutes test and output track data.
"""
self.start_qxdm_and_tcpdump_log()
gnss_tracking_via_gtw_gpstool(self.ad, self.standalone_cs_criteria,
type="flp", testtime=60)
parse_gtw_gpstool_log(self.ad, self.pixel_lab_location, type="flp")
@test_tracker_info(uuid="8bc4e82d-fdce-4ee8-af8c-5e4a925b5360")
def test_flp_ttff_strong_signal_wifiscan_on_wifi_connect(self):
"""Verify FLP TTFF Hot Start and Cold Start under strong GNSS signals
with WiFi scanning on and connected.
Steps:
1. Enable WiFi scanning in location setting.
2. Connect to WiFi AP.
3. TTFF Hot Start for 10 iteration.
4. TTFF Cold Start for 10 iteration.
Expected Results:
Both FLP TTFF Hot Start and Cold Start results should be within
flp_ttff_max_threshold.
"""
self.start_qxdm_and_tcpdump_log()
set_wifi_and_bt_scanning(self.ad, True)
wifi_toggle_state(self.ad, True)
connect_to_wifi_network(
self.ad, self.ssid_map[self.pixel_lab_network[0]["SSID"]])
self.flp_ttff_hs_and_cs(self.flp_ttff_max_threshold,
self.pixel_lab_location)
@test_tracker_info(uuid="adc1a0c7-3635-420d-9481-0f5816c58334")
def test_flp_ttff_strong_signal_wifiscan_on_wifi_not_connect(self):
"""Verify FLP TTFF Hot Start and Cold Start under strong GNSS signals
with WiFi scanning on and not connected.
Steps:
1. Enable WiFi scanning in location setting.
2. WiFi is not connected.
3. TTFF Hot Start for 10 iteration.
4. TTFF Cold Start for 10 iteration.
Expected Results:
Both FLP TTFF Hot Start and Cold Start results should be within
flp_ttff_max_threshold.
"""
self.start_qxdm_and_tcpdump_log()
set_wifi_and_bt_scanning(self.ad, True)
self.flp_ttff_hs_and_cs(self.flp_ttff_max_threshold,
self.pixel_lab_location)
@test_tracker_info(uuid="3ec3cee2-b881-4c61-9df1-b6b81fcd4527")
def test_flp_ttff_strong_signal_wifiscan_off(self):
"""Verify FLP TTFF Hot Start and Cold Start with WiFi scanning OFF
under strong GNSS signals.
Steps:
1. Disable WiFi scanning in location setting.
2. TTFF Hot Start for 10 iteration.
3. TTFF Cold Start for 10 iteration.
Expected Results:
Both FLP TTFF Hot Start and Cold Start results should be within
flp_ttff_max_threshold.
"""
self.start_qxdm_and_tcpdump_log()
set_wifi_and_bt_scanning(self.ad, False)
self.flp_ttff_hs_and_cs(self.flp_ttff_max_threshold,
self.pixel_lab_location)
@test_tracker_info(uuid="03c0d34f-8312-48d5-8753-93b09151233a")
def test_flp_ttff_weak_signal_wifiscan_on_wifi_connect(self):
"""Verify FLP TTFF Hot Start and Cold Start under Weak GNSS signals
with WiFi scanning on and connected
Steps:
1. Set attenuation value to weak GNSS signal.
2. Enable WiFi scanning in location setting.
3. Connect to WiFi AP.
4. TTFF Hot Start for 10 iteration.
5. TTFF Cold Start for 10 iteration.
Expected Results:
Both FLP TTFF Hot Start and Cold Start results should be within
flp_ttff_max_threshold.
"""
set_attenuator_gnss_signal(self.ad, self.attenuators,
self.weak_gnss_signal_attenuation)
self.start_qxdm_and_tcpdump_log()
set_wifi_and_bt_scanning(self.ad, True)
wifi_toggle_state(self.ad, True)
connect_to_wifi_network(
self.ad, self.ssid_map[self.pixel_lab_network[0]["SSID"]])
self.flp_ttff_hs_and_cs(self.flp_ttff_max_threshold,
self.pixel_lab_location)
@test_tracker_info(uuid="13daf7b3-5ac5-4107-b3dc-a3a8b5589fed")
def test_flp_ttff_weak_signal_wifiscan_on_wifi_not_connect(self):
"""Verify FLP TTFF Hot Start and Cold Start under Weak GNSS signals
with WiFi scanning on and not connected.
Steps:
1. Set attenuation value to weak GNSS signal.
2. Enable WiFi scanning in location setting.
3. WiFi is not connected.
4. TTFF Hot Start for 10 iteration.
5. TTFF Cold Start for 10 iteration.
Expected Results:
Both FLP TTFF Hot Start and Cold Start results should be within
flp_ttff_max_threshold.
"""
set_attenuator_gnss_signal(self.ad, self.attenuators,
self.weak_gnss_signal_attenuation)
self.start_qxdm_and_tcpdump_log()
set_wifi_and_bt_scanning(self.ad, True)
self.flp_ttff_hs_and_cs(self.flp_ttff_max_threshold,
self.pixel_lab_location)
@test_tracker_info(uuid="1831f80f-099f-46d2-b484-f332046d5a4d")
def test_flp_ttff_weak_signal_wifiscan_off(self):
"""Verify FLP TTFF Hot Start and Cold Start with WiFi scanning OFF
under weak GNSS signals.
Steps:
1. Set attenuation value to weak GNSS signal.
2. Disable WiFi scanning in location setting.
3. TTFF Hot Start for 10 iteration.
4. TTFF Cold Start for 10 iteration.
Expected Results:
Both FLP TTFF Hot Start and Cold Start results should be within
flp_ttff_max_threshold.
"""
set_attenuator_gnss_signal(self.ad, self.attenuators,
self.weak_gnss_signal_attenuation)
self.start_qxdm_and_tcpdump_log()
set_wifi_and_bt_scanning(self.ad, False)
self.flp_ttff_hs_and_cs(self.flp_ttff_max_threshold,
self.pixel_lab_location)
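
# Illustration only: the req_params unpacked in setup_class come from the ACTS
# testbed config. A minimal, hypothetical fragment (values are placeholders):
#
#   "standalone_cs_criteria": 60,
#   "flp_ttff_max_threshold": 100,
#   "ttff_test_cycle": 10,
#   "collect_logs": true,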
| [
"[email protected]"
]
| |
6b481b75639a36ee3c439a151988f25c85d6cadd | 71b3766d0641361a52f62af263fe8efa90fccbab | /blog/views.py | 592ccebccef4e17cd3162123c7aec7c0189fc55e | []
| no_license | firchatn/Blog-Website | bd4859774fda9cccc60f4eaa4c322cbc0d80d487 | 04663501b442f51f14e0b5fdc1f188488172c455 | refs/heads/master | 2021-05-05T15:10:29.114072 | 2018-11-02T09:45:32 | 2018-11-02T09:45:32 | 103,161,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Article, Catecory
# Create your views here.
def index(request):
    articles = Article.objects.all()
catecory = Catecory.objects.all()
page = request.GET.get('page')
cat = request.GET.get('cat')
if cat:
        cat = Catecory.objects.get(name=cat)
        articles = Article.objects.filter(catecory=cat)
    paginator = Paginator(articles, 2)  # show 2 articles per page
try:
article = paginator.page(page)
except PageNotAnInteger:
article = paginator.page(1)
except EmptyPage:
article = paginator.page(paginator.num_pages)
    # TODO: have an admin review articles before they are added to the blog
# article = Article.objects.filter(isreviewed=True)
toplast = Article.objects.all()[:3]
return render(request,'blog/index.html', {'article' : article , 'toplast' : toplast, 'catecory' : catecory })
def contact(request):
    return render(request, 'blog/contact.html')
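
# Illustration only: a urls.py that would wire these views up (module layout
# assumed, not taken from the repository):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('contact/', views.contact, name='contact'),
#   ]
| [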
"[email protected]"
]
| |
47ac38c48ab6a0ffe276aa299b2b85a3c9afe994 | 1eba03a3a7b5f6133dfcbc7a0ab9c73f950a79d8 | /algorithms/137. Single Number II/main.py | 966d1c3f0ce2f52164f788ad395c5ee7fc2c6042 | []
| no_license | GTxx/leetcode | ab640cad726111a5fd78ecfbc02f75a61112bc2c | b7f85afe1c69f34f8c6025881224ae79042850d3 | refs/heads/master | 2021-06-15T18:43:41.358275 | 2021-05-08T08:15:05 | 2021-05-08T08:15:05 | 70,294,841 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
x1 = 0
x2 = 0
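        # Two-bit counter per bit position: (x2, x1) tracks how many times each
        # bit has been seen, modulo 3 (states 00 -> 01 -> 10 -> back to 00).
        # Bits appearing a multiple of 3 times cancel out, so after the loop
        # x1 holds exactly the bits of the number that appeared once.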
for num in nums:
x2 ^= x1 & num
x1 ^= num
mask = ~(x1 & x2)
x2 &= mask
x1 &= mask
return x1
if __name__ == "__main__":
s = Solution()
print s.singleNumber([6,6,6,5]) | [
"[email protected]"
]
|