blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b9ed6ed0530e8623a9bbac53c115fadbaf8fb92 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_recesses.py | a34a6d1f64dbe47f008faa9c0c762b260b8b828f | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
# class header
class _RECESSES():
    def __init__(self,):
        self.name = "RECESSES"
        self.definitions = 'recess'  # fixed: was the bare name `recess`, a NameError
        self.parents = []
        self.children = []  # fixed typo: was `childen`
        self.properties = []
        self.jsondata = {}
        self.basic = ['recess']
| [
"[email protected]"
]
| |
0f6bff7af88112200164ee73a63e93548e0b7606 | 1094e533594d6fbdf4a0f605b06a1954336b52e8 | /index/views.py | 586009091d84dd75a9a807174d8ade7c1949bc90 | []
| no_license | leezhiyong08/friutday | ac424c31bc2dd54aa61e76f13b8264042b4ba741 | 16f6a25d827f64fe88a526adf3e51de543b1c2de | refs/heads/master | 2020-04-24T01:14:11.321113 | 2019-02-16T13:40:16 | 2019-02-16T13:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,810 | py | import json
from django.core import serializers
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import *
# Create your views here.
def index_views(request):
return render(request,'index.html')
# view for /login
def login_views(request):
url = '/'
if request.method == 'GET':
        # GET flow
        # check whether the session holds login info
        if 'uid' in request.session and 'uphone' in request.session:
            # the session has values: redirect to the home page or the original path
            print('session has data')
            return redirect(url)
        else:
            # the session has no values
            # check whether the cookies hold uid and uphone
            if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
                # the cookies hold login info
                # copy the values from the cookies into the session
                uid = request.COOKIES['uid']
                uphone = request.COOKIES['uphone']
                request.session['uid'] = uid
                request.session['uphone'] = uphone
                # redirect to the home page or the original path
                return redirect(url)
            else:
                # no login info in the cookies
                # go to the login page
                form = LoginForm()
                return render(request, 'login.html', locals())
else:
        # POST flow
        # perform the login: take uphone and upwd and check them against the db
        uphone = request.POST['uphone']
        upwd = request.POST['upwd']
        uList = Users.objects.filter(uphone=uphone, upwd=upwd)
        # if uList:
        if uphone == '13511225566' and upwd == '123456':
            # login succeeded
            # uid = uList[0].id
            # save uphone and uid into the session
            uid = '01'
            request.session['uid'] = uid
            request.session['uphone'] = uphone
            # if "remember password" was checked, also save the values into cookies
            resp = redirect(url)
            if 'isSaved' in request.POST:
                # remember password: save into cookies
                expires = 60 * 60 * 24 * 366
                resp.set_cookie('uid', uid, expires)
                resp.set_cookie('uphone', uphone, expires)
            # redirect to the home page or the original path
            return resp
        else:
            # login failed: back to the login page
            form = LoginForm()
            errMsg = "用户名或密码不正确"  # "incorrect username or password"
            return render(request, 'login.html', locals())
# view for /register
def register_views(request):
if request.method == 'GET':
return render(request,'register.html')
else:
        # perform the registration
dic ={
"uphone":request.POST['uphone'],
"upwd":request.POST['upwd'],
"uname":request.POST['uname'],
"uemail":request.POST['uemail'],
}
        # insert the data into the database - register
Users(**dic).save()
        # query the database again by uphone
u = Users.objects.get(uphone=request.POST['uphone'])
        # save the user id and uphone into the session
request.session['uid'] = u.id
request.session['uphone'] = u.uphone
return redirect('/')
# check whether a phone number already exists -> /check_uphone/
def check_uphone_views(request):
if request.method == 'POST':
        # receive the phone number sent by the front end
uphone = request.POST['uphone']
uList = Users.objects.filter(uphone=uphone)
if uList:
            # if this is truthy, the phone number already exists
            # respond with status "0" to tell the client the number is taken
            # respond with text "手机号码已存在" ("the phone number already exists")
dic = {
"status":"0",
"text":'手机号码已存在',
}
return HttpResponse(json.dumps(dic))
else:
dic = {
"status":"1",
"text":"可以注册",
}
return HttpResponse(json.dumps(dic))
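# (Response contract of check_uphone_views above: JSON {"status", "text"},
#  where status "0" means the phone number is already registered and "1"
#  means it is free to register.)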
# check whether the user is logged in; if so, fetch the uname value
def check_login_views(request):
    # check whether the session holds uid and uphone
    if 'uid' in request.session and 'uphone' in request.session:
        # the user is currently logged in
        # fetch the uname value by uid
        uid = request.session['uid']
        user = Users.objects.get(id=uid)
        # build the response data
dic = {
"status":'1',
'user':json.dumps(user.to_dict())
}
return HttpResponse(json.dumps(dic))
else:
        # check whether the cookies hold login info
        if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
            # copy the values from the cookies into the session
            uid = request.COOKIES['uid']
            uphone = request.COOKIES['uphone']
            request.session['uid'] = uid
            request.session['uphone'] = uphone
            # look up the user by uid, convert it to a dict, and respond to the client
user = Users.objects.get(id=uid)
jsonStr = json.dumps(user.to_dict())
dic = {
"status":"1",
"user":jsonStr,
}
return HttpResponse(json.dumps(dic))
else:
            # no login info in either the session or the cookies
dic = {
"status":0,
'text':'用户尚未登录'
}
if request.method == 'POST':
tmp_url = '/'
uphone = request.POST['uphone']
tmp_resp = redirect(tmp_url)
tmp_expires = 60 * 60 * 24 * 366
tmp_resp.set_cookie('uphone', uphone, tmp_expires)
return redirect(tmp_url)
return HttpResponse(json.dumps(dic))
# log out
# clear the data in the session and the cookies
# return to where the request came from
def logout_views(request):
    # get the referring URL; fall back to the home page /
    url = request.META.get('HTTP_REFERER', '/')
    resp = redirect(url)
    # check whether the session holds login info
if 'uid' in request.session and 'uphone' in request.session:
del request.session['uid']
del request.session['uphone']
if 'uid' in request.COOKIES and 'uphone' in request.COOKIES:
resp.delete_cookie('uid')
resp.delete_cookie('uphone')
return resp
def type_goods_views(request):
all_list=[]
types=GoodsType.objects.all()
for type in types:
type_json=json.dumps(type.to_dic())
g_list=type.goods_set.all()
g_list_json=serializers.serialize('json',g_list)
dic={
'type':type_json,
'goods':g_list_json,
}
all_list.append(dic)
return HttpResponse(json.dumps(all_list))
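# (Response shape of type_goods_views above: a JSON list of
#  {"type": ..., "goods": ...} entries. Both inner values are themselves
#  JSON strings, so a client has to decode them a second time.)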
| [
"[email protected]"
]
| |
980b466c28c5040171706e805a75717fbb69f66d | ed7342bcfd051d5280c444f5a625fac507ef9b53 | /demo/basics/sum_of_numbers_v2.py | 724f67c49fef9989060ad053d4ae302ff4759cd0 | []
| no_license | srikanthpragada/PYTHON_19_MAR_2021 | 55f86289e7d6be5398c18ad9f52bfd4d81563827 | 20cd95481c1fc4c156d1fed01e29cb3b09b03333 | refs/heads/master | 2023-04-06T02:37:52.657864 | 2021-05-05T03:01:31 | 2021-05-05T03:01:31 | 350,551,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | # Take numbers until 0 is given and display sum of numbers
total = 0
while True:
num = int(input("Enter a number [0 to stop] :"))
if num == 0:
break # Terminate loop
total += num
print(f"Total = {total}") | [
"[email protected]"
]
| |
d617aaac35275cf070b7f5bd47f28582080b01ae | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /kosmos-2/fairseq/fairseq/models/speech_to_speech/__init__.py | d34883552596496799514422e5a895376d02f735 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
]
| permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 248 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .modules import * # noqa
from .s2s_transformer import * # noqa
| [
"[email protected]"
]
| |
ef835c8ab8f9b1d665e298b1da78b17ab7380731 | 135d2c02b3ad706573bdfafa75ebc14bd170ef97 | /firedex-static/sdn-controller/sdn_controller.py | d19c338814f705b0ff1ecde6b5649d74806fa4f2 | []
| no_license | boulouk/firedex | 4afc6467bd83e096051d941699e59f1be806a46c | 187012986f4adf85d017e84a64db7c9bb1f447b0 | refs/heads/master | 2022-06-06T01:56:38.464322 | 2019-11-24T09:44:03 | 2019-11-24T09:44:03 | 138,659,150 | 2 | 1 | null | 2022-05-20T20:55:18 | 2018-06-25T23:09:54 | Python | UTF-8 | Python | false | false | 376 | py |
from ryu.cmd import manager
applications = ["topology_application", "flow_application"]
def run_controller(applications):
arguments = []
arguments.extend(applications)
arguments.append("--observe-links")
arguments.append("--enable-debugger")
manager.main( args = arguments )
if __name__ == '__main__':
run_controller(applications = applications)
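# (`--observe-links` turns on Ryu's LLDP-based topology discovery, which the
#  topology_application relies on; `--enable-debugger` is Ryu's debugger flag.)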
| [
"[email protected]"
]
| |
23766bceb270d73585937f8eb705efca167b4426 | c3b739b07214507bf1023b926c19d30784623e98 | /segme/model/cascade_psp/refine.py | b8419f1aa09101135ce9339c1be00c9ec1fa696d | [
"MIT"
]
| permissive | templeblock/segme | 20a96787500c46483cb7af0db917207fcedafb0b | 8192ed066558c1ea1e7283805b40da4baa5b3827 | refs/heads/master | 2023-08-30T12:31:39.327283 | 2021-11-11T17:08:40 | 2021-11-11T17:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,917 | py | import cv2
import numpy as np
import tensorflow as tf
from keras import backend
from tensorflow_hub import KerasLayer
class Refiner:
def __init__(self, hub_uri, max_size=900):
self.model = KerasLayer(hub_uri)
self.max_size = max_size
self.image = tf.Variable(
shape=(1, None, None, 3), dtype='uint8', initial_value=np.zeros((1, 0, 0, 3)).astype(np.uint8))
self.mask = tf.Variable(
shape=(1, None, None, 1), dtype='uint8', initial_value=np.zeros((1, 0, 0, 1)).astype(np.uint8))
self.prev = tf.Variable(
shape=(1, None, None, 1), dtype='uint8', initial_value=np.zeros((1, 0, 0, 1)).astype(np.uint8))
def __call__(self, image, mask, fast=False):
fine, coarse = self._global_step(image, mask)
if fast:
return fine
return self._local_step(image, fine, coarse)
def _global_step(self, image, mask):
height_width = image.shape[:2]
if max(height_width) < self.max_size:
image = Refiner._resize_max_side(image, self.max_size, cv2.INTER_CUBIC)
mask = Refiner._resize_max_side(mask, self.max_size, cv2.INTER_LINEAR)
elif max(height_width) > self.max_size:
image = Refiner._resize_max_side(image, self.max_size, cv2.INTER_AREA)
mask = Refiner._resize_max_side(mask, self.max_size, cv2.INTER_AREA)
fine, coarse = self._safe_predict(image, mask)
if max(height_width) < self.max_size:
fine = Refiner._resize_fixed_size(fine, height_width, interpolation=cv2.INTER_AREA)
coarse = Refiner._resize_fixed_size(coarse, height_width, interpolation=cv2.INTER_AREA)
elif max(height_width) > self.max_size:
fine = Refiner._resize_fixed_size(fine, height_width, interpolation=cv2.INTER_LINEAR)
coarse = Refiner._resize_fixed_size(coarse, height_width, interpolation=cv2.INTER_LINEAR)
return fine, coarse
def _local_step(self, image, fine, coarse, padding=16):
height, width = fine.shape[:2]
grid_mask = np.zeros_like(fine, dtype=np.uint32)
grid_weight = np.zeros_like(fine, dtype=np.uint32)
step_size = self.max_size // 2 - padding * 2
used_start_idx = set()
for x_idx in range(width // step_size + 1):
for y_idx in range(height // step_size + 1):
start_x = x_idx * step_size
start_y = y_idx * step_size
end_x = start_x + self.max_size
end_y = start_y + self.max_size
# Shift when required
if end_x > width:
end_x = width
start_x = width - self.max_size
if end_y > height:
end_y = height
start_y = height - self.max_size
# Bound x/y range
start_x = max(0, start_x)
start_y = max(0, start_y)
end_x = min(width, end_x)
end_y = min(height, end_y)
# The same crop might appear twice due to bounding/shifting
start_idx = start_y * width + start_x
if start_idx in used_start_idx:
continue
used_start_idx.add(start_idx)
# Take crop
part_image = image[start_y:end_y, start_x:end_x, :]
part_mask = fine[start_y:end_y, start_x:end_x]
part_prev = coarse[start_y:end_y, start_x:end_x]
# Skip when it is not an interesting crop anyway
part_mean = (part_mask > 127).astype(np.float32).mean()
if part_mean > 0.9 or part_mean < 0.1:
continue
grid_fine, _ = self._safe_predict(part_image, part_mask, part_prev)
# Padding
pred_sx = pred_sy = 0
pred_ex = self.max_size
pred_ey = self.max_size
if start_x != 0:
start_x += padding
pred_sx += padding
if start_y != 0:
start_y += padding
pred_sy += padding
if end_x != width:
end_x -= padding
pred_ex -= padding
if end_y != height:
end_y -= padding
pred_ey -= padding
grid_mask[start_y:end_y, start_x:end_x] += grid_fine[pred_sy:pred_ey, pred_sx:pred_ex]
grid_weight[start_y:end_y, start_x:end_x] += 1
# Final full resolution output
grid_weight_ = grid_weight.astype(np.float32) + backend.epsilon()
grid_mask = np.round(grid_mask.astype(np.float32) / grid_weight_).astype(np.uint8)
fine = np.where(grid_weight == 0, fine, grid_mask)
return fine
def _safe_predict(self, image, mask, prev=None):
if len(image.shape) != 3:
raise ValueError('Wrong image supplied')
if image.dtype != 'uint8':
raise ValueError('Wrong image dtype')
if len(mask.shape) != 2:
raise ValueError('Wrong mask supplied')
if mask.dtype != 'uint8':
raise ValueError('Wrong mask dtype')
if prev is not None and len(prev.shape) != 2:
raise ValueError('Wrong prev supplied')
if prev is not None and prev.dtype != 'uint8':
raise ValueError('Wrong prev dtype')
height, width = image.shape[:2]
        # pad up to the next multiple of 8; the original padded by `height % 8`
        # / `width % 8`, which only yields a size divisible by 8 when the size
        # already is one (presumably the model requires stride-8 divisibility)
        pad_h, pad_w = (-height) % 8, (-width) % 8
        _image = np.pad(image, ((0, pad_h), (0, pad_w), (0, 0)))
        _mask = np.pad(mask, ((0, pad_h), (0, pad_w)))
        _prev = _mask if prev is None else np.pad(prev, ((0, pad_h), (0, pad_w)))
self.image.assign(_image[None, ...])
self.mask.assign(_mask[None, ..., None])
self.prev.assign(_prev[None, ..., None])
fine, coarse = self.model([self.image, self.mask, self.prev])
fine, coarse = fine[0, :height, :width, 0], coarse[0, :height, :width, 0]
fine = np.round(fine * 255).astype(np.uint8)
coarse = np.round(coarse * 255).astype(np.uint8)
return fine, coarse
@staticmethod
def _resize_max_side(image, max_size, interpolation=cv2.INTER_LINEAR):
if len(image.shape) > 3 or len(image.shape) < 2:
raise ValueError('Wrong image supplied')
aspect = max_size / max(image.shape[:2])
return cv2.resize(image, (0, 0), fx=aspect, fy=aspect, interpolation=interpolation)
@staticmethod
def _resize_fixed_size(image, height_width, interpolation=cv2.INTER_LINEAR):
if len(image.shape) > 3 or len(image.shape) < 2:
raise ValueError('Wrong image supplied')
if len(height_width) != 2:
raise ValueError('Wrong desired size supplied')
return cv2.resize(image, height_width[::-1], interpolation=interpolation)
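# Usage sketch for the Refiner above (the hub URI is a placeholder, not a
# real published module path):
#   refiner = Refiner('path/or/url/to/cascadepsp_savedmodel')
#   fine_mask = refiner(image_uint8_hwc, coarse_mask_uint8_hw, fast=True)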
| [
"[email protected]"
]
| |
771a2bf6caaa7ad3e08d7d92a9dd0f6c8d49b9a8 | f74119a55ff5d4e89f5b7fb7da24a23828e1c203 | /test_labeler.py | 0ee0907d1d02f413876674b0d058a669f89f461d | [
"MIT"
]
| permissive | mdlaskey/yolo_labeler | 3f15dd229f6a5e01e508c5141345ff9363717b94 | 93463ee54ee8773e7c2ce2368a95c4c1102e712c | refs/heads/master | 2021-08-16T00:50:10.238386 | 2017-09-20T22:49:40 | 2017-09-20T22:49:40 | 96,812,011 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,526 | py | import os,sys
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import cPickle
import copy
import glob
import yolo.config as cfg
import cPickle as pickle
import IPython
class TestLabeler(object):
def __init__(self):
self.cache_path = cfg.CACHE_PATH
self.image_path = cfg.IMAGE_PATH
self.label_path = cfg.LABEL_PATH
self.batch_size = cfg.BATCH_SIZE
self.image_size = cfg.IMAGE_SIZE
self.cell_size = cfg.CELL_SIZE
self.classes = cfg.CLASSES
self.class_to_ind = dict(zip(self.classes, xrange(len(self.classes))))
def check_label(self,frame):
label_path = cfg.LABEL_PATH+frame+'.p'
label_data = pickle.load(open(label_path,'r'))
for objs in label_data['objects']:
box_ind = objs['box_index']
class_label = objs['num_class_label']
print "CLASS LABEL"
print class_label
print "BOX INDEX"
print box_ind
def check_frame(self,frame):
image_path = cfg.IMAGE_PATH+frame+'.png'
image = cv2.imread(image_path)
cv2.imshow('debug',image)
cv2.waitKey(0)
def image_read(self, imname, flipped=False):
image = cv2.imread(imname)
image = cv2.resize(image, (self.image_size, self.image_size))
# cv2.imshow('debug',image)
# cv2.waitKey(30)
#image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
image = (image / 255.0) * 2.0 - 1.0
if flipped:
image = image[:, ::-1, :]
return image
def load_bbox_annotation(self, label):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
label_data = pickle.load(open(label,'r'))
num_objs = label_data['num_labels']
label = np.zeros((self.cell_size, self.cell_size, 5+cfg.NUM_LABELS))
for objs in label_data['objects']:
box_ind = objs['box_index']
class_label = objs['num_class_label']
x_ind = int(box_ind[0] * self.cell_size / self.image_size)
y_ind = int(box_ind[1] * self.cell_size / self.image_size)
label[y_ind, x_ind, 0] = 1
label[y_ind, x_ind, 1:5] = box_ind
label[y_ind, x_ind, 5 + class_label] = 1
return label, num_objs
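    # Per-grid-cell label layout built above (assuming box_index holds pixel
    # coordinates plus size terms): index 0 = objectness flag, indices 1:5 =
    # the raw box_index values, indices 5: = one-hot class vector.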
if __name__ == '__main__':
tl = TestLabeler()
frame = 'frame_1771'
tl.check_label(frame)
tl.check_frame(frame)
| [
"[email protected]"
]
| |
1b5dde44a062a74cb90f2e60d15903012ccb7620 | eff2fc11905f6118dcd70050392f168cd7aea086 | /leetcode/5_longest_palindromic_substring/solution2.py | dc6f8c44f995cff0b89286e6dbc72af866bea932 | []
| no_license | algobot76/leetcode-python | 28f1e1107fa941a3b40006f074eec6231e674ac1 | ec8bff8978d6915bfdf187c760b97ee70f7515af | refs/heads/master | 2021-07-05T17:06:40.581977 | 2020-09-19T22:02:38 | 2020-09-19T22:02:38 | 199,255,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | class Solution:
def longestPalindrome(self, s):
n = len(s)
if n < 2:
return s
f = [[False] * n for _ in range(n)]
ans = ""
for i in range(n - 1, -1, -1):
for j in range(i, n):
if s[i] == s[j]:
if self._get_len(i, j) > 2:
if f[i + 1][j - 1]:
f[i][j] = True
else:
f[i][j] = True
if f[i][j]:
if self._get_len(i, j) > len(ans):
ans = s[i:j + 1]
return ans
def _get_len(self, i, j):
return j - i + 1
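    # DP used above: f[i][j] is True iff s[i:j+1] is a palindrome;
    # f[i][j] = (s[i] == s[j]) and (j - i + 1 <= 2 or f[i+1][j-1]).
    # Both time and space are O(n^2).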
| [
"[email protected]"
]
| |
4b7d04c5de2f897b35e6ea61fc5a14077a9d6ef7 | 9f91ce42e1982ded6f77e184a0c6e35331b9ad23 | /greedy_color/main.py | 9308c47eb7dcc321bf983e03e6c97dfc36b2951d | [
"MIT"
]
| permissive | dixler/graph-coloring | b5b1b5aeb91d24ba4f94fc1b837225019327c885 | 6a5e853b9a88bdddfd8a02c75dfe588f26eddaba | refs/heads/master | 2020-04-10T14:17:53.701941 | 2018-12-15T09:44:36 | 2018-12-15T09:44:36 | 161,073,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | #!/usr/bin/env python3
# gonna make a stupid algorithm
import json
import sys
num_colors = 0
graph = json.loads(input())
num_nodes = len(graph)
class Node():
def __init__(self):
self.color = None
self.neighbors = set()
nodes = {int(i): Node() for i, val in graph.items()}
# add edges to graph
for k, val in graph.items():
nodes[int(k)].neighbors = set(val)
# add inbound edges
for k, adj_list in graph.items():
for endpoint in adj_list:
nodes[endpoint].neighbors |= {int(k)}
def recursive_color(graph, start_index):
'determines the color of interconnected nodes'
global num_colors
node = graph[start_index]
if node.color != None:
'we already colored it'
return
else:
neighbor_colors = {graph[neighbor_id].color for neighbor_id in node.neighbors}
new_color_id = 0
while new_color_id in neighbor_colors:
new_color_id += 1
node.color = new_color_id
num_colors = max(num_colors, new_color_id+1)
for neighbor_id in node.neighbors:
recursive_color(graph, neighbor_id)
return
# make a stack of unvisited nodes
graph = {int(k): v for k, v in graph.items()}
unvisited = {k for k, v in graph.items()}
while unvisited != set():
start_index = max(unvisited)
recursive_color(nodes, start_index)
unvisited = unvisited - {k for k, node in nodes.items() if node.color != None}
print('satisfiable with %d colors' % num_colors)
for k, node in nodes.items():
print((k, node.color), end=', ')
| [
"[email protected]"
]
| |
55452e8eaf3c675ee734d7d08b29328ed897b400 | 344b654cbb8b13d683bcd2cacf522c983287a5fe | /Exercises/fileExtension.py | 295ca1b77df26281183deef41448b83bb4510202 | []
| no_license | tchaitanya2288/pyproject01 | d869522584ab498008e67e81c209472ab20685c2 | 565660b73039db6f0e9ed986504c2f96ba674f9c | refs/heads/master | 2020-03-15T13:18:21.480443 | 2018-06-19T18:44:47 | 2018-06-19T18:44:47 | 132,163,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | Filename = input('Enter your required filename:')
Extension = Filename.split('.')
print("The Extension of file is:" +repr(Extension[-1])) | [
"[email protected]"
]
| |
f39ba693f9984287400dc51c6fd3384c2c8d4aad | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/92/usersdata/216/46367/submittedfiles/atividade.py | 6f93a371a202140a4fcb7fb058a09a066cd9d666 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | # -*- coding: utf-8 -*-
import math
n=int(input('Digite um número:'))
soma=0
if n>0:
for i in range(0,n,1):
if n>=0:
i=i+1
soma=soma+((i)/(n))
n=n-1
else:
n=n*(-1)
i=i+1
soma=soma+((i)/(n))
n=n-1
else:
n=n*(-1)
print('%.5f'%soma)
| [
"[email protected]"
]
| |
c7db867a68cfc633338475e43990083bb406cd98 | 1564d12d61f669ce9f772f3ef7563167f7fe13bf | /codeforces/educationalRound73/A-books.py | 77e50053332612a3e54fa06049612ac125655ecd | []
| no_license | sakshamk6999/codingPractice | 73ec4873defb0f0d2e47173150a589ee12e5e0a1 | f727aac6d87448b19fc9d48660dc6978fe5edc14 | refs/heads/master | 2020-12-01T20:22:36.299535 | 2020-02-04T05:55:53 | 2020-02-04T05:55:53 | 230,757,937 | 0 | 0 | null | 2020-02-12T20:38:12 | 2019-12-29T14:00:22 | Python | UTF-8 | Python | false | false | 316 | py | for _ in range(int(input())):
n = int(input())
a = list(map(int, input().split()))
dp = [0 for i in range(n)]
for i in range(n - 2, -1, -1):
if a[i] == a[i + 1]:
dp[i] = dp[i + 1]
else:
dp[i] = n - 1 - i
for i in dp:
print(i, end=" ")
print('') | [
"[email protected]"
]
| |
9a378ac66d24667514820bb7ae2934ca7d3f4f35 | e2242f78a129f2b87252a0bf1621e8190fd07442 | /src/compas_vol/microstructures/tpms.py | 6264e6a6389be9c6043785e4474fb65d97fa8cda | [
"MIT"
]
| permissive | ilmihur/compas_vol | 751237e00f841f25546accf1bf1db782aa9a4559 | 8aedc611bd96acd95d26b9f34c805a8ff05020bf | refs/heads/master | 2022-11-19T12:21:03.829785 | 2020-07-16T11:22:52 | 2020-07-16T11:22:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,724 | py | from math import pi, sin, cos
from compas import PRECISION
class TPMS(object):
"""A triply periodic minimal surface (TPMS) is defined by a type and a wavelength.
Parameters
----------
tpmstype: String
        Type of TPMS. Currently available are Gyroid, SchwartzP, Diamond, Neovius, Lidinoid and FischerKoch.
wavelength: float
The wavelength of the trigonometric function.
Examples
--------
>>> a = TPMS(tpmstype='Gyroid', wavelength=5.0)
"""
def __init__(self, tpmstype=0, wavelength=1.0):
self.tpmstypes = ['gyroid', 'schwartzp', 'diamond', 'neovius', 'lidinoid', 'fischerkoch']
self._tpmstype = None
self.tpmstype = tpmstype
self._wavelength = None
self.wavelength = wavelength
self._factor = self.wavelength/pi
# ==========================================================================
# descriptors
# ==========================================================================
@property
def tpmstype(self):
return self._tpmstype
@tpmstype.setter
def tpmstype(self, tpmstype):
if type(tpmstype) == str:
if tpmstype.lower() in self.tpmstypes:
self._tpmstype = self.tpmstypes.index(tpmstype.lower())
else:
self._tpmstype = 0
elif type(tpmstype) == int:
self._tpmstype = max(0, min(tpmstype, len(self.tpmstypes) - 1))
@property
def wavelength(self):
"""float: The wavelength of the TPMS."""
return self._wavelength
@wavelength.setter
def wavelength(self, wavelength):
self._wavelength = float(wavelength)
self._factor = self.wavelength/pi
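    # Note: with _factor == wavelength/pi, sin(x/_factor) == sin(pi*x/wavelength),
    # so each trigonometric term repeats every 2*wavelength along its axis.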
def __repr__(self):
return 'TPMS({0},{1:.{2}f})'.format(self.tpmstype, self.wavelength, PRECISION[:1])
# ==========================================================================
# distance function
# ==========================================================================
def get_distance(self, point):
"""
single point distance function
"""
x, y, z = point
px = x/self._factor
py = y/self._factor
pz = z/self._factor
d = 0
if self.tpmstype == 0: # 'Gyroid':
d = sin(px)*cos(py) + sin(py)*cos(pz) + sin(pz)*cos(px)
elif self.tpmstype == 1: # 'SchwartzP':
d = cos(px) + cos(py) + cos(pz)
elif self.tpmstype == 2: # 'Diamond':
d = (
sin(px) * sin(py) * sin(pz) +
sin(px) * cos(py) * cos(pz) +
cos(px) * sin(py) * cos(pz) +
cos(px) * cos(py) * sin(pz)
)
elif self.tpmstype == 3: # 'Neovius':
d = (3 * cos(px) + cos(py) + cos(pz) +
4 * cos(px) * cos(py) * cos(pz))
elif self.tpmstype == 4: # 'Lidinoid':
d = (0.5 * (sin(2*px) * cos(py) * sin(pz) +
sin(2*py) * cos(py) * sin(px) +
sin(2*pz) * cos(px) * sin(pz)) -
0.5 * (cos(2*px) * cos(2*py) +
cos(2*py) * cos(2*pz) +
cos(2*pz) * cos(2*px)) + 0.15)
elif self.tpmstype == 5: # 'FischerKoch':
d = (cos(2*px) * sin(py) * cos(pz) +
cos(2*py) * sin(pz) * cos(px) +
cos(2*pz) * sin(px) * cos(py))
return d
def get_distance_numpy(self, x, y, z):
"""
vectorized distance function
"""
import numpy as np
px = x/self._factor
py = y/self._factor
pz = z/self._factor
d = 0
# Gyroid
if self.tpmstype == 0:
d = np.sin(px) * np.cos(py) + np.sin(py)*np.cos(pz) + np.sin(pz)*np.cos(px)
# SchwartzP
elif self.tpmstype == 1:
d = np.cos(px) + np.cos(py) + np.cos(pz)
# Diamond
elif self.tpmstype == 2:
d = (
np.sin(px) * np.sin(py) * np.sin(pz) +
np.sin(px) * np.cos(py) * np.cos(pz) +
np.cos(px) * np.sin(py) * np.cos(pz) +
np.cos(px) * np.cos(py) * np.sin(pz)
)
# Neovius
elif self.tpmstype == 3:
d = (3 * np.cos(px) + np.cos(py) + np.cos(pz) +
4 * np.cos(px) * np.cos(py) * np.cos(pz))
# Lidinoid
elif self.tpmstype == 4:
d = (0.5 * (np.sin(2*px) * np.cos(py) * np.sin(pz) +
np.sin(2*py) * np.cos(py) * np.sin(px) +
np.sin(2*pz) * np.cos(px) * np.sin(pz)) -
0.5 * (np.cos(2*px) * np.cos(2*py) +
np.cos(2*py) * np.cos(2*pz) +
np.cos(2*pz) * np.cos(2*px)) + 0.15)
# FischerKoch
elif self.tpmstype == 5:
d = (np.cos(2*px) * np.sin(py) * np.cos(pz) +
np.cos(2*py) * np.sin(pz) * np.cos(px) +
np.cos(2*pz) * np.sin(px) * np.cos(py))
# IWP?
return d
if __name__ == "__main__":
# from compas.geometry import Point
import numpy as np
import matplotlib.pyplot as plt
b = TPMS(tpmstype='schwartzP', wavelength=5)
print(b)
x, y, z = np.ogrid[-14:14:112j, -12:12:96j, -10:10:80j]
m = b.get_distance_numpy(x, y, z)
plt.imshow(m[:, :, 25].T, cmap='RdBu') # transpose because numpy indexing is 1)row 2) column instead of x y
plt.colorbar()
plt.axis('equal')
plt.show()
# for y in range(-15, 15):
# s = ''
# for x in range(-30, 30):
# d = b.get_distance(Point(x*0.5, y, 1.))
# if d < 0:
# s += 'x'
# else:
# s += '.'
# print(s)
| [
"[email protected]"
]
| |
73acba9528101c1bfa9187c8776c8d7234afbc3f | c6fca34b2c9cb973d9d65d23e58e40d4513e173a | /aoc2015/day18.py | 65008c1bad113a40d1876343cbf348d6f612d6a1 | []
| no_license | tomkooij/AdventOfCode | 8ff47c027c887194b0d441f61a8db172c4e260ea | 7890d45a01498dcb48972a7e311888ce6f003bd2 | refs/heads/master | 2021-08-15T19:46:21.869137 | 2021-01-18T06:37:50 | 2021-01-18T06:37:50 | 48,421,868 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | # adventofcode.com
# day18
from copy import deepcopy
INPUT = ('input/input18', 100)
TESTCASE = ('input/test18', 4)
ON = '#'
OFF = '.'
def pretty_print(lights):
for l in lights:
print ''.join(l).rstrip('\n')
def count(lights):
return sum([l.count('#') for l in lights])
def get_neighbours(lights, x, y):
neighbours = []
xmax = ymax = len(lights)
for i in range(max(y-1, 0), min(y+2, ymax)):
for j in range(max(x-1,0), min(x+2, xmax)):
neighbours.append((i,j))
if (y,x) in neighbours:
neighbours.remove((y,x))
return neighbours
def count_neighbours(lights, x, y):
n = get_neighbours(lights, x, y)
return count([lights[y][x] for y,x in n])
FILENAME, STEPS = INPUT
if __name__ == '__main__':
with open(FILENAME) as f:
lights = map(list, f.read().splitlines())
for _ in range(STEPS+1):
old_lights = deepcopy(lights)
pretty_print(lights)
print count(lights)
for y in range(0, len(lights)):
for x in range(0, len(lights)):
#print y, x, count_neighbours(lights, x, y)
if old_lights[y][x] == ON:
if not count_neighbours(old_lights, x, y) in [2, 3]:
lights[y][x] = OFF
elif old_lights[y][x] == OFF:
if count_neighbours(old_lights, x, y) == 3:
lights[y][x] = ON
else:
assert False, 'lp0 on fire! %d %d %c' % (x, y, lights[y][x])
| [
"[email protected]"
]
| |
0099ea1a24cd0a7e27e7caa9bcd30ad25bb5fc29 | d4b91d9ebb7c850f07b06e5c15794b2885f2e767 | /6/Tema3(Циклы)/6.c_6.py | 3d423e792c84c79c5c729e0ca3d5be2f25693867 | []
| no_license | Timur597/First6team | 13b6dbb2d2e68d5df5c76c5bbba587d563a95957 | 4df85a6f20bad626ad76196cd5bc867ce27d0aac | refs/heads/master | 2023-03-05T15:39:24.311784 | 2021-02-20T07:17:36 | 2021-02-20T07:17:36 | 340,588,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | # Task 6
names = ('Максат','Лязат','Данияр','Айбек','Атай','Салават','Адинай','Жоомарт','Алымбек','Эрмек','Дастан','Бекмамат','Аслан')
i = 0
while i < 12:
print (names [i])
i = i + 2
| [
"[email protected]"
]
| |
ff5236cfbc685f7702d63948ddb042f1e8ba1d78 | f8f2536fa873afa43dafe0217faa9134e57c8a1e | /aliyun-python-sdk-multimediaai/aliyunsdkmultimediaai/request/v20190810/RegisterFaceImageRequest.py | fcfb213b95d912b2eaf0be5026e27f0f3fad4814 | [
"Apache-2.0"
]
| permissive | Sunnywillow/aliyun-openapi-python-sdk | 40b1b17ca39467e9f8405cb2ca08a85b9befd533 | 6855864a1d46f818d73f5870da0efec2b820baf5 | refs/heads/master | 2022-12-04T02:22:27.550198 | 2020-08-20T04:11:34 | 2020-08-20T04:11:34 | 288,944,896 | 1 | 0 | NOASSERTION | 2020-08-20T08:04:01 | 2020-08-20T08:04:01 | null | UTF-8 | Python | false | false | 1,779 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmultimediaai.endpoint import endpoint_data
class RegisterFaceImageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'multimediaai', '2019-08-10', 'RegisterFaceImage')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_FaceGroupId(self):
return self.get_query_params().get('FaceGroupId')
def set_FaceGroupId(self,FaceGroupId):
self.add_query_param('FaceGroupId',FaceGroupId)
def get_FacePersonId(self):
return self.get_query_params().get('FacePersonId')
def set_FacePersonId(self,FacePersonId):
self.add_query_param('FacePersonId',FacePersonId)
def get_ImageUrl(self):
return self.get_query_params().get('ImageUrl')
def set_ImageUrl(self,ImageUrl):
self.add_query_param('ImageUrl',ImageUrl) | [
"[email protected]"
]
| |
1d87192e81d61530ae36b21063abb510bd089aee | fbaf44a5f4effe2838a03165f237a7a282284f64 | /Practice/PIle_length-width_soilE/1.1 readODB.py | 3f59145257606d79712227f140d6214a9b44a5d9 | []
| no_license | WangDooo/Python-in-Abaqus | b568f5499bbfd8bc4893f4510a233b9c0be30cf8 | c7bcbd1adc3bcff9661e13c8ce883cb59269ceb8 | refs/heads/master | 2021-06-13T14:05:25.639543 | 2021-03-24T03:32:44 | 2021-03-24T03:32:44 | 173,902,521 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # coding:utf8
from odbAccess import *
odb = openOdb(path='Job-1.odb')
step = odb.steps['Step-1']
point = odb.rootAssembly.nodeSets['SET-PILETOPPOINT']
lastFrame = step.frames[-1]
u = lastFrame.fieldOutputs['U']
u_point = u.getSubset(region=point)
uFile = open('U2.csv','w')
uFile.write('nodeLabel,U2 \n')
for uValue in u_point.values:
uFile.write('NO.%s, %f \n' % (uValue.nodeLabel, uValue.data[1])) | [
"[email protected]"
]
| |
6d268fb1bb10e27331a3f7427f4e7ec31917a891 | 5e557741c8867bca4c4bcf2d5e67409211d059a3 | /test/distributed/elastic/timer/local_timer_example.py | 8d3702c9a70283500c437adc763c2e6090b382a9 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
]
| permissive | Pandinosaurus/pytorch | a2bb724cfc548f0f2278b5af2fd8b1d2758adb76 | bb8978f605e203fbb780f03010fefbece35ac51c | refs/heads/master | 2023-05-02T20:07:23.577610 | 2021-11-05T14:01:30 | 2021-11-05T14:04:40 | 119,666,381 | 2 | 0 | NOASSERTION | 2021-11-05T19:55:56 | 2018-01-31T09:37:34 | C++ | UTF-8 | Python | false | false | 4,080 | py | #!/usr/bin/env python3
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing as mp
import signal
import time
import unittest
import torch.distributed.elastic.timer as timer
import torch.multiprocessing as torch_mp
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
IS_WINDOWS,
IS_MACOS,
sandcastle_skip_if,
)
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s] %(asctime)s %(module)s: %(message)s"
)
def _happy_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(0.5)
def _stuck_function(rank, mp_queue):
timer.configure(timer.LocalTimerClient(mp_queue))
with timer.expires(after=1):
time.sleep(5)
# timer is not supported on macos or windowns
if not (IS_WINDOWS or IS_MACOS):
class LocalTimerExample(unittest.TestCase):
"""
Demonstrates how to use LocalTimerServer and LocalTimerClient
to enforce expiration of code-blocks.
Since torch multiprocessing's ``start_process`` method currently
does not take the multiprocessing context as parameter argument
there is no way to create the mp.Queue in the correct
context BEFORE spawning child processes. Once the ``start_process``
API is changed in torch, then re-enable ``test_torch_mp_example``
unittest. As of now this will SIGSEGV.
"""
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_torch_mp_example(self):
# in practice set the max_interval to a larger value (e.g. 60 seconds)
mp_queue = mp.get_context("spawn").Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
# all processes should complete successfully
# since start_process does NOT take context as parameter argument yet
# this method WILL FAIL (hence the test is disabled)
torch_mp.spawn(
fn=_happy_function, args=(mp_queue,), nprocs=world_size, join=True
)
with self.assertRaises(Exception):
# torch.multiprocessing.spawn kills all sub-procs
# if one of them gets killed
torch_mp.spawn(
fn=_stuck_function, args=(mp_queue,), nprocs=world_size, join=True
)
server.stop()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
def test_example_start_method_spawn(self):
self._run_example_with(start_method="spawn")
# @sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test is asan incompatible")
# def test_example_start_method_forkserver(self):
# self._run_example_with(start_method="forkserver")
def _run_example_with(self, start_method):
spawn_ctx = mp.get_context(start_method)
mp_queue = spawn_ctx.Queue()
server = timer.LocalTimerServer(mp_queue, max_interval=0.01)
server.start()
world_size = 8
processes = []
for i in range(0, world_size):
if i % 2 == 0:
p = spawn_ctx.Process(target=_stuck_function, args=(i, mp_queue))
else:
p = spawn_ctx.Process(target=_happy_function, args=(i, mp_queue))
p.start()
processes.append(p)
for i in range(0, world_size):
p = processes[i]
p.join()
if i % 2 == 0:
self.assertEqual(-signal.SIGKILL, p.exitcode)
else:
self.assertEqual(0, p.exitcode)
server.stop()
if __name__ == "__main__":
run_tests()
| [
"[email protected]"
]
| |
31c75823ceccc46b7570986abb36366707a7b394 | f995860ad78fc266d04b03c3478c74e989d8b568 | /PE/pe0178.py | 50751fc483b0c73fe0a8686c699f684a06e3cf11 | []
| no_license | 196884/Python | edd0234fd72a40d7a0b3310776edcaa8bda74478 | 8dc2e7a32dd350227cde748600e713dc3eea3f4a | refs/heads/master | 2016-09-06T19:26:19.860746 | 2015-11-09T00:09:23 | 2015-11-09T00:09:23 | 28,167,634 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | def initList():
r = []
for i in range(0, 10):
r.append([0 for j in range(0, 10)])
return r
def solve():
# dynamic programming:
# after n steps, l[i][j] is the number of paths:
# * of length n
# * starting at 0
# * going up or down by 1 at each step
# * with minimum -i, and maximum +j
r = 0
l = initList()
l[0][0] = 1
for n in range(1, 40):
lNew = initList()
for i in range(0, 10):
for j in range(0, 9):
lNew[max(0, i-1)][j+1] += l[i][j]
lNew[j+1][max(0, i-1)] += l[j][i]
l = lNew
for i in range(1, 10): # The starting with a 0 is covered in the previous count!
r += l[i][9-i]
return r
if __name__ == "__main__":
result = solve()
print "Result: %d" % result
| [
"[email protected]"
]
| |
c6d53eae7e1128d46dbbb956b76c3a7d625330d0 | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /seed/types/MergeableHeap__immutable_tree.py | 62b0066d20d34eb16226590a2d6f29b3da608310 | []
| no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 33,990 | py | #__all__:goto
#testing_________goto:goto
r'''[[[
seed.types.MergeableHeap__immutable_tree
view ../../python3_src/seed/types/MergeableHeap__immutable_tree.py
using immutable_tree underlying
O(1) copy
++unoin/merge: like eat() but donot clear() input heap
view ../../python3_src/seed/types/MergeableHeap__mutable_tree.py
just forward seed.types.MergeableHeap
e ../../python3_src/seed/types/MergeableHeap__immutable_tree.py
    ver1:
        view ../../python3_src/seed/types/MergeableHeap-ver1-eat-O(logM_mul_logN)-not-best.py
        all non-leaf nodes carry a payload
        eat() - O(logM*logN)
    ver2:
        view ../../python3_src/seed/types/MergeableHeap.py
        view ../../python3_src/seed/types/MergeableHeap__mutable_tree.py
        every fork (binary node) carries a min_payload
        only a unit node carries a payload, and its min_payload is that payload
        eat() - O(logM+logN)
    ver3 [current]:
        view ../../python3_src/seed/types/MergeableHeap__immutable_tree.py
        using immutable_tree
[[[
===
used in:
e script/matrix_chain_product.py
e others/book/matrix_chain_product/Computation of matrix chain products I,II(1984)(Hu)(Shing)[polygon partitioning].pdf.txt
    the following relates to ver1, now obsolete[[
        balanced binary tree:
            the heights of the left and right subtrees differ by at most one
        [min_num_nodes_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else min_num_nodes_of_height(h-1)+min_num_nodes_of_height(h-2)+1]
        [max_num_nodes_of_height(h) =[def]= if h==0 then 0 else max_num_nodes_of_height(h-1)+max_num_nodes_of_height(h-1)+1]
        [max_num_nodes_of_height(h) == 2**h-1]
            0,1,3,7,15,31
            1,2,4,8,16,32
        [min_num_nodes_of_height(h) == Fibonacci_sequence[h+2]-1]
            # ~= 1.618**h * K
            0,1,2,4,7,12
            0,1,1,2,3,5,8,13
        [min_num_nodes_of_height, max_num_nodes_of_height both grow exponentially]
        the height is at most about half again that of a perfectly balanced tree
        >>> from math import log
        >>> log(2)/log(1.618)
        1.4404829720657013
    ]]
ver2[[
bug: [min_num_nodes_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else min_num_nodes_of_height(h-1)+min_num_nodes_of_height(h-2)+1]
[min_num_nodes_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else if h==2 then 3 else min_num_nodes_of_height(h-1)+min_num_nodes_of_height(h-2)+1]
        number of non-leaf nodes: forks + units # excluding empty leaves
        (2->3) because the direct children of a fork are never empty leaves
0,1,3,5,9,15,25,41,67,109
see:_eat_
[min_num_payloads_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else if h==2 then 2 else min_num_payloads_of_height(h-1)+min_num_payloads_of_height(h-2)]
        number of actual payloads: unit nodes
        (2->2) because the direct children of a fork contain no empty leaves
0,1,2,3,5,8,13,21,34,55,89
[min_num_payloads_of_height(h) == Fibonacci_sequence[h+1] -[h==0]]
0,1,2,3,5, 8,13
0,1,1,2,3,5, 8,13
0,1,3,5,9,15,25,41,67,109
[min_num_nodes_of_height(h) == min_num_payloads_of_height(h)*2 -1 +[h==0]]
!! [num_nodes_of_height(tree) == max(0, num_payloads_of_height(tree)*2 -1)]
[min_num_nodes_of_height(h) == Fibonacci_sequence[h+1]*2 -1 -[h==0]]
[max_num_nodes_of_height(h) =[def]= if h==0 then 0 else max_num_nodes_of_height(h-1)+max_num_nodes_of_height(h-1)+1]
[max_num_payloads_of_height(h) =[def]= if h==0 then 0 else if h==1 then 1 else max_num_payloads_of_height(h-1)+max_num_payloads_of_height(h-1)]
[max_num_nodes_of_height(h) == 2**h-1]
[max_num_payloads_of_height(h) == floor(2**(h-1))]
]]
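    (a quick sketch, not part of the module, checking the ver2 recurrence above
     against its closed form, with Fibonacci_sequence = 0,1,1,2,3,5,...:
        def fib(n):
            a, b = 0, 1
            for _ in range(n):
                a, b = b, a + b
            return a
        def min_payloads(h):
            return h if h <= 2 else min_payloads(h-1) + min_payloads(h-2)
        assert all(min_payloads(h) == fib(h+1) - (h == 0) for h in range(12))
    )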
    O(1) operations:
        len(heap) -> size
        bool(heap) -> bool
        heap.peak() -> min_item
    O(log(N)) operations:
        heap.push(item)
        heap.pop() -> min_item
            pop the minimum item
        heap.merge(const& heap)
            ver3[current]: O(logN+logM)
                new in ver3
        heap.eat(std::move(heap))
            destructive merge
            ver1: an O(logN*logM) algorithm; slightly better than the plain O(N*logM), but short of the intended O(logN+logM)
            ver2: O(logN+logM)
        heap.push_then_pop(item) -> min_item
        heap.pop_then_push(item) -> min_item
    O(logN+M+logM) operations:
        heap.pushs([item])
    O(M) operations:
        MergeableHeap([item])
    O(N) operations:
        [*iter(heap)]
            non-destructive unordered read-only operation
        heap.as_tree() -> tree=(()|(payload, tree, tree))
    O(N*log(N)) operations:
        [*heap.iter_pops()]
            destructive ordered operation
        heap.to_sorted()
new in ver3
]]]
py -m nn_ns.app.debug_cmd seed.types.MergeableHeap__immutable_tree
py -m seed.types.MergeableHeap__immutable_tree
from seed.types.MergeableHeap__immutable_tree import MergeableHeap__immutable_tree, MergeableHeap, HeapError__Empty, HeapError__EatSelf, HeapError__Validate
[[[
===
>>> from seed.types.MergeableHeap__immutable_tree import MergeableHeap__immutable_tree, MergeableHeap, HeapError__Empty, HeapError__EatSelf, HeapError__Validate
>>> heap = MergeableHeap()
>>> heap
MergeableHeap()
>>> bool(heap)
False
>>> len(heap)
0
>>> [*iter(heap)]
[]
>>> [*heap.iter_pops()]
[]
>>> heap.as_tree()
()
>>> heap.verify()
>>> heap.push(999)
>>> heap.verify()
>>> heap
MergeableHeap([999])
>>> bool(heap)
True
>>> len(heap)
1
>>> [*iter(heap)]
[999]
>>> heap.as_tree()
(999, (), ())
>>> heap.peak()
999
>>> [*heap.iter_pops()]
[999]
>>> heap.peak() #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeapError__Empty
>>> heap.pop() #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeapError__Empty
>>> heap.eat(heap) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
HeapError__EatSelf
>>> heap.push(999)
>>> heap
MergeableHeap([999])
>>> heap.as_tree()
(999, (), ())
>>> heap.push(888)
>>> heap
MergeableHeap([999, 888])
>>> heap.as_tree()
(888, (999, (), ()), (888, (), ()))
>>> heap.verify()
>>> heap.push(222)
>>> heap
MergeableHeap([999, 222, 888])
>>> heap.as_tree()
(222, (222, (999, (), ()), (222, (), ())), (888, (), ()))
>>> heap.verify()
>>> heap.push(333)
>>> heap
MergeableHeap([999, 222, 888, 333])
>>> heap.as_tree()
(222, (222, (999, (), ()), (222, (), ())), (333, (888, (), ()), (333, (), ())))
>>> heap.verify()
>>> heap.push(777)
>>> heap
MergeableHeap([999, 777, 222, 888, 333])
>>> heap.as_tree()
(222, (222, (777, (999, (), ()), (777, (), ())), (222, (), ())), (333, (888, (), ()), (333, (), ())))
>>> heap.verify()
>>> heap.push(555)
>>> heap
MergeableHeap([999, 777, 222, 888, 555, 333])
>>> heap.as_tree()
(222, (222, (777, (999, (), ()), (777, (), ())), (222, (), ())), (333, (555, (888, (), ()), (555, (), ())), (333, (), ())))
>>> heap.verify()
>>> heap.push(444)
>>> heap
MergeableHeap([999, 777, 222, 444, 888, 555, 333])
>>> heap.as_tree()
(222, (222, (777, (999, (), ()), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (), ())))
>>> heap.verify()
>>> heap.push(666)
>>> heap
MergeableHeap([999, 666, 777, 222, 444, 888, 555, 333])
>>> heap.as_tree()
(222, (222, (666, (666, (999, (), ()), (666, (), ())), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (), ())))
>>> heap.verify()
>>> heap.push(111)
>>> heap
MergeableHeap([999, 666, 777, 222, 444, 888, 555, 333, 111])
>>> heap.as_tree()
(111, (222, (666, (666, (999, (), ()), (666, (), ())), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (111, (555, (888, (), ()), (555, (), ())), (111, (333, (), ()), (111, (), ()))))
>>> heap.verify()
>>> bool(heap)
True
>>> len(heap)
9
>>> [*iter(heap)]
[999, 666, 777, 222, 444, 888, 555, 333, 111]
>>> heap.peak()
111
>>> heap.pop()
111
>>> heap.as_tree()
(222, (222, (666, (666, (), ()), (777, (), ())), (222, (222, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (333, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
222
>>> heap.as_tree()
(333, (444, (777, (), ()), (444, (666, (), ()), (444, (), ()))), (333, (555, (888, (), ()), (555, (), ())), (333, (333, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
333
>>> heap.as_tree()
(444, (444, (777, (), ()), (444, (), ())), (555, (555, (888, (), ()), (555, (), ())), (666, (666, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
444
>>> heap.as_tree()
(555, (777, (777, (), ()), (888, (), ())), (555, (555, (), ()), (666, (666, (), ()), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
555
>>> heap.as_tree()
(666, (777, (777, (), ()), (888, (), ())), (666, (666, (), ()), (999, (), ())))
>>> heap.verify()
>>> heap.pop()
666
>>> heap.as_tree()
(777, (888, (), ()), (777, (777, (), ()), (999, (), ())))
>>> heap.verify()
>>> heap.pop()
777
>>> heap.as_tree()
(888, (888, (), ()), (999, (), ()))
>>> heap.verify()
>>> [*heap.iter_pops()]
[888, 999]
>>> len(heap)
0
>>> bool(heap)
False
>>> heap.as_tree()
()
>>> heap.verify()
>>> heap
MergeableHeap()
>>> heap.pushs(range(111, 1000, 111))
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.push_then_pop(-555)
-555
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.as_tree()
()
>>> heap.pushs(range(111, 1000, 111))
>>> heap.push(-555)
>>> heap.as_tree()
(-555, (-555, (-555, (-555, (111, (), ()), (-555, (), ())), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.pop()
-555
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.as_tree()
()
>>> heap.pushs(range(999, 100, -111))
>>> heap.as_tree()
(111, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (111, (444, (555, (), ()), (444, (), ())), (111, (222, (333, (), ()), (222, (), ())), (111, (), ()))))
>>> heap.verify()
>>> heap.pop_then_push(-555)
111
>>> heap.as_tree()
(-555, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (-555, (444, (555, (), ()), (444, (), ())), (-555, (222, (333, (), ()), (222, (), ())), (-555, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.as_tree()
()
>>> heap.pushs(range(999, 100, -111))
>>> heap.as_tree()
(111, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (111, (444, (555, (), ()), (444, (), ())), (111, (222, (333, (), ()), (222, (), ())), (111, (), ()))))
>>> heap.pop()
111
>>> heap.as_tree()
(222, (666, (888, (999, (), ()), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (222, (444, (555, (), ()), (444, (), ())), (222, (222, (), ()), (333, (), ()))))
>>> heap.verify()
>>> heap.push(-555)
>>> heap.as_tree()
(-555, (-555, (-555, (-555, (999, (), ()), (-555, (), ())), (888, (), ())), (666, (777, (), ()), (666, (), ()))), (222, (444, (555, (), ()), (444, (), ())), (222, (222, (), ()), (333, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 1000, 111))
>>> heap.pop_then_push(700)
111
>>> heap.as_tree()
(222, (222, (222, (700, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.push_then_pop(400)
222
>>> heap.as_tree()
(333, (333, (400, (700, (), ()), (400, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 1000, 111))
>>> heap2 = MergeableHeap(range(99, 10, -11))
>>> heap.as_tree()
(111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap2.as_tree()
(11, (66, (88, (99, (), ()), (88, (), ())), (66, (77, (), ()), (66, (), ()))), (11, (44, (55, (), ()), (44, (), ())), (11, (22, (33, (), ()), (22, (), ())), (11, (), ()))))
>>> heap.eat(heap2)
>>> heap2.as_tree()
()
>>> heap.as_tree()
(11, (111, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ())))), (11, (66, (88, (99, (), ()), (88, (), ())), (66, (77, (), ()), (66, (), ()))), (11, (44, (55, (), ()), (44, (), ())), (11, (22, (33, (), ()), (22, (), ())), (11, (), ())))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 1000, 111))
>>> heap2 = MergeableHeap(range(44, 10, -11))
>>> heap2.as_tree()
(11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ())))
>>> heap.eat(heap2)
>>> heap2.as_tree()
()
>>> heap.as_tree()
(11, (11, (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ()))), (11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ())))), (555, (555, (555, (), ()), (666, (), ())), (777, (777, (777, (), ()), (888, (), ())), (999, (), ()))))
>>> heap.verify()
>>> heap.clear()
>>> heap.pushs(range(111, 500, 111))
>>> heap2 = MergeableHeap(range(88, 10, -11))
>>> heap.as_tree()
(111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ())))
>>> heap2.as_tree()
(11, (55, (77, (88, (), ()), (77, (), ())), (55, (66, (), ()), (55, (), ()))), (11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ()))))
>>> heap.eat(heap2)
>>> heap2.as_tree()
()
>>> heap.as_tree()
(11, (55, (55, (77, (88, (), ()), (77, (), ())), (55, (66, (), ()), (55, (), ()))), (111, (111, (111, (), ()), (222, (), ())), (333, (333, (), ()), (444, (), ())))), (11, (33, (44, (), ()), (33, (), ())), (11, (22, (), ()), (11, (), ()))))
>>> heap.verify()
>>> heap = MergeableHeap(key=len)
>>> heap.push({1,2,3})
>>> heap.push(range(100))
>>> heap.peak()
{1, 2, 3}
>>> heap.verify()
>>> heap = MergeableHeap(key=len, __lt__=opss.__gt__)
>>> heap.push({1,2,3})
>>> heap.push(range(100))
>>> heap.peak()
range(0, 100)
>>> heap.verify()
>>> heap = MergeableHeap(key=len, __lt__=opss.__gt__, reverse=True)
>>> heap.push({1,2,3})
>>> heap.push(range(100))
>>> heap.peak()
{1, 2, 3}
>>> heap.verify()
>>> heap = MergeableHeap([1, 2, 3])
>>> heap
MergeableHeap([1, 2, 3])
>>> heap.merge(heap)
>>> heap
MergeableHeap([1, 2, 3, 1, 2, 3])
>>> heap.verify()
>>> heap2 = MergeableHeap([4, 5, 6])
>>> heap2
MergeableHeap([4, 5, 6])
>>> heap | heap2
MergeableHeap([1, 2, 3, 4, 5, 6, 1, 2, 3])
>>> heap
MergeableHeap([1, 2, 3, 1, 2, 3])
>>> heap2
MergeableHeap([4, 5, 6])
>>> heap.verify()
>>> heap.merge(heap2)
>>> heap
MergeableHeap([1, 2, 3, 4, 5, 6, 1, 2, 3])
>>> heap2
MergeableHeap([4, 5, 6])
>>> heap.verify()
testing_________goto
#]]]'''
r'''[[[
]]]
#]]]'''
__all__ = '''
MergeableHeap__immutable_tree
MergeableHeap
HeapError__Empty
HeapError__EatSelf
HeapError__Validate
'''.split()
import operator as opss
from itertools import pairwise
from seed.tiny import echo, null_tuple
from seed.helper.repr_input import repr_helper
from collections import namedtuple
class _MHNodeEmpty:
__slots__ = ()
is_empty = True
is_unit = False
is_fork = False
height = 0
size = 0
_empty_node = _MHNodeEmpty()
_MHNode = namedtuple('_MHNode', '''
min_payload
lhs_child
rhs_child
height
size
'''.split()
)
class _MHNode(_MHNode):
__slots__ = ()
is_empty = False
is_unit = False
is_fork = True
__iter__ = None
__len__ = None
__bool__ = None
__getitem__ = None
def __new__(cls, _lt__node, lhs_child, rhs_child, /):
cls.check_args(lhs_child, rhs_child)
(height, size) = cls.prepare_auto_args__shape(lhs_child, rhs_child)
min_payload = cls.prepare_auto_arg__min_payload(_lt__node, lhs_child, rhs_child)
sf = super(__class__, cls).__new__(cls, min_payload, lhs_child, rhs_child, height, size)
return sf
@classmethod
def check_args(cls, lhs_child, rhs_child, /):
assert isinstance(lhs_child, _MHNodeChildTypes)
assert isinstance(rhs_child, _MHNodeChildTypes)
assert abs(lhs_child.height - rhs_child.height) <= 1
assert min(lhs_child.height, rhs_child.height) >= 1
@classmethod
def prepare_auto_args__shape(cls, lhs_child, rhs_child, /):
height = 1+max(lhs_child.height, rhs_child.height)
size = lhs_child.size + rhs_child.size
return (height, size)
@classmethod
def prepare_auto_arg__min_payload(cls, _lt__node, lhs_child, rhs_child, /):
min_child = rhs_child if _lt__node(rhs_child, lhs_child) else lhs_child
min_payload = min_child.min_payload
return min_payload
@property
def children(sf, /):
return (sf.lhs_child, sf.rhs_child)
@property
def sorted_children_by_height(sf, /):
return sorted(sf.children, key=_get_height4node)
@property
def large_child(sf, /):
'giant'
return max(sf.children, key=_get_height4node)
@property
def small_child(sf, /):
'dwarf'
return min(sf.children, key=_get_height4node)
@property
def the_min_payload_child(sf, /):
for child in sf.children:
if sf.min_payload is child.min_payload:
break
else:
            raise AssertionError('logic error')  # was: `raise logic-err`, a NameError placeholder
return child
def another_child_of(sf, old_child, /):
if sf.lhs_child is old_child:
return sf.rhs_child
elif sf.rhs_child is old_child:
return sf.lhs_child
else:
            raise AssertionError('logic error')  # was: `raise logic-err`, a NameError placeholder
def _get_height4node(node, /):
return node.height
_MHNodeUnit = namedtuple('_MHNodeUnit', '''
payload
'''.split()
)
class _MHNodeUnit(_MHNodeUnit):
__slots__ = ()
is_empty = False
is_unit = True
is_fork = False
#crotch fork
lhs_child = _empty_node
rhs_child = _empty_node
height = 1
size = 1
__iter__ = None
__len__ = None
__bool__ = None
__getitem__ = None
@property
def min_payload(sf, /):
return sf.payload
children = (_empty_node,)*2
sorted_children_by_height = children
small_child, large_child = sorted_children_by_height
#no:the_min_payload_child
_MHNodeTypes = (_MHNode, _MHNodeUnit, _MHNodeEmpty)
_MHNodeChildTypes = (_MHNode, _MHNodeUnit)
del _MHNodeEmpty
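# Node taxonomy used below (restating the docstring): the `_empty_node`
# sentinel (height 0), `_MHNodeUnit` leaves that own the payloads (height 1),
# and `_MHNode` forks that cache the min_payload of their two non-empty
# children.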
class HeapError__Empty(Exception):pass
class HeapError__EatSelf(Exception):pass
class HeapError__Validate(Exception):pass
class _MergeableHeap__mixin:
__slots__ = ()
def verify(sf, /):
for node in _unorder_iter_nodes5root(sf._node):
sf._verify__node(node)
def _verify__node(sf, node, /):
if not node.is_empty:
if not all(1 <= node.height-child.height <= 2 for child in node.children): raise HeapError__Validate
if not any(node.height-child.height == 1 for child in node.children): raise HeapError__Validate
if not abs(node.lhs_child.height-node.rhs_child.height) <= 1: raise HeapError__Validate
if node.is_fork:
if any(sf._lt__node(child, node) for child in node.children): raise HeapError__Validate
if not any(node.min_payload is child.min_payload for child in node.children): raise HeapError__Validate
def __bool__(sf, /):
return not (sf._node.is_empty)
def __len__(sf, /):
return (sf._node.size)
def as_tree(sf, /):
'tree=(()|(payload, tree, tree))'
return _node_as_tree(sf._node)
def __iter__(sf, /):
'unorder iter'
return _unorder_iter_payloads5root(sf._node)
def __repr__(sf, /):
kwargs = {}
if not sf._key_func is echo:
kwargs.update(key=sf._key_func)
if not sf._lt is opss.__lt__:
kwargs.update(__lt__=sf._lt)
if not sf._reverse is False:
kwargs.update(reverse=sf._reverse)
iterable = [*sf]
args = [iterable] if iterable else []
return repr_helper(sf, *args, **kwargs)
def peak(sf, /):
if not sf:
raise HeapError__Empty
return sf._node.min_payload
def _lt__node(sf, lhs_node, rhs_node, /):
assert not lhs_node.is_empty
assert not rhs_node.is_empty
return sf._lt__payload(lhs_node.min_payload, rhs_node.min_payload)
        # NOTE: the alternative below is intentionally unreachable (kept for reference):
        #xxx '[_empty_node == +oo] <<== _pushdown_payload_at_root'
if lhs_node.is_empty:
return False
if rhs_node.is_empty:
return True
return sf._lt__payload(lhs_node.min_payload, rhs_node.min_payload)
def _lt__payload(sf, lhs_payload, rhs_payload, /):
if sf._reverse:
lhs_payload, rhs_payload = rhs_payload, lhs_payload
return sf._lt(sf._key_func(lhs_payload), sf._key_func(rhs_payload))
#end-class _MergeableHeap__mixin:
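# Design note (added for clarity): the mixin above carries the read-only logic
# shared by both front-ends below -- a persistent tree variant whose mutating
# 'i*' methods return (new_heap, result) pairs, and a conventional mutable
# wrapper (MergeableHeap) that forwards to it.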
if 0:
MergeableHeap__immutable_tree = namedtuple('MergeableHeap__immutable_tree', '''
_key_func
_lt
_reverse
_node
'''.split()
)
#ValueError: Field names cannot start with an underscore: '_key_func'
else:
MergeableHeap__immutable_tree = namedtuple('MergeableHeap__immutable_tree', '''
Xkey_func
Xlt
Xreverse
Xnode
'''.split()
)
class MergeableHeap__immutable_tree(MergeableHeap__immutable_tree, _MergeableHeap__mixin):
__slots__ = ()
def __new__(cls, iterable=None, /, *, key=None, __lt__=None, reverse=False):
_key_func = echo if key is None else key
_lt = opss.__lt__ if __lt__ is None else __lt__
_reverse = bool(reverse)
#==>> __repr__, eat, _lt__payload
_node = _empty_node
sf = super(__class__, cls).__new__(cls, _key_func, _lt, _reverse, _node)
iterable = '' if iterable is None else iterable
        sf._key_func  # touch the property: fail fast if it is missing
        #bug:sf = sf.ipushs(iterable)
        sf, _ = sf.ipushs(iterable)
        sf._key_func  # re-check after ipushs returned a fresh instance
return sf
@property
def _key_func(sf, /):
return sf.Xkey_func
@property
def _lt(sf, /):
return sf.Xlt
@property
def _reverse(sf, /):
return sf.Xreverse
@property
def _node(sf, /):
return sf.Xnode
    # referencing the property names here fails fast at class-creation time
    # if any of them is missing:
    _key_func
_lt
_reverse
_node
def _return(sf, root, result, /):
new = sf._mk5new_root(root)
return new, result
def _mk5new_root(sf, root, /):
assert isinstance(root, _MHNodeTypes)
ot = sf._replace(Xnode=root)
assert type(sf) is type(ot)
return ot
def iclear(sf, /):
root = _empty_node
return sf._return(root, None)
def ipushs(sf, iterable, /):
root = _pushs(sf, sf._node, iterable)
return sf._return(root, None)
def iter_ipops(sf, /):
while sf:
sf, min_payload = sf.ipop()
yield min_payload
return
def ipop_then_push(sf, payload, /):
min_payload, root = _pop_then_push(sf, sf._node, payload)
return sf._return(root, min_payload)
def ipush_then_pop(sf, payload, /):
min_payload, root = _push_then_pop(sf, sf._node, payload)
return sf._return(root, min_payload)
def ipop(sf, /):
min_payload, root = _pop(sf, sf._node)
return sf._return(root, min_payload)
def ipush(sf, payload, /):
root = _push(sf, sf._node, payload)
return sf._return(root, None)
def imerge(sf, other_heap, /):
if not isinstance(other_heap, __class__):raise TypeError
if not sf._lt is other_heap._lt:raise TypeError
if not sf._key_func is other_heap._key_func:raise TypeError
if not sf._reverse is other_heap._reverse:raise TypeError
root = _eat(sf, sf._node, other_heap._node)
return sf._return(root, None)
def __or__(sf, other_heap, /):
union, _ = sf.imerge(other_heap)
return union
def copy(sf, /):
'O(1)'
return sf
class MergeableHeap(_MergeableHeap__mixin):
__slots__ = '_heap'.split()
def __init__(sf, iterable=None, /, *, key=None, __lt__=None, reverse=False):
sf._heap = MergeableHeap__immutable_tree(iterable, key=key, __lt__=__lt__, reverse=reverse)
sf._key_func
def copy(sf, /):
'O(1)'
new = type(sf)()
new._heap = sf._heap
return new
@property
def _key_func(sf, /):
return sf._heap._key_func
@property
def _lt(sf, /):
return sf._heap._lt
@property
def _reverse(sf, /):
return sf._heap._reverse
@property
def _node(sf, /):
return sf._heap._node
    # referencing the property names here fails fast at class-creation time
    # if any of them is missing:
    _key_func
_lt
_reverse
_node
def _fwd(sf, f, /, *args, **kwargs):
sf._heap, result = f(*args, **kwargs)
return result
def clear(sf, /):
return sf._fwd(sf._heap.iclear)
def pushs(sf, iterable, /):
return sf._fwd(sf._heap.ipushs, iterable)
def iter_pops(sf, /):
while sf:
yield sf.pop()
return
def pop_then_push(sf, payload, /):
return sf._fwd(sf._heap.ipop_then_push, payload)
def push_then_pop(sf, payload, /):
return sf._fwd(sf._heap.ipush_then_pop, payload)
def pop(sf, /):
return sf._fwd(sf._heap.ipop)
def push(sf, payload, /):
return sf._fwd(sf._heap.ipush, payload)
def merge(sf, other_heap, /):
if not isinstance(other_heap, __class__):raise TypeError
#bug:return sf._fwd(sf._heap.imerge, other_heap)
return sf._fwd(sf._heap.imerge, other_heap._heap)
def __or__(sf, other_heap, /):
new = sf.copy()
new.merge(other_heap)
return new
def eat(sf, other_heap, /):
'heap.eat(std::move(heap))'
if sf is other_heap: raise HeapError__EatSelf
sf.merge(other_heap)
other_heap.clear()
return None
def _pop(sf, root, /):
'-> (min_payload, root)|raise HeapError__Empty'
L = root.size
H = root.height
(min_payload, root) = _pop_(sf, root)
assert root.size == L-1
assert H-1 <= root.height <= H
return (min_payload, root)
def _pop_(sf, root, /):
(deepest_unit, root) = _pop__deepest_unit(sf, root)
#bug:min_payload = _pop_then_push(sf, root, removed_unit.payload)
(min_payload, root) = _push_then_pop(sf, root, deepest_unit.payload)
return (min_payload, root)
def _pop__deepest_unit(sf, root, /):
if root.is_empty: raise HeapError__Empty
ls = [root]
while not ls[-1].is_unit:
ls.append(ls[-1].large_child)
    if not ls: raise AssertionError('logic error: descent path must not be empty')
removed_unit = ls.pop()
assert removed_unit.height == 1
if ls:
#bug:_replace_child(ls[-1], removed_unit, _empty_node)
removed_fork = ls.pop()
assert removed_fork.lhs_child.is_unit
assert removed_fork.rhs_child.is_unit
another_unit = removed_fork.another_child_of(removed_unit)
root = _replace_child_and_fresh_nodes(sf, ls, removed_fork, another_unit)
deepest_unit = removed_unit
else:
assert root.is_unit
deepest_unit = root
root = _empty_node
return (deepest_unit, root)
def _push_then_pop(sf, root, payload, /):
if root.is_empty:
return payload, root
assert root.height
if not sf._lt__payload(root.min_payload, payload):
return payload, root
return _pop_then_push(sf, root, payload)
def _pop_then_push(sf, root, payload, /):
assert root.height > 0
min_payload = root.min_payload
ls = [root]
while not ls[-1].is_unit:
ls.append(ls[-1].the_min_payload_child)
min_unit = ls.pop()
assert min_unit.height == 1
assert min_unit.payload is min_payload
new_unit = _MHNodeUnit(payload)
root = _replace_child_and_fresh_nodes(sf, ls, min_unit, new_unit)
return min_payload, root
def _mk(sf, payloads, /):
'-> root'
ls = [*map(_MHNodeUnit, payloads)]
L = len(ls)
while len(ls) > 1:
#INVARIANT: assert all(0 <= ls[-1].height - node.height <= 1 for node in ls[:-1])
# let H := ls[0].height
# [ls[?].height == H]
# [H <= ls[-1].height <= H+1]
assert 0 <= ls[-1].height - ls[-2].height <= 1
#bug:xs = [_MHNode(sf._lt__node, ls[i], ls[i+1]) for i in range(0, len(ls), 2)]
xs = [_MHNode(sf._lt__node, ls[i], ls[i+1]) for i in range(0, len(ls)-1, 2)]
# [xs[?].height == H+1]
# [H+1 <= xs[-1].height <= H+2]
# [[xs[-1].height == H+2] <-> [[ls[-1].height == H+1][len(ls)%2==0]]]
if len(ls)&1:
# !! [[xs[-1].height == H+2] <-> [[ls[-1].height == H+1][len(ls)%2==0]]]
# !! [len(ls)%2=!=0]
# [xs[-1].height =!= H+2]
# [xs[-1].height == H+1]
#bug:xs.append(ls[-1])
if ls[-1].height < xs[-1].height:
# !! [H <= ls[-1].height <= H+1]
# !! [xs[-1].height == H+1]
# !! [ls[-1].height < xs[-1].height]
# [ls[-1].height == H]
xs[-1] = _MHNode(sf._lt__node, xs[-1], ls[-1])
# [xs[-1].height == H+2]
# [xs[?].height == H+1]
else:
# !! [H <= ls[-1].height <= H+1]
# !! [not [ls[-1].height < xs[-1].height]]
# !! [xs[-1].height == H+1]
# [ls[-1].height == H+1]
xs.append(ls[-1])
# [xs[-1].height == H+1]
# [xs[?].height == H+1]
# [H+1 <= xs[-1].height <= H+2]
# [xs[?].height == H+1]
# [H+1 <= xs[-1].height <= H+2]
# [xs[?].height == H+1]
ls = xs
# [H+1 <= ls[-1].height <= H+2]
# [ls[?].height == H+1]
if not ls:
root = _empty_node
else:
[root] = ls
assert root.size == L
return root
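# Worked shape example for _mk (added for clarity) -- with 5 payloads the
# bottom-up pairing proceeds, in node heights:
#   [1, 1, 1, 1, 1] -> pair -> [2, 2] with a leftover unit of height 1
#   -> leftover folded into the last fork -> [2, 3] -> pair -> [4] == root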
def _pushs(sf, root, payloads, /):
'-> root'
return _eat(sf, root, _mk(sf, payloads))
def _push(sf, root, payload, /):
'-> root'
new_unit = _MHNodeUnit(payload)
return _eat(sf, root, new_unit)
def _replace_child_and_fresh_nodes(sf, ancestors, old_child, new_child, /):
for i in reversed(range(len(ancestors))):
old_parent = ancestors[i]
new_parent = _replace_child(sf, old_parent, old_child, new_child)
ancestors[i] = new_parent
(old_child, new_child) = (old_parent, new_parent)
root = new_child
return root
def _replace_child(sf, parent, old_child, new_child, /):
assert not new_child.is_empty
lhs_child, rhs_child = parent.children
if parent.lhs_child is old_child:
lhs_child = new_child
elif parent.rhs_child is old_child:
rhs_child = new_child
else:
        raise AssertionError('logic error: old_child is not a child of parent')
return _MHNode(sf._lt__node, lhs_child, rhs_child)
def _eat(sf, lhs_root, rhs_root, /):
    '-> root  # O(log M) algorithm'
L = lhs_root.size + rhs_root.size
H = max(lhs_root.height, rhs_root.height)
########
root = _eat_(sf, lhs_root, rhs_root)
assert root.size == L
assert H <= root.height <= H+1
return root
def _eat_(sf, lhs_root, rhs_root, /):
(rhs_root, lhs_root) = sorted([rhs_root, lhs_root], key=_get_height4node)
if rhs_root.is_empty:
#[rhs_root is lhs_root is _empty_node] is OK
return lhs_root
if 0:
if rhs_root is lhs_root:
#[rhs_root is lhs_root is not _empty_node] is bug
raise HeapError__EatSelf
assert 1 <= rhs_root.height <= lhs_root.height
ls = [lhs_root]
while ls[-1].height > rhs_root.height:
ls.append(ls[-1].small_child)
removed_subtree = ls.pop()
assert removed_subtree.height <= rhs_root.height
assert not ls or 1 <= ls[-1].height - removed_subtree.height <= 2
assert not ls or 1 <= ls[-1].height - rhs_root.height <= 2
assert 0 <= rhs_root.height - removed_subtree.height <= 1
if removed_subtree.is_empty:
assert removed_subtree.height == 0 < 1 <= rhs_root.height <= lhs_root.height
assert ls
assert 1 == rhs_root.height < ls[-1].height == 2
new_node = rhs_root
else:
new_node = _MHNode(sf._lt__node, removed_subtree, rhs_root)
assert not ls or 0 <= ls[-1].height - new_node.height <= 1
# small_child ==>> [0 <= ls[-1].another_child<removed_subtree>.height - removed_subtree.height <= 1]
# [rhs_root.height >= removed_subtree.height] ==>> [new_node.height == rhs_root.height+1 > removed_subtree.height]
# [rhs_root.height < ls[-1].height] ==>> [new_node.height <= ls[-1].height]
# [removed_subtree.height < ls[-1].another_child<removed_subtree>.height <= new_node.height <= ls[-1].height]
    # '==' ==>> _fresh_nodes
root = _replace_child_and_fresh_nodes(sf, ls, removed_subtree, new_node)
return root
def _unorder_iter_payloads5root(root, /):
for node in _unorder_iter_nodes5root(root):
if node.is_unit:
yield node.payload
def _unorder_iter_nodes5root(root, /):
ls = [root]
while ls:
node = ls.pop()
if not node.is_empty:
yield node
ls.append(node.rhs_child)
ls.append(node.lhs_child)
def _node_as_tree(root, /):
'tree=(()|(payload, tree, tree))'
xs = []
ls = [root]
while ls:
node = ls.pop()
xs.append(node)
if not node.is_empty:
ls.append(node.rhs_child)
ls.append(node.lhs_child)
#xs = [root, root.lhs_child, ..., root.rhs_child, ...]
while xs:
node = xs.pop()
if node.is_empty:
tree = null_tuple
else:
children = []
children.append(ls.pop())
children.append(ls.pop())
assert len(children)==2
tree = (node.min_payload, *children)
ls.append(tree)
[tree] = ls
return tree
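def _demo__mergeable_heap():
    # Illustrative smoke test (an addition for clarity; not part of the
    # original module):
    h = MergeableHeap([5, 1, 4, 1, 5, 9, 2, 6])
    assert h.peak() == 1
    xs = sorted(h)                  # __iter__ is unordered and non-destructive
    assert [*h.iter_pops()] == xs   # pops drain the heap in ascending order
    a = MergeableHeap([3, 1])
    b = MergeableHeap([2, 0])
    c = a | b                       # merge shares immutable subtrees; a and b are unchanged
    c.verify()
    assert [*c.iter_pops()] == [0, 1, 2, 3]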
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"[email protected]"
]
| |
962da2abca34985938d9ede37484fcea375e39e4 | c730d4df20898a966b8ff215b2d3cce894bcf55e | /Linked_Lists/concatenate_circularLinkedList.py | 9cf6ab5a0b417f94f29737d725ed6562f0d0d219 | []
| no_license | venukumarbv/Datastructure_Algorithms_using_Python | 23a6996b171aafc0bcfc43f55e679ee6ef76c5d7 | cd32691edbf9f7b6cdfc16ea742f78fbc5f003e4 | refs/heads/master | 2022-11-19T22:27:38.751963 | 2020-07-21T10:46:55 | 2020-07-21T10:46:55 | 281,368,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | class Node:
def __init__(self,value):
self.info = value
self.link = None
class CircularLinkedList:
def __init__(self):
self.last = None
def insert_a_node(self, value):
temp = Node(value)
        if self.last is None:  # empty list: make the first node point to itself to close the circle
self.last = temp
self.last.link = self.last
# insert at end
temp.link = self.last.link
self.last.link = temp
self.last = temp
def create_list(self):
n = int(input("Enter number of Nodes: "))
for i in range(n):
            value = int(input("Enter the value of node {}: ".format(i+1)))
self.insert_a_node(value)
def display(self):
if self.last is None:
print("The List is Empty")
return
p = self.last.link
while True:
print('-->', p.info, end='')
p = p.link
if p == self.last.link:
break
print()
    def concatenate(self, list2):
        # If self is empty, the result is simply list2 (adopt its tail).
        if self.last is None:
            self.last = list2.last
            return
        # If list2 is empty, there is nothing to splice in.
        if list2.last is None:
            return
        p = self.last.link                 # head of self
        self.last.link = list2.last.link   # tail of self -> head of list2
        list2.last.link = p                # tail of list2 closes the circle
        self.last = list2.last             # the combined list's tail
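def _concat_demo():
    # Non-interactive check of concatenate (an illustrative addition; the
    # interactive driver below is the original demo):
    a, b = CircularLinkedList(), CircularLinkedList()
    for v in (1, 2, 3):
        a.insert_a_node(v)
    for v in (4, 5):
        b.insert_a_node(v)
    a.concatenate(b)
    out, p = [], a.last.link
    while True:
        out.append(p.info)
        p = p.link
        if p is a.last.link:
            break
    assert out == [1, 2, 3, 4, 5]
_concat_demo()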
clist1 = CircularLinkedList()
clist2 = CircularLinkedList()
print("List 1")
clist1.create_list()
print("List 2")
clist2.create_list()
print("The List 1 is:")
clist1.display()
print("The List 2 is:")
clist2.display()
print("Concatenated List is :")
clist1.concatenate(clist2)
clist1.display() | [
"[email protected]"
]
| |
88dfb09583a57d41a21b54043ae54aaf5acc50da | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/spring/azext_spring/vendored_sdks/appplatform/v2022_11_01_preview/operations/_buildpack_binding_operations.py | fa960c96a5d386a8bcb2472976a7bf735c0cfe36 | [
"MIT",
"LicenseRef-scancode-generic-cla"
]
| permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 37,434 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
"builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
"buildpackBindingName": _SERIALIZER.url("buildpack_binding_name", buildpack_binding_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
"builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
"buildpackBindingName": _SERIALIZER.url("buildpack_binding_name", buildpack_binding_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
"builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
"buildpackBindingName": _SERIALIZER.url("buildpack_binding_name", buildpack_binding_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"buildServiceName": _SERIALIZER.url("build_service_name", build_service_name, "str"),
"builderName": _SERIALIZER.url("builder_name", builder_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class BuildpackBindingOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appplatform.v2022_11_01_preview.AppPlatformManagementClient`'s
:attr:`buildpack_binding` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
**kwargs: Any
) -> _models.BuildpackBindingResource:
"""Get a buildpack binding by name.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param builder_name: The name of the builder resource. Required.
:type builder_name: str
:param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
:type buildpack_binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BuildpackBindingResource or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResource]
request = build_get_request(
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
builder_name=builder_name,
buildpack_binding_name=buildpack_binding_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
buildpack_binding: Union[_models.BuildpackBindingResource, IO],
**kwargs: Any
) -> _models.BuildpackBindingResource:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(buildpack_binding, (IO, bytes)):
_content = buildpack_binding
else:
_json = self._serialize.body(buildpack_binding, "BuildpackBindingResource")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
builder_name=builder_name,
buildpack_binding_name=buildpack_binding_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
@overload
def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
buildpack_binding: _models.BuildpackBindingResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.BuildpackBindingResource]:
"""Create or update a buildpack binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param builder_name: The name of the builder resource. Required.
:type builder_name: str
:param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
:type buildpack_binding_name: str
:param buildpack_binding: The target buildpack binding for the create or update operation.
Required.
:type buildpack_binding:
~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BuildpackBindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
buildpack_binding: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.BuildpackBindingResource]:
"""Create or update a buildpack binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param builder_name: The name of the builder resource. Required.
:type builder_name: str
:param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
:type buildpack_binding_name: str
:param buildpack_binding: The target buildpack binding for the create or update operation.
Required.
:type buildpack_binding: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BuildpackBindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
buildpack_binding: Union[_models.BuildpackBindingResource, IO],
**kwargs: Any
) -> LROPoller[_models.BuildpackBindingResource]:
"""Create or update a buildpack binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param builder_name: The name of the builder resource. Required.
:type builder_name: str
:param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
:type buildpack_binding_name: str
:param buildpack_binding: The target buildpack binding for the create or update operation. Is
either a model type or a IO type. Required.
:type buildpack_binding:
~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BuildpackBindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResource]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
builder_name=builder_name,
buildpack_binding_name=buildpack_binding_name,
buildpack_binding=buildpack_binding,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("BuildpackBindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
builder_name=builder_name,
buildpack_binding_name=buildpack_binding_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
service_name: str,
build_service_name: str,
builder_name: str,
buildpack_binding_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Operation to delete a Buildpack Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param builder_name: The name of the builder resource. Required.
:type builder_name: str
:param buildpack_binding_name: The name of the Buildpack Binding Name. Required.
:type buildpack_binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
builder_name=builder_name,
buildpack_binding_name=buildpack_binding_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings/{buildpackBindingName}"} # type: ignore
@distributed_trace
def list(
self, resource_group_name: str, service_name: str, build_service_name: str, builder_name: str, **kwargs: Any
) -> Iterable["_models.BuildpackBindingResource"]:
"""Handles requests to list all buildpack bindings in a builder.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param build_service_name: The name of the build service resource. Required.
:type build_service_name: str
:param builder_name: The name of the builder resource. Required.
:type builder_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BuildpackBindingResource or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_11_01_preview.models.BuildpackBindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.BuildpackBindingResourceCollection]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
service_name=service_name,
build_service_name=build_service_name,
builder_name=builder_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BuildpackBindingResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/buildServices/{buildServiceName}/builders/{builderName}/buildpackBindings"} # type: ignore
| [
"[email protected]"
]
| |
d0f7ae8b7499a9ca59ab3835244c320159fe0290 | d6589ff7cf647af56938a9598f9e2e674c0ae6b5 | /imagesearch-20201214/setup.py | 3e51f0f9157b6734ebf9de9021339da732085c83 | [
"Apache-2.0"
]
| permissive | hazho/alibabacloud-python-sdk | 55028a0605b1509941269867a043f8408fa8c296 | cddd32154bb8c12e50772fec55429a9a97f3efd9 | refs/heads/master | 2023-07-01T17:51:57.893326 | 2021-08-02T08:55:22 | 2021-08-02T08:55:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py | # -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_imagesearch20201214.
Created on 20/05/2021
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_imagesearch20201214"
NAME = "alibabacloud_imagesearch20201214" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud image search (20201214) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
"alibabacloud_tea_util>=0.3.3, <1.0.0",
"alibabacloud_oss_sdk>=0.1.0, <1.0.0",
"alibabacloud_tea_rpc>=0.1.0, <1.0.0",
"alibabacloud_openplatform20191219>=1.1.1, <2.0.0",
"alibabacloud_oss_util>=0.0.5, <1.0.0",
"alibabacloud_tea_fileform>=0.0.3, <1.0.0",
"alibabacloud_tea_openapi>=0.2.4, <1.0.0",
"alibabacloud_openapi_util>=0.1.4, <1.0.0",
"alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
if os.path.exists('./README.md'):
with open("README.md", encoding='utf-8') as fp:
LONG_DESCRIPTION = fp.read()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache License 2.0",
url=URL,
keywords=["alibabacloud","imagesearch20201214"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=REQUIRES,
python_requires=">=3.6",
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
"Topic :: Software Development"
)
)
| [
"[email protected]"
]
| |
b56f2969e543d5827dc089cd6dcd23d2f694d788 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_chomps.py | b371d13ce61f330b9635ad464a74c0b37a02a7dd | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.verbs._chomp import _CHOMP
# class header
class _CHOMPS(_CHOMP, ):
def __init__(self,):
_CHOMP.__init__(self)
self.name = "CHOMPS"
self.specie = 'verbs'
self.basic = "chomp"
self.jsondata = {}
| [
"[email protected]"
]
| |
7fa882dc540662fffa8f714c6124767e6bb8b1a6 | 7118862c20c0b503f9e901026e48a809e29f5cf5 | /ar_markers/coding.py | 87df7bd2c25243aa1dfe07fe9b784377cd8a6788 | [
"BSD-3-Clause"
]
| permissive | pstraczynski/ar_markers | 964c0405dd7b51ac12f6f4c042626514667f7324 | 408737244ef7a655607858a6852189d5aef02e9b | refs/heads/master | 2022-11-17T22:08:42.885805 | 2020-07-16T11:46:38 | 2020-07-16T11:46:38 | 280,138,112 | 0 | 0 | BSD-3-Clause | 2020-07-16T11:38:53 | 2020-07-16T11:38:52 | null | UTF-8 | Python | false | false | 2,924 | py | # this is all hamming code stuff, no user stuff here ... move along, move along
from numpy import matrix, array
GENERATOR_MATRIX = matrix([
[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
])
REGENERATOR_MATRIX = matrix([
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
])
PARITY_CHECK_MATRIX = matrix([
[1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 1, 1, 1, 1],
])
HAMMINGCODE_MARKER_POSITIONS = [
[1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4], [2, 5],
[3, 1], [3, 2], [3, 3], [3, 4], [3, 5],
[4, 1], [4, 2], [4, 3], [4, 4], [4, 5],
[5, 2], [5, 3], [5, 4],
]
def encode(bits):
encoded_code = ''
if len(bits) % 4 != 0:
raise ValueError('Only a multiple of 4 as bits are allowed.')
while len(bits) >= 4:
four_bits = bits[:4]
bit_array = generate_bit_array(four_bits)
hamming_code = matrix_array_multiply_and_format(GENERATOR_MATRIX, bit_array)
encoded_code += ''.join(hamming_code)
bits = bits[4:]
return encoded_code
def decode(bits):
decoded_code = ''
if len(bits) % 7 != 0:
raise ValueError('Only a multiple of 7 as bits are allowed.')
for bit in bits:
if int(bit) not in [0, 1]:
raise ValueError('The provided bits contain other values that 0 or 1: %s' % bits)
while len(bits) >= 7:
seven_bits = bits[:7]
uncorrected_bit_array = generate_bit_array(seven_bits)
corrected_bit_array = parity_correct(uncorrected_bit_array)
decoded_bits = matrix_array_multiply_and_format(REGENERATOR_MATRIX, corrected_bit_array)
decoded_code += ''.join(decoded_bits)
bits = bits[7:]
return decoded_code
def parity_correct(bit_array):
# Check the parity using the PARITY_CHECK_MATRIX
checked_parity = matrix_array_multiply_and_format(PARITY_CHECK_MATRIX, bit_array)
parity_bits_correct = True
# every value as to be 0, so no error accoured:
for bit in checked_parity:
if int(bit) != 0:
parity_bits_correct = False
if not parity_bits_correct:
error_bit = int(''.join(checked_parity), 2)
for index, bit in enumerate(bit_array):
if error_bit == index + 1:
if bit == 0:
bit_array[index] = 1
else:
bit_array[index] = 0
return bit_array
def matrix_array_multiply_and_format(matrix, array):
    unformatted = matrix.dot(array).tolist()[0]
    return [str(bit % 2) for bit in unformatted]
def generate_bit_array(bits):
return array([int(bit) for bit in bits])
def extract_hamming_code(mat):
hamming_code = ''
for pos in HAMMINGCODE_MARKER_POSITIONS:
hamming_code += str(int(mat[pos[0], pos[1]]))
return hamming_code
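def _demo__roundtrip():
    # Hypothetical smoke test (an addition; not part of the library).
    data = '1011'
    code = encode(data)                  # Hamming(7,4): 4 data bits -> 7-bit codeword
    assert decode(code) == data          # clean round trip
    flipped = list(code)
    flipped[4] = '1' if flipped[4] == '0' else '0'  # flip codeword position 5
    assert decode(''.join(flipped)) == data         # the single-bit error is corrected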
| [
"[email protected]"
]
| |
a6c223f868e4c11922e97249c425499dc397669a | 9baa9f1bedf7bc973f26ab37c9b3046824b80ca7 | /venv-bck/lib/python2.7/site-packages/pymongo/write_concern.py | d16f1d0b8e1ba18818c6bcc891bc21c10cae0f6b | []
| no_license | shakthydoss/suriyan | 58774fc5de1de0a9f9975c2ee3a98900e0a5dff4 | 8e39eb2e65cc6c6551fc165b422b46d598cc54b8 | refs/heads/master | 2020-04-12T05:36:59.957153 | 2017-01-08T06:12:13 | 2017-01-08T06:12:13 | 59,631,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,444 | py | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with write concerns."""
from bson.py3compat import integer_types, string_type
from pymongo.errors import ConfigurationError
class WriteConcern(object):
"""WriteConcern
:Parameters:
- `w`: (integer or string) Used with replication, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<integer>` always includes the replica
set primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). **w=0 disables acknowledgement
of write operations and can not be used with other write concern
options.**
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`.
"""
__slots__ = ("__document", "__acknowledged")
def __init__(self, w=None, wtimeout=None, j=None, fsync=None):
self.__document = {}
self.__acknowledged = True
if wtimeout is not None:
if not isinstance(wtimeout, integer_types):
raise TypeError("wtimeout must be an integer")
self.__document["wtimeout"] = wtimeout
if j is not None:
if not isinstance(j, bool):
raise TypeError("j must be True or False")
self.__document["j"] = j
if fsync is not None:
if not isinstance(fsync, bool):
raise TypeError("fsync must be True or False")
if j and fsync:
raise ConfigurationError("Can't set both j "
"and fsync at the same time")
self.__document["fsync"] = fsync
if self.__document and w == 0:
raise ConfigurationError("Can not use w value "
"of 0 with other options")
if w is not None:
if isinstance(w, integer_types):
self.__acknowledged = w > 0
elif not isinstance(w, string_type):
raise TypeError("w must be an integer or string")
self.__document["w"] = w
@property
def document(self):
"""The document representation of this write concern.
.. note::
:class:`WriteConcern` is immutable. Mutating the value of
:attr:`document` does not mutate this :class:`WriteConcern`.
"""
return self.__document.copy()
@property
def acknowledged(self):
"""If ``True`` write operations will wait for acknowledgement before
returning.
"""
return self.__acknowledged
def __repr__(self):
return ("WriteConcern(%s)" % (
", ".join("%s=%s" % kvt for kvt in self.document.items()),))
def __eq__(self, other):
return self.document == other.document
def __ne__(self, other):
return self.document != other.document
| [
"[email protected]"
]
| |
19225bced8ac87070dfd4bf7df8d4fe653fba6af | 0d59fa410624676908e1470fb9105cb8a280525c | /Algorithms/itertools/itertools_cycle.py | 779bb2629e1349e4c5d5978c5e075686ef194ad3 | [
"MIT"
]
| permissive | Nobodylesszb/python_module | 122d41e776036dfc61a187e383dda821c35e25c4 | 37d2cdcf89a3ff02a9e560696a059cec9272bd1f | refs/heads/master | 2020-05-31T07:48:57.695494 | 2019-07-29T11:32:17 | 2019-07-29T11:32:17 | 190,173,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | #该cycle()函数返回一个迭代器,它重复无限期给出的参数的内容。
# 由于它必须记住输入迭代器的全部内容,
# 如果迭代器很长,它可能会消耗相当多的内存
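# A bounded variant (illustrative sketch, not in the original file):
#   from itertools import islice
#   list(islice(cycle('ab'), 5))  # ['a', 'b', 'a', 'b', 'a']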
from itertools import *
for i in zip(range(7), cycle(['a', 'b', 'c'])):
print(i)
"""
output:
(0, 'a')
(1, 'b')
(2, 'c')
(3, 'a')
(4, 'b')
(5, 'c')
(6, 'a')
""" | [
"[email protected]"
]
| |
62cee17ddeb7c10ac5f70ed1eb57139892d7c7ca | a16236f9fbe72be1a8566d2067e4e66921a8a90e | /fbpmp/data_processing/attribution_id_combiner/attribution_id_spine_combiner_cpp.py | b6614231fe3d00730513a482b8b7b874faec7f16 | [
"MIT"
]
| permissive | peking2/fbpcs-1 | dc9e57afc5bab28f0d43ed537d4147e008f51030 | 234bc748f24046a13fbd14ee7794df5d70ab348b | refs/heads/main | 2023-07-29T22:03:05.983480 | 2021-08-18T23:56:25 | 2021-08-18T23:57:19 | 397,813,444 | 0 | 0 | MIT | 2021-08-19T04:15:22 | 2021-08-19T04:15:22 | null | UTF-8 | Python | false | false | 5,169 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import logging
from typing import Optional
from fbpcp.entity.container_instance import ContainerInstanceStatus
from fbpcp.service.onedocker import OneDockerService
from fbpmp.onedocker_binary_names import OneDockerBinaryNames
from fbpmp.pid.service.pid_service.pid_stage import PIDStage
# 10800 s = 3 hrs
DEFAULT_CONTAINER_TIMEOUT_IN_SEC = 10800
class CppAttributionIdSpineCombinerService:
def _get_combine_cmd_args_for_container(
self,
spine_path: str,
data_path: str,
output_path: str,
run_name: str,
tmp_directory: str,
padding_size: int,
sort_strategy: str,
) -> str:
# TODO: Probably put exe in an env variable?
# Try to align with existing paths
cmd_args = " ".join(
[
f"--spine_path={spine_path}",
f"--data_path={data_path}",
f"--output_path={output_path}",
f"--run_name={run_name}",
f"--tmp_directory={tmp_directory}",
f"--padding_size={padding_size}",
f"--sort_strategy={sort_strategy}",
]
)
return cmd_args
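    # Example of the string the method above builds (hypothetical paths and
    # run name, shown only for illustration):
    #   --spine_path=s3://bucket/spine_0 --data_path=s3://bucket/data_0
    #   --output_path=s3://bucket/out_0 --run_name=run1
    #   --tmp_directory=/tmp --padding_size=4 --sort_strategy=sort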
def combine_on_container(
self,
spine_path: str,
data_path: str,
output_path: str,
num_shards: int,
run_name: str,
onedocker_svc: OneDockerService,
tmp_directory: str,
padding_size: int,
binary_version: str,
sort_strategy: str = "sort",
container_timeout: Optional[int] = None,
) -> None:
asyncio.run(
self.combine_on_container_async(
spine_path,
data_path,
output_path,
num_shards,
run_name,
onedocker_svc,
tmp_directory,
padding_size,
binary_version,
sort_strategy,
container_timeout,
)
)
async def combine_on_container_async(
self,
spine_path: str,
data_path: str,
output_path: str,
num_shards: int,
run_name: str,
onedocker_svc: OneDockerService,
tmp_directory: str,
padding_size: int,
binary_version: str,
sort_strategy: str = "sort",
container_timeout: Optional[int] = None,
) -> None:
logger = logging.getLogger(__name__)
timeout = container_timeout or DEFAULT_CONTAINER_TIMEOUT_IN_SEC
# TODO: Combiner could be made async so we don't have to spawn our
# own ThreadPoolExecutor here and instead use async primitives
cmd_args_list = []
for shard in range(num_shards):
# TODO: There's a weird dependency between these two services
# AttributionIdSpineCombiner should operate independently of PIDStage
next_spine_path = PIDStage.get_sharded_filepath(spine_path, shard)
next_data_path = PIDStage.get_sharded_filepath(data_path, shard)
next_output_path = PIDStage.get_sharded_filepath(output_path, shard)
cmd_args = self._get_combine_cmd_args_for_container(
next_spine_path,
next_data_path,
next_output_path,
run_name,
tmp_directory,
padding_size,
sort_strategy,
)
cmd_args_list.append(cmd_args)
containers = await onedocker_svc.start_containers_async(
package_name=OneDockerBinaryNames.ATTRIBUTION_ID_SPINE_COMBINER.value,
version=binary_version,
cmd_args_list=cmd_args_list,
timeout=timeout,
)
# Busy wait until all containers are finished
any_failed = False
for shard, container in enumerate(containers):
# Busy wait until the container is finished
status = ContainerInstanceStatus.UNKNOWN
logger.info(f"Task[{shard}] started, waiting for completion")
while status not in [
ContainerInstanceStatus.FAILED,
ContainerInstanceStatus.COMPLETED,
]:
container = onedocker_svc.get_containers([container.instance_id])[0]
status = container.status
# Sleep 5 seconds between calls to avoid an unintentional DDoS
logger.debug(f"Latest status: {status}")
await asyncio.sleep(5)
logger.info(
f"container_id({container.instance_id}) finished with status: {status}"
)
if status is not ContainerInstanceStatus.COMPLETED:
logger.error(f"Container {container.instance_id} failed!")
any_failed = True
if any_failed:
raise RuntimeError(
"One or more containers failed. See the logs above to find the exact container_id"
)
| [
"[email protected]"
]
| |
c4b3fd6c50a9c062239571170f6518b778e577d4 | fb67e1b98f4170077da0e29617e34317d7d68d53 | /main.py | 4fa1d56aae636f2b2f3181cb77d4674c0efca12c | []
| no_license | HadiGhazali/rock-paper-scissors | 14151f518d0349bb07b4d22d88a2d423165c9553 | 5505a91f27fb448536364aab277f91a4193cf5a2 | refs/heads/main | 2023-02-01T10:04:22.179265 | 2020-12-20T16:12:59 | 2020-12-20T16:12:59 | 318,649,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | from random import choice
from constants import PLAYER_OPTIONS, PLAY_BUTTON, STATUS
from core import check_win, modify_scores, check_total
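# Assumed contracts for these helpers, inferred from their use below (not
# verified against core.py): check_win(user, system) returns a key into
# STATUS, modify_scores(result, scores) returns the updated scores dict,
# and check_total(scores) handles end-of-round bookkeeping.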
scores = {'user': 0, 'system': 0, 'total_user': 0, 'total_system': 0}
play = True
while play:
user_input = input('Enter your choice please: ')
system_input = choice(list(PLAYER_OPTIONS.keys()))
if user_input in PLAYER_OPTIONS.keys():
result = check_win(user_input, system_input)
current_scores = modify_scores(result, scores)
print('your choice:{}, system choice:{},result:{},\t {}-{}'.format(PLAYER_OPTIONS[user_input],
PLAYER_OPTIONS[system_input], STATUS[result],
current_scores['user'],
current_scores['system']))
check_total(current_scores)
elif user_input in PLAY_BUTTON.keys():
play = False
print('Bye!')
else:
print('Invalid input')
| [
"[email protected]"
]
| |
ee143f1efcc713e6d0ebae48abd8d2f0e560c0ad | cf0c4657fd8198b904932a3c924f3c1f22bddd87 | /setup.py | cf7c2a620b5ed21b92440e9125e1ae9bfc4fad00 | []
| no_license | Coconut-System-Engineer/Create-automatic-akun-instagram | 5e46fd2df6c5a3dcd22058a9e009c972340208bd | 00ec28ffed76428a9db7e05f5ad3e3023897ad87 | refs/heads/master | 2021-03-01T03:16:46.573225 | 2020-03-09T06:21:09 | 2020-03-09T06:21:09 | 245,750,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,003 | py | import os
import random
import sys
import time
from time import sleep
os.system('clear')
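# mengetik ("typing" in Indonesian) prints a string one character at a
# time with a small random delay, producing a typewriter effect.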
def mengetik (s):
for c in s + '\n':
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(random.random() * 0.4)
os.system('clear')
sleep(0.1)
print ('Loading...')
sleep(0.1)
mengetik(' > > > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(1)
def main():
print "\033[1;00m==============================================================================================================="
sleep(0.1)
print "\033[1;91m*********************___________***Auto Create Akun Instagram***___________************************************"
sleep(0.1)
print "* 0000000000000 000000000000 000000000000 000000 000000 00000000000000000000 *"
sleep(0.1)
print "* 000 0000 000000000000 0000000000000 00000000000 000000000 00000000000000000000 *"
sleep(0.1)
print "* 000 00 0000 0000 0000 000000 000000 000000 000000 000000 00 00000 00 *"
sleep(0.1)
print "* 000 00000 0000 0000 000000 00000 00000 00000 00000 00000 *"
sleep(0.1)
print "* 000 000000000000 0000 00000 0000 0000 0000 0000 00000 *"
sleep(0.1)
print "* 000 00000 000000000000 00000000000 0000 0000 0000 0000 00000 *"
sleep(0.1)
print "* 000 00 0000 0000 00000000000 00000 00000 00000 00000 00000 *"
sleep(0.1)
print "* 000 0000 0000 0000 0000 000000 000000 000000 000000 000000 00000 *"
sleep(0.1)
print "* 000 0000 000000000000 0000 000000 00000000000 0000000000 00000 *"
sleep(0.1)
print "* 000000000000 000000000000 0000 000000 000000 000000 00000000000000 *"
sleep(0.1)
print "\033[00m \033[1;94m*********************___________****** C O C O N U T ******___________****************************************"
sleep(0.1)
print "\033[00m==============================================================================================================="
print '\n \033[1;92m > > > please wait while the packages are installed < < < \n'
sleep(0.1)
os.system("apt-get update && apt-get install python-pip && pip install selenium")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
os.system("chmod 777 geckodriver-v0.26.0-linux64/geckodriver")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
os.system("cp geckodriver-v0.26.0-linux64/geckodriver /usr/local/bin/")
sleep(0.1)
mengetik('> > > > > > > > > > > > > > > > > > > > > > > > >] 100%')
sleep(0.1)
print '\n \033[1;00m\033[1;94m*************** __________Finished__________ ***************'
main()
| [
"[email protected]"
]
| |
bc0931805ad7e9284f0119c1ac19292c92649d57 | 3624e9f0a026b57ebdafa4e842b93f56e5a8504d | /Codeforces/CodeCraft 2015/Problem H/gen2.py | 46250d91b25f885c1db8bb12d779c6009c0ba217 | [
"MIT"
]
| permissive | ailyanlu1/Competitive-Programming-2 | 54109c8644d3ac02715dc4570916b212412c25c0 | 6c990656178fb0cd33354cbe5508164207012f24 | refs/heads/master | 2020-03-23T07:48:20.560283 | 2018-02-15T06:49:49 | 2018-02-15T06:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | print 10**5,2
n = 10**5
for i in xrange(n):
print n-i,
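# Prints "100000 2" followed by 100000 99999 ... 1 -- presumably a large
# descending sequence meant as a stress test for problem H.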
| [
"[email protected]"
]
| |
37ff219abc5a713483b7f6bfffc7ffcae2e5104d | bbc3ff5dc623774d8cd4e8d8154da353b7523552 | /Lambda_double.py | 11c687a55b041ebac4ec0aece9d8286dd0a4b7b0 | []
| no_license | millanmilu/Learn-Python | e78b562e212fb1854322e726f5663c7f74d3b7f7 | ab5f55a86686d1c7bb5ccbe5201f4186ad8fdbc8 | refs/heads/master | 2020-04-13T20:01:01.892395 | 2019-01-05T14:18:34 | 2019-01-05T14:18:34 | 163,418,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | def my_fun(n):
return lambda a:a*n
mydouble = my_fun(2)
print(mydouble(11)) | [
"[email protected]"
]
| |
464ebf186e3319a72253e12fa4a37890c21aa4a0 | 06a2dab18197a13fc3371debd29b476ae99cb01c | /T3/inputs/dnn2017.py | 3c0661637347e70ff855197b207c33c81ac06421 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
]
| permissive | PandaPhysics/PandaAnalysis | 397a031f9e8d399be1814ab04dd525d69b41f060 | 3167d106d41dfce58219c3e07d30e201ee823b55 | refs/heads/master | 2021-06-18T13:52:57.650900 | 2019-04-08T17:35:29 | 2019-04-08T17:35:29 | 168,376,672 | 0 | 0 | NOASSERTION | 2019-04-08T17:33:55 | 2019-01-30T16:34:09 | C++ | UTF-8 | Python | false | false | 2,252 | py | #!/usr/bin/env python
from re import sub
from sys import argv,exit
from os import system,getenv,path
from time import clock,time
import json
which = int(argv[1])
submit_id = int(argv[2])
sname = argv[0]
argv=[]
import ROOT as root
from PandaCore.Tools.Misc import *
from PandaCore.Utils.load import *
import PandaCore.Tools.job_config as cb
import PandaAnalysis.Tagging.cfg_v8 as tagcfg
import PandaAnalysis.T3.job_utilities as utils
from PandaAnalysis.Flat.analysis import wlnhbb2017, breg
Load('PandaAnalyzer')
data_dir = getenv('CMSSW_BASE') + '/src/PandaAnalysis/data/'
def fn(input_name, isData, full_path):
logger.info(sname+'.fn','Starting to process '+input_name)
# now we instantiate and configure the analyzer
a = breg(True)
a.bjetBDTReg = True
a.bjetDeepReg = True
a.inpath = input_name
a.outpath = utils.input_to_output(input_name)
a.datapath = data_dir
a.isData = isData
utils.set_year(a, 2017)
a.processType = utils.classify_sample(full_path, isData)
if a.processType in {root.pa.kTT, root.pa.kH}:
a.reclusterGen = True # only turn on if necessary
skimmer = root.pa.PandaAnalyzer(a)
return utils.run_PandaAnalyzer(skimmer, isData, a.outpath)
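# fn is the per-file worker passed to utils.main below; it is presumably
# invoked once per input file and returns whatever run_PandaAnalyzer
# reports for the produced output.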
if __name__ == "__main__":
sample_list = cb.read_sample_config('local.cfg',as_dict=False)
to_run = None #sample_list[which]
for s in sample_list:
if which==s.get_id():
to_run = s
break
if not to_run:
logger.error(sname,'Could not find a job for PROCID=%i'%(which))
exit(3)
outdir = getenv('SUBMIT_OUTDIR')
lockdir = getenv('SUBMIT_LOCKDIR')
outfilename = to_run.name+'_%i.root'%(submit_id)
processed = {}
utils.report_start(outdir,outfilename,to_run.files)
wd = utils.isolate()
utils.main(to_run, processed, fn)
utils.hadd(processed.keys())
utils.print_time('hadd')
ret = utils.stageout(outdir,outfilename)
utils.cleanup('*.root')
utils.un_isolate(wd)
utils.print_time('stageout and cleanup')
if not ret:
utils.report_done(lockdir,outfilename,processed)
utils.cleanup('*.lock')
utils.print_time('create lock')
else:
exit(-1*ret)
exit(0)
| [
"[email protected]"
]
| |
a8bf5034a92d0e71d35bafc0166787ac78929292 | 98d7cc2690c8d632a2a8d8867c0d6b2dfab2f13f | /code_featureEngineer/demo0226_logVolDic_discrete01location_missingValue_xgboost_differentFea.py~ | e99a70652af3459398ad05ce2bcfa95483c20b6f | []
| no_license | 2877992943/telstra_binary_classification | f55b6d418486881193f0f3b3eca32d0de710a08a | f3d6fe2e25cd7539c30dbdcd617e2513b2678cce | refs/heads/master | 2021-01-01T05:14:31.284945 | 2016-05-13T02:21:10 | 2016-05-13T02:21:10 | 58,692,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,875 | #!/usr/bin/env python
# encoding=utf-8
"""
discrete feature count
"""
import pandas as pd
import numpy as np
import xgboost as xgb
from scipy.optimize import fmin_powell
from ml_metrics import quadratic_weighted_kappa
import cPickle
import pylab as plt
dataPath='/home/yr/telstra/'
def eval_wrapper(yhat, y): #pred true
y = np.array(y);print y[:10]
y = y.astype(int);print yhat[:10]
yhat = np.array(yhat)
#yhat = np.clip(np.round(yhat), np.min(y), np.max(y)).astype(int)
#####accuracy
#err=np.sum((y-yhat)*(y-yhat))/float(y.shape[0])
#return err
#######-loglikely
return np.mean(-np.log(yhat+0.00001)*y-(1.-y)*np.log(1.-yhat+0.00001) )
#return quadratic_weighted_kappa(yhat, y)
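# Worked example (illustrative): for y=[1, 0] and yhat=[0.9, 0.1] the mean
# negative log-likelihood is roughly -log(0.9) ~ 0.105 per sample.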
def get_params(maxDepth):
plst={
"objective": 'multi:softprob',#"binary:logistic",
"booster": "gbtree",
"eval_metric": "auc",
"eta": 0.01, # 0.06, #0.01,
#"min_child_weight": 240,
"silent":1,
"subsample": 0.75,
"colsample_bytree": 0.68,
"max_depth": maxDepth,
"num_class":3
}
return plst
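# Note (illustrative, not tuned values): with objective 'multi:softprob'
# and num_class=3 the booster returns an (n_samples, 3) matrix of class
# probabilities; eta, subsample and colsample_bytree act as regularizers.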
def pad(train):
train.v22.fillna('',inplace=True)
padded=train.v22.str.pad(4)
spadded=sorted(np.unique(padded))
v22_map={}
c=0
for i in spadded:
v22_map[i]=c
c+=1
train.v22=padded.replace(v22_map,inplace=False)
return train
def save2pickle(c,name):
write_file=open(dataPath+str(name),'wb')
cPickle.dump(c,write_file,-1)#[ (timestamp,[motion,x,y,z]),...]
write_file.close()
def load_pickle(path_i):
f=open(path_i,'rb')
data=cPickle.load(f)#[ [time,[xyz],y] ,[],[]...]
f.close()
#print data.__len__(),data[0]
return data
def str2dummy(fea_xi,allFeaList,logVolumeDic_xi):#allFeaList [string,...]1921
#print 'fea xi',len(fea_xi) #9 nonzero dimention->1920dim 0-1 vec
vec01=[]
#remove volume(int) in fea_xi,allFeaList
#allFeaList=[f for f in allFeaList if type(f)==str]
#fea_xi=[f for f in fea_xi if type(f)==str ]
#
for fi in allFeaList:#for each string_fea in 1921 ,include: string int
if fi in fea_xi:#'log_feature'
#print fi
v=[logVolumeDic_xi[fi] if type(fi)==str and 'feature' in fi else 1][0]
vec01.append(v)
else:vec01.append(0)
return np.array(vec01)
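# Sketch with hypothetical values: for allFeaList=['event_type 1',
# 'feature 2'], fea_xi=['feature 2'] and logVolumeDic_xi={'feature 2': 7},
# str2dummy returns array([0, 7]) -- volume for log features, 1 elsewhere.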
def get_logVolume(patch):
dic={}
log=np.unique(patch.log_feature.values)
for logI in log:
volumeI=np.unique(patch[patch.log_feature==logI].volume.values)[0]
dic[logI]=volumeI
return dic
def howMany(strFi_short,strList_xi):
num=0
for strI in strList_xi:
if type(strI)==str and strFi_short in strI:
num+=1;#print strI
return num
def uniqueInorder(strArr) : #arr[str] ->list
lis=[]
for strI in strArr:
if strI not in lis:lis.append(strI)
return lis
def normal(xi_dic,normDic):#{'severity_type': 1, 'location': 1, 'event_type': 11, 'resource_type': 5, 'feature': 20}
for k,v in normDic.items():
while len(xi_dic[k])<v:
xi_dic[k].append(-1)
#xi={'event_type': ['34', '35', -1, -1, -1, -1, -1, -1, -1, -1, -1], 'feature': ['312', '232', -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], 'severity_type': ['2'], 'volume': [11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], 'location': ['460'], 'resource_type': ['2', -1, -1, -1, -1]}
#get values ,transform into int
return xi_dic
if __name__=='__main__':
#load ->merge ->count_value each fea->factorize ->fillna -> knn mean-> train cross valid
# XGBoost params:
print('Load data...')
train = pd.read_csv("../input/train.csv");
#print train.location.value_counts(),train.fault_severity.value_counts()
print '--train.csv'
for col in train.columns:
print col
print np.unique(train[col].values).shape
event_type=pd.read_csv('../input/event_type.csv')
print '--event_type.csv'
for col in event_type.columns:
print col
print np.unique(event_type[col].values).shape
log_feature=pd.read_csv('../input/log_feature.csv')
print '--log_feature.csv'
for col in log_feature.columns:
print col
print np.unique(log_feature[col].values).shape
resource_type=pd.read_csv('../input/resource_type.csv')
print '--resource_type.csv'
for col in resource_type.columns:
print col
print np.unique(resource_type[col].values).shape
severity_type=pd.read_csv('../input/severity_type.csv')
print '--severity_type.csv'
for col in severity_type.columns:
print col
print np.unique(severity_type[col].values).shape
target = train['fault_severity'];save2pickle(target.values,'target')
#train = train.drop(['ID','target'],axis=1)
test = pd.read_csv("../input/test.csv")
print '--test.csv'
for col in test.columns:
print col
print np.unique(test[col].values).shape
#ids = test['ID'].values
#test = test.drop(['ID'],axis=1)
####
###transform dataframe
trainTest=pd.concat([train,test],axis=0);print trainTest.values.shape
merge1=pd.merge(trainTest,event_type,on='id',how='left')
merge2=pd.merge(merge1,log_feature,on='id',how='left')
merge3=pd.merge(merge2,resource_type,on='id',how='left')
merge4=pd.merge(merge3,severity_type,on='id',how='left')
uniqueId= np.unique(merge4.id.values)
dataDic_count={};targetDic={};
mat=merge4.drop(['id','fault_severity'],axis=1).values;print mat.shape
allFeaList=list(np.unique(mat.flatten() ) )
print len(allFeaList)
for idi in uniqueId[:]:
#for each id
patch= merge4[ merge4['id']==idi ]
target=np.unique(patch.fault_severity.values)[0]
#print 'xi',patch
patch=patch.drop(['id','fault_severity'],axis=1)
#
#logVolumeDic=get_logVolume(patch);
#print logVolumeDic
#
#fea_xi discrete ->count
#{'event_type': ['34', '35'], 'feature': ['312', '232'], 'severity_type': ['2'], 'volume': [11, 6], 'location': ['460'], 'resource_type': ['2']}
fea_xi={}
for col in patch.columns:
fiStrList=uniqueInorder(patch[col].values)#['event 1','event 3',]
if type(fiStrList[0])==str: #['fea 1','fea 3']
for fi in fiStrList:
k,v= fi.split(' ')#'event_type 3'->['event_type','3']
if k not in fea_xi:fea_xi[k]=[v]
else:fea_xi[k].append(v)
else:#[ 4 5]volume
fea_xi['volume']=fiStrList
#get dummy fea
#fea01=str2dummy(fea_xi,allFeaList,logVolumeDic)#array [1000,]
#print fea01[np.nonzero(fea01)[0]],fea01.shape
#print fea_xi
#dataDic[idi]=fea01;#print fea01.shape
#fea_xi=normal(fea_xi,{'severity_type': 1, 'location': 1, 'event_type': 11, 'resource_type': 5, 'feature': 20,'volume':20})
#print fea_xi,fea_xi.values()
count={}
count['event_type_num']=len(fea_xi['event_type'])
count['resource_type_num']=len(fea_xi['event_type'])
count['feature_num']=len(fea_xi['feature'])
count['volume_num']=sum(fea_xi['volume'])
#feaXiList=[int(xii) for xi in fea_xi.values() for xii in xi ]
#print count
#print feaXiList,len(feaXiList)
dataDic_count[idi]=count.values()
targetDic[idi]=target
#print dataDic,targetDic
save2pickle([dataDic_count,targetDic,allFeaList],'dataTargetFeaAll_count')
#########
#get 01fea 1900(all01 include location),790(all01 except for location),combine count fea
#########
dataDicCount,_,_=load_pickle(dataPath+'dataTargetFeaAll_count')
#all 01 include location, notall01
dataDic_all01,dataDic_notall01,targetDic,allFeaList=load_pickle(dataPath+'dataTarget_FeaAll01_notall01')
dataDic_01_count={}
for idi,arr in dataDic_notall01.items()[:]:
count=np.array(dataDicCount[idi])
x=np.concatenate((count,arr))
dataDic_01_count[idi]=x;#print x.shape
save2pickle([dataDic_01_count,targetDic],'xy_dummy_Count')
"""
####see each xi ,at most how many event_type,
strFiDic={'event_type':0,'feature':0,'resource_type':0,'severity_type':0,'location':0}
#{'severity_type': 1, 'location': 1, 'event_type': 11, 'resource_type': 5, 'feature': 20}
dataDic,targetDic,allFeaList=load_pickle(dataPath+'dataTargetFeaAll_3')#{id:[strFea...]
for idi,strList in dataDic.items()[:]:#each xi
#print strList
for strFi_short,mostNum in strFiDic.items():
num=howMany(strFi_short,strList)
if num>mostNum:strFiDic[strFi_short]=num
print strFiDic
"""
####dataDic transform-> dataFrame
dataFrame={'id':[],'severity_type':[],'location':[],\
'event_type1':[],'event_type2':[],'event_type3':[],'event_type4':[],\
'event_type5':[],'event_type6':[],'event_type7':[],'event_type8':[],\
'event_type9':[],'event_type10':[],'event_type11':[],\
'resource_type1':[],'resource_type2':[],'resource_type3':[],'resource_type4':[],\
'resource_type5':[],\
'feature1':[],'feature2':[],'feature3':[],'feature4':[],'feature5':[],\
'feature6':[],'feature7':[],'feature8':[],'feature9':[],'feature10':[],\
'feature11':[],'feature12':[],'feature13':[],'feature14':[],'feature15':[],\
'feature16':[],'feature17':[],'feature18':[],'feature19':[],'feature20':[]}
"""
#split discrete variable 'bc'
print('Clearing...')
# v22 v56 v125 'bcn'remain,add more variable,err not decrease
train['v22_0']=train.v22.str[0];
train['v22_1']=train.v22.str[1];
train['v22_2']=train.v22.str[2];
train['v22_3']=train.v22.str[3];
train['v56_0']=train.v56.str[0];
train['v56_1']=train.v56.str[1];
train['v125_0']=train.v125.str[0];
train['v125_1']=train.v125.str[1];
train['v113_0']=train.v113.str[0]
train['v113_1']=train.v113.str[1]
strList=['v22','v56','125','113']
newfea=[]
for strI in strList:
for col in train.columns:
if col.find(strI+'_')!=-1:
print col
serial=train[col].values
print np.unique(serial).shape
print np.unique(serial)[:50]
#
s, tmp_indexer = pd.factorize(train[col])
print s.shape
newfea.append(s)
newfea=np.array(newfea).T#[d,n] ->[n,d]
print newfea.shape#[n,10]
save2pickle(newfea,'splitFea')
#pad v22
#train=pad(train)
#
"""
"""
#dropna not factorized,see complete dataset without nan
train1=train.dropna(axis=1,how='any')#12 fea with all value
train2=train.dropna(axis=0,how='any');print 'complete data',train2.values.shape #complete fea data
test2=test.dropna(axis=0,how='any')
train2test2=np.concatenate((train2.values,test2.values),axis=0);print train2test2.shape#not factorized
print 'all value fea',train1.columns
test1=test[train1.columns]
#train=train1;test=test1
#
# fill na ,factorize str feature
missFea=[];completeFea=[]
feaInd=-1
for (train_name, train_series), (test_name, test_series) in zip(train.iteritems(),test.iteritems())[:]:
feaInd+=1
# each columns,fea
valuePercnt_train=train[train_name].count()/float(train.values.shape[0])
valuePercnt_test=test[test_name].count()/float(test.values.shape[0])
#print 'non-nan value fea',train_name,train_series.dtype,valuePercnt_train,valuePercnt_test
##
if train_series.dtype == 'O':
#for objects: factorize
train[train_name], tmp_indexer = pd.factorize(train[train_name]);
#print np.unique(tmp_indexer).shape
test[test_name] = tmp_indexer.get_indexer(test[test_name])
if valuePercnt_test+valuePercnt_train<2.:missFea.append(feaInd)
else:completeFea.append(feaInd)
#but now we have -1 values (NaN)
else:
#print train_name,np.unique(train_series).shape
#for int or float: fill NaN with mean
if valuePercnt_test+valuePercnt_train<2.:
missFea.append(feaInd)
tmp_len = len(train[train_series.isnull()]);
if tmp_len>0:
train.loc[train_series.isnull(), train_name] = -1000
#and Test
tmp_len = len(test[test_series.isnull()])
if tmp_len>0:
test.loc[test_series.isnull(), test_name] = -1000
else:
completeFea.append(feaInd)
tmp_len = len(train[train_series.isnull()]);
if tmp_len>0:
train.loc[train_series.isnull(), train_name] = train_series.mean()
#and Test
tmp_len = len(test[test_series.isnull()])
if tmp_len>0:
test.loc[test_series.isnull(), test_name] = train_series.mean() #TODO
"""
"""
print len(missFea),len(completeFea)
##
missInd=list(np.where(train.values==-1)[0])+list(np.where(train.values==-1000)[0])
train1=train.drop(missInd,axis=0,inplace=False)
missInd=list(np.where(test.values==-1)[0])+list(np.where(test.values==-1000)[0])
test1=test.drop(missInd,axis=0,inplace=False)
train2test2=np.concatenate((train1,test1),axis=0);print 'complete data',train2test2.shape
save2pickle([missFea,completeFea,train.values,test.values,train2test2],'midData')
"""
"""
#####################
#xgboost
###################
# convert data to xgb data structure
missing_indicator=-1000
xgtrain = xgb.DMatrix(train.values, target.values,missing=missing_indicator);
#xgtest = xgb.DMatrix(test,missing=missing_indicator)
# train model
print('Fit different model...')
for boost_round in [50,100][:1]:
for maxDepth in [7,14][:1]:#7 14
xgboost_params = get_params(maxDepth)
# train model
#clf = xgb.train(xgboost_params,xgtrain,num_boost_round=boost_round,verbose_eval=True,maximize=False)
clf=xgb.train(xgboost_params,xgtrain,num_boost_round=boost_round)
# train error
train_preds = clf.predict(xgtrain, ntree_limit=clf.best_iteration)
print maxDepth,boost_round
print('Train err is:', eval_wrapper(train_preds, target.values))# 50 7 0.19
"""
"""
#test predict
print('Predict...')
test_preds = clf.predict(xgtest, ntree_limit=clf.best_iteration)
# Save results
#
preds_out = pd.DataFrame({"ID": ids, "PredictedProb": test_preds})
preds_out.to_csv("../acc_process_submission.csv")
#
"""
"""
{id:{event:[11 events at most for one xi] in order---------------53 kinds
feature:[20] -----------------386
resource:[5]------------------10
severity:[1]------------------------5
location:[1]-------------------------------929+1039
volume:[20]------------------------341
"""
| [
"[email protected]"
]
| ||
a1d4f595354a0c572ca7b1aa0b4325eaf227c9ce | ec21d4397a1939ac140c22eca12491c258ed6a92 | /Zope-2.9/lib/python/DocumentTemplate/tests/testDTML.py | 13621444b23ae590a45dfe79a7efeb236d8bd539 | []
| no_license | wpjunior/proled | dc9120eaa6067821c983b67836026602bbb3a211 | 1c81471295a831b0970085c44e66172a63c3a2b0 | refs/heads/master | 2016-08-08T11:59:09.748402 | 2012-04-17T07:37:43 | 2012-04-17T07:37:43 | 3,573,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,417 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Document Template Tests
"""
__rcs_id__='$Id: testDTML.py 69084 2006-07-10 20:39:09Z tseaver $'
__version__='$Revision: 1.15 $'[11:-2]
import sys, os
import unittest
if __name__=='__main__':
here = os.curdir
else:
from DocumentTemplate import tests
here = tests.__path__[0]
def read_file(name):
f = open(os.path.join(here, name), 'r')
res = f.read()
f.close()
return res
from DocumentTemplate.DT_HTML import HTML, String
from ExtensionClass import Base
class D:
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, **kw):
for k, v in kw.items(): self.__dict__[k]=v
def __repr__(self): return "D(%s)" % `self.__dict__`
def d(**kw): return kw
class PukeError(Exception):
"""Exception raised in test code."""
class DTMLTests (unittest.TestCase):
doc_class = HTML
def testBatchingEtc(self):
def item(key,**kw): return (key,kw)
def item2(key,**kw): return kw
class item_class:
def __init__(self,key,**kw):
for k in kw.keys(): self.__dict__[k]=kw[k]
items=(
item( 1,dealer='Bay Chevy', make='Chevrolet',
model='Caprice', year=96),
item( 2,dealer='Bay Chevy', make='Chevrolet',
model='Nova', year=96),
item( 4,dealer='Bay Chevy', make='Chevrolet',
model='Nova', year=96),
item( 5,dealer='Bay Chevy', make='Chevrolet',
model='Nova', year=96),
item( 3,dealer='Bay Chevy', make='Chevrolet',
model='Corvett', year=96),
item( 6,dealer='Bay Chevy', make='Chevrolet',
model='Lumina', year=96),
item( 7,dealer='Bay Chevy', make='Chevrolet',
model='Lumina', year=96),
item( 8,dealer='Bay Chevy', make='Chevrolet',
model='Lumina', year=95),
item( 9,dealer='Bay Chevy', make='Chevrolet',
model='Corsica', year=96),
item(10,dealer='Bay Chevy', make='Chevrolet',
model='Corsica', year=96),
item(11,dealer='Bay Chevy', make='Toyota',
model='Camry', year=95),
item(12,dealer='Colman Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Cutlass', year=96),
item(12,dealer='Colman Olds', make='Olds',
model='Cutlas', year=95),
item(12,dealer='Colman Olds', make='Dodge',
model='Shadow', year=93),
item(12,dealer='Colman Olds', make='Jeep',
model='Cheroke', year=94),
item(12,dealer='Colman Olds', make='Toyota',
model='Previa', year=92),
item(12,dealer='Colman Olds', make='Toyota',
model='Celica', year=93),
item(12,dealer='Colman Olds', make='Toyota',
model='Camry', year=93),
item(12,dealer='Colman Olds', make='Honda',
model='Accord', year=94),
item(12,dealer='Colman Olds', make='Honda',
model='Accord', year=92),
item(12,dealer='Colman Olds', make='Honda',
model='Civic', year=94),
item(12,dealer='Colman Olds', make='Honda',
model='Civix', year=93),
item( 1,dealer='Spam Chev', make='Chevrolet',
model='Caprice', year=96),
item( 2,dealer='Spam Chev', make='Chevrolet',
model='Nova', year=96),
item( 4,dealer='Spam Chev', make='Chevrolet',
model='Nova', year=96),
item( 5,dealer='Spam Chev', make='Chevrolet',
model='Nova', year=96),
item( 3,dealer='Spam Chev', make='Chevrolet',
model='Corvett', year=96),
item( 6,dealer='Spam Chev', make='Chevrolet',
model='Lumina', year=96),
item( 7,dealer='Spam Chev', make='Chevrolet',
model='Lumina', year=96),
item( 8,dealer='Spam Chev', make='Chevrolet',
model='Lumina', year=95),
item( 9,dealer='Spam Chev', make='Chevrolet',
model='Corsica', year=96),
item(10,dealer='Spam Chev', make='Chevrolet',
model='Corsica', year=96),
item(11,dealer='Spam Chevy', make='Toyota',
model='Camry', year=95),
item(12,dealer='Spam Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Ciera', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Cutlass', year=96),
item(12,dealer='Spam Olds', make='Olds',
model='Cutlas', year=95),
item(12,dealer='Spam Olds', make='Dodge',
model='Shadow', year=93),
item(12,dealer='Spam Olds', make='Jeep',
model='Cheroke', year=94),
item(12,dealer='Spam Olds', make='Toyota',
model='Previa', year=92),
item(12,dealer='Spam Olds', make='Toyota',
model='Celica', year=93),
item(12,dealer='Spam Olds', make='Toyota',
model='Camry', year=93),
item(12,dealer='Spam Olds', make='Honda',
model='Accord', year=94),
item(12,dealer='Spam Olds', make='Honda',
model='Accord', year=92),
item(12,dealer='Spam Olds', make='Honda',
model='Civic', year=94),
item(12,dealer='Spam Olds', make='Honda',
model='Civix', year=93),
)
html=self.doc_class(read_file('dealers.dtml'))
res = html(inventory=items, first_ad=15)
expected = read_file('dealers.out')
self.assertEqual(res,expected)
def testSequenceSummaries(self):
def d(**kw): return kw
data=(d(name='jim', age=38),
# d(name='kak', age=40),
d(name='will', age=7),
d(name='drew', age=4),
d(name='ches', age=1),
)
html = self.doc_class('<dtml-in data mapping>'
'<dtml-if sequence-end>'
'Variable "name": '
'min=<dtml-var min-name> '
'max=<dtml-var max-name> '
'count=<dtml-var count-name> '
'total=<dtml-var total-name> '
'median=<dtml-var median-name> '
'Variable "age": '
'min=<dtml-var min-age> '
'max=<dtml-var max-age> '
'count=<dtml-var count-age> '
'total=<dtml-var total-age> '
'median=<dtml-var median-age> '
'mean=<dtml-var mean-age> '
'<dtml-let sda=standard-deviation-age>'
's.d.=<dtml-var expr="_.int(sda)">'
'</dtml-let>'
'</dtml-if sequence-end>'
'</dtml-in data>')
res = html(data=data)
expected = ('Variable "name": min=ches max=will count=4 total= '
'median=between jim and drew '
'Variable "age": min=1 max=38 count=4 total=50 '
'median=5 mean=12.5 s.d.=17')
assert res == expected, res
def testDTMLDateFormatting(self):
import DateTime
html = self.doc_class(
"<dtml-var name capitalize spacify> is "
"<dtml-var date fmt=year>/<dtml-var date "
"fmt=month>/<dtml-var date fmt=day>")
res = html(date=DateTime.DateTime("1995-12-25"),
name='christmas_day')
expected = 'Christmas day is 1995/12/25'
assert res == expected, res
def testSimpleString(self):
dt = String('%(name)s')
res = dt(name='Chris')
expected = 'Chris'
assert res == expected, res
def testStringDateFormatting(self):
import DateTime
html = String("%(name capitalize spacify)s is "
"%(date fmt=year)s/%(date fmt=month)s/%(date fmt=day)s")
res = html(date=DateTime.DateTime("2001-04-27"),
name='the_date')
expected = 'The date is 2001/4/27'
assert res == expected, res
def testSequence1(self):
html=self.doc_class(
'<dtml-in spam><dtml-in sequence-item><dtml-var sequence-item> '
'</dtml-in sequence-item></dtml-in spam>')
expected = '1 2 3 4 5 6 '
res = html(spam=[[1,2,3],[4,5,6]])
assert res == expected, res
def testSequence2(self):
html=self.doc_class(
'<dtml-in spam><dtml-in sequence-item><dtml-var sequence-item>-'
'</dtml-in sequence-item></dtml-in spam>')
expected = '1-2-3-4-5-6-'
res = html(spam=[[1,2,3],[4,5,6]])
assert res == expected, res
def testNull(self):
html=self.doc_class('<dtml-var spam fmt="$%.2f bobs your uncle" '
'null="spam%eggs!|">')
expected = '$42.00 bobs your unclespam%eggs!|'
res = html(spam=42) + html(spam=None)
assert res == expected, res
def testUrlUnquote(self):
html1 = self.doc_class(
"""
<dtml-var expr="'http%3A//www.zope.org%3Fa%3Db%20123'" fmt=url-unquote>
"""
)
html2 = self.doc_class(
"""
<dtml-var expr="'http%3A%2F%2Fwww.zope.org%3Fa%3Db+123'" fmt=url-unquote-plus>
"""
)
expected = (
"""
http://www.zope.org?a=b 123
"""
)
self.assertEqual(html1(), expected)
self.assertEqual(html2(), expected)
html1 = self.doc_class(
"""
<dtml-var expr="'http%3A//www.zope.org%3Fa%3Db%20123'" url_unquote>
"""
)
html2 = self.doc_class(
"""
<dtml-var expr="'http%3A%2F%2Fwww.zope.org%3Fa%3Db+123'" url_unquote_plus>
"""
)
expected = (
"""
http://www.zope.org?a=b 123
"""
)
self.assertEqual(html1(), expected)
self.assertEqual(html2(), expected)
def test_fmt(self):
html=self.doc_class(
"""
<dtml-var spam>
html=<dtml-var spam fmt=html-quote>
url=<dtml-var spam fmt=url-quote>
multi=<dtml-var spam fmt=multi-line>
dollars=<dtml-var spam fmt=whole-dollars>
cents=<dtml-var spam fmt=dollars-and-cents>
dollars,=<dtml-var spam fmt=dollars-with-commas>
cents,=<dtml-var spam fmt=dollars-and-cents-with-commas>""")
expected = (
'''
4200000
html=4200000
url=4200000
multi=4200000
dollars=$4200000
cents=$4200000.00
dollars,=$4,200,000
cents,=$4,200,000.00
None
html=None
url=None
multi=None
dollars=
cents=
dollars,=
cents,=
<a href="spam">
foo bar
html=<a href="spam">
foo bar
url=%3Ca%20href%3D%22spam%22%3E%0Afoo%20bar
multi=<a href="spam"><br />
foo bar
dollars=
cents=
dollars,=
cents,=''')
res = html(spam=4200000) + html(spam=None) + html(
spam='<a href="spam">\nfoo bar')
self.assertEqual(res,expected)
def test_fmt_reST_include_directive_raises(self):
source = '.. include:: /etc/passwd'
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
self.assertRaises(NotImplementedError, html)
def test_fmt_reST_raw_directive_disabled(self):
EXPECTED = '<h1>HELLO WORLD</h1>'
source = '.. raw:: html\n\n %s\n' % EXPECTED
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
result = html() # don't raise, but don't work either
self.failIf(EXPECTED in result)
self.failUnless('"raw" directive disabled' in result)
from cgi import escape
self.failUnless(escape(EXPECTED) in result)
def test_fmt_reST_raw_directive_file_option_raises(self):
source = '.. raw:: html\n :file: inclusion.txt'
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
self.assertRaises(NotImplementedError, html, source)
def test_fmt_reST_raw_directive_url_option_raises(self):
source = '.. raw:: html\n :url: http://www.zope.org'
html = self.doc_class('<dtml-var name="foo" fmt="restructured-text">')
html._vars['foo'] = source
self.assertRaises(NotImplementedError, html, source)
def testPropogatedError(self):
class foo:
def __len__(self): return 9
def __getitem__(self,i):
if i >= 9: raise IndexError, i
return self.testob(i)
class testob (Base):
__roles__ = None # Public
def __init__(self, index):
self.index = index
self.value = 'item %s' % index
getValue__roles__ = None # Public
def getValue(self):
return self.value
puke__roles__ = None # Public
def puke(self):
raise PukeError('raaalf')
html=self.doc_class(
"""
<dtml-if spam>
<dtml-in spam>
<dtml-var getValue>
<dtml-var puke>
</dtml-in spam>
</dtml-if spam>
""")
try:
html(spam=foo())
except PukeError:
# Passed the test.
pass
else:
assert 0, 'Puke error not propogated'
def testRenderCallable(self):
"Test automatic rendering of callable objects"
class C (Base):
__allow_access_to_unprotected_subobjects__ = 1
x=1
def y(self): return self.x*2
C.h = self.doc_class("The h method, <dtml-var x> <dtml-var y>")
C.h2 = self.doc_class("The h2 method")
expected = "1, 2, The h method, 1 2"
res = self.doc_class("<dtml-var x>, <dtml-var y>, <dtml-var h>")(C())
assert res == expected, res
expected = (
'''
1,
2,
The h2 method''')
res = self.doc_class(
'''
<dtml-var expr="_.render(i.x)">,
<dtml-var expr="_.render(i.y)">,
<dtml-var expr="_.render(i.h2)">''')(i=C())
assert res == expected, res
def testWith(self):
class person:
__allow_access_to_unprotected_subobjects__ = 1
name='Jim'
height_inches=73
expected = 'Hi, my name is %s and my height is %d cm.' % (
person.name, int(person.height_inches * 2.54))
res = self.doc_class(
'<dtml-with person>Hi, my name is <dtml-var name> '
'and my height is <dtml-var "_.int(height_inches*2.54)"> '
'cm.</dtml-with>')(person=person)
assert res == expected, res
def testRaise(self):
try:
res = self.doc_class(
"<dtml-raise IndexError>success!</dtml-raise>")()
except IndexError, v:
res = v
assert str(res) == 'success!', `res`
def testNoItemPush(self):
data=d(sec='B', name='XXX', sub=(d(name='b1'),d(name='b2',sec='XXX')))
html = """
<dtml-with data mapping><dtml-in sub no_push_item>
<dtml-var sec>.<dtml-with sequence-item mapping><dtml-var name></dtml-with>
</dtml-in></dtml-with>
"""
expected = """
B.b1 B.b2"""
result = self.doc_class(html)(data=data)
assert result == expected, result
def testBasicHTMLIn(self):
data=(
d(name='jim', age=39),
d(name='kak', age=29),
d(name='will', age=8),
d(name='andrew', age=5),
d(name='chessie',age=2),
)
html="""
<!--#in data mapping-->
<!--#var name-->, <!--#var age-->
<!--#/in-->
"""
expected = """
jim, 39
kak, 29
will, 8
andrew, 5
chessie, 2
"""
result = self.doc_class(html)(data=data)
assert result == expected, result
def testBasicHTMLIn2(self):
xxx=(D(name=1), D(name=2), D(name=3))
html = """
<!--#in xxx-->
<!--#var name -->
<!--#/in-->
"""
expected = """
1
2
3
"""
result = self.doc_class(html)(xxx=xxx)
assert result == expected, result
def testBasicHTMLIn3(self):
ns = {'prop_ids': ('title', 'id'), 'title': 'good', 'id': 'times'}
html = """:<dtml-in prop_ids><dtml-var sequence-item>=<dtml-var
expr="_[_['sequence-item']]">:</dtml-in>"""
result = self.doc_class(html)(None, ns)
expected = ":title=good:id=times:"
assert result == expected, result
def testHTMLInElse(self):
xxx=(D(name=1), D(name=2), D(name=3))
html="""
<!--#in data mapping-->
<!--#var name-->, <!--#var age-->
<!--#else-->
<!--#in xxx-->
<!--#var name -->
<!--#/in-->
<!--#/in-->
"""
expected = """
1
2
3
"""
result = self.doc_class(html)(xxx=xxx, data={})
assert result == expected, result
def testBasicStringIn(self):
data=(
d(name='jim', age=39),
d(name='kak', age=29),
d(name='will', age=8),
d(name='andrew', age=5),
d(name='chessie',age=2),
)
s="""
%(in data mapping)[
%(name)s, %(age)s
%(in)]
"""
expected = """
jim, 39
kak, 29
will, 8
andrew, 5
chessie, 2
"""
result = String(s)(data=data)
assert expected == result, result
def test_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite( DTMLTests ) )
return suite
def main():
unittest.TextTestRunner().run(test_suite())
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
02f22fe5f02b8df2182114217e0c398ecfda644f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_buyout.py | 5ef4f182702e4982179b22670203e03692b7d3ff | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py |
#calss header
class _BUYOUT():
def __init__(self,):
self.name = "BUYOUT"
self.definitions = [u'(in business) a situation in which a person or group buys all the shares belonging to a company and so gets control of it: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
fec9542a490d26aa855dab0e2d6f204c0a65f190 | 55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850 | /.history/master_20200126005041.py | 0cf75bdb067cd902873d4068cf323f8de7ac42e3 | []
| no_license | StRobertCHSCS/final-project-team | c115dc11b318f7ac782c94860a8801bb558bd107 | 48907e72813c4dd3b48ff36f794f6fce04533219 | refs/heads/master | 2020-12-03T22:35:37.833893 | 2020-01-31T04:05:38 | 2020-01-31T04:05:38 | 231,506,873 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,880 | py | '''
-**make snake longer when eaten
- FIGURE OUT HOW TO KNOW WHERE TO ADD THE NEXT BLOCK (MOVE LAST LOCATION TO BACK)
DONEEE
-fix player_location lists, so that the list only has the location of the current snake location, not infinite list (done)
- fix apple so disappers when you go over it (done)
- add score (done)
-fix speed so that it resets when you go back to main page
- add high score page (txt file, saves high scores outside of program)
'''
import arcade
import random
import json
import time
# Starting screen
alive_button = []
start_button_text = ["Noob: 0.5 speed \n (Refresh rate 1/5 seconds)",
"Normal speed: 1 \n (Refresh rate 1/10 seconds)",
"Hard: 1.5 speed \n (Refresh rate 1/15 seconds)",
"Expert: 2.5 speed \n (Refresh rate 1/25 seconds)"]
for i in range (2, 10, 2):
start_options = [i*100, 200, 150, 50, start_button_text[(i // 2) - 1]] # x, y, width, height
alive_button.append(start_options)
show_text = False
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
# Death screen
dead_button = []
death_button_text = ["Retry", "Starting screen", "High scores", "Quit"]
text_num = 0
for x in range (1, 5, 2):
for y in range (1, 5, 2):
death_options = [x*(SCREEN_WIDTH//4) - 75, y*(SCREEN_HEIGHT//4) - 75 , 150, 150, death_button_text[text_num]] # x, y, width, height
dead_button.append(death_options)
text_num += 1
# Direction the snake is moving in
up = False
down = False
left = False
right = False
# Use snakes position shown on grid, not the python coordinates
player_x_column = 5
player_y_row = 5
# Length of the snake body
body = 1
# Current snake location
snake_pos = []
# Determine where the starting apple will be drawn in
apple_x = random.randint(0, COLUMN_COUNT)
apple_y = random.randint(0, ROW_COUNT)
# Boolean to see if apple needs to be moved
apple_display = True
# Background grid
grid_texture = arcade.load_texture("29x51_grid.jpg")
score = 0
# Landing page, game, death screen, or high score
page = 0
SPEED = 1
high_score = 0
time = 0
millisecond = 0
second = 0
red = 0
green = 255
blue = 0
def on_update(delta_time):
snake_move()
def on_draw():
global page
arcade.start_render()
if page == 0:
start_screen()
elif page == 1:
main_game()
elif page == 2:
grid_background()
death_screen()
elif page == 3:
high_score_page()
print(time)
def stop_watch():
global time, second, millisecond
time += 1
if ((time * SPEED) % SPEED == 0):
millisecond += 1
if (time % SPEED == 0):
second += 1
minute = int(second//60)
arcade.draw_text(f"Time: {minute:02d}:{second:02d}: {millisecond: 01d}", 75, SCREEN_HEIGHT - 50, arcade.color.BLUE,
25, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
def high_score_check():
global high_score, score
with open("high_score.json", "r") as high_score_file:
high_score = json.load(high_score_file)
with open("high_score.json", "w") as high_score_file:
if score > high_score:
json.dump(score, high_score_file)
else:
json.dump(high_score, high_score_file)
def high_score_page():
global high_score
high_score_check()
arcade.draw_text("The high score is " + str(high_score), SCREEN_WIDTH //2, SCREEN_HEIGHT // 2,
arcade.color.WHITE, 50, font_name='calibri', anchor_x="center", anchor_y="center")
def main_game():
grid_background()
snake()
apple()
stop_watch()
def start_screen():
global alive_button
arcade.draw_text("Welcome to snake \n choose your level", (SCREEN_WIDTH//2), 3*(SCREEN_HEIGHT//4),
arcade.color.WHITE, 25, font_name='calibri', anchor_x="center", anchor_y="center")
# arcade.draw_text(str(current_time), (3 * SCREEN_WIDTH // 4), (SCREEN_HEIGHT//4),
# arcade.color.BLACK, 25, font_name='calibri', anchor_x="center", anchor_y="center")
for i in range (0, 4):
arcade.draw_xywh_rectangle_filled(alive_button[i][0],
alive_button[i][1],
alive_button[i][2],
alive_button[i][3],
arcade.color.WHITE)
arcade.draw_text(alive_button[i][4], alive_button[i][0] + (alive_button[i][2] // 2), alive_button[i][1] + (alive_button[i][3] // 2),
arcade.color.BLACK, 10, font_name='calibri', anchor_x="center", anchor_y="center")
def death_screen():
global dead_button, death_button_text, red, green, blue
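    # Step the colour channels around the RGB hue wheel in increments of 5,
    # so the death-screen buttons cycle through a slow rainbow.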
if (red == 255 and 0 <= green < 255 and blue == 0):
green += 5
elif (0 < red <= 255 and green == 255 and blue == 0):
red -= 5
elif (red == 0 and green == 255 and 0 <= blue < 255):
blue += 5
elif (red == 0 and 0 < green <= 255 and blue == 255):
green -= 5
elif (0 <= red < 255 and green == 0 and blue == 255):
red += 5
elif (red == 255 and green == 0 and 0 < blue <= 255):
blue -= 5
for i in range (2):
arcade.draw_text("You died rip lol", random.randint(50, SCREEN_WIDTH), random.randint(50, SCREEN_HEIGHT), (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),
50, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
for i in range (0, 4):
arcade.draw_xywh_rectangle_filled(dead_button[i][0],
dead_button[i][1],
dead_button[i][2],
dead_button[i][3],
(red, blue, green))
arcade.draw_text(dead_button[i][4], dead_button[i][0] + (dead_button[i][2] // 2), dead_button[i][1] + (dead_button[i][3] // 2),
arcade.color.BLACK, 15, font_name='calibri', anchor_x="center", anchor_y="center")
def grid_background():
arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, grid_texture.width, grid_texture.height, grid_texture, 0)
def snake_move():
global player_x, player_y, player_x_column, player_y_row
global snake_pos
global page, score
if (0 <= player_x_column < COLUMN_COUNT) and (0 <= player_y_row < ROW_COUNT):
if up:
player_y_row += 1
elif down:
player_y_row -= 1
elif right:
player_x_column += 1
elif left:
player_x_column -= 1
else:
page = 2
suicide_check = []
for position in snake_pos:
if position not in suicide_check:
suicide_check.append(position)
else:
page = 2
# Player coordinates
player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
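    # Grid cell -> pixel centre: each cell spans WIDTH + MARGIN pixels, so
    # e.g. column 5 maps to 25*5 + 5 + 20//2 = 140 px with these constants.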
def restart():
global player_x_column, player_y_row, snake_len, body, snake_pos
global up, down, left, right
global page, score, time
player_x_column = 5
player_y_row = 5
snake_len = []
body = 1
snake_pos = []
up = False
down = False
left = False
right = False
page = 1
score = 0
time = 0
print ("You died")
def snake():
global player_x_column, player_y_row, snake_len, body
global apple_x, apple_y
arcade.draw_rectangle_filled(player_x , player_y, WIDTH, HEIGHT, arcade.color.BLUE)
snake_len = [[player_x_column, player_y_row]]
snake_pos.append([player_x_column, player_y_row])
if body < len(snake_pos):
snake_pos.pop(0)
if (body > 1):
for num in range (1, body):
snake_len.append([snake_pos[num - 1][0], snake_pos[num - 1][1]])
for i in range (body):
arcade.draw_rectangle_filled(
(MARGIN + WIDTH) * snake_len[i][0] + MARGIN + WIDTH // 2,
(MARGIN + HEIGHT) * snake_len[i][1] + MARGIN + HEIGHT // 2 ,
WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
global apple_x, apple_y, apple_x_coordinate, apple_y_coordinate, body, snake_len
global score
global SPEED
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
if (player_x_column == apple_x) and (player_y_row == apple_y):
apple_display = False
body += 1
print ("hit")
else:
apple_display = True
if apple_display is True:
arcade.draw_rectangle_filled(apple_x_coordinate, apple_y_coordinate, WIDTH, HEIGHT, arcade.color.RED)
elif apple_display is False:
apple_x = random.randint(0, COLUMN_COUNT)
apple_y = random.randint(0, ROW_COUNT)
# Make sure that apple doesn't spawn where the snake is
for apple in range (len(snake_pos)):
if apple_x == snake_pos[apple][0] or apple_y == snake_pos[apple][1]:
apple_x = random.randint(0, COLUMN_COUNT)
apple_y = random.randint(0, ROW_COUNT)
apple_x_coordinate = (MARGIN + WIDTH) * apple_x + MARGIN + WIDTH // 2
apple_y_coordinate = (MARGIN + HEIGHT) * apple_y + MARGIN + HEIGHT // 2
score += 10
apple_display == True
arcade.draw_text("Score is " + str(score), SCREEN_WIDTH - 75, SCREEN_HEIGHT - 50, arcade.color.GREEN,
25, font_name='calibri', bold = True, anchor_x="center", anchor_y="center")
def on_key_press(key, modifiers):
global up, down, left, right
if page == 1:
if (key == arcade.key.W) and (down == False):
up = True
down = False
right = False
left = False
elif (key == arcade.key.S) and (up == False):
down = True
up = False
right = False
left = False
elif (key == arcade.key.A) and (right == False):
left = True
up = False
down = False
right = False
elif (key == arcade.key.D) and (left == False):
right = True
up = False
down = False
left = False
def on_key_release(key, modifiers):
pass
def on_mouse_press(x, y, button, modifiers):
global alive_button, dead_button, page
global start_screen, restart
global high_score_page
global SPEED
if page == 0:
# For starting screen, check which button has been clicked
if (x > alive_button[0][0] and x < alive_button[0][0] + alive_button[0][2] and
y > alive_button[0][1] and y < alive_button[0][1] + alive_button[0][3]):
page += 1
SPEED = 5
arcade.schedule(on_update, 1/(SPEED))
print("noob")
elif (x > alive_button[1][0] and x < alive_button[1][0] + alive_button[1][2] and
y > alive_button[1][1] and y < alive_button[1][1] + alive_button[1][3]):
page += 1
SPEED = 10
arcade.schedule(on_update, 1/(SPEED))
print("normal")
elif (x > alive_button[2][0] and x < alive_button[2][0] + alive_button[2][2] and
y > alive_button[2][1] and y < alive_button[2][1] + alive_button[2][3]):
page += 1
SPEED = 15
arcade.schedule(on_update, 1/(SPEED))
print("hard")
elif (x > alive_button[3][0] and x < alive_button[3][0] + alive_button[3][2] and
y > alive_button[3][1] and y < alive_button[3][1] + alive_button[3][3]):
page += 1
SPEED = 25
arcade.schedule(on_update, 1/(SPEED))
print("expert")
else:
SPEED = 1
if page == 2:
if (x > dead_button[0][0] and x < dead_button[0][0] + dead_button[0][2] and
y > dead_button[0][1] and y < dead_button[0][1] + dead_button[0][3]):
restart()
print("try again")
elif (x > dead_button[1][0] and x < dead_button[1][0] + dead_button[1][2] and
y > dead_button[1][1] and y < dead_button[1][1] + dead_button[1][3]):
start_screen()
print("main")
elif (x > dead_button[2][0] and x < dead_button[2][0] + dead_button[2][2] and
y > dead_button[2][1] and y < dead_button[2][1] + dead_button[2][3]):
high_score_page()
print("high score")
elif (x > dead_button[3][0] and x < dead_button[3][0] + dead_button[3][2] and
y > dead_button[3][1] and y < dead_button[3][1] + dead_button[3][3]):
print("exit")
arcade.close_window()
def setup():
global grid, SPEED
# SPEED = float(input("What fast do you want? \n Noob: Type 0.5 \n Normal: Type 1 \n Hard: Type 1.5 - 2 \n Expert: Type 2.5 or more \n *Changes the refresh rate* \n"))
arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
arcade.set_background_color(arcade.color.BLACK)
arcade.schedule(on_update, 1/SPEED)
# Override arcade window methods
window = arcade.get_window()
window.on_draw = on_draw
window.on_key_press = on_key_press
window.on_key_release = on_key_release
window.on_mouse_press = on_mouse_press
arcade.run()
if __name__ == '__main__':
setup()
| [
"[email protected]"
]
| |
7e862eae0d9148a1e0b88084c5981c3280296cc4 | 53b1cf89f3ac00d86add6dc6e103160d50e1b4ea | /pgadmin/pgadmin4/web/pgadmin/browser/server_groups/servers/tests/test_server_get.py | 338f7fcfb45c96ee37238b621d1a4a0c92353062 | [
"PostgreSQL"
]
| permissive | luvres/armhf | b5e9e59c0e5db7f4a280242a0d940c4066a47716 | aa1ec48e246f1fb8e0f4099fa8d392eddcb414ad | refs/heads/master | 2021-10-01T19:08:53.395884 | 2018-11-28T17:57:42 | 2018-11-28T17:57:42 | 79,672,248 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | # ##########################################################################
#
# #pgAdmin 4 - PostgreSQL Tools
#
# #Copyright (C) 2013 - 2016, The pgAdmin Development Team
# #This software is released under the PostgreSQL Licence
#
# ##########################################################################
from pgadmin.utils.route import BaseTestGenerator
from regression import test_utils as utils
from regression import parent_node_dict
class ServersGetTestCase(BaseTestGenerator):
"""
This class will fetch added servers under default server group
by response code.
"""
scenarios = [
# Fetch the default url for server node
('Default Server Node url', dict(url='/browser/server/obj/'))
]
def setUp(self):
"""This function add the server to test the GET API"""
self.server_id = utils.create_server(self.server)
server_dict = {"server_id": self.server_id}
utils.write_node_info("sid", server_dict)
def runTest(self):
""" This function will fetch the added servers to object browser. """
server_id = parent_node_dict["server"][-1]["server_id"]
if not server_id:
raise Exception("Server not found to test GET API")
response = self.tester.get(self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id),
follow_redirects=True)
self.assertEquals(response.status_code, 200)
def tearDown(self):
"""This function delete the server from SQLite """
utils.delete_server_with_api(self.tester, self.server_id)
| [
"[email protected]"
]
| |
a4c44f2e0343cc29ca7b39dda84c174ba0bae39a | 01733042e84a768b77f64ec24118d0242b2f13b8 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/learnedfilter_dc8ce473700453874488c1ea95947fa8.py | 0e44fc370da4d80e98b03eef81573a428644af64 | [
"MIT"
]
| permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 5,854 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedFilter(Base):
"""This object contains criteria for filtering the learned routes.
The LearnedFilter class encapsulates a required learnedFilter resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'learnedFilter'
_SDM_ATT_MAP = {
'Afi': 'afi',
'EnableAfiSafi': 'enableAfiSafi',
'EnablePrefix': 'enablePrefix',
'Safi': 'safi',
}
def __init__(self, parent):
super(LearnedFilter, self).__init__(parent)
@property
def Capabilities(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.capabilities_4db6ad32c315806e926b0bd131f64535.Capabilities): An instance of the Capabilities class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.capabilities_4db6ad32c315806e926b0bd131f64535 import Capabilities
return Capabilities(self)._select()
@property
def Prefix(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.prefix_14ff2c47c83ae14aa22718e67f21f827.Prefix): An instance of the Prefix class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.prefix_14ff2c47c83ae14aa22718e67f21f827 import Prefix
return Prefix(self)._select()
@property
def Afi(self):
"""
Returns
-------
- number: Address Family Identifier value. Identifies the network layer protocol to be used with these routes.
"""
return self._get_attribute(self._SDM_ATT_MAP['Afi'])
@Afi.setter
def Afi(self, value):
self._set_attribute(self._SDM_ATT_MAP['Afi'], value)
@property
def EnableAfiSafi(self):
"""
Returns
-------
- bool: If enabled, allows the user to set values to be used for BGP-MP - the user-specified AFI and SAFI values for the BGP MP_REACH_NLRI.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableAfiSafi'])
@EnableAfiSafi.setter
def EnableAfiSafi(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableAfiSafi'], value)
@property
def EnablePrefix(self):
"""
Returns
-------
- bool: If enabled, BGP Prefix Filters configured in this dialog will be used to filter for routes that match those filter entries. Only those routes will be stored in the routing table. If disabled, all learned BGP routes will be stored.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnablePrefix'])
@EnablePrefix.setter
def EnablePrefix(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnablePrefix'], value)
@property
def Safi(self):
"""
Returns
-------
- number: Subsequent Address Family Identifier value. Used with, and provides additional information about, the AFI in the NLRI, per RFC 2858.
"""
return self._get_attribute(self._SDM_ATT_MAP['Safi'])
@Safi.setter
def Safi(self, value):
self._set_attribute(self._SDM_ATT_MAP['Safi'], value)
def update(self, Afi=None, EnableAfiSafi=None, EnablePrefix=None, Safi=None):
"""Updates learnedFilter resource on the server.
Args
----
- Afi (number): Address Family Identifier value. Identifies the network layer protocol to be used with these routes.
- EnableAfiSafi (bool): If enabled, allows the user to set values to be used for BGP-MP - the user-specified AFI and SAFI values for the BGP MP_REACH_NLRI.
- EnablePrefix (bool): If enabled, BGP Prefix Filters configured in this dialog will be used to filter for routes that match those filter entries. Only those routes will be stored in the routing table. If disabled, all learned BGP routes will be stored.
- Safi (number): Subsequent Address Family Identifier value. Used with, and provides additional information about, the AFI in the NLRI, per RFC 2858.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| [
"[email protected]"
]
| |
8f1ad74885c3e26272c09d24c7a5c2073c619087 | bb8ed8b5aeede5f503ff5dac3870cf3817619282 | /trunk/soft/common/tools/change_pkg_proto.py | a4d8b24460f16c0c6a6455fd20e6e89f113f0b26 | []
| no_license | mengtest/idle | 561da3b4542ceca8a1b983e9214a57d6ecb7d22d | 6e7866d0f493155fbfc9c2c35062af833217cbd0 | refs/heads/master | 2022-03-01T00:07:51.808702 | 2019-10-31T11:09:22 | 2019-10-31T11:09:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import sys
if __name__ == '__main__':
if len(sys.argv) > 1:
fname = sys.argv[1]
with open(fname, 'r') as f:
content = f.read()
flag = True
start = 0
while flag:
flag = False
i1 = content.find("_pb2 as ", start)
start = i1 + 21
if i1 >= 0:
flag = True
i2 = content.rfind("import", 0, i1)
content = content[0:i2 + 7] + "common.proto." + content[i2 + 7:]
with open(fname, 'w') as f:
f.write(content)
| [
"[email protected]"
]
| |
0715fd0b7eb50cb61eb5b8b45cab73ceb41c0401 | f4534e1f23add4255a810688cc2d1c6c10a4c9b3 | /ch07/ex7-1.py | 39a55a8d82466def138a5764f39a4b20b086866d | [
"MIT"
]
| permissive | jasonhuayen91/Introduction_to_Computing_and_Programming_Using_Python | 610ee2c060dd45d04652fb823f29a88c6bca1c45 | 9f211e66f8711b6c35405a1f40f14fcf9637294a | refs/heads/master | 2021-05-29T23:21:40.326647 | 2015-03-03T16:41:54 | 2015-03-03T16:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | def sumDigit(s):
"""sを文字列とする.
sの中の数字の合計を返す.
例えば, sが'a2b3c'ならば5を返す"""
ret = 0
for d in s:
try:
ret += int(d)
except ValueError:
pass
return ret
print(sumDigit('a2b3c') == 5)
| [
"[email protected]"
]
| |
6a3623bd08a74a8f907ecbdfc4368e677f98e843 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1433.py | a2fb809bcd5cf445b8aa9aa55ee1a7513b1de66e | []
| no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/AFA857F6-C77B-314D-B472-A50BFA0A7BAC.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1433.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
]
| |
3dd6b2986f6fd886dd1179e7b456bb349f201ad3 | e9156143e706fa7981f531dafb4fec72f42d9d78 | /snapflow_bi/functions/transaction_ltv_model/tests/test_transaction_ltv_model.py | 637af7409b431737d5b34b8640b2f23d389eff06 | [
"BSD-3-Clause"
]
| permissive | kvh/snapflow-bi | b5a00b4c8902e663b400e4831da53ce7d1888a21 | 2e0877b19fb0738ba384b798ad1c5c33c4b3111e | refs/heads/master | 2023-06-07T20:27:16.467895 | 2021-06-18T15:17:20 | 2021-06-18T15:17:20 | 308,482,793 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | inputs = {
"transactions": dict(
data="""
customer_id,transacted_at,amount
1,2020-01-01 00:00:00,100
2,2020-02-01 00:00:00,100
2,2020-03-01 00:00:00,100
3,2020-01-01 00:00:00,300
3,2020-04-01 00:00:00,400
4,2020-01-01 00:00:00,100
4,2020-02-01 00:00:00,100
4,2020-03-01 00:00:00,50
5,2020-01-01 00:00:00,1000
""",
schema="bi.Transaction",
)
}
outputs = {
"default": """
customer_id,ltv
1,100
2,100
3,400
4,50
5,1000
"""
}
# from __future__ import annotations
# from dcp.storage.database.utils import get_tmp_sqlite_db_url
# from snapflow import Environment, graph, produce
# from snapflow.testing.utils import str_as_dataframe
# def test_ltv():
# from snapflow_bi import module as bi
# input_data = """
# customer_id,transacted_at,amount
# 1,2020-01-01 00:00:00,100
# 2,2020-02-01 00:00:00,100
# 2,2020-03-01 00:00:00,100
# 3,2020-01-01 00:00:00,300
# 3,2020-04-01 00:00:00,400
# 4,2020-01-01 00:00:00,100
# 4,2020-02-01 00:00:00,100
# 4,2020-03-01 00:00:00,50
# 5,2020-01-01 00:00:00,1000
# """
# env = Environment(metadata_storage=get_tmp_sqlite_db_url())
# txs = str_as_dataframe(env, input_data, nominal_schema=bi.schemas.Transaction)
# g = graph()
# df = g.create_node(
# "core.import_dataframe", params={"dataframe": txs, "schema": "bi.Transaction"}
# )
# ltv = g.create_node(bi.functions.transaction_ltv_model, upstream=df)
# blocks = produce(ltv, env=env, modules=[bi])
# output_df = blocks[0].as_dataframe()
# assert len(output_df) == 5
# assert set(output_df["customer_id"]) == set(i for i in range(1, 6))
| [
"[email protected]"
]
| |
6e2124708a83e98ff77c0a59c40e0542ef09c006 | ea5bc4fedbc076ce20fc51b0a6c0a231b1301fc0 | /tests/test_topchef_client_end_to_end.py | 8205f191f3715f810b426de384416170c960fbf3 | []
| no_license | TopChef/NMRClient | 57f1c692014291aebcd6febf30d8f5d1bb4d8ec7 | 40d1ae3f6bc585ef3707c01f46d8bfbe576bd279 | refs/heads/master | 2020-09-18T19:14:38.566893 | 2016-09-06T17:27:45 | 2016-09-06T17:27:45 | 67,529,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import sys
import time
LIBRARY_PATH = '/opt/topspin/exp/stan/nmr/py/user'
sys.path.append(LIBRARY_PATH)
from topchef_client import NetworkManager
from topchef_client import TopChefService
True = "1"
False = "0"
server_address = 'http://192.168.1.216/dev'
adder_service_id = '1cb40868-101f-11d9-9a55-000cf18a2ce6'
network = NetworkManager(server_address)
service = TopChefService(adder_service_id, network)
assert (service.has_timed_out() == False)
parameters = {'value': 10}
job = service.request_job(parameters)
result = service.get_result_for_job(job, polling_interval=5, timeout=30)
MSG(str(result))
| [
"[email protected]"
]
| |
0e6b15d493b10254f9e208b5e71756058f247465 | 84f073856c8665b0f8b813a46a38f96ccd4f2790 | /object_detection/utils/label_map_util.py | c5e5c86fb326b6e7ce16928714c3540fbed82932 | []
| no_license | fengrk/ml_tools | ad9336e47447e9a0f63ba7fc2e86c7eea51c955e | 70e634250455ff6f3aeb826e781b8096adbdc066 | refs/heads/master | 2023-07-19T15:34:46.780323 | 2019-03-02T03:59:53 | 2019-03-02T03:59:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,442 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
import tensorflow as tf
from google.protobuf import text_format
from ml_tools.object_detection.protos import string_int_label_map_pb2
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError('Label map ids should be >= 0.')
if (item.id == 0 and item.name != 'background' and
item.display_name != 'background'):
raise ValueError('Label map id 0 is reserved for the background label')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
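# Illustrative example (hypothetical categories):
#   create_category_index([{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}])
#   -> {1: {'id': 1, 'name': 'cat'}, 2: {'id': 2, 'name': 'dog'}}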
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Given label map proto returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field as
category name. If False or if the display_name field does not exist, uses
'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info(
'Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
return categories
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def get_label_map_dict(label_map_path,
use_display_name=False,
fill_in_gaps_and_background=False):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path: path to StringIntLabelMap proto text file.
use_display_name: whether to use the label map items' display names as keys.
fill_in_gaps_and_background: whether to fill in gaps and background with
respect to the id field in the proto. The id: 0 is reserved for the
'background' class and will be added if it is missing. All other missing
ids in range(1, max(id)) will be added with a dummy class name
("class_<id>") if they are missing.
Returns:
A dictionary mapping label names to id.
Raises:
ValueError: if fill_in_gaps_and_background and label_map has non-integer or
negative values.
"""
label_map = load_labelmap(label_map_path)
label_map_dict = {}
for item in label_map.item:
if use_display_name:
label_map_dict[item.display_name] = item.id
else:
label_map_dict[item.name] = item.id
if fill_in_gaps_and_background:
values = set(label_map_dict.values())
if 0 not in values:
label_map_dict['background'] = 0
if not all(isinstance(value, int) for value in values):
raise ValueError('The values in label map must be integers in order to'
'fill_in_gaps_and_background.')
if not all(value >= 0 for value in values):
raise ValueError('The values in the label map must be positive.')
if len(values) != max(values) + 1:
# there are gaps in the labels, fill in gaps.
for value in range(1, max(values)):
if value not in values:
label_map_dict['class_' + str(value)] = value
return label_map_dict
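# Sketch of the gap-filling behavior (hypothetical label map with ids 1 and 3):
#   get_label_map_dict(path, fill_in_gaps_and_background=True) would add
#   {'background': 0, 'class_2': 2} alongside the two named classes.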
def create_categories_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns categories list compatible with eval.
This function converts label map proto and returns a list of dicts, each of
which has the following keys:
'id': an integer id uniquely identifying this category.
'name': string representing category name e.g., 'cat', 'dog'.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
label_map = load_labelmap(label_map_path)
max_num_classes = max(item.id for item in label_map.item)
return convert_label_map_to_categories(label_map, max_num_classes,
use_display_name)
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
"""Reads a label map and returns a category index.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
A category index, which is a dictionary that maps integer ids to dicts
containing categories, e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
"""
categories = create_categories_from_labelmap(label_map_path, use_display_name)
return create_category_index(categories)
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
| [
"[email protected]"
]
| |
619937df9bc5ad69bb41fd822a6d57377e711e63 | d659fb0db310793b918640fdb673b9bd755578bc | /third_party/text_analysis.py | 77b623298203a576c583c2364375d44483e1c9d1 | [
"MIT"
]
| permissive | astuk/python-snippets | 562bdcdb23c537650a767fb0369388d9530a67ae | 212f63f820b6f5842f74913ed08da18d41dfe7a4 | refs/heads/master | 2023-06-18T04:29:48.111537 | 2021-07-14T10:55:59 | 2021-07-14T10:55:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from pathlib import Path
from textblob import TextBlob
path = Path("src/text.txt")
with open(path) as f:
text = f.read()
blob = TextBlob(text)
for sentence in blob.sentences:
print(sentence.sentiment.polarity)
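# TextBlob polarity is a float in [-1.0, 1.0]; negative values indicate
# negative sentiment and positive values indicate positive sentiment.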
| [
"[email protected]"
]
| |
00d78c4a4adeb9bd9683c99726c067a3d7829696 | 80d9806dfb09858875c77c285a3ce1ce496dbbcd | /setup.py | d418d3608286c45bad5380aed630c48c76ffa793 | []
| no_license | wkentaro/chainer-cyclegan | 86e9a5a3c8aae03caf37940209aa432738478989 | 64b811773802e4d755eebb5110735f8953beb220 | refs/heads/master | 2021-10-23T15:33:26.856556 | 2019-03-18T13:00:07 | 2019-03-18T13:00:07 | 114,517,994 | 13 | 4 | null | 2018-03-30T14:40:41 | 2017-12-17T07:32:05 | Python | UTF-8 | Python | false | false | 982 | py | import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
version = '1.2.5'
if sys.argv[-1] == 'release':
commands = [
'python setup.py sdist upload',
'git tag v{0}'.format(version),
'git push origin master --tag',
]
for cmd in commands:
subprocess.call(cmd, shell=True)
sys.exit(0)
try:
import cv2 # NOQA
except ImportError:
print('Please install OpenCV.')
    sys.exit(1)
install_requires = []
with open('requirements.txt') as f:
for req in f:
if req.startswith('-e'):
continue
install_requires.append(req.strip())
setup(
name='chainer-cyclegan',
description='Chainer Implementation of CycleGAN.',
version=version,
packages=find_packages(),
install_requires=install_requires,
author='Kentaro Wada',
author_email='[email protected]',
url='https://github.com/wkentaro/chainer-cyclegan',
license='MIT',
)
| [
"[email protected]"
]
| |
0305bffab91530450d963a852da22b235312750e | 41d1e085dc3ec6c329b8d6443035e1e8a1c93bcc | /gridded/tests/test_pysgrid/test_processing_2d.py | dc315187cf10aeba2e0c9777265a7f8e7304e614 | [
"Unlicense"
]
| permissive | Ocean1125/gridded | 9252d3d89ecacc55c59a0ecf6fd60fe6ac0afd6e | 90cca5edf4c8d9a47914c2b6d6f78180d9c280a5 | refs/heads/master | 2023-05-15T13:21:34.144583 | 2021-06-03T21:50:01 | 2021-06-03T21:50:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | """
Created on Apr 3, 2015
@author: ayan
"""
from __future__ import (absolute_import, division, print_function)
import pytest
import numpy as np
from gridded.pysgrid.processing_2d import avg_to_cell_center, rotate_vectors, vector_sum
def test_vector_sum():
x_vector = np.array([3, 5, 9, 11])
y_vector = np.array([4, 12, 40, 60])
sum_result = vector_sum(x_vector, y_vector)
expected = np.array([5, 13, 41, 61])
np.testing.assert_almost_equal(sum_result, expected)
@pytest.fixture
def rotate_vectors_data():
x = np.array([3, 5, 9, 11])
y = np.array([4, 12, 40, 60])
angles_simple = np.array([0, np.pi / 2, 0, np.pi / 2])
angles_complex = np.array([np.pi / 6, np.pi / 5,
np.pi / 4, np.pi / 3])
return x, y, angles_simple, angles_complex
def test_vector_rotation_simple(rotate_vectors_data):
x, y, angles_simple, angles_complex = rotate_vectors_data
rotated_x, rotated_y = rotate_vectors(x, y, angles_simple)
expected_x = np.array([3, -12, 9, -60])
expected_y = np.array([4, 5, 40, 11])
np.testing.assert_almost_equal(rotated_x, expected_x, decimal=3)
np.testing.assert_almost_equal(rotated_y, expected_y, decimal=3)
def test_vector_rotation_complex(rotate_vectors_data):
x, y, angles_simple, angles_complex = rotate_vectors_data
rotated_x, rotated_y = rotate_vectors(x, y, angles_complex)
expected_x = np.array([0.5981, -3.0083, -21.9203, -46.4615])
expected_y = np.array([4.9641, 12.6471, 34.6482, 39.5263])
np.testing.assert_almost_equal(rotated_x, expected_x, decimal=3)
np.testing.assert_almost_equal(rotated_y, expected_y, decimal=3)
@pytest.fixture
def avg_center_data():
return np.array([[4, 5, 9, 10], [8, 39, 41, 20], [5, 29, 18, 71]])
def test_no_transpose(avg_center_data):
data = avg_center_data
avg_result = avg_to_cell_center(data, 1)
expected = np.array([[4.5, 7, 9.5],
[23.5, 40, 30.5],
[17, 23.5, 44.5]])
np.testing.assert_almost_equal(avg_result, expected, decimal=3)
def test_with_transpose(avg_center_data):
data = avg_center_data
avg_result = avg_to_cell_center(data, 0)
expected = np.array([[6, 22, 25, 15], [6.5, 34, 29.5, 45.5]])
np.testing.assert_almost_equal(avg_result, expected, decimal=3)
| [
"[email protected]"
]
| |
d14f782b0de48917af7243ab5ea11b9cf46f61c0 | 794decce384b8e0ba625e421cc35681b16eba577 | /tensorflow/python/ops/nn_loss_scaling_utilities_test.py | a71c0cf5992d24f735c49f3c68f1f7b9a7e2d43c | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | 911gt3/tensorflow | a6728e86100a2d5328280cfefcfa8e7c8de24c4c | 423ea74f41d5f605933a9d9834fe2420989fe406 | refs/heads/master | 2023-04-09T14:27:29.072195 | 2023-04-03T06:20:23 | 2023-04-03T06:22:54 | 258,948,634 | 0 | 0 | Apache-2.0 | 2020-04-26T05:36:59 | 2020-04-26T05:36:58 | null | UTF-8 | Python | false | false | 8,517 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for loss scaling utilities in tensorflow.ops.nn."""
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test as test_lib
class LossUtilitiesTest(test_lib.TestCase, parameterized.TestCase):
def testComputeAverageLossGlobalBatchSize(self):
per_example_loss = [1, 2, 3, 4, 5]
loss = nn_impl.compute_average_loss(per_example_loss, global_batch_size=10)
self.assertEqual(self.evaluate(loss), 1.5)
def testComputeAverageLossGlobalBatchSize_BatchSizeNonScalar(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
ValueError, "global_batch_size must be scalar"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=[10])
def testComputeAverageLossGlobalBatchSize_BatchSizeFloat(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
TypeError, "global_batch_size must be an int"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=10.0)
def testComputeAverageLossGlobalBatchSize_BatchSizeNegative(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, "global_batch_size must be positive"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=-1)
def testComputeAverageLossGlobalBatchSize_BatchSizeZero(self):
per_example_loss = [1, 2, 3, 4, 5]
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, "global_batch_size must be positive"):
nn_impl.compute_average_loss(per_example_loss, global_batch_size=0)
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossDefaultGlobalBatchSize(self, distribution):
# Without strategy - num replicas = 1
per_example_loss = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.compute_average_loss(per_example_loss)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
# With strategy - num replicas = 2
with distribution.scope():
per_replica_losses = distribution.run(
nn_impl.compute_average_loss, args=(per_example_loss,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.) / 3)
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossSampleWeights(self, distribution):
with distribution.scope():
# Scalar sample weight
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2. + 4. + 6.) * 2. / 3)
# Per example sample weight
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=([2., 4., 6.],),
kwargs={"sample_weight": [0.3, 0.5, 0.2]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 4. * 0.5 + 6. * 0.2) / 3)
# Time-step sample weight
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=([[2., 0.5], [4., 1.]],),
kwargs={"sample_weight": [[0.3, 0.7], [0.2, 0.8]]})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(
self.evaluate(loss), (2. * 0.3 + 0.5 * 0.7 + 4. * 0.2 + 1. * 0.8) / 2)
def testComputeAverageLossInvalidSampleWeights(self):
with self.assertRaisesIncompatibleShapesError(
(ValueError, errors_impl.InvalidArgumentError)):
nn_impl.compute_average_loss([2.5, 6.2, 5.],
sample_weight=[0.2, 0.8],
global_batch_size=10)
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossDtype(self, distribution):
with distribution.scope():
per_example_loss = constant_op.constant([2., 4., 6.],
dtype=dtypes.float64)
per_replica_losses = distribution.run(
nn_impl.compute_average_loss,
args=(per_example_loss,),
kwargs={"sample_weight": 2})
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertEqual(loss.dtype, dtypes.float64)
def testComputeAverageLossInvalidRank(self):
per_example_loss = constant_op.constant(2)
# Static rank
with self.assertRaisesRegex(
ValueError, "Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1."):
nn_impl.compute_average_loss(per_example_loss)
with context.graph_mode():
# Dynamic rank
per_example_loss = array_ops.placeholder(dtype=dtypes.float32)
loss = nn_impl.compute_average_loss(per_example_loss)
with self.cached_session() as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Invalid value passed for `per_example_loss`. "
"Expected a tensor with at least rank 1."):
sess.run(loss, {per_example_loss: 2})
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testComputeAverageLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError,
"You are calling `compute_average_loss` in cross replica context"):
nn_impl.compute_average_loss([2, 3])
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testScaleRegularizationLoss(self, distribution):
# Without strategy - num replicas = 1
reg_losses = constant_op.constant([2.5, 6.2, 5.])
loss = nn_impl.scale_regularization_loss(reg_losses)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
# With strategy - num replicas = 2
with distribution.scope():
per_replica_losses = distribution.run(
nn_impl.scale_regularization_loss, args=(reg_losses,))
loss = distribution.reduce("SUM", per_replica_losses, axis=None)
self.assertAllClose(self.evaluate(loss), (2.5 + 6.2 + 5.))
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.mirrored_strategy_with_two_cpus],
mode=["graph", "eager"],
)
)
def testScaleRegularizationLossInCrossReplicaContext(self, distribution):
with distribution.scope():
with self.assertRaisesRegex(
RuntimeError, "You are calling `scale_regularization_loss` in "
"cross replica context"):
nn_impl.scale_regularization_loss([2, 3])
if __name__ == "__main__":
test_lib.main()
| [
"[email protected]"
]
| |
1464ca6d44baf915444d9dad3ecc767fdff28e0e | dffd7156da8b71f4a743ec77d05c8ba031988508 | /ac/abc154/abc154_b/9971915.py | c5baf251f913ab6f70b442a7456aca0f26f2032a | []
| no_license | e1810/kyopro | a3a9a2ee63bc178dfa110788745a208dead37da6 | 15cf27d9ecc70cf6d82212ca0c788e327371b2dd | refs/heads/master | 2021-11-10T16:53:23.246374 | 2021-02-06T16:29:09 | 2021-10-31T06:20:50 | 252,388,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py |
print("x"*len(input()))
| [
"[email protected]"
]
| |
8852a16d08a5a003bc41bff9adedcf3cc48f8f8d | ec34cd789c188573987741d478addc3c4a576f22 | /BIOMD0000000500/model.py | 11dfd41813aa524d632ef5d5903df4221ed7bffd | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | biomodels/BIOMD0000000500 | 49362f1fffbb49e07d8077a5aab81e3ec7072ab5 | 2e28e1c78e37f1bdb716300a0bf902c6e8a0056e | refs/heads/master | 2018-12-31T19:25:22.954078 | 2014-10-16T05:27:55 | 2014-10-16T05:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000500.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | [
"[email protected]"
]
| |
59835f76410fdd430aaafe095baf7b9c493635fe | f848ebf1adb25cc6d188f43fb02c06dad1b01651 | /api/employee.py | 71e0dada8044141e1a869937b0bb167c0e182676 | []
| no_license | miao88318/day03_apiTestIHRM | 673320c724d9a661fa9ed120a62e0d82118719d9 | 213e4a498055e693993b21ca2bc7942af2a25c74 | refs/heads/master | 2022-07-28T04:39:05.390142 | 2020-05-21T07:06:23 | 2020-05-21T07:06:23 | 265,769,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | # 导包
import requests
# 创建员工的api类
class TestEmployeeApi:
def __init__(self):
self.login_url = "http://ihrm-test.itheima.net" + "/api/sys/login"
self.emp_url = "http://ihrm-test.itheima.net" + "/api/sys/user"
def add_emp(self, headers, username, mobile):
response = requests.post(self.emp_url,
json={
"username": username,
"mobile": mobile,
"timeOfEntry": "2020-05-05",
"formOfEmployment": 1,
"workNumber": "123433",
"departmentName": "测试部",
"departmentId": "1063678149528784896",
"correctionTime": "2020-05-17T16:00:00.000Z"
}, headers=headers)
return response
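    # Minimal usage sketch (assumes `headers` carries a valid token from the
    # login API at self.login_url; the response shape below is hypothetical):
    #   api = TestEmployeeApi()
    #   resp = api.add_emp(headers, "tom", "13800000001")
    #   emp_id = resp.json()["data"]["id"]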
def query_emp(self, emp_id, headers):
query_url = self.emp_url + "/" + emp_id
response = requests.get(query_url, headers=headers)
return response
def modify_emp(self,emp_id, headers, username):
modify_url = self.emp_url + "/" + emp_id
response = requests.put(url=modify_url,json={"username":username},
headers=headers)
return response
def delete_emp(self, emp_id, headers):
delete_url = self.emp_url + "/" + emp_id
response = requests.delete(url=delete_url, headers=headers)
return response | [
"[email protected]"
]
| |
738ca2b4d18b5b461b81b8391794ffc365fb64ac | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190422000909.py | 44131ed7c7cc6b39d57b9fe072ecd26c460c9af9 | []
| no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,138 | py | # Jiaxi Zhang
# George McAlear
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
import asyncio
import cozmo
from cozmo.util import distance_mm, degrees, speed_mmps, Pose
#particle filter functionality
class ParticleFilter:
def __init__(self, grid):
self.particles = Particle.create_random(PARTICLE_COUNT, grid)
self.grid = grid
def update(self, odom, r_marker_list):
# ---------- Motion model update ----------
self.particles = motion_update(self.particles, odom)
# ---------- Sensor (markers) model update ----------
self.particles = measurement_update(self.particles, r_marker_list, self.grid)
# ---------- Show current state ----------
# Try to find current best estimate for display
m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)
return (m_x, m_y, m_h, m_confident)
class CozmoWarehouseWorker:
def __init__(self, robot: cozmo.robot.Robot):
self.current_arena_pose = None
self.current_robot_pose = robot.pose
self.robot = robot
# start streaming
robot.camera.image_stream_enabled = True
robot.camera.color_image_enabled = False
robot.camera.enable_auto_exposure()
# Obtain the camera intrinsics matrix
fx, fy = robot.camera.config.focal_length.x_y
cx, cy = robot.camera.config.center.x_y
self.camera_settings = np.array([
[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]
], dtype=np.float)
self.pick_up_pose = Pose(x=4.5, y=13.75, z=0, angle_z=degrees(90))
self.drop_off_pose = Pose(x=21.75, y=13.75, z=0, angle_z=degrees(90))
self.drop_off_directions = [Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), self.drop_off_pose]
self.pick_up_directions = [Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), self.pick_up_pose]
self.drive_speed = speed_mmps(50)
print("Robot initialized!")
self.grid = CozGrid("map_arena.json")
self.pf = ParticleFilter(self.grid)
print("Robot initialized!")
threading.Thread(target=self.runGUI).start()
def runGUI(self):
self.gui = GUIWindow(self.grid, show_camera=True)
self.gui.show_particles(self.pf.particles)
self.gui.show_mean(0, 0, 0)
self.gui.start()
async def drive_to(self, directions):
print("-" * 20 + "DRIVING" + "-" * 20)
if isinstance(directions, (list,)):
for pose in directions:
await self.__drive_to_pose(pose)
else:
await self.__drive_to_pose(directions)
async def __drive_to_pose(self, pose):
print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
translation = (pose - self.current_arena_pose).position
directions = Pose(x=translation.x, y=translation.y, z=0, angle_z=pose.rotation.angle_z)
print("We will follow these directions: ", directions, "\n\n")
await self.__execute_directions(directions)
print("Directions followed!", "\n\n")
self.update_current_arena_pose()
def update_current_arena_pose(self):
print("-" * 20 + "UPDATING POSE" + "-" * 20)
coordinate_systems_diff = diff_heading_deg(self.current_robot_pose.rotation.angle_z.degrees, self.current_arena_pose.rotation.angle_z.degrees)
arena_initial_pose_mm = rotate_point(self.current_robot_pose.position.x, self.current_robot_pose.position.y, coordinate_systems_diff)
arena_final_pose_mm = rotate_point(self.robot.pose.position.x, self.robot.pose.position.y, coordinate_systems_diff)
d_x = arena_final_pose_mm[0] - arena_initial_pose_mm[0]
d_y = arena_final_pose_mm[1] - arena_initial_pose_mm[1]
d_heading = self.robot.pose.rotation.angle_z - self.current_robot_pose.rotation.angle_z
difference_pose = Pose(x=d_x, y=d_y, z=0, angle_z=d_heading)
print("We think we moved ", convertPoseFromMmToInches(arena_final_pose_mm - arena_initial_pose_mm), "\n\n")
self.current_arena_pose = self.current_arena_pose + convertPoseFromMmToInches(arena_final_pose_mm - arena_initial_pose_mm)
print("Current pose is now ", self.current_arena_pose, "\n\n")
async def pick_up_cube(self, tries=5):
print("-" * 20 + "GETTING CUBE" + "-" * 20)
cube = await self.robot.world.wait_for_observed_light_cube(timeout=30)
print("Found cube: %s" % cube)
        action = self.robot.pickup_object(cube, num_retries=tries)
        await action.wait_for_completed()
        # PickupObject exposes its target via .obj; treat a failed action as no cube
        picked_up_cube = action.obj if action.has_succeeded else None
        if picked_up_cube is None:
print("Could not get the cube.")
await self.robot.say_text("Help me!").wait_for_completed()
            await asyncio.sleep(5)
else:
print("Picked up cube!")
async def set_down_cube(self):
print("-" * 20 + "SETTING DOWN CUBE" + "-" * 20)
await self.robot.set_lift_height(0.0).wait_for_completed()
await self.robot.set_head_angle(degrees(3)).wait_for_completed()
async def __execute_directions(self, directions):
print("Current arena pose is:", self.current_arena_pose, "\n\n")
print("Current robot pose is:", self.robot.pose, "\n\n")
await self.robot.turn_in_place(angle=degrees(-self.current_arena_pose.rotation.angle_z.degrees)).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose, "\n\n")
await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=self.drive_speed).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose, "\n\n")
await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose, "\n\n")
await self.robot.drive_straight(distance=distance_mm(directions.position.y * self.grid.scale), speed=self.drive_speed).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose, "\n\n")
print("ROBOT is TURNING ", diff_heading_deg(directions.rotation.angle_z.degrees, 90), "degrees.", "\n\n")
await self.robot.turn_in_place(angle=degrees(diff_heading_deg(directions.rotation.angle_z.degrees, 90))).wait_for_completed()
print("ROBOT is at AFTER FINAL TURN", self.robot.pose, "\n\n")
async def localize(self, turn_angle=20):
print("-" * 20 + "LOCALIZING" + "-" * 20)
# reset our location estimates
conf = False
self.current_arena_pose = Pose(0,0,0,angle_z=degrees(0))
self.pf = ParticleFilter(self.grid)
# reset lift and head
await self.robot.set_lift_height(0.0).wait_for_completed()
await self.robot.set_head_angle(degrees(3)).wait_for_completed()
while not conf:
# move a little
self.current_robot_pose = self.robot.pose
await self.robot.turn_in_place(angle=degrees(turn_angle)).wait_for_completed()
odometry = self.__compute_odometry()
detected_markers, camera_image = await self.__marker_processing()
# update, motion, and measurment with the odometry and marker data
curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)
# update gui
self.gui.show_particles(self.pf.particles)
self.gui.show_mean(curr_x, curr_y, curr_h)
self.gui.show_camera_image(camera_image)
self.gui.updated.set()
self.current_arena_pose = Pose(curr_x , curr_y, 0, angle_z=degrees(curr_h))
print("We localized to arena location ", self.current_arena_pose)
def __compute_odometry(self, cvt_inch=True):
'''
Compute the odometry given the current pose of the robot (use robot.pose)
Input:
- curr_pose: a cozmo.robot.Pose representing the robot's current location
- cvt_inch: converts the odometry into grid units
Returns:
- 3-tuple (dx, dy, dh) representing the odometry
'''
last_x, last_y, last_h = self.current_robot_pose.position.x, self.current_robot_pose.position.y, \
self.current_robot_pose.rotation.angle_z.degrees
curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
self.robot.pose.rotation.angle_z.degrees
dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
if cvt_inch:
dx, dy = dx / self.grid.scale, dy / self.grid.scale
return (dx, dy, diff_heading_deg(curr_h, last_h))
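    # Worked example (hypothetical numbers): moving from (x=0, y=0, h=0 deg)
    # to (x=25, y=0, h=90 deg) with grid.scale == 25 yields the odometry
    # (dx=1.0, dy=0.0, dh=90).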
async def __marker_processing(self, show_diagnostic_image=False):
'''
Obtain the visible markers from the current frame from Cozmo's camera.
Since this is an async function, it must be called using await, for example:
markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)
Input:
- robot: cozmo.robot.Robot object
- camera_settings: 3x3 matrix representing the camera calibration settings
- show_diagnostic_image: if True, shows what the marker detector sees after processing
Returns:
- a list of detected markers, each being a 3-tuple (rx, ry, rh)
(as expected by the particle filter's measurement update)
- a PIL Image of what Cozmo's camera sees with marker annotations
'''
# Wait for the latest image from Cozmo
image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
# Convert the image to grayscale
image = np.array(image_event.image)
image = color.rgb2gray(image)
# Detect the markers
markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)
# Measured marker list for the particle filter, scaled by the grid scale
marker_list = [marker['xyh'] for marker in markers]
marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]
# Annotate the camera image with the markers
if not show_diagnostic_image:
annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(annotated_image, markers, scale=2)
else:
diag_image = color.gray2rgb(diag['filtered_image'])
diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(diag_image, markers, scale=2)
annotated_image = diag_image
return marker_list, annotated_image
async def run(robot: cozmo.robot.Robot):
cosimo = CozmoWarehouseWorker(robot)
await cosimo.localize()
await cosimo.drive_to(cosimo.pick_up_pose)
while True:
await cosimo.pick_up_cube(tries=5)
await cosimo.drive_to(cosimo.drop_off_directions)
await cosimo.set_down_cube()
await cosimo.drive_to(cosimo.pick_up_directions)
class CozmoThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self, daemon=False)
def run(self):
cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger
cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
# cozmo thread
cozmo_thread = CozmoThread()
cozmo_thread.start()
| [
"[email protected]"
]
| |
8d7f11c56fe6bb5b741355a5dfad0460a1ea89f4 | 10b4db1d4f894897b5ee435780bddfdedd91caf7 | /thrift/compiler/test/fixtures/basic-annotations/gen-py3/module/types.pyi | d60450c59a3809ab28d5574573d39ae4ae414318 | [
"Apache-2.0"
]
| permissive | SammyEnigma/fbthrift | 04f4aca77a64c65f3d4537338f7fbf3b8214e06a | 31d7b90e30de5f90891e4a845f6704e4c13748df | refs/heads/master | 2021-11-11T16:59:04.628193 | 2021-10-12T11:19:22 | 2021-10-12T11:20:27 | 211,245,426 | 1 | 0 | Apache-2.0 | 2021-07-15T21:12:07 | 2019-09-27T05:50:42 | C++ | UTF-8 | Python | false | false | 4,129 | pyi | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import folly.iobuf as _fbthrift_iobuf
import thrift.py3.types
import thrift.py3.exceptions
from thrift.py3.types import __NotSet, NOTSET
import typing as _typing
from typing_extensions import Final
import sys
import itertools
__property__ = property
class MyEnum(thrift.py3.types.Enum):
MyValue1: MyEnum = ...
MyValue2: MyEnum = ...
DOMAIN: MyEnum = ...
class MyStructNestedAnnotation(thrift.py3.types.Struct, _typing.Hashable):
class __fbthrift_IsSet:
name: bool
pass
name: Final[str] = ...
def __init__(
self, *,
name: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
name: _typing.Union[str, __NotSet, None]=NOTSET
) -> MyStructNestedAnnotation: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['MyStructNestedAnnotation'], bytes]]: ...
def __hash__(self) -> int: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'MyStructNestedAnnotation') -> bool: ...
def __gt__(self, other: 'MyStructNestedAnnotation') -> bool: ...
def __le__(self, other: 'MyStructNestedAnnotation') -> bool: ...
def __ge__(self, other: 'MyStructNestedAnnotation') -> bool: ...
class MyStruct(thrift.py3.types.Struct, _typing.Hashable):
class __fbthrift_IsSet:
major: bool
package: bool
annotation_with_quote: bool
class_: bool
annotation_with_trailing_comma: bool
empty_annotations: bool
pass
major: Final[int] = ...
package: Final[str] = ...
annotation_with_quote: Final[str] = ...
class_: Final[str] = ...
annotation_with_trailing_comma: Final[str] = ...
empty_annotations: Final[str] = ...
def __init__(
self, *,
major: _typing.Optional[int]=None,
package: _typing.Optional[str]=None,
annotation_with_quote: _typing.Optional[str]=None,
class_: _typing.Optional[str]=None,
annotation_with_trailing_comma: _typing.Optional[str]=None,
empty_annotations: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
major: _typing.Union[int, __NotSet, None]=NOTSET,
package: _typing.Union[str, __NotSet, None]=NOTSET,
annotation_with_quote: _typing.Union[str, __NotSet, None]=NOTSET,
class_: _typing.Union[str, __NotSet, None]=NOTSET,
annotation_with_trailing_comma: _typing.Union[str, __NotSet, None]=NOTSET,
empty_annotations: _typing.Union[str, __NotSet, None]=NOTSET
) -> MyStruct: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['MyStruct'], bytes]]: ...
def __hash__(self) -> int: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'MyStruct') -> bool: ...
def __gt__(self, other: 'MyStruct') -> bool: ...
def __le__(self, other: 'MyStruct') -> bool: ...
def __ge__(self, other: 'MyStruct') -> bool: ...
class SecretStruct(thrift.py3.types.Struct, _typing.Hashable):
class __fbthrift_IsSet:
id: bool
password: bool
pass
id: Final[int] = ...
password: Final[str] = ...
def __init__(
self, *,
id: _typing.Optional[int]=None,
password: _typing.Optional[str]=None
) -> None: ...
def __call__(
self, *,
id: _typing.Union[int, __NotSet, None]=NOTSET,
password: _typing.Union[str, __NotSet, None]=NOTSET
) -> SecretStruct: ...
def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['SecretStruct'], bytes]]: ...
def __hash__(self) -> int: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __lt__(self, other: 'SecretStruct') -> bool: ...
def __gt__(self, other: 'SecretStruct') -> bool: ...
def __le__(self, other: 'SecretStruct') -> bool: ...
def __ge__(self, other: 'SecretStruct') -> bool: ...
| [
"[email protected]"
]
| |
f6bdb6fdae81f13cfe121cc6e8b2f81bffc9cc72 | 485cf3c70fcaa68689a2b690b6465f1d6bcf21bd | /Python3_Selenium3/第7章/7.28.py | 8d5897a67392f8b8f40cc6a250867a33283293b2 | []
| no_license | lxz0503/study_20190608 | 5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4 | 47c37798140883b8d6dc21ec5da5bc7a20988ce9 | refs/heads/master | 2022-12-23T17:23:45.039015 | 2021-06-23T14:50:19 | 2021-06-23T14:50:19 | 190,884,812 | 1 | 3 | null | 2022-12-15T23:17:33 | 2019-06-08T12:22:56 | Python | GB18030 | Python | false | false | 455 | py |
###
###配套视频已出版,学习有疑问联系作者qq:2574674466###
###
#coding=utf-8
dict_1 = {'Name': 'Jack','Age':18,'Score':100}
print("操作字典元素之前,遍历并打印字典元素如下:")
for (key,value) in dict_1.items():
print(key + ":" + str(value))
dict_1.clear()
print("操作字典元素之后,遍历并打印字典元素如下:")
print(dict_1)
for (key,value) in dict_1.items():
print(key + ":" + str(value))
| [
"[email protected]"
]
| |
f185fb7d2592d7b702fbb0aa041313972b43ce49 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/모듈과 패키지/외장함수_20200711174751.py | 99b2fc1648509015a8491be7758fc5ff48cd8b55 | []
| no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # Search for "list of python modules"
# Open the page called "Python Module Index".
# # glob : list folders/files under a path (like the Windows "dir" command)
# import glob
# print(glob.glob("*.py")) # all files with the .py extension
# os : basic features provided by the operating system
import os
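# A few other everyday os helpers (illustrative):
#   os.listdir(".")          # entries in the current directory
#   os.path.exists("a.txt")  # whether a path exists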
print(os.getcwd()) | [
"[email protected]"
]
| |
0bbdd4bf7a5f32254ed7f31f8c35606cae64ef68 | 3e5ecad4d2f681f2f4f749109cc99deea1209ea4 | /tf114/tf11_2_diabetes.py | 25d2c7c7cc73f3820f81ab3f4d6d2093ecf8625e | []
| no_license | SunghoonSeok/Study | f41ede390079037b2090e6df20e5fb38f2e59b8f | 50f02b9c9bac904cd4f6923b41efabe524ff3d8a | refs/heads/master | 2023-06-18T06:47:55.545323 | 2021-07-05T00:47:55 | 2021-07-05T00:47:55 | 324,866,762 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | from sklearn.datasets import load_diabetes
import tensorflow as tf
tf.compat.v1.set_random_seed(66)
dataset = load_diabetes()
x_data = dataset.data
y_data = dataset.target
y_data = y_data.reshape(-1,1)
print(x_data.shape, y_data.shape) # (442, 10) (442,1)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size=0.8, shuffle=True, random_state=66)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
x = tf.compat.v1.placeholder(tf.float32, shape=[None, 10])
y = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
y_true = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
y_pred = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.random.normal([10,1]), name='weight')
b = tf.Variable(tf.random.normal([1]), name='bias')
# hypothesis = x * w + b
hypothesis = tf.matmul(x, w) + b
cost = tf.reduce_mean(tf.square(hypothesis - y)) # loss='mse'
train = tf.train.AdamOptimizer(learning_rate=0.002).minimize(cost) # optimizer + train
from sklearn.metrics import r2_score
# r2 = r2_score(y_true, y_pred)
# A with statement can also be used so the session is closed automatically.
import numpy as np
with tf.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
for step in range(150001):
cost_val, w_val, b_val, hy_val, _ = sess.run([cost,w,b,hypothesis,train], feed_dict={x:x_train,y:y_train})
if step %20 == 0:
print(step, "loss :",cost_val) # epoch, loss
y_predict = sess.run([hypothesis], feed_dict={x:x_test,y:y_test})
y_predict = np.array(y_predict)
y_predict = y_predict.reshape(-1,1)
print(r2_score(y_test, y_predict))
# 0.5063167888110058
| [
"[email protected]"
]
| |
900a29135e6327fba64530bbf7efb62664e1e3e0 | d6f9856369de739eb1d48f36704032fc9d6ad279 | /01-spider/practice/seleniumJdGrandandProduct.py | 5ca58c43fb5dff43ab415094a4e3f08a04e7f0fa | []
| no_license | huchangchun/spider | 7fd1cfd1aced71887700d1a1db176b898ca75775 | 7349c91bc5abf4a633752cc7a33fe24756d2ac97 | refs/heads/master | 2020-03-13T06:54:04.316515 | 2019-12-20T02:08:45 | 2019-12-20T02:08:45 | 131,014,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,957 | py | #encoding=utf-8
from selenium.webdriver.support import ui
from selenium.webdriver import Chrome
import time,os
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from lxml import etree
import numpy as np
if os.path.exists("C:\\Users\zxy\AppData\Local\Google\Chrome\Application\chromedriver.exe"):
driver = Chrome("C:\\Users\zxy\AppData\Local\Google\Chrome\Application\chromedriver.exe")
else:
driver = Chrome("C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe")
"""
Environment requirements:
1. pip install selenium
2. chromedriver.exe must sit at the path passed to Chrome() above, and the downloaded
   build must match (or be updated for) the local Chrome version; check the version in Chrome.
Download mirror: http://npm.taobao.org/mirrors/chromedriver/
"""
def isNoneOrEmpty(s):
    # None and empty containers/strings count as empty; ints never do.
    # (Other types fall through and return None, which is falsy.)
    if s is None:
        return True
    if isinstance(s, (list, tuple, str, dict, set)):
        return len(s) == 0
    if isinstance(s, int):
        return False
def grabBrands(url):
goodsname = []
try:
driver.get(url)
mulitipchose = "//*[@id='J_selector']/div[1]/div/div[3]/a[2]"
more = "//*[@id='J_selector']/div[1]/div/div[3]/a[1]"
cancelBtn = "//*[@id='J_selector']/div[1]/div/div[2]/div[4]/a[2]"
element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, mulitipchose)))
element.click()
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, cancelBtn)))
page = driver.page_source
soup = BeautifulSoup(page,'html.parser')
data1 = soup.find('ul',{"class":"J_valueList v-fixed"})
datali =data1.find_all('li')
for i in datali:
goodsname.append(i.a.attrs['title'])
print("品牌数量:", len(goodsname))
except Exception as ex:
# 关闭当前标签,也可以使用quit()关闭浏览器
return None
return goodsname
def grabGoodsTypeWithClass(url):
goodsname=[]
goodshref=[]
try:
brower = webdriver.Chrome()
brower.get(url)
page = brower.page_source
soup = BeautifulSoup(page,'html.parser')
dataAll = soup.find_all(attrs={"class":"J_selectorLine s-category"})
        if isNoneOrEmpty(dataAll):
            return None, None
for i in range(len(dataAll)):
curdata = dataAll[i].find("ul",{"class":"J_valueList"})
datali = curdata.find_all('li')
for i in datali:
goodsname.append(i.a.attrs['title'])
goodshref.append(i.a.attrs['href'])
print("当前数量:", len(goodsname))
print(goodsname[:10])
except Exception as ex:
print(ex)
        # Close the current tab here if needed; quit() would close the whole browser.
        return None, None
return goodsname, goodshref
def runGrabBrands(goodClass: list, notallowlist=None):
url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
allgoods = []
allgoodsBrands=[]
for gcls in goodClass:
print("大类:", gcls)
flag = True
while flag:
curgoods, _ = grabGoodsTypeWithClass(url.format(gcls))
if not isNoneOrEmpty(curgoods):
allgoods.extend(curgoods)
print("当前总品类数:", len(allgoods))
flag = False
else:
print("{}获取异常,重试".format(gcls))
print("总品类数:", len(allgoods))
allgoods = list(set(allgoods))
allgoods = [g for g in allgoods if len(g) > 1]
print("去重后总品类数:", len(allgoods))
if notallowlist is not None:
allgoods = [g for g in allgoods if g not in notallowlist]
with open("{}.txt".format(",".join(goodClass)), mode='w', encoding='utf-8') as f:
f.write(",".join(allgoods))
print("前十个品类:", ",".join(allgoods[:10]))
for goodtype in allgoods:
print("获取品类品牌:{} ".format(goodtype))
flag = True
i= 0
while flag:
if isinstance(goodtype, list):
curgoodbrand = []
for gt in goodtype:
curgoodbrand.extend(grabBrands(url.format(gt)))
else:
curgoodbrand = grabBrands(url.format(goodtype))
if not isNoneOrEmpty(curgoodbrand):
print(curgoodbrand)
allgoodsBrands.extend(curgoodbrand)
print("当前总品牌数量:", len(allgoodsBrands))
flag = False
else:
print("{}获取异常,重试".format(goodtype))
goodtype = goodtype.split("/")
i += 1
if i == 3:
print("获取异常,重试{}次失败".format(i))
flag = True
print("总品牌数量:", len(allgoodsBrands))
print("去重后数量:", len(list(set(allgoodsBrands))))
if isNoneOrEmpty(allgoodsBrands):
print("数据为空")
return None
saveData("{}品牌.xlsx".format(",".join(goodClass)),list(set(allgoodsBrands)))
def getAliseFromBrand(brand):
    import re
    # Brand cells often look like "Name(Alias)" with full-width parentheses;
    # findall captures the alias, and re.sub strips it to leave the bare name.
    kuohaopattern = "((.*))"
curalias = re.findall(kuohaopattern, brand)
curbrand = re.sub(kuohaopattern,"", brand)
if len(curalias) > 0:
return curbrand,curalias[0]
else:
return curbrand,curbrand
def saveData(savefile, data):
brands, aliass = [],[]
import re
for b in data:
aliass.append(getAliseFromBrand(b)[1])
brands.append(getAliseFromBrand(b)[0])
assert len(brands) == len(aliass)
import pandas as pd
df = pd.DataFrame({"Brand": brands,"Alias": aliass})
df.to_excel(savefile, encoding='utf-8')
print("finnish")
def readBrandsFromXlsx(filepath, savefile):
import pandas as pd
df = pd.read_excel(filepath, encoding='utf-8')
brands = df['Brand'].tolist()
aliass = df['Alias'].tolist()
brandsdic ={}
for brand, alias in zip (brands, aliass):
if brand in brandsdic:
continue
else:
brandsdic[brand] = alias
df = pd.DataFrame({"Brand": list(brandsdic.keys()),"Alias": list(brandsdic.values())})
df.to_excel(savefile, encoding='utf-8')
def testGrabBrand():
good = '牙膏'
url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
curgoodbrand = grabBrands(url.format(good))
print(curgoodbrand)
saveData("{}品牌.xlsx".format(good), curgoodbrand)
def grabGoodTitlesWithGoodType(url):
try:
brower = webdriver.Chrome()
brower.get(url)
page = brower.page_source
selector = etree.HTML(page)
titlespath = "//*[@id='J_goodsList']/ul/li/div/div[3]/a/@title"
subtitlespath = "//*[@id='J_goodsList']/ul/li[{}]/div/div[3]/a/em/text()"
subtypespath = "//*[@id='J_goodsList']/ul/li[{}]/div/div[3]/a/em/font/text()"
totalpagepath ='//*[@id="J_bottomPage"]/span[2]/em[1]/b/text()'
nextpagebtnpath = '//*[@id="J_bottomPage"]/span[1]/a[9]'
totalpageCount = int(selector.xpath(totalpagepath)[0])
if totalpageCount > 13:
print("超过13页,截断")
totalpageCount = 13
titles = []
def gettitles(slt):
try:
curtitles = []
emselectors = slt.xpath(titlespath)
for i in range(len(emselectors)):
emtypes = slt.xpath(subtypespath.format(i))
emtitles = slt.xpath(subtitlespath.format(i))
if isinstance(emtypes,list):
if len(emtypes) == 0:
emtypes =['']
                    if len(emtitles) == 0:
continue
curtitle =''
emtypes = emtypes[::-1]
for i in range(len(emtitles)):
curtitle += emtitles[i]
if len(emtypes) > 0:
curtitle += emtypes.pop()
if len(emtypes) > 0:
for i in range(len(emtypes)):
curtitle += emtypes.pop()
curtitle = "".join(list(set(emtypes))) + "".join(emtitles)
if len(curtitle) !=0:
curtitles.append(curtitle)
return curtitles
except Exception as ex:
return []
curtitles = gettitles(selector)
if len(curtitles) != 0:
titles.extend(curtitles)
for i in range(totalpageCount - 1):
try:
brower.find_elements_by_xpath(nextpagebtnpath)[0].click()
WebDriverWait(brower, 10)#.until(EC.element_to_be_clickable((By.XPATH, nextpagebtnpath)))
page = brower.page_source
                selector = etree.HTML(page)
                curtitles = gettitles(selector)  # re-extract titles from the new page
                if len(curtitles) != 0:
                    titles.extend(curtitles)
except Exception as ex:
pass
print(len(titles))
print(len(list(set(titles))))
print(titles)
brower.quit()
except Exception as ex:
print(ex)
        # Close the current tab here if needed; quit() would close the whole browser.
        return []
return titles
def runGrabGoodTitlesWithTypes(goodtypes:dict):
savetitles = []
savetypes = []
url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
for gt, subgtlist in goodtypes.items():
for subgt in subgtlist:
curtitles = grabGoodTitlesWithGoodType(url.format(subgt))
if len(curtitles) == 0:
print("类型{} 没有取到数据".format(subgt))
else:
savetitles.extend(curtitles)
savetypes.extend([gt] * len(curtitles))
import pandas as pd
df = pd.DataFrame.from_dict({"title":savetitles, "goodtype":savetypes})
df.to_excel('商品标题分类信息.xlsx')
if __name__=="__main__":
#url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
#grabGoodTitlesWithGoodType(url.format('花鸟盆栽'))
runGrabGoodTitlesWithTypes({"花鸟宠物":['宠物衣服','花鸟盆栽','花鸟宠物','宠物零食','宠物生活用品']})
#readBrandsFromXlsx("美妆,洗护,护肤,彩妆,口腔洗护品牌.xlsx","美妆,洗护,护肤,彩妆,口腔洗护品牌new.xlsx")
#testGrabBrand()
#runGrabBrands(['美妆','洗护','护肤','彩妆','口腔洗护'])
#notallowlist =['宗教用品',
#'特殊商品',
#'历史',
#'礼品文具',
#'港台图书',
#'礼品定制',
#'古董文玩',
#'婚庆节庆',
#'创意礼品',
#'配件',
#'工艺礼品',
#'电子礼品',
#'挂件/摆件/把件',
#'婚庆饰品',
#'美妆礼品']
#runGrabBrands(['花鸟盆栽'])
#notallowlist=['避孕套',
#' 充气/仿真娃娃',
#'辅酶Q10',
#'仿真阳具',
#'震动棒',
#'其他',
#'飞机杯',
#' 男用延时',
#'其他情趣用品',
#'倒模']
#runGrabBrands(['保健品','滋补','营养品'])
#runGrabBrands(['日用百货'])
#pass
| [
"[email protected]"
]
| |
f451b1b4faea36c7f6d7ca4ceec46e4a325c2715 | 4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5 | /test/shop/ShopInfoModifyTest.py | bf78087f7fbec6a3ef4fc0ab81ee164682b8ea35 | []
| no_license | shijingyu/sunningAPI | 241f33b0660dc84635ce39688fed499f5c57a5da | 4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5 | refs/heads/master | 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2014-8-22
@author: suning
'''
import sys
sys.path.append("../../../api-sdk-python")
import suning.api as api
a=api.ShopInfoModifyRequest()
a.placard = "心心相印"
a.telphone = "010-11255555"
f = a.getResponse()
print(f)
| [
"[email protected]"
]
| |
d2a35ea2668ab07a1748e0d2a8759317926dfa88 | 02495eeb56c436d1dbf9f4700c43658d16ffe0ca | /03_P💀Spoopy/pylindrome/docker/app.py | 098301d96aed87ab4fa1b8e3ec47ee7f45351bbd | []
| no_license | ce8so9/csr-2020-tasks | 906a55c14bca0f7a14b228cbce08a38f7d2271eb | cd6ca7f98a40d5e7eb41c61f5b293537188b85c4 | refs/heads/master | 2023-01-12T09:33:02.928645 | 2020-11-10T16:19:30 | 2020-11-10T16:19:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/env python3
import subprocess
def sandbox(toexec):
    # Run the submitted code as the unprivileged "sandbox" user and capture its stdout.
    return subprocess.check_output(["sudo", "-u", "sandbox", "python3", "-c", toexec]).decode().strip()
try:
    code = input()[:100]
    # Strip comment and string markers so the palindrome check cannot be padded away.
    for bad in ['#', '"""', "'''"]:
        code = code.replace(bad, "")
    # The cleaned payload must read the same forwards and backwards.
    assert code == code[::-1]
    exec(sandbox(code))
except:
print(open(__file__,"r").read())
| [
"[email protected]"
]
| |
0c490d56b46e3f0a4b6cb6f26b399042af3e6b37 | b7f45072d056b80ed49e6bcde91877d8576e970d | /ImageJ/py/load_blobs.py | 3037ef28495247737fab6bf5bc930be4277273f8 | []
| no_license | jrminter/tips | 128a18ee55655a13085c174d532c77bcea412754 | f48f8b202f8bf9e36cb6d487a23208371c79718e | refs/heads/master | 2022-06-14T08:46:28.972743 | 2022-05-30T19:29:28 | 2022-05-30T19:29:28 | 11,463,325 | 5 | 8 | null | 2019-12-18T16:24:02 | 2013-07-17T00:16:43 | Jupyter Notebook | UTF-8 | Python | false | false | 137 | py | """
load_blobs.py
"""
from ij import IJ
IJ.run("Close All")
imp = IJ.openImage("http://imagej.nih.gov/ij/images/blobs.gif")
imp.show()
| [
"[email protected]"
]
| |
9634eb466219c63cc085cd1895ec57eb62ce0188 | 94ed98b2f4eec63be1510cc1555dad064bcc8f13 | /example/mypackage/gui.py | a7104897b98fd8b34cd2a7ddc4d9a617212b18c5 | [
"MIT"
]
| permissive | axju/setuptools_freeze | dae496e66e5c6dc5c3d28876a056c8ddd8b570d9 | c1d16bd714f5aec36ea07202f1a466eb0573d839 | refs/heads/master | 2020-07-24T05:43:06.920994 | 2019-09-11T13:32:18 | 2019-09-11T13:32:18 | 207,817,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | import random
from tkinter import Tk, Label, Button, Entry, StringVar, DISABLED, NORMAL, END, W, E
class ConfiguratorGUI:
def __init__(self, master):
self.master = master
master.title("Guessing Game")
self.secret_number = random.randint(1, 100)
self.guess = None
self.num_guesses = 0
self.message = "Guess a number from 1 to 100"
self.label_text = StringVar()
self.label_text.set(self.message)
self.label = Label(master, textvariable=self.label_text)
vcmd = master.register(self.validate) # we have to wrap the command
self.entry = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
self.guess_button = Button(master, text="Guess", command=self.guess_number)
self.reset_button = Button(master, text="Play again", command=self.reset, state=DISABLED)
self.label.grid(row=0, column=0, columnspan=2, sticky=W+E)
self.entry.grid(row=1, column=0, columnspan=2, sticky=W+E)
self.guess_button.grid(row=2, column=0)
self.reset_button.grid(row=2, column=1)
def validate(self, new_text):
if not new_text: # the field is being cleared
self.guess = None
return True
try:
guess = int(new_text)
if 1 <= guess <= 100:
self.guess = guess
return True
else:
return False
except ValueError:
return False
def guess_number(self):
self.num_guesses += 1
if self.guess is None:
self.message = "Guess a number from 1 to 100"
elif self.guess == self.secret_number:
suffix = '' if self.num_guesses == 1 else 'es'
self.message = "Congratulations! You guessed the number after %d guess%s." % (self.num_guesses, suffix)
self.guess_button.configure(state=DISABLED)
self.reset_button.configure(state=NORMAL)
elif self.guess < self.secret_number:
self.message = "Too low! Guess again!"
else:
self.message = "Too high! Guess again!"
self.label_text.set(self.message)
def reset(self):
self.entry.delete(0, END)
self.secret_number = random.randint(1, 100)
        self.guess = None
self.num_guesses = 0
self.message = "Guess a number from 1 to 100"
self.label_text.set(self.message)
self.guess_button.configure(state=NORMAL)
self.reset_button.configure(state=DISABLED)
def main():
root = Tk()
ConfiguratorGUI(root)
root.mainloop()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
7a9eacaaff1dee09c8f626968b2da5d9c9330251 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/airport-polygon/depositor.py | 901985aae38ae47c1dac4ee3d0ad64212ad37cc1 | []
| no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | import requests
r = requests.get("https://data.cityofnewyork.us/api/geospatial/xfhz-rhsk?method=export&format=GeoJSON")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/airport-polygon/data.geojson", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/airport-polygon/data.geojson"]
| [
"[email protected]"
]
| |
d996405520f5fadcbb45bb17b636f2011447af94 | f5b5a6e3f844d849a05ff56c497638e607f940e0 | /capitulo 05/05.02.py | 3c052b435e84ceae4eef942fd6fc518631fd4e89 | []
| no_license | alexrogeriodj/Caixa-Eletronico-em-Python | 9237fa2f7f8fab5f17b7dd008af215fb0aaed29f | 96b5238437c88e89aed7a7b9c34b303e1e7d61e5 | refs/heads/master | 2020-09-06T21:47:36.169855 | 2019-11-09T00:22:14 | 2019-11-09T00:22:14 | 220,563,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | ##############################################################################
# Part of the book "Introdução à Programação com Python"
# Author: Nilo Ney Coutinho Menezes
# Publisher: Novatec (c) 2010-2019
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# File: listagem3\capítulo 05\05.02.py
# Description:
##############################################################################
x = 1
print(x)
x = 2
print(x)
x = 3
print(x)
| [
"[email protected]"
]
| |
5a5e52126f3d65f4e181b73cf8ef52d1509c7bbe | 49800e971c605d74d0841a9bb07a618ad1fc6e49 | /web/apps/nosari/urls.py | d7af7bec90c2311f11dcb3cfe7f3bacf8d6b4a99 | []
| no_license | cerfelix/AntiSARI | ab0c9bd96c8044cd806d26db7b6eea67cf008f70 | 8a217390c367d2af65fd373cbf5794eaa841efea | refs/heads/master | 2020-12-22T10:12:25.454134 | 2020-01-29T09:50:13 | 2020-01-29T09:50:13 | 236,748,324 | 0 | 0 | null | 2020-01-28T14:02:09 | 2020-01-28T14:02:08 | null | UTF-8 | Python | false | false | 235 | py | # _*_coding:utf-8_*_
"""
@ProjectName: AntiSARI
@Author: Javen Yan
@File: urls.py
@Software: PyCharm
@Time : 2020/1/28 1:58 PM
"""
from web.apps.nosari.controller import NoSariHandler
urlpatterns = [
(r'', NoSariHandler)
]
| [
"[email protected]"
]
| |
70a6b84238efa4e023179a2ad24b371742532fce | fbb141c9b99c4c08ce2c0acfe13630d694d98744 | /7-stack/4.10-shu-zu-zhong-de-ni-xu-dui-lcof.py | f04c9947f38d38bf2a749719998cd041df3b5b3b | []
| no_license | huixian3/algorithm017 | 1534bc8a0364595b056e0f346cfe9fa8b8fee3bd | f43c99dc7810de863f8cd79115e272ac65ce9257 | refs/heads/master | 2023-04-02T07:10:03.670003 | 2021-04-13T14:38:36 | 2021-04-13T14:38:36 | 297,989,771 | 0 | 0 | null | 2020-09-23T14:05:41 | 2020-09-23T14:05:40 | null | UTF-8 | Python | false | false | 1,473 | py | '''
Two numbers in an array form a reverse pair (an inversion) when the earlier number
is greater than the later one. Given an array, count the total number of inversions.
'''
# Merge sort doubles as inversion counting (divide and conquer).
# Count during the merge step: whenever nums[j] from the right half is placed before
# the remaining left-half elements, each of those left elements forms one inversion with it.
class Solution(object):
def reversePairs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
self.cnt = 0
def merge(nums, start, mid, end, temp):
i, j = start, mid + 1
while i <= mid and j <= end:
if nums[i] <= nums[j]:
temp.append(nums[i])
i += 1
else:
self.cnt += mid - i + 1
temp.append(nums[j])
j += 1
while i <= mid:
temp.append(nums[i])
i += 1
while j <= end:
temp.append(nums[j])
j += 1
for i in range(len(temp)):
nums[start + i] = temp[i]
temp.clear()
def mergeSort(nums, start, end, temp):
if start >= end: return
mid = (start + end) >> 1
mergeSort(nums, start, mid, temp)
mergeSort(nums, mid + 1, end, temp)
merge(nums, start, mid, end, temp)
mergeSort(nums, 0, len(nums) - 1, [])
return self.cnt
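# A quick sanity check (illustrative; the expected count is easy to verify by hand):
if __name__ == '__main__':
    print(Solution().reversePairs([7, 5, 6, 4]))  # -> 5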
| [
"[email protected]"
]
| |
8a0079dd597dba447df0d9aed6437df677f2accb | 710026f64d3a23913ae71d2300147b371f5cb75b | /gammapy/data/tests/test_all.py | 138034335de153523316b69996357d13979c5972 | []
| no_license | Cadair/gammapy | 557c01e33d93fe6cc2daaac35b53590d33e31fbc | 19f4fdd299b8c3495c732fc412f5d18cb9df3590 | refs/heads/master | 2020-12-13T21:52:37.790005 | 2014-02-20T15:15:10 | 2014-02-20T15:15:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
from numpy.testing import assert_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
from .. import poisson_stats_image
def test_poisson_stats_image():
"""Get the data file via the gammapy.data.poisson_stats_image function"""
data = poisson_stats_image()
assert data.sum() == 40896
def test_poisson_stats_image_direct():
"""Get the data file directly via get_pkg_data_filename"""
filename = get_pkg_data_filename('../poisson_stats_image/counts.fits.gz')
data = fits.getdata(filename)
assert data.sum() == 40896
def test_poisson_stats_extra_info():
images = poisson_stats_image(extra_info=True)
refs = dict(counts=40896, model=41000, source=1000, background=40000)
for name, expected in refs.items():
assert_allclose(images[name].sum(), expected) | [
"[email protected]"
]
| |
155abf1cb8c24811bbe7251caef3d6eb6e1d3629 | 617ff229b63165368e32a303488b29c738a5378a | /src/bad smell/plot_smell_fft.py | c75518ce5eedbec995ac26155c38b340be32e0b4 | []
| no_license | dimpisingh/e-dom | a1ae76229a31c0a5dcc725a80e7a741be660a0da | a820874545e97ec10580db6dd11e35c7eec65abc | refs/heads/master | 2022-05-13T07:27:31.180506 | 2019-04-10T13:53:26 | 2019-04-10T13:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,425 | py | import os
import plotly
# plotly.tools.set_credentials_file(username='dichen001', api_key='czrCH0mQHmX5HLXSHBqS')
plotly.tools.set_credentials_file(username='amritbhanu', api_key='cuaXxPfbSxptk2irXf7P')
import plotly.plotly as py
import plotly.graph_objs as go
import cPickle
import pickle
cwd = os.getcwd()
data_path = os.path.join(cwd,"..","..","data", "smell")
details_path = os.path.join(data_path, 'smell_details_38-MDLP.pkl')
details = cPickle.load(open(details_path, 'rb'))
with open(os.path.join(data_path, 'dodge.pickle'), 'rb') as handle:
dodge = pickle.load(handle)
n1, n2, n3, n4 = "DataClass", "FeatureEnvy", "GodClass", "LongMethod"
t1, t2, t3, t4 = "DataClass", "FeatureEnvy", "GodClass", "LongMethod"
classifiers = ["DT", "RF", "LR", "kNN", "FFT-Dist2Heaven", "Dodge_0.2_30"]
colors = ["#AED6F1", "#5DADE2", "#2874A6", "#1B4F72", "#000000", "#FF5722"]#, "#E53935"]
data = []
l = len(details[n1][classifiers[0]]['dist2heaven'])
x = [t1] * l + [t2] * l + [t3] * l + [t4] * l
x1 = [t1] * 21 + [t2] * 21 + [t3] * 21 + [t4] * 21
for i, clf in enumerate(classifiers):
if clf != "Dodge_0.2_30":
tmp_bar = go.Box(
y=sorted(details[n1][clf]['dist2heaven']) +
sorted(details[n2][clf]['dist2heaven']) +
sorted(details[n3][clf]['dist2heaven']) +
sorted(details[n4][clf]['dist2heaven']),
x=x,
name=clf,
marker=dict(
color=colors[i]
)
)
else:
tmp_bar = go.Box(
y=sorted(dodge[n1]) +
sorted(dodge[n2]) +
sorted(dodge[n3]) +
sorted(dodge[n4]),
x=x1,
name=clf,
marker=dict(
color=colors[i]
)
)
data.append(tmp_bar)
layout = go.Layout(
autosize=True,
title="Bad Smell - 25 Times",
font=dict(size=18),
yaxis=dict(
title='Distance to Heaven',
zeroline=False,
titlefont=dict(size=20),
tickfont=dict(size=24),
automargin=True,
),
xaxis=dict(
title='Bad Smell Dataset (very small)',
zeroline=False,
titlefont=dict(size=24),
tickfont=dict(size=20),
tickangle=-45,
automargin=True,
),
boxmode='group',
legend=dict(font=dict(size=20)
)
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename="Smell - 25 Times")
| [
"[email protected]"
]
| |
6c6d5f913ad89423170d7e4e728f2d9b67184ad4 | 5bb8b4c7faeebd16da16ecbcd4a98aabaf688e8f | /data_tools/walker/src-cikm/build_graph2/citations.py | 2438338b186b181a26af7fd8e16ccbc3d15dfd74 | []
| no_license | xiaoqinzhe/vrdetection | 014fc2b61c9b30dd2699fdba41089b18b7f060be | 604a812a21a98d72ba8e23a716eb72153bdaa7c4 | refs/heads/master | 2023-07-04T07:44:12.141404 | 2021-08-01T06:21:17 | 2021-08-01T06:21:17 | 150,063,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | #coding:utf-8
import json
file_path = '/mnt/hdd2/dblp/dblp_ref.json'
citation_file_path = '/mnt/hdd2/cikm/citation.txt'
with open(file_path) as ifile, open(citation_file_path, 'w') as ofile:
for line in ifile:
paper = json.loads(line)
if 'references' not in paper:
continue
output_papers = [paper['_id']]
output_papers += paper['references']
ofile.write('{}\n'.format(' '.join(output_papers)))
| [
"[email protected]"
]
| |
d90f4c250ad6540185c4685ac49cf4e5df824ab7 | b4f661f1153637d9cfec18e4cf56b64582c31385 | /src/Python/304.二维区域和检索-矩阵不可变.py | fd58f373a91c848ff44f0bd8495b1cc29de69c8a | []
| no_license | Icedomain/LeetCode | 12dd24bbe2d7aba1f6ebe61bffe4c5e6284fbd06 | 4bc8e41499b9c884d64b5a44fe783fdb7030676e | refs/heads/master | 2021-02-15T15:12:15.009790 | 2020-09-22T11:37:59 | 2020-09-22T11:37:59 | 244,909,740 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | #
# @lc app=leetcode.cn id=304 lang=python3
#
# [304] Range Sum Query 2D - Immutable
#
from typing import List
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
if not matrix:
return
n, m = len(matrix), len(matrix[0])
self.sums = [ [0 for j in range(m+1)] for i in range(n+1) ]
for i in range(1, n+1):
for j in range(1, m+1):
self.sums[i][j] = matrix[i-1][j-1] + self.sums[i][j-1] + self.sums[i-1][j] - self.sums[i-1][j-1]
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
row1, col1, row2, col2 = row1+1, col1+1, row2+1, col2+1
return self.sums[row2][col2] - self.sums[row2][col1-1] - self.sums[row1-1][col2] + self.sums[row1-1][col1-1]
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
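# A small worked example (illustrative), exercising the prefix-sum lookup above:
if __name__ == '__main__':
    nm = NumMatrix([[3, 0, 1], [5, 6, 3], [1, 2, 0]])
    print(nm.sumRegion(0, 0, 1, 1))  # 3 + 0 + 5 + 6 = 14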
| [
"[email protected]"
]
| |
849d980540c6fa535a1182553e06d3b90c074b5a | 711c11d0111a40055ba110e7089a231c2ba42b8e | /toontown/toon/DistributedToonUD.py | b4b001783dd576ab652bc7227ed27b1ab9f1b375 | [
"Apache-2.0"
]
| permissive | DeadMemez/ProjectAltis-OldAcornAcres | 03c8dc912ecccae8456d89790f6b332547b75cc3 | e8e0087389933795973e566782affcaec65a2980 | refs/heads/master | 2021-01-19T13:59:07.234192 | 2017-08-20T14:41:45 | 2017-08-20T14:41:45 | 100,869,782 | 0 | 2 | null | 2017-08-20T15:14:35 | 2017-08-20T15:14:35 | null | UTF-8 | Python | false | false | 10,675 | py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
from otp.avatar import DistributedAvatarUD
class DistributedToonUD(DistributedAvatarUD.DistributedAvatarUD):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedToonUD")
def __init__(self, air):
DistributedAvatarUD.DistributedAvatarUD.__init__(self, air)
self.air = air
def setDNAString(self, todo0):
pass
def setGM(self, todo0):
pass
def setMaxBankMoney(self, todo0):
pass
def setBankMoney(self, todo0):
pass
def setMoney(self, todo0):
pass
def setMaxMoney(self, todo0):
pass
def setMaxHp(self, todo0):
pass
def setHp(self, todo0):
pass
def toonUp(self, todo0):
pass
def takeDamage(self, todo0):
pass
def setBattleId(self, todo0):
pass
def setExperience(self, todo0):
pass
def setMaxCarry(self, todo0):
pass
def setTrackAccess(self, todo0):
pass
def setTrackProgress(self, todo0, todo1):
pass
def setTrackBonusLevel(self, todo0):
pass
def setInventory(self, todo0):
pass
def setMaxNPCFriends(self, todo0):
pass
def setNPCFriendsDict(self, todo0):
pass
def setDefaultShard(self, todo0):
pass
def setDefaultZone(self, todo0):
pass
def setShtickerBook(self, todo0):
pass
def setZonesVisited(self, todo0):
pass
def setHoodsVisited(self, todo0):
pass
def setInterface(self, todo0):
pass
def setLastHood(self, todo0):
pass
def setTutorialAck(self, todo0):
pass
def setMaxClothes(self, todo0):
pass
def setClothesTopsList(self, todo0):
pass
def setClothesBottomsList(self, todo0):
pass
def setMaxAccessories(self, todo0):
pass
def setHatList(self, todo0):
pass
def setGlassesList(self, todo0):
pass
def setBackpackList(self, todo0):
pass
def setShoesList(self, todo0):
pass
def setHat(self, todo0, todo1, todo2):
pass
def setGlasses(self, todo0, todo1, todo2):
pass
def setBackpack(self, todo0, todo1, todo2):
pass
def setShoes(self, todo0, todo1, todo2):
pass
def setGardenSpecials(self, todo0):
pass
def setEarnedExperience(self, todo0):
pass
def setTunnelIn(self, todo0, todo1, todo2, todo3, todo4, todo5):
pass
def setTunnelOut(self, todo0, todo1, todo2, todo3, todo4, todo5, todo6):
pass
def setAnimState(self, todo0, todo1, todo2):
pass
def setEmoteState(self, todo0, todo1, todo2):
pass
def setEmoteAccess(self, todo0):
pass
def setCustomMessages(self, todo0):
pass
def setSleepAutoReply(self, todo0):
pass
def setResistanceMessages(self, todo0):
pass
def setPetTrickPhrases(self, todo0):
pass
def setCatalogSchedule(self, todo0, todo1):
pass
def setCatalog(self, todo0, todo1, todo2):
pass
def setMailboxContents(self, todo0):
pass
def setDeliverySchedule(self, todo0):
pass
def setGiftSchedule(self, todo0):
pass
def setAwardMailboxContents(self, todo0):
pass
def setAwardSchedule(self, todo0):
pass
def setAwardNotify(self, todo0):
pass
def setCatalogNotify(self, todo0, todo1):
pass
def playSplashEffect(self, todo0, todo1, todo2):
pass
def setWhisperSCToontaskFrom(self, todo0, todo1, todo2, todo3, todo4):
pass
def setSCToontask(self, todo0, todo1, todo2, todo3):
pass
def reqSCResistance(self, todo0, todo1):
pass
def setSCResistance(self, todo0, todo1):
pass
def setSpeedChatStyleIndex(self, todo0):
pass
def setTrophyScore(self, todo0):
pass
def setTeleportAccess(self, todo0):
pass
def checkTeleportAccess(self, todo0):
pass
def setScavengerHunt(self, todo0):
pass
def battleSOS(self, todo0):
pass
def teleportQuery(self, todo0):
pass
def teleportResponse(self, todo0, todo1, todo2, todo3, todo4):
pass
def teleportResponseToAI(self, todo0, todo1, todo2, todo3, todo4, todo5):
pass
def teleportGiveup(self, todo0):
pass
def teleportGreeting(self, todo0):
pass
def setCogStatus(self, todo0):
pass
def setCogCount(self, todo0):
pass
def setCogRadar(self, todo0):
pass
def setBuildingRadar(self, todo0):
pass
def setCogLevels(self, todo0):
pass
def setCogTypes(self, todo0):
pass
def setCogParts(self, todo0):
pass
def setCogMerits(self, todo0):
pass
def setCogIndex(self, todo0):
pass
def setDisguisePageFlag(self, todo0):
pass
def setSosPageFlag(self, todo0):
pass
def setHouseId(self, todo0):
pass
def setQuests(self, todo0):
pass
def setQuestHistory(self, todo0):
pass
def setRewardHistory(self, todo0, todo1):
pass
def setQuestCarryLimit(self, todo0):
pass
def requestDeleteQuest(self, todo0):
pass
def setCheesyEffect(self, todo0, todo1, todo2):
pass
def setCheesyEffects(self, todo0):
pass
def setGhostMode(self, todo0):
pass
def setPosIndex(self, todo0):
pass
def setFishCollection(self, todo0, todo1, todo2):
pass
def setMaxFishTank(self, todo0):
pass
def setFishTank(self, todo0, todo1, todo2):
pass
def setFishingRod(self, todo0):
pass
def setFishingTrophies(self, todo0):
pass
def setFlowerCollection(self, todo0, todo1):
pass
def setFlowerBasket(self, todo0, todo1):
pass
def setMaxFlowerBasket(self, todo0):
pass
def setGardenTrophies(self, todo0):
pass
def setShovel(self, todo0):
pass
def setShovelSkill(self, todo0):
pass
def setWateringCan(self, todo0):
pass
def setWateringCanSkill(self, todo0):
pass
def promoteShovel(self, todo0):
pass
def promoteWateringCan(self, todo0):
pass
def reactivateWater(self):
pass
def presentPie(self, todo0, todo1, todo2, todo3, todo4, todo5, todo6):
pass
def tossPie(self, todo0, todo1, todo2, todo3, todo4, todo5, todo6, todo7, todo8):
pass
def pieSplat(self, todo0, todo1, todo2, todo3, todo4, todo5):
pass
def setPieType(self, todo0):
pass
def setNumPies(self, todo0):
pass
def catalogGenClothes(self, todo0):
pass
def catalogGenAccessories(self, todo0):
pass
def setPetId(self, todo0):
pass
def setPetMovie(self, todo0, todo1):
pass
def setPetTutorialDone(self, todo0):
pass
def setFishBingoTutorialDone(self, todo0):
pass
def setFishBingoMarkTutorialDone(self, todo0):
pass
def setKartBodyType(self, todo0):
pass
def setKartBodyColor(self, todo0):
pass
def setKartAccessoryColor(self, todo0):
pass
def setKartEngineBlockType(self, todo0):
pass
def setKartSpoilerType(self, todo0):
pass
def setKartFrontWheelWellType(self, todo0):
pass
def setKartBackWheelWellType(self, todo0):
pass
def setKartRimType(self, todo0):
pass
def setKartDecalType(self, todo0):
pass
def updateKartDNAField(self, todo0, todo1):
pass
def addOwnedAccessory(self, todo0):
pass
def removeOwnedAccessory(self, todo0):
pass
def setTickets(self, todo0):
pass
def setKartingHistory(self, todo0):
pass
def setKartingTrophies(self, todo0):
pass
def setKartingPersonalBest(self, todo0):
pass
def setKartingPersonalBest2(self, todo0):
pass
def setKartAccessoriesOwned(self, todo0):
pass
def setCurrentKart(self, todo0):
pass
def squish(self, todo0):
pass
def announceBingo(self):
pass
def trickOrTreatTargetMet(self, todo0):
pass
def trickOrTreatMilestoneMet(self):
pass
def winterCarolingTargetMet(self, todo0):
pass
def setCogSummonsEarned(self, todo0):
pass
def reqCogSummons(self, todo0, todo1):
pass
def cogSummonsResponse(self, todo0, todo1, todo2):
pass
def reqUseSpecial(self, todo0):
pass
def useSpecialResponse(self, todo0):
pass
def setGardenStarted(self, todo0):
pass
def sendToGolfCourse(self, todo0):
pass
def setGolfHistory(self, todo0):
pass
def setPackedGolfHoleBest(self, todo0):
pass
def setGolfCourseBest(self, todo0):
pass
def setUnlimitedSwing(self, todo0):
pass
def logSuspiciousEvent(self, todo0):
pass
def logMessage(self, todo0):
pass
def forceLogoutWithNotify(self):
pass
def setPinkSlips(self, todo0):
pass
def setNametagStyle(self, todo0):
pass
def setNametagStyles(self, todo):
pass
def setFishingRods(self, rods):
pass
def setMail(self, todo0):
pass
def setNumMailItems(self, todo0):
pass
def setSimpleMailNotify(self, todo0):
pass
def setInvites(self, todo0):
pass
def setPartiesInvitedTo(self, todo0):
pass
def setHostedParties(self, todo0):
pass
def setPartyReplies(self, todo0):
pass
def updateInvite(self, todo0, todo1):
pass
def updateReply(self, todo0, todo1, todo2):
pass
def setPartyCanStart(self, todo0):
pass
def setPartyStatus(self, todo0, todo1):
pass
def announcePartyStarted(self, todo0):
pass
def setNeverStartedPartyRefunded(self, todo0, todo1, todo2):
pass
def setModuleInfo(self, todo0):
pass
def setDISLname(self, todo0):
pass
def setDISLid(self, todo0):
pass
def flagAv(self, todo0, todo1, todo2):
pass
def setAchievements(self, achievements):
pass
def setTrueFriends(self, trueFriends):
pass
| [
"[email protected]"
]
| |
17c36a85b75b51e756e29a8ead5b24a5fa4896ea | 89e6c3548fbdd06178aae712de1ff19004bc2faa | /my_dulwich/contrib/test_swift_smoke.py | 222a609b4aa4864ef99696425775c51d3982d551 | []
| no_license | bhgv/ublog_git.hg.repo-django.python-engine | a3f3cdcbacc95ec98f022f9719d3b300dd6541d4 | 74cdae100bff5e8ab8fb9c3e8ba95623333c2d43 | refs/heads/master | 2020-03-23T01:04:07.431749 | 2018-07-25T12:59:21 | 2018-07-25T12:59:21 | 140,899,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,075 | py | # test_smoke.py -- Functional tests for the Swift backend.
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Fabien Boucher <[email protected]>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Start functional tests
A Swift installation must be available before
starting those tests. The account and authentication method used
during this functional tests must be changed in the configuration file
passed as environment variable.
The container used to create a fake repository is defined
in cls.fakerepo and will be deleted after the tests.
DULWICH_SWIFT_CFG=/tmp/conf.cfg PYTHONPATH=. python -m unittest \
dulwich.tests_swift.test_smoke
"""
import os
import unittest
import tempfile
import shutil
import gevent
from gevent import monkey
monkey.patch_all()
from my_dulwich import ( # noqa:E402
server,
repo,
index,
client,
objects,
)
from my_dulwich.contrib import swift # noqa:E402
class DulwichServer():
"""Start the TCPGitServer with Swift backend
"""
def __init__(self, backend, port):
self.port = port
self.backend = backend
def run(self):
self.server = server.TCPGitServer(self.backend,
'localhost',
port=self.port)
self.job = gevent.spawn(self.server.serve_forever)
def stop(self):
self.server.shutdown()
gevent.joinall((self.job,))
class SwiftSystemBackend(server.Backend):
def open_repository(self, path):
return swift.SwiftRepo(path, conf=swift.load_conf())
class SwiftRepoSmokeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.backend = SwiftSystemBackend()
cls.port = 9148
cls.server_address = 'localhost'
cls.fakerepo = 'fakerepo'
cls.th_server = DulwichServer(cls.backend, cls.port)
cls.th_server.run()
cls.conf = swift.load_conf()
@classmethod
def tearDownClass(cls):
cls.th_server.stop()
def setUp(self):
self.scon = swift.SwiftConnector(self.fakerepo, self.conf)
if self.scon.test_root_exists():
try:
self.scon.del_root()
except swift.SwiftException:
pass
self.temp_d = tempfile.mkdtemp()
if os.path.isdir(self.temp_d):
shutil.rmtree(self.temp_d)
def tearDown(self):
if self.scon.test_root_exists():
try:
self.scon.del_root()
except swift.SwiftException:
pass
if os.path.isdir(self.temp_d):
shutil.rmtree(self.temp_d)
def test_init_bare(self):
swift.SwiftRepo.init_bare(self.scon, self.conf)
self.assertTrue(self.scon.test_root_exists())
obj = self.scon.get_container_objects()
filtered = [o for o in obj if o['name'] == 'info/refs'
or o['name'] == 'objects/pack']
self.assertEqual(len(filtered), 2)
def test_clone_bare(self):
local_repo = repo.Repo.init(self.temp_d, mkdir=True)
swift.SwiftRepo.init_bare(self.scon, self.conf)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
remote_refs = tcp_client.fetch(self.fakerepo, local_repo)
# The remote repo is empty (no refs retreived)
self.assertEqual(remote_refs, None)
def test_push_commit(self):
def determine_wants(*args):
return {"refs/heads/master": local_repo.refs["HEAD"]}
local_repo = repo.Repo.init(self.temp_d, mkdir=True)
# Nothing in the staging area
local_repo.do_commit('Test commit', 'fbo@localhost')
sha = local_repo.refs.read_loose_ref('refs/heads/master')
swift.SwiftRepo.init_bare(self.scon, self.conf)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
tcp_client.send_pack(self.fakerepo,
determine_wants,
local_repo.object_store.generate_pack_data)
swift_repo = swift.SwiftRepo("fakerepo", self.conf)
remote_sha = swift_repo.refs.read_loose_ref('refs/heads/master')
self.assertEqual(sha, remote_sha)
def test_push_branch(self):
def determine_wants(*args):
return {"refs/heads/mybranch":
local_repo.refs["refs/heads/mybranch"]}
local_repo = repo.Repo.init(self.temp_d, mkdir=True)
# Nothing in the staging area
local_repo.do_commit('Test commit', 'fbo@localhost',
ref='refs/heads/mybranch')
sha = local_repo.refs.read_loose_ref('refs/heads/mybranch')
swift.SwiftRepo.init_bare(self.scon, self.conf)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
tcp_client.send_pack("/fakerepo",
determine_wants,
local_repo.object_store.generate_pack_data)
swift_repo = swift.SwiftRepo(self.fakerepo, self.conf)
remote_sha = swift_repo.refs.read_loose_ref('refs/heads/mybranch')
self.assertEqual(sha, remote_sha)
def test_push_multiple_branch(self):
def determine_wants(*args):
return {"refs/heads/mybranch":
local_repo.refs["refs/heads/mybranch"],
"refs/heads/master":
local_repo.refs["refs/heads/master"],
"refs/heads/pullr-108":
local_repo.refs["refs/heads/pullr-108"]}
local_repo = repo.Repo.init(self.temp_d, mkdir=True)
# Nothing in the staging area
local_shas = {}
remote_shas = {}
for branch in ('master', 'mybranch', 'pullr-108'):
local_shas[branch] = local_repo.do_commit(
'Test commit %s' % branch, 'fbo@localhost',
ref='refs/heads/%s' % branch)
swift.SwiftRepo.init_bare(self.scon, self.conf)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
tcp_client.send_pack(self.fakerepo,
determine_wants,
local_repo.object_store.generate_pack_data)
swift_repo = swift.SwiftRepo("fakerepo", self.conf)
for branch in ('master', 'mybranch', 'pullr-108'):
remote_shas[branch] = swift_repo.refs.read_loose_ref(
'refs/heads/%s' % branch)
self.assertDictEqual(local_shas, remote_shas)
def test_push_data_branch(self):
def determine_wants(*args):
return {"refs/heads/master": local_repo.refs["HEAD"]}
local_repo = repo.Repo.init(self.temp_d, mkdir=True)
os.mkdir(os.path.join(self.temp_d, "dir"))
files = ('testfile', 'testfile2', 'dir/testfile3')
i = 0
for f in files:
open(os.path.join(self.temp_d, f), 'w').write("DATA %s" % i)
i += 1
local_repo.stage(files)
local_repo.do_commit('Test commit', 'fbo@localhost',
ref='refs/heads/master')
swift.SwiftRepo.init_bare(self.scon, self.conf)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
tcp_client.send_pack(self.fakerepo,
determine_wants,
local_repo.object_store.generate_pack_data)
swift_repo = swift.SwiftRepo("fakerepo", self.conf)
commit_sha = swift_repo.refs.read_loose_ref('refs/heads/master')
otype, data = swift_repo.object_store.get_raw(commit_sha)
commit = objects.ShaFile.from_raw_string(otype, data)
otype, data = swift_repo.object_store.get_raw(commit._tree)
tree = objects.ShaFile.from_raw_string(otype, data)
objs = tree.items()
objs_ = []
for tree_entry in objs:
objs_.append(swift_repo.object_store.get_raw(tree_entry.sha))
# Blob
self.assertEqual(objs_[1][1], 'DATA 0')
self.assertEqual(objs_[2][1], 'DATA 1')
# Tree
self.assertEqual(objs_[0][0], 2)
def test_clone_then_push_data(self):
self.test_push_data_branch()
shutil.rmtree(self.temp_d)
local_repo = repo.Repo.init(self.temp_d, mkdir=True)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
remote_refs = tcp_client.fetch(self.fakerepo, local_repo)
files = (os.path.join(self.temp_d, 'testfile'),
os.path.join(self.temp_d, 'testfile2'))
local_repo["HEAD"] = remote_refs["refs/heads/master"]
indexfile = local_repo.index_path()
tree = local_repo["HEAD"].tree
index.build_index_from_tree(local_repo.path, indexfile,
local_repo.object_store, tree)
for f in files:
self.assertEqual(os.path.isfile(f), True)
def determine_wants(*args):
return {"refs/heads/master": local_repo.refs["HEAD"]}
os.mkdir(os.path.join(self.temp_d, "test"))
files = ('testfile11', 'testfile22', 'test/testfile33')
i = 0
for f in files:
open(os.path.join(self.temp_d, f), 'w').write("DATA %s" % i)
i += 1
local_repo.stage(files)
local_repo.do_commit('Test commit', 'fbo@localhost',
ref='refs/heads/master')
tcp_client.send_pack("/fakerepo",
determine_wants,
local_repo.object_store.generate_pack_data)
def test_push_remove_branch(self):
def determine_wants(*args):
return {"refs/heads/pullr-108": objects.ZERO_SHA,
"refs/heads/master":
local_repo.refs['refs/heads/master'],
"refs/heads/mybranch":
local_repo.refs['refs/heads/mybranch'],
}
self.test_push_multiple_branch()
local_repo = repo.Repo(self.temp_d)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
tcp_client.send_pack(self.fakerepo,
determine_wants,
local_repo.object_store.generate_pack_data)
swift_repo = swift.SwiftRepo("fakerepo", self.conf)
self.assertNotIn('refs/heads/pullr-108', swift_repo.refs.allkeys())
def test_push_annotated_tag(self):
def determine_wants(*args):
return {"refs/heads/master": local_repo.refs["HEAD"],
"refs/tags/v1.0": local_repo.refs["refs/tags/v1.0"]}
local_repo = repo.Repo.init(self.temp_d, mkdir=True)
# Nothing in the staging area
sha = local_repo.do_commit('Test commit', 'fbo@localhost')
otype, data = local_repo.object_store.get_raw(sha)
commit = objects.ShaFile.from_raw_string(otype, data)
tag = objects.Tag()
tag.tagger = "fbo@localhost"
tag.message = "Annotated tag"
tag.tag_timezone = objects.parse_timezone('-0200')[0]
tag.tag_time = commit.author_time
tag.object = (objects.Commit, commit.id)
tag.name = "v0.1"
local_repo.object_store.add_object(tag)
local_repo.refs['refs/tags/v1.0'] = tag.id
swift.SwiftRepo.init_bare(self.scon, self.conf)
tcp_client = client.TCPGitClient(self.server_address,
port=self.port)
tcp_client.send_pack(self.fakerepo,
determine_wants,
local_repo.object_store.generate_pack_data)
swift_repo = swift.SwiftRepo(self.fakerepo, self.conf)
tag_sha = swift_repo.refs.read_loose_ref('refs/tags/v1.0')
otype, data = swift_repo.object_store.get_raw(tag_sha)
rtag = objects.ShaFile.from_raw_string(otype, data)
self.assertEqual(rtag.object[1], commit.id)
self.assertEqual(rtag.id, tag.id)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
b04f5902369c128c688593ca330bb43b55ffa29c | a8ed252f3b76a8d134f026ccf0204c5e5e918edb | /apps/common/views.py | 5651d62f133a31899ed83656bb5d35032146918d | [
"MIT"
]
| permissive | F483/bitcoin-bounties.com | a8c84bfe61df25bae93f1bfd3c055754414cbe27 | 64a4a973fa38a4fb54178d855c1b82ec18799628 | refs/heads/master | 2020-04-25T23:19:32.859170 | 2014-11-22T15:40:48 | 2014-11-22T15:40:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Fabian Barkhau <[email protected]>
# License: MIT (see LICENSE.TXT file)
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from apps.common.utils.templates import render_response
@require_http_methods(['GET'])
def render_template(request, template, context=None):
return render_response(request, template, context and context or {})
@require_http_methods(['GET'])
def redirect_to(request, url):
return HttpResponseRedirect(url)
| [
"[email protected]"
]
| |
e66adf2a6d1f32f1467ae3ff1e1bdc2c509baa2b | 33fa46042e7decb01008b73202e5d24ce6bad03a | /config/settings/test.py | ece2ef2fbcdf6949a10c811059a6d1e4cacc7b42 | [
"MIT"
]
| permissive | rlaneyjr/project_pawz | 2d2ef8ef8a801e788c139a35bf82d72aafac8f69 | 27f316ef35968ed1319ec0585a050ebed795763a | refs/heads/master | 2022-12-05T11:39:04.384922 | 2019-05-28T22:24:24 | 2019-05-28T22:24:24 | 185,061,794 | 0 | 0 | MIT | 2022-12-03T08:21:14 | 2019-05-05T17:28:47 | JavaScript | UTF-8 | Python | false | false | 2,024 | py | """
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY", default="xCRogvYltWv6xc9QaA51CNCNXySMe9Oq1PY8x0avsZU15HEZq9kpa2aTphciScG0")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
| [
"[email protected]"
]
| |
abd3bcdbedfbf53aa74ec49c4c5efae200ede1c3 | 536656cd89e4fa3a92b5dcab28657d60d1d244bd | /tools/perf/core/results_processor/command_line_unittest.py | 5dd1f9abc72545c59b4f8dfebf02d90dec2e566e | [
"Zlib",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"MIT",
"APSL-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
]
| permissive | ECS-251-W2020/chromium | 79caebf50443f297557d9510620bf8d44a68399a | ac814e85cb870a6b569e184c7a60a70ff3cb19f9 | refs/heads/master | 2022-08-19T17:42:46.887573 | 2020-03-18T06:08:44 | 2020-03-18T06:08:44 | 248,141,336 | 7 | 8 | BSD-3-Clause | 2022-07-06T20:32:48 | 2020-03-18T04:52:18 | null | UTF-8 | Python | false | false | 7,545 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for results_processor.
These tests mostly test that argument parsing and processing work as expected.
They mock out accesses to the operating system, so no files are actually read
nor written.
"""
import datetime
import posixpath
import re
import unittest
import mock
from core.results_processor import command_line
# To easily mock module level symbols within the command_line module.
def module(symbol):
return 'core.results_processor.command_line.' + symbol
class ProcessOptionsTestCase(unittest.TestCase):
def setUp(self):
self.standalone = False
# Mock os module within results_processor so path manipulations do not
# depend on the file system of the test environment.
mock_os = mock.patch(module('os')).start()
def realpath(path):
return posixpath.normpath(posixpath.join(mock_os.getcwd(), path))
def expanduser(path):
return re.sub(r'~', '/path/to/home', path)
mock_os.getcwd.return_value = '/path/to/curdir'
mock_os.path.realpath.side_effect = realpath
mock_os.path.expanduser.side_effect = expanduser
mock_os.path.dirname.side_effect = posixpath.dirname
mock_os.path.join.side_effect = posixpath.join
mock.patch(module('_DefaultOutputDir'),
return_value='/path/to/output_dir').start()
mock.patch(module('path_util.GetChromiumSrcDir'),
return_value='/path/to/chromium').start()
def tearDown(self):
mock.patch.stopall()
def ParseArgs(self, args):
parser = command_line.ArgumentParser(standalone=self.standalone)
options = parser.parse_args(args)
command_line.ProcessOptions(options)
return options
class TestProcessOptions(ProcessOptionsTestCase):
def testOutputDir_default(self):
options = self.ParseArgs([])
self.assertEqual(options.output_dir, '/path/to/output_dir')
def testOutputDir_homeDir(self):
options = self.ParseArgs(['--output-dir', '~/my_outputs'])
self.assertEqual(options.output_dir, '/path/to/home/my_outputs')
def testOutputDir_relPath(self):
options = self.ParseArgs(['--output-dir', 'my_outputs'])
self.assertEqual(options.output_dir, '/path/to/curdir/my_outputs')
def testOutputDir_absPath(self):
options = self.ParseArgs(['--output-dir', '/path/to/somewhere/else'])
self.assertEqual(options.output_dir, '/path/to/somewhere/else')
@mock.patch(module('datetime'))
def testIntermediateDir_default(self, mock_datetime):
mock_datetime.datetime.utcnow.return_value = (
datetime.datetime(2015, 10, 21, 7, 28))
options = self.ParseArgs(['--output-dir', '/output'])
self.assertEqual(options.intermediate_dir,
'/output/artifacts/run_20151021T072800Z')
@mock.patch(module('datetime'))
def testIntermediateDir_withResultsLabel(self, mock_datetime):
mock_datetime.datetime.utcnow.return_value = (
datetime.datetime(2015, 10, 21, 7, 28))
options = self.ParseArgs(
['--output-dir', '/output', '--results-label', 'test my feature'])
self.assertEqual(options.intermediate_dir,
'/output/artifacts/test_my_feature_20151021T072800Z')
def testUploadBucket_noUploadResults(self):
options = self.ParseArgs([])
self.assertFalse(options.upload_results)
self.assertIsNone(options.upload_bucket)
@mock.patch(module('cloud_storage'))
def testUploadBucket_uploadResultsToDefaultBucket(self, mock_storage):
mock_storage.BUCKET_ALIASES = {'output': 'default-bucket'}
options = self.ParseArgs(['--upload-results'])
self.assertTrue(options.upload_results)
self.assertEqual(options.upload_bucket, 'default-bucket')
@mock.patch(module('cloud_storage'))
def testUploadBucket_uploadResultsToBucket(self, mock_storage):
mock_storage.BUCKET_ALIASES = {'output': 'default-bucket'}
options = self.ParseArgs(
['--upload-results', '--upload-bucket', 'my_bucket'])
self.assertTrue(options.upload_results)
self.assertEqual(options.upload_bucket, 'my_bucket')
@mock.patch(module('cloud_storage'))
def testUploadBucket_uploadResultsToAlias(self, mock_storage):
mock_storage.BUCKET_ALIASES = {
'output': 'default-bucket', 'special': 'some-special-bucket'}
options = self.ParseArgs(
['--upload-results', '--upload-bucket', 'special'])
self.assertTrue(options.upload_results)
self.assertEqual(options.upload_bucket, 'some-special-bucket')
def testDefaultOutputFormat(self):
options = self.ParseArgs([])
self.assertEqual(options.output_formats, ['html'])
def testUnkownOutputFormatRaises(self):
with self.assertRaises(SystemExit):
self.ParseArgs(['--output-format', 'unknown'])
def testNoDuplicateOutputFormats(self):
options = self.ParseArgs(
['--output-format', 'html', '--output-format', 'csv',
'--output-format', 'html', '--output-format', 'csv'])
self.assertEqual(options.output_formats, ['csv', 'html'])
def testTraceProcessorPath_noBuildDir(self):
with mock.patch(module('os.environ.get'), return_value=None):
options = self.ParseArgs([])
self.assertIsNone(options.trace_processor_path)
def testTraceProcessorPath_chromiumOutputDir(self):
def isfile(path):
return path == '/path/to/chromium/out_test/Debug/trace_processor_shell'
def env_get(name):
if name == 'CHROMIUM_OUTPUT_DIR':
return '/path/to/chromium/out_test/Debug'
with mock.patch(module('os.path.isfile')) as isfile_patch:
with mock.patch(module('os.environ.get')) as env_patch:
isfile_patch.side_effect = isfile
env_patch.side_effect = env_get
options = self.ParseArgs([])
self.assertEqual(options.trace_processor_path,
'/path/to/chromium/out_test/Debug/trace_processor_shell')
def testTraceProcessorPath_oneBuildDir(self):
def isfile(path):
return path == '/path/to/chromium/out/Release/trace_processor_shell'
with mock.patch(module('os.path.isfile')) as isfile_patch:
isfile_patch.side_effect = isfile
options = self.ParseArgs([])
self.assertEqual(options.trace_processor_path,
'/path/to/chromium/out/Release/trace_processor_shell')
def testTraceProcessorPath_twoBuildDirs(self):
def isfile(path):
return path in ['/path/to/chromium/out/Release/trace_processor_shell',
'/path/to/chromium/out/Debug/trace_processor_shell']
with mock.patch(module('os.path.isfile')) as isfile_patch:
isfile_patch.side_effect = isfile
options = self.ParseArgs([])
self.assertIsNone(options.trace_processor_path)
class StandaloneTestProcessOptions(ProcessOptionsTestCase):
def setUp(self):
super(StandaloneTestProcessOptions, self).setUp()
self.standalone = True
def testOutputFormatRequired(self):
with self.assertRaises(SystemExit):
self.ParseArgs([])
def testIntermediateDirRequired(self):
with self.assertRaises(SystemExit):
self.ParseArgs(['--output-format', 'json-test-results'])
def testSuccessful(self):
options = self.ParseArgs(
['--output-format', 'json-test-results',
'--intermediate-dir', 'some_dir'])
self.assertEqual(options.output_formats, ['json-test-results'])
self.assertEqual(options.intermediate_dir, '/path/to/curdir/some_dir')
self.assertEqual(options.output_dir, '/path/to/output_dir')
| [
"[email protected]"
]
| |
53bd21551303a9812df6895c3a5bcf7d5342dedb | d772869033c47a666622e9ee518bb306db5451a5 | /unified/modules/main/categories/crm/entities/deal.py | 0bcaee514289e3195334ae924481bbb68f1f6ee0 | []
| no_license | funny2code/unified_api | 920f1e19b2304e331b019f8a531d412b8759e725 | ffa28ba0e5c0bd8ad7dd44a468e3d1e777bba725 | refs/heads/main | 2023-08-31T16:00:17.074427 | 2021-10-04T04:09:45 | 2021-10-04T04:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from dataclasses import dataclass
@dataclass
class Deal:
deal_id: str = None
account_id: str = None
name: str = None
close_date: str = None
description: str = None
stage_id: str = None
value: str = None
probability: str = None
owner_id : str = None
contact_id: str = None
currency_id: str = None | [
"[email protected]"
]
| |
611e284cb8350ee5e0530de97ff2121e728b6f84 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/279/66340/submittedfiles/testes.py | 7ccd166e96f0a4767cbe69f6d5511f7efefae093 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
print("andre bezerra de barrros ")
print("24")
print(11+1037)
print((9*35+160)/5)
print(3.14159*(10/2)*(10/2)*30)
| [
"[email protected]"
]
| |
1c460f138444384b52eda73ccc1a7db8da23d76b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/3999/codes/1635_2442.py | 7d4cb10f6eb2bb08c1ebeeb9ad94276bb7866760 | []
| no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # Test your code bit by bit.
# Do not test everything only at the end, as that makes errors harder to pin down.
# Use the error messages to fix your code.
num=int(input("Digite um numero: "))
if (num%2==0):
mensagem="par"
else:
mensagem="impar"
print(mensagem) | [
"[email protected]"
]
| |
78cd02f35eb33e0dca1c10049960dc96d060c161 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part006597.py | f32e3be699fb19351abe7424a78bedb56216f820 | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher125926(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher125926._instance is None:
CommutativeMatcher125926._instance = CommutativeMatcher125926()
return CommutativeMatcher125926._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 125925
return
yield
from collections import deque | [
"[email protected]"
]
| |
9b2b62d6c9e2308e570b19de28085ae1f34c35a9 | 7bcb0b7f721c8fa31da7574f13ed0056127715b3 | /src/apps/api/resources/subscription.py | 62e5a4c74c86c9613ca6bd0c1ba0aeca5007fa3d | []
| no_license | simonchapman1986/ripe | 09eb9452ea16730c105c452eefb6a6791c1b4a69 | c129da2249b5f75015f528e4056e9a2957b7d884 | refs/heads/master | 2022-07-22T05:15:38.485619 | 2016-01-15T12:53:43 | 2016-01-15T12:53:43 | 49,718,671 | 1 | 0 | null | 2022-07-07T22:50:50 | 2016-01-15T12:53:09 | Python | UTF-8 | Python | false | false | 455 | py | from apps.base.models import FactServicesStorefrontSubscription
from tastypie.resources import ModelResource
class SubscriptionResource(ModelResource):
class Meta:
queryset = FactServicesStorefrontSubscription.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'entry'
filtering = {
'event_time': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
} | [
"[email protected]"
]
| |
b42508610856a9da00e6b77138872e63aab1b223 | 50f04c633f36e9d64c40c4f1b434ed0c24e447c7 | /argparse-examples/positionalarg.py | 047332844d22ec1332227b4bb8bc6c545fec0f22 | []
| no_license | sarahchou/python-practice | 883ba7dedd60b2cc18d5d73ef7d3cbb74f09dede | 2a3d10144b74460d8ec513e3c7d49bdb48107596 | refs/heads/master | 2022-11-11T10:06:12.944579 | 2018-06-11T22:14:06 | 2018-06-11T22:14:06 | 136,985,077 | 0 | 1 | null | 2022-10-20T08:48:36 | 2018-06-11T21:54:46 | Python | UTF-8 | Python | false | false | 305 | py | #Introduction to positional arguments
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument("echo", help="echo the string you use here")
parser.add_argument("square", help="display a square of a given number", type=int)
args = parser.parse_args()
# print(args.echo)
print(args.square**2)
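# Usage sketch (hypothetical invocation; assumes the Python 3 print fix above):
#   $ python positionalarg.py 4
#   16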
| [
"[email protected]"
]
| |
75bbbe754d344cb243580cb495baebe07914d27a | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/alhada001/question1.py | a7d1ab9e4c2362dd2297a16531f5457babdf6f3d | []
| no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #Adam Alhadeff
import math
file = input("Enter the marks filename:\n")
f = open(file, "r")
length = len(open(file).readlines())
names = []
marks = []
line = f.readline()
count = 0
for i in range(length):
split = line.split(",")
names.append(split[0])
marks.append(split[1])
count += 1
line = f.readline()
total = 0
for i in range(len(marks)):
total = total + int(marks[i])
average = total/count
SDT = 0
for i in range(len(marks)):
SDT = SDT + (int(marks[i])-average)*(int(marks[i])-average)
SD = math.sqrt(SDT/count)
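# Note: dividing by count gives the *population* standard deviation; use
# (count - 1) instead for the sample standard deviation.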
print("The average is:","%0.2f" % (average))
print("The std deviation is:","%0.2f" % (SD))
NumStudents = 0
for i in range(len(marks)):
if int(marks[i]) < (average-SD):
NumStudents += 1
if NumStudents != 0:
print("List of students who need to see an advisor:")
for i in range(len(marks)):
if int(marks[i]) < (average-SD):
print(names[i]) | [
"[email protected]"
]
| |
48cf3ed92a3e10d96e85fb1b15ba0340b11f90da | 9dba8607dce414f9905700d7a4ac44668de5e1f1 | /puente_quintanavides/combinaciones/def_hip_elscp_resumidas_xci.py | da24fbbb055eb1ffb3374131c83a39767b1d825f | []
| no_license | anaiortega/XCmodels | c0463ffe38531578aee281456e88528882255cd7 | e9b8c2f996a21b8aa3314242f3cc12b0e391b5df | refs/heads/master | 2023-08-16T22:44:01.168775 | 2023-08-14T18:15:10 | 2023-08-14T18:15:10 | 141,140,177 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | \combinacion["ELSCP001"]{ descomp("1.00*G1 + 0.70*TC1V1")}
\combinacion["ELSCP002"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.20*NV")}
\combinacion["ELSCP009"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2")}
\combinacion["ELSCP010"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP021"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2")}
\combinacion["ELSCP022"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP041"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2")}
\combinacion["ELSCP042"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2 + 0.20*NV")}
\combinacion["ELSCP053"]{ descomp("1.00*G1 + 0.70*TC1V2")}
\combinacion["ELSCP054"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.20*NV")}
\combinacion["ELSCP061"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1")}
\combinacion["ELSCP062"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP073"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1")}
\combinacion["ELSCP074"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1 + 0.20*NV")}
\combinacion["ELSCP093"]{ descomp("1.00*G1 + 0.70*TC2V1")}
\combinacion["ELSCP094"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.20*NV")}
\combinacion["ELSCP109"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2")}
\combinacion["ELSCP110"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP129"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2")}
\combinacion["ELSCP130"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP173"]{ descomp("1.00*G1 + 0.70*TC2V2")}
\combinacion["ELSCP174"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP189"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1")}
\combinacion["ELSCP190"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP209"]{ descomp("1.00*G1 + 0.70*TC3V1")}
\combinacion["ELSCP210"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP217"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2")}
\combinacion["ELSCP218"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP229"]{ descomp("1.00*G1 + 0.70*TC3V2")}
\combinacion["ELSCP230"]{ descomp("1.00*G1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP453"]{ descomp("1.00*G1 + 0.60*NV")}
\combinacion["ELSCP454"]{ descomp("1.00*G1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP456"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP458"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP461"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP465"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP470"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.60*NV")}
\combinacion["ELSCP474"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP479"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP490"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.60*NV")}
\combinacion["ELSCP492"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP495"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1 + 0.60*NV")}
\combinacion["ELSCP500"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.60*NV")}
\combinacion["ELSCP502"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP505"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP510"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2 + 0.60*NV")}
| [
"[email protected]"
]
| |
21b67cd73c3425afe749638e23831431e4628084 | 0f07107b016d2aee64788966b9f0d322ac46b998 | /moya/docgen/theme.py | 39c3d707e1310f7b2799f5a59c83826bd99563b2 | [
"MIT"
]
| permissive | fkztw/moya | 35f48cdc5d5723b04c671947099b0b1af1c7cc7a | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | refs/heads/master | 2023-08-09T09:20:21.968908 | 2019-02-03T18:18:54 | 2019-02-03T18:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | from .. import iniparse
from fs.path import dirname, pathjoin
class Page(object):
def __init__(self, doc_class, settings):
self.doc_class = doc_class
self.settings = settings
def __repr__(self):
return "Page(%r, %r)" % (self.doc_class, self.settings)
def get(self, context, settings_name):
return context.sub(self.settings.get(settings_name, ""))
def get_path(self, context):
return context.sub(self.settings.get("path", ""))
class Theme(object):
def __init__(self, fs):
self.fs = fs
self.cfg = None
self.theme_settings = None
self.pages = []
self.read()
def get(self, section_name, key, default=None):
section = self.cfg.get(section_name, None)
if section is None:
return default
return section.get(key, default)
def read(self):
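        # Parses theme.ini: the [theme] section becomes theme_settings and
        # every [page:<doc_class>] section becomes a Page. A hypothetical
        # theme.ini illustrating the layout this expects:
        #   [theme]
        #   name = example
        #   [page:doc]
        #   path = docs/index.html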
with self.fs.open("theme.ini", "rb") as settings_file:
cfg = iniparse.parse(settings_file)
self.cfg = cfg
self.theme_settings = cfg.get("theme", {})
for section, settings in cfg.items():
what, _, name = section.partition(":")
if what == "page":
page = Page(name, settings)
self.pages.append(page)
def get_pages(self, doc):
doc_class = doc.doc_class
for page in self.pages:
if page.doc_class == doc_class:
yield page
def get_relative_path(self, path):
ini_path = dirname(self.fs.getsyspath("theme.ini"))
path = pathjoin(ini_path, path)
return path
| [
"[email protected]"
]
| |
ca8c94fb16c3dcc6a3fee1dfea471e6a033318b8 | 46ee99adf99352b7879c5b2bdbb669f33549cc7c | /runner.py | 7d450ece39fe91252134ce0ec89ba0e639b08c1b | []
| no_license | webclinic017/TBDStructure | d85a01ec04a95a85bb82f2571b5fec898246d9f4 | 9b9b02088c7dcc5786f985dd17292e184b5ce6c2 | refs/heads/main | 2023-03-21T01:21:29.665270 | 2021-02-06T01:28:36 | 2021-02-06T01:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,199 | py | import sys
import traceback
from PyQt5.QtWidgets import QApplication
from multiprocessing import Queue, Process
from roboticks.data import DataHandler
from roboticks.portfolio import Portfolio
from roboticks.execution import ExecutionHandler
from db import UserDB, PriceDB
from strategies import STRATEGY
try:
from ebest import ebest_data
except:
    print('Ebest import failed.')
try:
from virtual.virtual_data import VirtualAPI
except:
    print('Virtual import failed.')
try:
from kiwoom.realtime import KiwoomRealtimeAPI
except:
    print('Kiwoom import failed.')
class Runner:
def __init__(self, username: str):
"""
        A strategy that has never been run needs its initial_cap to be set,
        whereas a strategy that is already running can be started by loading
        its saved state from the DB. All strategy-related information
        (strategy name, initial cap, monitor stocks, ...) is managed in the DB.
"""
print('Starting Runner instance')
        # If the username is not in the DB, the user must first be registered through the Django interface.
# >> python manage.py createsuperuser
self.db = UserDB(username)
self.api_queue = Queue()
self.port_queue = Queue()
self.order_queue = Queue()
self.tmp_queue = Queue()
self.data_queues = []
        self.source = None  # several strategies may share one source (but not the other way around)
        self.initial_cap = {}  # initial_cap per strategy
        self.strategies = {}
        self.symbol_list = {}  # list of symbols tracked by each strategy
self.monitor_stocks = []
def init_strategy(self, strategy_name: str):
"""
        To run a strategy that is not stored in the DB yet, its initial settings must be configured first.
"""
self.db.set_strategy(strategy_name)
        self.db.get_strategy()  # get_strategy either creates and returns the data, or filters and returns it.
def add_strategy(self, strategy_name: str or list):
try:
if type(strategy_name) == str:
strategy_name = [strategy_name]
for strategy in strategy_name:
self.db.set_strategy(strategy)
st = self.db.get_strategy()
self.strategies[strategy] = STRATEGY[st['using_strategy']]
# adding universe
self.symbol_list[strategy] = list(set(self.db.universe()))
# adding initial cap
self.initial_cap[strategy] = st['capital']
self.data_queues.append(Queue())
for st, uni in self.symbol_list.items():
self.monitor_stocks = list(set(self.monitor_stocks + uni))
except:
            print(f'{strategy_name} does not exist. Please check the STRATEGY constant.')
print(traceback.format_exc())
def update_strategy(self, strategy_name, using_strategy=None, source=None, server_type=None, capital=None, currency=None, monitor_stocks=[]):
self.db.set_strategy(strategy_name)
self.db.save_strategy(using_strategy=using_strategy, source=source, server_type=server_type, capital=capital, currency=currency)
self.db.add_to_universe(symbol=monitor_stocks)
def start_trading(self, source: str, date_from: str = None, date_to: str = None, exclude: list = []):
"""
source: virtual, kiwoom, ebest, crypto
        if source == virtual: date_from is required; when date_to is missing, date_to = date_from
        The exclude argument was added to support testing.
        exclude may contain data, portfolio, execution and strategy;
        excluded processes are not started.
"""
if len(self.strategies) == 0:
            raise Exception('Configure a strategy before starting. (add_strategy must be called first.)')
else:
            # create the data_queues that will be handed to the Data Handler
self.data_queues = [Queue() for _ in range(len(self.strategies))]
if source == 'virtual' and date_from is None:
            raise Exception('To use the Virtual API, date_from must be set in yyyy-mm-dd format.')
elif source == 'virtual' and date_to is None:
date_to = date_from
# Process Setup
# STEP #1: Data Handler process
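        # The DataHandler child process allocates a shared-memory block for
        # market data; its name/shape/dtype are sent back over tmp_queue so
        # the portfolio and strategy processes can attach to the same block.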
if 'data' not in exclude:
dp = Process(target=self._data_handler_process, args=(source,), name='DataHandler')
dp.start()
shm_info = self.tmp_queue.get()
sec_mem_name = shm_info['sec_mem_name']
sec_mem_shape = shm_info['sec_mem_shape']
sec_mem_dtype = shm_info['sec_mem_dtype']
else:
sec_mem_name, sec_mem_shape, sec_mem_dtype = None, None, None
# STEP #2: Portfolio process
if 'portfolio' not in exclude:
pp = Process(target=self._portfolio_process, args=(sec_mem_name, sec_mem_shape, sec_mem_dtype), name="Portfolio")
pp.start()
# STEP #3: Strategy processes
if 'strategy' not in exclude:
self._start_strategies(sec_mem_name, sec_mem_shape, sec_mem_dtype, source)
# STEP #4: Execution process
        if 'execution' not in exclude:
            # NOTE: 'server' was referenced but never defined anywhere in this
            # class; assuming a None default here so the call is runnable.
            server = None
            ep = Process(target=self._execution_process, args=(self.port_queue, self.order_queue, server, source), name="ExecutionHandler")
            ep.start()
# STEP #5: Main thread program (source programs)
if source == 'virtual':
self._init_virtual_setup(date_from, date_to)
        elif source == 'kiwoom':
            self._init_kiwoom_setup(self.monitor_stocks)
        elif source == 'ebest':
            self._init_ebest_setup(self.monitor_stocks)
elif source == 'crypto':
self._init_crypto_setup()
## Processes
def _data_handler_process(self, source):
"""
        source was added because the Data Handler may process data differently per source; it can be removed later
"""
d = DataHandler(data_queues=self.data_queues, port_queue=self.port_queue, api_queue=self.api_queue,
monitor_stocks=self.monitor_stocks, source=source)
self.tmp_queue.put({
'sec_mem_name': d.sec_mem.name,
'sec_mem_shape': d.sec_mem_shape,
'sec_mem_dtype': d.sec_mem_dtype,
})
d.start_event_loop()
def _portfolio_process(self, sec_mem_name, sec_mem_shape, sec_mem_dtype):
"""
        Modify the Portfolio object so that it can manage portfolio information for several strategies.
"""
e = Portfolio(port_queue=self.port_queue, order_queue=self.order_queue, initial_caps=self.initial_cap,
monitor_stocks=self.monitor_stocks, sec_mem_name=sec_mem_name, sec_mem_shape=sec_mem_shape,
sec_mem_dtype=sec_mem_dtype)
e.start_event_loop()
def _execution_process(self, port_queue, order_queue, server, source):
"""
        Split the eBest object out so that orders are always placed through order_queue.
"""
ex = ExecutionHandler(port_queue, order_queue, server, source)
ex.start_execution_loop()
def _strategy_process(self, id, strategy_cls, strategy_name, strategy_universe, sec_mem_name, sec_mem_shape, sec_mem_dtype, source):
sp = strategy_cls(data_queue=self.data_queues[id], port_queue=self.port_queue, order_queue=self.order_queue,
strategy_name=strategy_name, strategy_universe=strategy_universe, monitor_stocks=self.monitor_stocks,
sec_mem_name=sec_mem_name, sec_mem_shape=sec_mem_shape, sec_mem_dtype=sec_mem_dtype, source=source)
sp.calc_signals()
    # strategy-related methods
def _start_strategies(self, sec_mem_name, sec_mem_shape, sec_mem_dtype, source):
"""
        Run each strategy in its own separate process.
"""
strategies = []
id = 0
for st, st_cls in self.strategies.items():
strategies.append(Process(target=self._strategy_process, args=(id, st_cls, st, self.symbol_list[st],
sec_mem_name, sec_mem_shape, sec_mem_dtype, source)))
id += 1
_ = [st.start() for st in strategies]
# API setup
def _init_virtual_setup(self, date_from, date_to):
self.api = VirtualAPI(self.api_queue)
self.api.stream_data(date_from, date_to, monitor_stocks=self.monitor_stocks)
def _init_kiwoom_setup(self, monitor_stocks):
app = QApplication(sys.argv)
_ = KiwoomRealtimeAPI(self.api_queue, self.port_queue, self.order_queue, monitor_stocks)
sys.exit(app.exec_())
def _init_ebest_setup(self, monitor_stocks):
ebest_data.Main(self.api_queue, self.port_queue, self.order_queue, monitor_stocks)
def _init_crypto_setup(self):
"""
        The crypto setup allows several exchanges such as Binance, Upbit and
        Bithumb to be used at the same time. This enables diverse strategies,
        e.g. buying BTC on a domestic exchange and transferring it to Binance
        for trading.
"""
pass
if __name__ == '__main__':
r = Runner(username='[email protected]')
    # if the strategy does not exist yet, create it and then call add_strategy.
r.update_strategy(
strategy_name='strategy_1_first',
using_strategy='strategy_1',
capital=1000000,
monitor_stocks=['005930', '000020', '000030']
)
r.update_strategy(
strategy_name='strategy_1_second',
using_strategy='strategy_1',
capital=10000000,
monitor_stocks=['005930', '000270']
)
r.add_strategy(['strategy_1_first', 'strategy_1_second'])
r.start_trading(source='virtual', date_from='2021-02-03', exclude=['execution'])
print('r')
| [
"[email protected]"
]
| |
992f7cf55c4bffc77e2110b339c9a3d091ef44f9 | d726a06a4fe344de854312cc2ae93558adefd206 | /pynet/datasets/primede.py | c26d45a1beb3a35e83cd5fbd315ba72c66df1ad0 | [
"LicenseRef-scancode-cecill-b-en"
]
| permissive | CorentinAmbroise/pynet | 6f52f296b4ab5c651c341715786cb131391eabf1 | c353e5f80e75f785a460422ab7b39fa8f776991a | refs/heads/master | 2023-03-29T13:10:10.391193 | 2021-02-10T17:39:14 | 2021-02-10T17:39:14 | 278,072,885 | 0 | 0 | NOASSERTION | 2020-07-08T11:37:51 | 2020-07-08T11:37:51 | null | UTF-8 | Python | false | false | 14,409 | py | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides functions to prepare the PRIME-DE dataset.
"""
# Imports
import os
import re
import glob
import copy
import json
import logging
import subprocess
import lxml.html as lh
from pprint import pprint
from collections import namedtuple
from collections import OrderedDict
import requests
import nibabel
import numpy as np
import pandas as pd
from pynet.datasets import Fetchers
# Global parameters
QC = [
"016032099-001",
"025032241-001",
"016032098-001",
"016032098-002",
"016032103-001",
"016032103-002",
"016032097-001",
"016032104-001",
"016032104-002",
"016032100-001",
"016032100-002",
"016032101-001",
"016032102-001"
]
URL = "https://s3.amazonaws.com/fcp-indi/data/Projects/INDI/PRIME/{0}.tar.gz"
DESC_URL = "http://fcon_1000.projects.nitrc.org/indi/PRIME/files/{0}.csv"
HOME_URL = "http://fcon_1000.projects.nitrc.org/indi/indiPRIME.html"
SITES = [
"amu",
"caltech",
"ecnu-chen",
"ecnu",
"ion",
"iscmj",
"mcgill",
"lyon",
"mountsinai-P",
"mountsinai-S",
"nki",
"NIMH_encrypted",
"NIMH-CT_encrypted",
"nin",
"neurospin",
"newcastle",
"ohsu",
"princeton",
"rockefeller",
"sbri",
"ucdavis",
"uminn",
"oxford_encrypted",
"oxford-PM",
"NINPBBUtrecht",
"uwo",
"georgetown"
]
TRANSCODING = dict((name, "site-{0}".format(name)) for name in SITES)
TRANSCODING["NINPBBUtrecht"] = "site-utrecht"
TRANSCODING["georgetown"] = "Archakov2020"
TRANSCODING["oxford-encrypted"] = "site-oxford"
HTML_SITES = {
"amu": "Aix-Marseille Université",
"caltech": "California Institute of Technology",
"ecnu-chen": "East China Normal University - Chen",
"ecnu": "East China Normal University - Kwok",
"ion": "Institute of Neuroscience",
"iscmj": "Institut des Sciences Cognitives Marc Jeannerod",
"mcgill": "McGill University",
"lyon": "Lyon Neuroscience Research Center",
"mountsinai-P": "Mount Sinai School of Medicine",
"mountsinai-S": "Mount Sinai School of Medicine",
"nki": "Nathan Kline Institute",
"NIMH-encrypted": "National Institute of Mental Health",
"NIMH-CT-encrypted": "National Institute of Mental Health",
"nin": "Netherlands Institute for Neuroscience",
"neurospin": "NeuroSpin",
"newcastle": "Newcastle University",
"ohsu": "Oregon Health and Science University",
"princeton": "Princeton University",
"rockefeller": "Rockefeller University",
"sbri": "Stem Cell and Brain Research Institute",
"ucdavis": "University of California, Davis",
"uminn": "University of Minnesota",
"oxford-encrypted": "University of Oxford",
"oxford-PM": "University of Oxford (PM)",
"NINPBBUtrecht": "NIN Primate Brain Bank/Utrecht University",
"uwo": "University of Western Ontario",
"georgetown": "Georgetown University"
}
EXTRA_SITE = dict((name, "{0}".format(name)) for name in SITES)
EXTRA_SITE["ecnu"] = "ecnu-kwok"
EXTRA_SITE["NIMH-encrypted"] = "NIMH-L"
EXTRA_SITE["NIMH-CT-encrypted"] = "NIMH-M"
EXTRA_SITE["sbri"] = "sbri_pheno"
EXTRA_SITE["oxford-encrypted"] = "oxford"
DATADIR = "/neurospin/lbi/monkeyfmri/PRIME_DE_database"
Item = namedtuple("Item", ["input_path", "output_path", "metadata_path"])
logger = logging.getLogger("pynet")
def download_primede(datasetdir):
""" Download the PRIME-DE dataset.
Reference: http://fcon_1000.projects.nitrc.org/indi/PRIMEdownloads.html
Parameters
----------
datasetdir: str
the dataset destination folder.
"""
logger.info("Download PRIME-DE dataset.")
if not os.path.isdir(datasetdir):
os.mkdir(datasetdir)
downloaddir = os.path.join(datasetdir, "download")
if not os.path.isdir(downloaddir):
os.mkdir(downloaddir)
for site in SITES:
localfile = os.path.join(downloaddir, "{0}.tar.gz".format(site))
if os.path.isfile(localfile):
logger.info(" - {0}".format(localfile))
continue
url = URL.format(site)
logger.info(" - {0}".format(url))
cmd = ["wget", "-P", downloaddir, url]
logger.debug(" ".join(cmd))
subprocess.check_call(cmd)
for site in SITES:
site = site.replace("_encrypted", "-encrypted")
localfile = os.path.join(downloaddir, "{0}.csv".format(
EXTRA_SITE[site]))
if os.path.isfile(localfile):
logger.info(" - {0}".format(localfile))
continue
url = DESC_URL.format(EXTRA_SITE[site])
logger.info(" - {0}".format(url))
cmd = ["wget", "-P", downloaddir, url]
logger.debug(" ".join(cmd))
try:
subprocess.check_call(cmd)
except:
pass
for site in SITES:
site = site.replace("_encrypted", "-encrypted")
tarballfile = os.path.join(downloaddir, "{0}.tar.gz".format(site))
if site not in TRANSCODING:
logger.info(" - {0}".format(site))
continue
localdir = os.path.join(downloaddir, "{0}".format(TRANSCODING[site]))
if os.path.isdir(localdir):
logger.info(" - {0}".format(localdir))
continue
cmd = ["tar", "-zxvf", tarballfile, "--directory", downloaddir]
logger.debug(" ".join(cmd))
subprocess.check_call(cmd)
infofile = os.path.join(downloaddir, "info.json")
info = convert_html_table(HOME_URL)
with open(infofile, "wt") as open_file:
json.dump(info, open_file, indent=4)
def convert_html_table(url):
""" Web scraping: HTML tables.
"""
page = requests.get(url)
doc = lh.fromstring(page.content)
tr_elements = doc.xpath("//tr")
assert all(len(tr_elements[0]) == len(row) for row in tr_elements)
data = []
for cnt, item in enumerate(tr_elements[0]):
name = item.text_content()
data.append((name, []))
for row in tr_elements[1:]:
for cnt, item in enumerate(row.iterchildren()):
value = item.text_content()
data[cnt][1].append(value)
return dict(data)
def organize_primede(datasetdir):
""" Organize the PRIME-DE dataset.
Put all the data in the same BIDS organized folder.
Parameters
----------
datasetdir: str
the dataset destination folder.
"""
logger.info("Download PRIME-DE dataset.")
downloaddir = os.path.join(datasetdir, "download")
rawdir = os.path.join(datasetdir, "rawdata")
if not os.path.isdir(rawdir):
os.mkdir(rawdir)
infofile = os.path.join(downloaddir, "info.json")
with open(infofile, "rt") as open_file:
info = json.load(open_file)
col_names = info.pop("")
info = dict((key, dict((_key, _val) for _key, _val in zip(col_names, val)))
for key, val in info.items())
metadata = OrderedDict(
(key, []) for key in ("participant_id", "site", "site_index",
"species", "scanner", "state", "age", "weight",
"housing", "sex", "implant", "usage_agreement"))
for site_idx, site in enumerate(SITES):
extrafile = os.path.join(downloaddir, "{0}.csv".format(
EXTRA_SITE[site]))
if os.path.isfile(extrafile):
df = pd.read_csv(extrafile, dtype=str)
if "SubID" in df.columns:
df["Subject ID"] = df["SubID"]
else:
df = pd.DataFrame.from_dict({"Subject ID": []})
if "Subject ID" not in df.columns:
raise ValueError("A 'Subject ID' column is mandatory in "
"'{0}'.".format(extrafile))
site_idx = str(site_idx + 1).zfill(3)
site = site.replace("_encrypted", "-encrypted")
if site not in TRANSCODING:
logger.info(" - {0}".format(site))
continue
localdir = os.path.join(downloaddir, "{0}".format(TRANSCODING[site]))
if not os.path.isdir(localdir):
logger.info(" - {0}".format(site))
continue
for sid in os.listdir(localdir):
if not sid.startswith("sub-"):
logger.info(" - {0}".format(sid))
continue
_sid = sid.replace("sub-", "")
_sid = re.sub("[^0-9]", "", _sid)
sidinfo = {}
if _sid in df["Subject ID"].values:
sidinfo = df[df["Subject ID"] == _sid]
elif _sid.lstrip("0") in df["Subject ID"].values:
sidinfo = df[df["Subject ID"] == _sid.lstrip("0")]
if len(sidinfo) > 1:
raise ValueError("Multiple match for '{0}' in '{1}'.".format(
_sid, extrafile))
elif len(sidinfo) > 0:
sidinfo = sidinfo.to_dict(orient="list")
sidinfo = dict((key.split(" ")[0].lower(), val[0])
for key, val in sidinfo.items())
if "sexe" in sidinfo:
sidinfo["sex"] = sidinfo["sexe"]
_sid = "sub-{0}{1}".format(site_idx, _sid)
siddir = os.path.join(localdir, sid)
subject = _sid.replace("sub-", "")
if subject in metadata["participant_id"]:
raise ValueError("Subject '{0}' is not unique.".format(sid))
metadata["participant_id"].append(subject)
metadata["site"].append(site)
metadata["site_index"].append(site_idx)
metadata["species"].append(info[HTML_SITES[site]]["Species"])
metadata["scanner"].append(info[HTML_SITES[site]]["Scanner"])
metadata["state"].append(info[HTML_SITES[site]]["State"])
metadata["age"].append(sidinfo.get("age", "nc"))
metadata["weight"].append(sidinfo.get("weight", "nc"))
metadata["housing"].append(sidinfo.get("housing", "nc"))
metadata["sex"].append(sidinfo.get("sex", "nc"))
metadata["implant"].append(sidinfo.get("implant", "nc"))
metadata["usage_agreement"].append(info[HTML_SITES[site]][
"Usage Agreement"])
cmd = ["mv", siddir, os.path.join(rawdir, _sid)]
logger.info(" ".join(cmd))
subprocess.check_call(cmd)
participantsfile = os.path.join(rawdir, "participants.tsv")
df = pd.DataFrame.from_dict(metadata)
df.to_csv(participantsfile, sep="\t", index=False)
desc = {
"Name": "primede",
"BIDSVersion": "1.0.2"
}
descfile = os.path.join(rawdir, "dataset_description.json")
with open(descfile, "wt") as open_file:
json.dump(desc, open_file, indent=4)
@Fetchers.register
def fetch_primede(datasetdir, maskdirname="brainmask"):
""" Fetch/prepare the PRIME-DE dataset for pynet.
Parameters
----------
datasetdir: str
the dataset destination folder.
maskdirname: str
name of the folder that contains the brain masks.
Returns
-------
item: namedtuple
a named tuple containing 'input_path', 'output_path', and
'metadata_path'.
"""
logger.info("Loading PRIME-DE dataset.")
    # NOTE: the original line was `os.path.split(os.sep)[-1]`, which always
    # yields an empty string; assuming the intent was the name of the images
    # folder, i.e. the last path component of datasetdir.
    imdirname = os.path.basename(os.path.normpath(datasetdir))
if not os.path.isdir(datasetdir):
os.mkdir(datasetdir)
desc_path = os.path.join(datasetdir, "pynet_primede.tsv")
input_path = os.path.join(datasetdir, "pynet_primede_inputs.npy")
output_path = os.path.join(datasetdir, "pynet_primede_outputs.npy")
if not os.path.isfile(desc_path):
metadata = OrderedDict(
(key, []) for key in ("participant_id", "site", "with_mask",
"valid", "session", "run"))
anat_files = glob.glob(os.path.join(
datasetdir, "sub-*", "ses-*", "anat", "*acq-nc1iso*.nii.gz"))
if len(anat_files) == 0:
raise ValueError("Your dataset directory must contain the Prime "
"DE data organized with the function provided in "
"this module and preprocessed.")
inputs = []
outputs = []
for path in anat_files:
sid = path.split(os.sep)[-4].replace("sub-", "")
ses = path.split(os.sep)[-3].replace("ses-", "")
inputs.append(nibabel.load(path).get_data())
mask_path = path.replace(imdirname, maskdirname).replace(
"acq-nc1iso", "acq-c1iso").replace(".nii.gz", "_mask.nii.gz")
with_mask = 0
if os.path.isfile(mask_path):
outputs.append(nibabel.load(mask_path).get_data())
with_mask = 1
else:
outputs.append(np.zeros((90, 90, 60), dtype=int))
basename = os.path.basename(path)
            match = re.findall(r"run-(\d+)_", basename)
if len(match) == 1:
run = match[0]
else:
run = "nc"
valid = 1
if "{0}-{1}".format(sid, ses) in QC:
valid = 0
metadata["participant_id"].append(sid)
metadata["site"].append(sid[:3])
metadata["with_mask"].append(with_mask)
metadata["valid"].append(valid)
metadata["session"].append(ses)
metadata["run"].append(run)
inputs = np.asarray(inputs)
outputs = np.asarray(outputs)
inputs_im = nibabel.Nifti1Image(
inputs.transpose(1, 2, 3, 0), np.eye(4))
outputs_im = nibabel.Nifti1Image(
outputs.transpose(1, 2, 3, 0), np.eye(4))
inputs = np.expand_dims(inputs, axis=1)
outputs = np.expand_dims(outputs, axis=1)
np.save(input_path, inputs)
np.save(output_path, outputs)
nibabel.save(inputs_im, input_path.replace(".npy", ".nii.gz"))
nibabel.save(outputs_im, output_path.replace(".npy", ".nii.gz"))
df = pd.DataFrame.from_dict(metadata)
df.to_csv(desc_path, sep="\t", index=False)
return Item(input_path=input_path, output_path=output_path,
metadata_path=desc_path)
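# Minimal usage sketch (assumptions: run as a script, the destination folder
# below is hypothetical, and Fetchers.register returns the function unchanged).
# It simply chains the helpers above in their intended order.
if __name__ == "__main__":
    _datasetdir = "/tmp/primede"  # hypothetical destination folder
    download_primede(_datasetdir)
    organize_primede(_datasetdir)
    # fetch_primede expects preprocessed sub-*/ses-*/anat images below the
    # directory it is given (here the BIDS rawdata folder).
    item = fetch_primede(os.path.join(_datasetdir, "rawdata"))
    print(item.input_path, item.output_path, item.metadata_path)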
| [
"[email protected]"
]
| |
5783ce1e2789f35719b925425e95f886b574fd59 | 76d8f9d741d4e0bbd15a2c29fa77d041c01ea9bf | /exercise/keras/trafficsign.py | a422aaf4c134f2d7e34383236a64a9a9fb67fcf1 | []
| no_license | LevinJ/Behavioural-Cloning-P3 | d92bf3500797019a3fcf038a5c0e817f445e7a39 | fff8993ba2671c9664ab65899db952e2f5de37da | refs/heads/master | 2020-06-22T03:16:27.869561 | 2016-12-19T00:19:06 | 2016-12-19T00:19:06 | 74,758,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,848 | py | from utility.dumpload import DumpLoad
import numpy as np
from sklearn.preprocessing import scale
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Activation
from sklearn.preprocessing import OneHotEncoder
from keras.optimizers import Adam
from sklearn.cross_validation import train_test_split
from keras.layers import Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
class TrafficeSign(object):
def __init__(self):
return
def __get_data(self, filepath):
dump_load = DumpLoad(filepath)
data = dump_load.load()
features = data['features']
labels = data['labels'][:, np.newaxis]
return features, labels
def load_data(self):
        self.X_train, self.y_train = self.__get_data('./train.p')
self.X_test, self.y_test = self.__get_data('./test.p')
assert(self.X_train.shape[0] == self.y_train.shape[0]), "The number of images is not equal to the number of labels."
assert(self.X_train.shape[1:] == (32,32,3)), "The dimensions of the images are not 32 x 32 x 3."
return
def normalize_data(self):
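        # Min-max scale pixel values into [-0.5, 0.5]; note the test split is
        # scaled with the *training* min/max so both splits share one scale.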
max = 0.5
min = -0.5
train_min = self.X_train.min()
train_max = self.X_train.max()
self.X_train = self.X_train.astype('float32')
self.X_test = self.X_test.astype('float32')
#normalize training/val data
self.X_train = (self.X_train - train_min) / (train_max - train_min) * (max - min) + min
#normalize test data
self.X_test = ((self.X_test - train_min) / (train_max - train_min)) * (max - min) + min
# scaler = MinMaxScaler(feature_range=(-0.5, 0.5))
# self.X_train = scaler.fit_transform(self.X_train.ravel())
assert(round(np.mean(self.X_train)) == 0), "The mean of the input data is: %f" % np.mean(self.X_train)
assert(np.min(self.X_train) == -0.5 and np.max(self.X_train) == 0.5), "The range of the input data is: %.1f to %.1f" % (np.min(self.X_train), np.max(self.X_train))
return
def two_layer_net(self):
model = Sequential()
model.add(Dense(128, input_dim=32*32*3, name="hidden1"))
model.add(Activation("relu"))
model.add(Dense(output_dim=43, name="output"))
model.add(Activation("softmax"))
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert(model.get_layer(name="hidden1").input_shape == (None, 32*32*3)), "The input shape is: %s" % model.get_layer(name="hidden1").input_shape
assert(model.get_layer(name="output").output_shape == (None, 43)), "The output shape is: %s" % model.get_layer(name="output").output_shape
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
metrics=['accuracy'])
self.encoder = OneHotEncoder(sparse=False).fit(self.y_train)
y_train_encoded = self.encoder.transform(self.y_train)
history = model.fit(self.X_train.reshape(-1,32*32*3), y_train_encoded, nb_epoch=2, batch_size=32, verbose=2)
# STOP: Do not change the tests below. Your implementation should pass these tests.
print("The training accuracy was: {}".format( history.history['acc']))
assert(history.history['acc'][0] > 0.5), "The training accuracy was: {}".format( history.history['acc'])
return
def two_layer_net_split(self):
model = Sequential()
model.add(Dense(128, input_dim=32*32*3, name="hidden1"))
model.add(Activation("relu"))
model.add(Dense(output_dim=43, name="output"))
model.add(Activation("softmax"))
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert(model.get_layer(name="hidden1").input_shape == (None, 32*32*3)), "The input shape is: %s" % model.get_layer(name="hidden1").input_shape
assert(model.get_layer(name="output").output_shape == (None, 43)), "The output shape is: %s" % model.get_layer(name="output").output_shape
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
metrics=['accuracy'])
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train, self.y_train, test_size=0.25, random_state=42)
self.encoder = OneHotEncoder(sparse=False,n_values = 43).fit(self.y_train)
y_train_encoded = self.encoder.transform(self.y_train)
y_val_encoded = self.encoder.transform(self.y_val)
history = model.fit(self.X_train.reshape(-1,32*32*3), y_train_encoded, nb_epoch=2, batch_size=32, verbose=2,
validation_data=(self.X_val.reshape(-1,32*32*3), y_val_encoded))
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert(round(self.X_train.shape[0] / float(self.X_val.shape[0])) == 3), "The training set is %.3f times larger than the validation set." % self.X_train.shape[0] / float(self.X_val.shape[0])
assert(history.history['val_acc'][0] > 0.6), "The validation accuracy is: %.3f" % history.history['val_acc'][0]
return
def cnn_net(self):
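        # Architecture: 3x3 conv (32 filters) -> ReLU -> 2x2 max-pool ->
        # dropout(0.5) -> flatten -> dense(128) + ReLU -> dense(43) softmax.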
model = Sequential()
#layer 1
model.add(Convolution2D(32, 3, 3,
border_mode='valid',
input_shape=(32,32,3), name="conv1"))
model.add(Activation('relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.5))
#layer 2
model.add(Flatten())
model.add(Dense(128, name="hidden1"))
model.add(Activation("relu"))
#layer 3
model.add(Dense(output_dim=43, name="output"))
model.add(Activation("softmax"))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
metrics=['accuracy'])
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train, self.y_train, test_size=0.25, random_state=42)
self.encoder = OneHotEncoder(sparse=False,n_values = 43).fit(self.y_train)
y_train_encoded = self.encoder.transform(self.y_train)
y_val_encoded = self.encoder.transform(self.y_val)
y_test_encoded = self.encoder.transform(self.y_test)
history = model.fit(self.X_train, y_train_encoded, nb_epoch=30, batch_size=32, verbose=2,
validation_data=(self.X_val, y_val_encoded))
# STOP: Do not change the tests below. Your implementation should pass these tests.
#assert(history.history['val_acc'][0] > 0.9), "The validation accuracy is: %.3f" % history.history['val_acc'][0]
_, train_acc = model.evaluate(self.X_train, y_train_encoded, verbose=0)
_, val_acc = model.evaluate(self.X_val, y_val_encoded, verbose=0)
_, test_acc = model.evaluate(self.X_test, y_test_encoded, verbose=0)
print('train{:.3f}, val{:.3f}: test{:.3f}'.format(train_acc, val_acc, test_acc))
return
def run(self):
self.load_data()
self.normalize_data()
# self.two_layer_net()
# self.two_layer_net_split()
self.cnn_net()
return
if __name__ == "__main__":
obj= TrafficeSign()
obj.run() | [
"[email protected]"
]
| |
92ea114b1907807cc47d45d2b77ee51981cafab8 | 887f2e664c6d92f17e784f57022333a2fb859d06 | /analysis/plotMove.py | 252a91a4c6be6dc9ba8b647cac05970a426f3080 | []
| no_license | ctorney/dolphinUnion | 1968e258c6045060b2c921bd723d0ef0daea0147 | 9d7212d172a8a48a36fc4870fcdb04d66130bb76 | refs/heads/master | 2021-01-19T04:40:57.286526 | 2017-08-17T20:44:58 | 2017-08-17T20:44:58 | 46,424,670 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,332 | py | import numpy as np
import pandas as pd
import os, re
import math
import time
from scipy import interpolate
from scipy import ndimage
import matplotlib.pyplot as plt
import matplotlib.animation as ani
HD = os.getenv('HOME')
FILELIST = HD + '/workspace/dolphinUnion/tracking/solo/fileList.csv'
DATADIR = HD + '/Dropbox/dolphin_union/2015_footage/Solo/processedTracks/'
df = pd.read_csv(FILELIST)
for index, row in df.iterrows():
noext, ext = os.path.splitext(row.filename)
posfilename = DATADIR + '/TRACKS_' + str(index) + '_' + noext + '.csv'
gridfilename = DATADIR + '/GRID_' + str(index) + '_' + noext + '.npy'
gridPosfilename = DATADIR + '/GRIDPOS_' + str(index) + '_' + noext + '.npy'
posDF = pd.read_csv(posfilename)
posDF = posDF[posDF['frame']%60==0]
# posDF['x']=posDF['x']-min(posDF['x'])
# posDF['y']=posDF['y']-min(posDF['y'])
# xrange = max(posDF['x'])
# yrange = max(posDF['y'])
# nx = math.ceil(xrange/32)
# ny = math.ceil(yrange/32)
# grid = np.zeros((nx,ny,2))
# gridPos = np.zeros((nx,ny,2))
# xh = np.cos(posDF['heading'].values)
# yh = np.sin(posDF['heading'].values)
# xdirs = posDF['dx'].values
# ydirs = posDF['dy'].values
# xp = posDF['x'].values
# yp = posDF['y'].values
# kappa = 32.0*32.0
# for i in range(nx):
# for j in range(ny):
# gx = i * 32
# gy = j * 32
# dists = (((posDF['x'].values - gx)**2 + (posDF['y'].values - gy)**2))
# weights = np.exp(-dists/kappa)
# gridPos[i,j,0]=gx
# gridPos[i,j,1]=gy
# xav = np.sum(weights*xdirs)/np.sum(weights)
# yav = np.sum(weights*ydirs)/np.sum(weights)
# grid[i,j,0]=xav/math.sqrt(xav**2+yav**2)
# grid[i,j,1]=yav/math.sqrt(xav**2+yav**2)
grid = np.load(gridfilename)
gridPos = np.load(gridPosfilename)
#plt.quiver(xp,yp,xh,yh,angles='xy', scale_units='xy', color='r', scale=1.0/32.0)
#plt.quiver(gridPos[:,:,0],gridPos[:,:,1],grid[:,:,0],grid[:,:,1],angles='xy', scale_units='xy', scale=1.0/32.0)
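    # Smooth the group-centroid track with a normalized Kaiser window so the
    # camera view below pans smoothly; edges are padded with the first/last
    # centroid before convolving and trimmed again afterwards.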
winLen = 30
w = np.kaiser(winLen,1)
w = w/w.sum()
maxRange = 0
flen = len(posDF.groupby('frame'))
Xcentroids = np.zeros((flen))
Ycentroids = np.zeros((flen))
fc=0
for fnum, frame in posDF.groupby('frame'):
dist = max(frame['x'].values)-min(frame['x'].values)
if dist>maxRange:
maxRange=dist
dist = max(frame['y'].values)-min(frame['y'].values)
if dist>maxRange:
maxRange=dist
Xcentroids[fc] = np.average(frame['x'].values)
Ycentroids[fc] = np.average(frame['y'].values)
fc=fc+1
Xcentroids = np.r_[np.ones((winLen))*Xcentroids[0],Xcentroids,np.ones((winLen))*Xcentroids[-1]]
Xcentroids = np.convolve(w/w.sum(),Xcentroids,mode='same')[(winLen):-(winLen)]
Ycentroids = np.r_[np.ones((winLen))*Ycentroids[0],Ycentroids,np.ones((winLen))*Ycentroids[-1]]
Ycentroids = np.convolve(w/w.sum(),Ycentroids,mode='same')[(winLen):-(winLen)]
sz = math.ceil(maxRange/32)*16
fig = plt.figure()#figsize=(10, 10), dpi=5)
    totalFrames = 500
fc = 0
#with writer.saving(fig, "move.mp4", totalFrames):# len(posDF.groupby('frame'))):
for fnum, frame in posDF.groupby('frame'):
fc = fc + 1
if fc>totalFrames:
break
#frame = frame[frame.c_id==0]
xp = frame['x'].values
yp = frame['y'].values
xh = 0.1*frame['dx'].values
yh = 0.1*frame['dy'].values
xc = Xcentroids[fc]
yc = Ycentroids[fc]
plt.clf()
plt.quiver(gridPos[:,:,0],gridPos[:,:,1],grid[:,:,0],grid[:,:,1],angles='xy', scale_units='xy', scale=1.0/32.0, headwidth=1)
l, = plt.plot(xp,yp, 'ro')
plt.quiver(xp,yp,xh,yh,angles='xy', scale_units='xy', color='r', scale=1.0/32.0, headwidth=1.5)
#plt.axis([0,4000, 2000,-2000])
plt.axis('equal')
l.axes.get_xaxis().set_visible(False)
l.axes.get_yaxis().set_visible(False)
l.set_data(xp, yp)
l.axes.set_xlim(xc-sz,xc+sz)
l.axes.set_ylim(yc-sz,yc+sz)
plt.savefig('frames/fig'+'{0:05d}'.format(fc)+'.png')
#writer.grab_frame()
break
| [
"[email protected]"
]
| |
7db1a2988c552372fb5395ea469d95dd7642b33f | f561a219c57bd75790d3155acac6f54299a88b08 | /splash_screen/migrations/0001_initial.py | 595c80d3d3c899474e567d2f95d683c19e6bc3ae | []
| no_license | ujjwalagrawal17/OfferCartServer | 1e81cf2dc17f19fa896062c2a084e6b232a8929e | b3cd1c5f8eecc167b6f4baebed3c4471140d905f | refs/heads/master | 2020-12-30T15:31:04.380084 | 2017-05-24T18:26:20 | 2017-05-24T18:26:20 | 91,155,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-06 17:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FcmData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fcm', models.CharField(blank=True, max_length=512, null=True)),
('created', models.DateTimeField(auto_now=True)),
('modified', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='VersionData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.SmallIntegerField(default=0)),
('compulsory_update', models.SmallIntegerField(default=0)),
('version_type', models.CharField(blank=True, max_length=120, null=True)),
('created', models.DateTimeField(auto_now=True)),
('modified', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
]
| |
57ce4e23c369d0ac1c8990a08dd6f14bffa13f86 | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/spacy/tests/lang/en/test_exceptions.py | 6285a94089db310ac5481689b6030d62f9ea8679 | [
"Apache-2.0"
]
| permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 4,097 | py | # coding: utf-8
from __future__ import unicode_literals
import pytest
def test_en_tokenizer_handles_basic_contraction(en_tokenizer):
text = "don't giggle"
tokens = en_tokenizer(text)
assert len(tokens) == 3
assert tokens[1].text == "n't"
text = "i said don't!"
tokens = en_tokenizer(text)
assert len(tokens) == 5
assert tokens[4].text == "!"
@pytest.mark.parametrize("text", ["`ain't", """"isn't""", "can't!"])
def test_en_tokenizer_handles_basic_contraction_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize(
"text_poss,text", [("Robin's", "Robin"), ("Alexis's", "Alexis")]
)
def test_en_tokenizer_handles_poss_contraction(en_tokenizer, text_poss, text):
tokens = en_tokenizer(text_poss)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == "'s"
@pytest.mark.parametrize("text", ["schools'", "Alexis'"])
def test_en_tokenizer_splits_trailing_apos(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == text.split("'")[0]
assert tokens[1].text == "'"
@pytest.mark.parametrize("text", ["'em", "nothin'", "ol'"])
def test_en_tokenizer_doesnt_split_apos_exc(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].text == text
@pytest.mark.parametrize("text", ["we'll", "You'll", "there'll"])
def test_en_tokenizer_handles_ll_contraction(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == text.split("'")[0]
assert tokens[1].text == "'ll"
assert tokens[1].lemma_ == "will"
@pytest.mark.parametrize(
"text_lower,text_title", [("can't", "Can't"), ("ain't", "Ain't")]
)
def test_en_tokenizer_handles_capitalization(en_tokenizer, text_lower, text_title):
tokens_lower = en_tokenizer(text_lower)
tokens_title = en_tokenizer(text_title)
assert tokens_title[0].text == tokens_lower[0].text.title()
assert tokens_lower[0].text == tokens_title[0].text.lower()
assert tokens_lower[1].text == tokens_title[1].text
@pytest.mark.parametrize("pron", ["I", "You", "He", "She", "It", "We", "They"])
@pytest.mark.parametrize("contraction", ["'ll", "'d"])
def test_en_tokenizer_keeps_title_case(en_tokenizer, pron, contraction):
tokens = en_tokenizer(pron + contraction)
assert tokens[0].text == pron
assert tokens[1].text == contraction
@pytest.mark.parametrize("exc", ["Ill", "ill", "Hell", "hell", "Well", "well"])
def test_en_tokenizer_excludes_ambiguous(en_tokenizer, exc):
tokens = en_tokenizer(exc)
assert len(tokens) == 1
@pytest.mark.parametrize(
"wo_punct,w_punct", [("We've", "`We've"), ("couldn't", "couldn't)")]
)
def test_en_tokenizer_splits_defined_punct(en_tokenizer, wo_punct, w_punct):
tokens = en_tokenizer(wo_punct)
assert len(tokens) == 2
tokens = en_tokenizer(w_punct)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["e.g.", "p.m.", "Jan.", "Dec.", "Inc."])
def test_en_tokenizer_handles_abbr(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 1
def test_en_tokenizer_handles_exc_in_text(en_tokenizer):
text = "It's mediocre i.e. bad."
tokens = en_tokenizer(text)
assert len(tokens) == 6
assert tokens[3].text == "i.e."
@pytest.mark.parametrize("text", ["1am", "12a.m.", "11p.m.", "4pm"])
def test_en_tokenizer_handles_times(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[1].lemma_ in ["a.m.", "p.m."]
@pytest.mark.parametrize(
"text,norms", [("I'm", ["i", "am"]), ("shan't", ["shall", "not"])]
)
def test_en_tokenizer_norm_exceptions(en_tokenizer, text, norms):
tokens = en_tokenizer(text)
assert [token.norm_ for token in tokens] == norms
@pytest.mark.parametrize(
"text,norm", [("radicalised", "radicalized"), ("cuz", "because")]
)
def test_en_lex_attrs_norm_exceptions(en_tokenizer, text, norm):
tokens = en_tokenizer(text)
assert tokens[0].norm_ == norm
| [
"[email protected]"
]
| |
78e8a604cecf27fe811b0c948ad111c099ce963d | e54e1a63bffbe913f5e5018ace56cfa3eab1a72b | /practice/Leetcode/1253_reconstruct_a_2_row_binary_matrix.py | 2b71d9f4f15e8a18ca410c4daa4699f6e1846cec | []
| no_license | rmodi6/scripts | 5e27a46ce8970cbf601f132a53164c273f1812ea | 7cc47eecac00e6bd0b3ec74d7eed8ec3e0e77a84 | refs/heads/master | 2022-02-14T20:41:28.765751 | 2022-01-20T06:59:40 | 2022-01-20T06:59:40 | 168,207,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | # https://leetcode.com/contest/weekly-contest-162/problems/reconstruct-a-2-row-binary-matrix/
import numpy as np
class Solution:
def reconstructMatrix(self, upper: int, lower: int, colsum: List[int]) -> List[List[int]]:
if upper + lower != sum(colsum):
return []
ans = np.zeros((2, len(colsum)), dtype='int32')
for i, n in enumerate(colsum):
if n == 2:
if upper > 0 and lower > 0:
ans[0][i], ans[1][i] = 1, 1
upper -= 1
lower -= 1
else:
return []
for i, n in enumerate(colsum):
if n == 1:
if upper > 0:
ans[0][i] = 1
upper -= 1
elif lower > 0:
ans[1][i] = 1
lower -= 1
else:
return []
return ans.tolist()
| [
"[email protected]"
]
| |
0212f9e5951c9f222ca5a846a070bf81530f2a1c | 47c175daf97051e1f5c37b161f16abbd5f5a506e | /modules/forward_attention.py | 1572b09366af7b47ef3fe8cd017f1bbae7507555 | [
"BSD-3-Clause"
]
| permissive | nii-yamagishilab/self-attention-tacotron | 947d1d2eb8bc25f70331fbc401bf44c93ef92673 | 0ebd96114feab5a499964402a8ab7e402f0083b4 | refs/heads/master | 2021-07-11T06:13:18.202669 | 2020-06-19T03:04:42 | 2020-06-19T03:04:42 | 156,176,608 | 116 | 35 | BSD-3-Clause | 2020-06-19T03:04:43 | 2018-11-05T07:21:46 | Python | UTF-8 | Python | false | false | 6,383 | py | # ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: Yusuke Yasuda ([email protected])
# All rights reserved.
# ==============================================================================
""" """
import tensorflow as tf
from tensorflow.contrib.seq2seq import BahdanauAttention
from collections import namedtuple
def _location_sensitive_score(W_query, W_fill, W_keys):
dtype = W_query.dtype
num_units = W_keys.shape[-1].value or tf.shape(W_keys)[-1]
v_a = tf.get_variable("attention_variable",
shape=[num_units],
dtype=dtype,
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable("attention_bias",
shape=[num_units],
dtype=dtype,
initializer=tf.zeros_initializer())
return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fill + b_a), axis=[2])
def _calculate_context(alignments, values):
'''
This is a duplication of tensorflow.contrib.seq2seq.attention_wrapper._compute_attention.
ToDo: Avoid the redundant computation. This requires abstraction of AttentionWrapper itself.
:param alignments: [batch_size, 1, memory_time]
:param values: [batch_size, memory_time, memory_size]
:return:
'''
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = tf.expand_dims(alignments, 1)
context = tf.matmul(expanded_alignments, values) # [batch_size, 1, memory_size]
context = tf.squeeze(context, [1]) # [batch_size, memory_size]
return context
class ForwardAttentionState(namedtuple("ForwardAttentionState", ["alignments", "alpha", "u"])):
pass
class ForwardAttention(BahdanauAttention):
def __init__(self,
num_units,
memory,
memory_sequence_length,
attention_kernel,
attention_filters,
use_transition_agent=False,
cumulative_weights=True,
name="ForwardAttention"):
super(ForwardAttention, self).__init__(
num_units=num_units,
memory=memory,
memory_sequence_length=memory_sequence_length,
probability_fn=None,
name=name)
self._use_transition_agent = use_transition_agent
self._cumulative_weights = cumulative_weights
self.location_convolution = tf.layers.Conv1D(filters=attention_filters,
kernel_size=attention_kernel,
padding="SAME",
use_bias=True,
bias_initializer=tf.zeros_initializer(),
name="location_features_convolution")
self.location_layer = tf.layers.Dense(units=num_units,
use_bias=False,
dtype=memory.dtype,
name="location_features_layer")
if use_transition_agent:
# ToDo: support speed control bias
self.transition_factor_projection = tf.layers.Dense(units=1,
use_bias=True,
dtype=memory.dtype,
activation=tf.nn.sigmoid,
name="transition_factor_projection")
def __call__(self, query, state):
previous_alignments, prev_alpha, prev_u = state
with tf.variable_scope(None, "location_sensitive_attention", [query]):
# processed_query shape [batch_size, query_depth] -> [batch_size, attention_dim]
processed_query = self.query_layer(query) if self.query_layer else query
# -> [batch_size, 1, attention_dim]
expanded_processed_query = tf.expand_dims(processed_query, 1)
# [batch_size, max_time] -> [batch_size, max_time, 1]
expanded_alignments = tf.expand_dims(previous_alignments, axis=2)
# location features [batch_size, max_time, filters]
f = self.location_convolution(expanded_alignments)
processed_location_features = self.location_layer(f)
energy = _location_sensitive_score(expanded_processed_query, processed_location_features, self.keys)
alignments = self._probability_fn(energy, state)
# forward attention
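            # Forward-attention recursion: with transition probability u,
            #   alpha_n = ((1 - u) * prev_alpha_n + u * prev_alpha_{n-1} + eps) * align_n
            # then renormalize over the memory axis; this forces the focus of
            # attention to move monotonically from left to right.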
prev_alpha_n_minus_1 = tf.pad(prev_alpha[:, :-1], paddings=[[0, 0], [1, 0]])
alpha = ((1 - prev_u) * prev_alpha + prev_u * prev_alpha_n_minus_1 + 1e-7) * alignments
alpha_normalized = alpha / tf.reduce_sum(alpha, axis=1, keep_dims=True)
if self._use_transition_agent:
context = _calculate_context(alpha_normalized, self.values)
transition_factor_input = tf.concat([context, processed_query], axis=-1)
transition_factor = self.transition_factor_projection(transition_factor_input)
else:
transition_factor = prev_u
if self._cumulative_weights:
next_state = ForwardAttentionState(alignments + previous_alignments, alpha_normalized, transition_factor)
else:
next_state = ForwardAttentionState(alignments, alpha_normalized, transition_factor)
return alpha_normalized, next_state
@property
def state_size(self):
return ForwardAttentionState(self._alignments_size, self._alignments_size, 1)
def initial_state(self, batch_size, dtype):
initial_alignments = self.initial_alignments(batch_size, dtype)
# alpha_0 = 1, alpha_n = 0 where n = 2, 3, ..., N
initial_alpha = tf.concat([
tf.ones([batch_size, 1], dtype=dtype),
tf.zeros_like(initial_alignments, dtype=dtype)[:, 1:]], axis=1)
# transition factor
initial_u = 0.5 * tf.ones([batch_size, 1], dtype=dtype)
return ForwardAttentionState(initial_alignments, initial_alpha, initial_u)
| [
"[email protected]"
]
| |
c208f338b0b8e717f7788e70ab415ccb06596ec2 | be6ce691a3667edf152859f16804e06aaa486a03 | /solution1/deprecated.py | f70d197db4f26a45a6e6cf1f3aaa93a6efa255a6 | []
| no_license | mik-laj/airflow-deprecation-sample | d9b7d068013884177fec833e234914c6a1ec8be3 | ae1f93ac6ab85cec4c57dcb62f956fec73d88bbe | refs/heads/master | 2020-04-23T00:13:41.579998 | 2019-07-30T13:17:29 | 2019-07-30T13:17:50 | 170,771,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | import warnings
from solution1.new import *
warnings.warn("solution1.deprecated has moved to solution1.new. Import of "
"solution.new will become unsupported in version 2",
DeprecationWarning, 2)
| [
"[email protected]"
]
| |
283845b8c4a81738b39c332e062e558f4a1fa42f | e03f502312775b01b41ea7c6f5cb3dfbafdb8509 | /aboutus/api/serializers.py | 189187d979c6f742e946a41c169302bc8c45fb14 | []
| no_license | Grechanin/Misteckiy-DjangoRest-React-Redux | e223e89310362b8c21e30c8c669d4e170d232db6 | f05eb50a6aec72432716672294df81c3dc939ddd | refs/heads/master | 2020-04-13T10:58:17.931584 | 2019-02-18T09:21:54 | 2019-02-18T09:21:54 | 163,159,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | from aboutus.models import AboutUs
from rest_framework import serializers
class AboutUsSerializer(serializers.ModelSerializer):
class Meta:
model = AboutUs
fields = (
'tab_title',
'title',
'short_description',
'description',
)
| [
"[email protected]"
]
| |
c3ca2d48fc3106d240183f94624a9d8af3cbb55a | 660e35c822423685aea19d038daa8356722dc744 | /stock_lot/tests/test_stock_lot.py | 7fdaf10723fc7fdb663b0c5c7b29412cb8e8023d | []
| no_license | saifkazi/tryton_modules | a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | 94bd3a4e3fd86556725cdff33b314274dcb20afd | refs/heads/main | 2023-05-05T12:20:02.059236 | 2021-05-19T10:46:37 | 2021-05-19T10:46:37 | 368,768,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,168 | py | 
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import unittest
import doctest
import datetime
from dateutil.relativedelta import relativedelta
from decimal import Decimal

import trytond.tests.test_tryton
from trytond.tests.test_tryton import ModuleTestCase, with_transaction
from trytond.tests.test_tryton import doctest_teardown
from trytond.tests.test_tryton import doctest_checker
from trytond.transaction import Transaction
from trytond.pool import Pool
from trytond.modules.company.tests import create_company, set_company


class StockLotTestCase(ModuleTestCase):
    'Test Stock Lot module'
    module = 'stock_lot'

    @with_transaction()
    def test_products_by_location(self):
        'Test products_by_location'
        pool = Pool()
        Uom = pool.get('product.uom')
        Template = pool.get('product.template')
        Product = pool.get('product.product')
        Location = pool.get('stock.location')
        Move = pool.get('stock.move')
        Lot = pool.get('stock.lot')

        kg, = Uom.search([('name', '=', 'Kilogram')])
        g, = Uom.search([('name', '=', 'Gram')])
        template, = Template.create([{
            'name': 'Test products_by_location',
            'type': 'goods',
            'list_price': Decimal(0),
            'default_uom': kg.id,
        }])
        product, = Product.create([{
            'template': template.id,
        }])
        supplier, = Location.search([('code', '=', 'SUP')])
        customer, = Location.search([('code', '=', 'CUS')])
        storage, = Location.search([('code', '=', 'STO')])
        company = create_company()
        currency = company.currency
        with set_company(company):
            lot1, lot2 = Lot.create([{
                'number': '1',
                'product': product.id,
            }, {
                'number': '2',
                'product': product.id,
            }])
            moves = Move.create([{
                'product': product.id,
                'lot': lot1.id,
                'uom': kg.id,
                'quantity': 5,
                'from_location': supplier.id,
                'to_location': storage.id,
                'company': company.id,
                'unit_price': Decimal('1'),
                'currency': currency.id,
            }, {
                'product': product.id,
                'lot': lot2.id,
                'uom': kg.id,
                'quantity': 10,
                'from_location': supplier.id,
                'to_location': storage.id,
                'company': company.id,
                'unit_price': Decimal('1'),
                'currency': currency.id,
            }, {
                'product': product.id,
                'lot': lot2.id,
                'uom': kg.id,
                'quantity': 2,
                'from_location': storage.id,
                'to_location': customer.id,
                'company': company.id,
                'unit_price': Decimal('1'),
                'currency': currency.id,
            }, {
                'product': product.id,
                'lot': None,
                'uom': kg.id,
                'quantity': 3,
                'from_location': supplier.id,
                'to_location': storage.id,
                'company': company.id,
                'unit_price': Decimal('1'),
                'currency': currency.id,
            }])
            Move.do(moves)

            self.assertEqual(Product.products_by_location([storage.id],
                grouping_filter=([product.id],)), {
                    (storage.id, product.id): 16,
                })
            self.assertEqual(Product.products_by_location([storage.id],
                grouping=('product', 'lot',),
                grouping_filter=([product.id],)), {
                    (storage.id, product.id, lot1.id): 5,
                    (storage.id, product.id, lot2.id): 8,
                    (storage.id, product.id, None): 3,
                })

            with Transaction().set_context(locations=[storage.id]):
                lot1, lot2 = Lot.browse([lot1, lot2])
                self.assertEqual(lot1.quantity, 5)
                self.assertEqual(lot2.quantity, 8)

    @with_transaction()
    def test_period(self):
        'Test period'
        pool = Pool()
        Uom = pool.get('product.uom')
        Template = pool.get('product.template')
        Product = pool.get('product.product')
        Location = pool.get('stock.location')
        Move = pool.get('stock.move')
        Lot = pool.get('stock.lot')
        Period = pool.get('stock.period')

        unit, = Uom.search([('name', '=', 'Unit')])
        template, = Template.create([{
            'name': 'Test period',
            'type': 'goods',
            'default_uom': unit.id,
            'list_price': Decimal(0),
        }])
        product, = Product.create([{
            'template': template.id,
        }])
        supplier, = Location.search([('code', '=', 'SUP')])
        storage, = Location.search([('code', '=', 'STO')])
        company = create_company()
        currency = company.currency
        with set_company(company):
            lot1, lot2 = Lot.create([{
                'number': '1',
                'product': product.id,
            }, {
                'number': '2',
                'product': product.id,
            }])
            today = datetime.date.today()
            moves = Move.create([{
                'product': product.id,
                'lot': lot1.id,
                'uom': unit.id,
                'quantity': 5,
                'from_location': supplier.id,
                'to_location': storage.id,
                'planned_date': today - relativedelta(days=1),
                'effective_date': today - relativedelta(days=1),
                'company': company.id,
                'unit_price': Decimal('1'),
                'currency': currency.id,
            }, {
                'product': product.id,
                'lot': lot2.id,
                'uom': unit.id,
                'quantity': 10,
                'from_location': supplier.id,
                'to_location': storage.id,
                'planned_date': today - relativedelta(days=1),
                'effective_date': today - relativedelta(days=1),
                'company': company.id,
                'unit_price': Decimal('1'),
                'currency': currency.id,
            }, {
                'product': product.id,
                'lot': None,
                'uom': unit.id,
                'quantity': 3,
                'from_location': supplier.id,
                'to_location': storage.id,
                'planned_date': today - relativedelta(days=1),
                'effective_date': today - relativedelta(days=1),
                'company': company.id,
                'unit_price': Decimal('1'),
                'currency': currency.id,
            }])
            Move.do(moves)

            period, = Period.create([{
                'date': today - relativedelta(days=1),
                'company': company.id,
            }])
            Period.close([period])
            self.assertEqual(period.state, 'closed')

            quantities = {
                supplier: -18,
                storage: 18,
            }
            for cache in period.caches:
                self.assertEqual(cache.product, product)
                self.assertEqual(cache.internal_quantity,
                    quantities[cache.location])

            quantities = {
                (supplier, lot1): -5,
                (storage, lot1): 5,
                (supplier, lot2): -10,
                (storage, lot2): 10,
                (supplier, None): -3,
                (storage, None): 3,
            }
            for lot_cache in period.lot_caches:
                self.assertEqual(lot_cache.product, product)
                self.assertEqual(lot_cache.internal_quantity,
                    quantities[(lot_cache.location, lot_cache.lot)])

    @with_transaction()
    def test_assign_try_with_lot(self):
        "Test Move assign_try with lot"
        pool = Pool()
        Template = pool.get('product.template')
        Product = pool.get('product.product')
        Uom = pool.get('product.uom')
        Location = pool.get('stock.location')
        Move = pool.get('stock.move')
        Lot = pool.get('stock.lot')

        uom, = Uom.search([('name', '=', 'Meter')])
        template = Template(
            name="Product",
            type='goods',
            list_price=Decimal(1),
            default_uom=uom,
        )
        template.save()
        product = Product(template=template.id)
        product.save()
        supplier, = Location.search([('code', '=', 'SUP')])
        storage, = Location.search([('code', '=', 'STO')])
        customer, = Location.search([('code', '=', 'CUS')])
        company = create_company()
        with set_company(company):
            lot1, lot2 = Lot.create([{
                'number': "1",
                'product': product.id,
            }, {
                'number': "2",
                'product': product.id,
            }])
            moves = Move.create([{
                'product': product.id,
                'lot': lot1.id,
                'uom': uom.id,
                'quantity': 2,
                'from_location': supplier.id,
                'to_location': storage.id,
                'company': company.id,
                'unit_price': Decimal(1),
                'currency': company.currency.id,
            }, {
                'product': product.id,
                'lot': lot2.id,
                'uom': uom.id,
                'quantity': 3,
                'from_location': supplier.id,
                'to_location': storage.id,
                'company': company.id,
                'unit_price': Decimal(1),
                'currency': company.currency.id,
            }, {
                'product': product.id,
                'lot': None,
                'uom': uom.id,
                'quantity': 3,
                'from_location': supplier.id,
                'to_location': storage.id,
                'company': company.id,
                'unit_price': Decimal(1),
                'currency': company.currency.id,
            }])
            Move.do(moves)

            move, = Move.create([{
                'product': product.id,
                'uom': uom.id,
                'quantity': 10,
                'from_location': storage.id,
                'to_location': customer.id,
                'company': company.id,
                'unit_price': Decimal(1),
                'currency': company.currency.id,
            }])
            self.assertFalse(
                Move.assign_try([move], grouping=('product', 'lot')))

            moves = Move.search([
                ('product', '=', product.id),
                ('from_location', '=', storage.id),
                ('to_location', '=', customer.id),
                ('company', '=', company.id),
            ])
            self.assertEqual(len(moves), 4)
            self.assertEqual(
                {(m.lot, m.quantity, m.state) for m in moves}, {
                    (lot1, 2, 'assigned'),
                    (lot2, 3, 'assigned'),
                    (None, 1, 'assigned'),
                    (None, 4, 'draft'),
                })


def suite():
    suite = trytond.tests.test_tryton.suite()
    suite.addTests(unittest.TestLoader().loadTestsFromTestCase(
        StockLotTestCase))
    suite.addTests(doctest.DocFileSuite('scenario_stock_lot_shipment_out.rst',
        tearDown=doctest_teardown, encoding='utf-8',
        checker=doctest_checker,
        optionflags=doctest.REPORT_ONLY_FIRST_FAILURE))
    return suite
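
# Running note (a sketch; the exact invocation is an assumption based on this
# file living in a Tryton modules checkout): with a configured Tryton test
# database, the suite() hook above lets these tests run via, e.g.,
#   python -m unittest trytond.modules.stock_lot.tests.test_stock_lot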
| [
"[email protected]"
]
| |
84943acbf7b7b989ac08e4c3d173d53799243119 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/1170. Compare Strings by Frequency of the Smallest Character/solution2.py | f55713f52de924d420434c926569d1d9fb130de7 | [
"MIT"
]
| permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | 
from bisect import bisect
from typing import List  # needed when run outside the LeetCode harness


class Solution:
    def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:
        # f(s) = frequency of the lexicographically smallest character of s
        f = sorted([w.count(min(w)) for w in words])
        # For each query, count words whose f-value is strictly greater.
        return [len(f) - bisect(f, q.count(min(q))) for q in queries]
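
# Worked example (from the problem's sample input): f("cbd") = 1 because the
# smallest character 'b' appears once, while f("zaaaz") = 3 ('a' appears three
# times), so exactly one word has a strictly larger frequency:
#   Solution().numSmallerByFrequency(["cbd"], ["zaaaz"])  # -> [1]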
| [
"[email protected]"
]
| |
932f0f3ca464a0e327e0dcff6fe1f74ce0621071 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /routing_transformer/routing_tf_api.py | 62feaeaa11136632e25caf46ffb158383e6714e4 | [
"Apache-2.0",
"CC-BY-4.0"
]
| permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 7,727 | py | 
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pdb
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
tf.get_logger().setLevel('ERROR')

from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import hparams_lib
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import problem
from routing_transformer.problems import pg19
from tensorflow.compat.v1 import estimator as tf_estimator
from tqdm import tqdm
from routing_transformer.sparse_transformer import SparseTransformer
import numpy as np
import random
from scipy.special import log_softmax

VOCAB_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-data/vocab.pg19_length8k.32768.subwords"
HPARAMS_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/hparams.json"
CKPT_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/ckpt-3530000"
MAX_SEQUENCE_LENGTH = 8192


class SparseTransformerWrapper(object):
    def __init__(self, max_seq_length=None):
        # Load hyperparameters
        self.max_seq_length = max_seq_length or MAX_SEQUENCE_LENGTH
        # Needed since RT uses blocks of size 256
        assert self.max_seq_length % 256 == 0
        hparams = hparams_lib.create_hparams_from_json(HPARAMS_PATH)
        hparams.use_tpu = False
        hparams = zero_dropout(hparams)
        # Build TF1 graph of model
        sptf_model = SparseTransformer(hparams, tf_estimator.ModeKeys.EVAL)
        self.input_nodes = {
            "targets": tf.placeholder(tf.int32, [None, self.max_seq_length])
        }
        self.output_nodes = sptf_model.body(self.input_nodes)
        # Map the checkpoint variables to the graph
        init_from_checkpoint(CKPT_PATH, variable_prefix="sparse_transformer/body")
        # Create a session object, and actually initialize the graph
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.encoder = text_encoder.SubwordTextEncoder(VOCAB_PATH)

    def forward(self, sentences, encode_sentences=True, relevant_subsequences=None):
        encoded_sents = []
        encoded_seqs_no_pad = []
        if encode_sentences:
            for sent in sentences:
                encoded = []
                for line in sent.split("\n"):
                    new_tokens = self.encoder.encode(line.strip())
                    if len(encoded) + len(new_tokens) >= self.max_seq_length:
                        break
                    encoded.extend(new_tokens)
                    encoded.append(text_encoder.EOS_ID)
                encoded_seqs_no_pad.append(encoded)
                # pad shorter sequences to the full length
                encoded = encoded + [text_encoder.PAD_ID
                                     for _ in range(self.max_seq_length - len(encoded))]
                assert len(encoded) == self.max_seq_length
                encoded_sents.append(encoded)
        else:
            # assume sentences are encoded, pad/truncate them
            for sent in sentences:
                sent = sent[:self.max_seq_length]
                encoded_seqs_no_pad.append(sent)
                sent = sent + [text_encoder.PAD_ID
                               for _ in range(self.max_seq_length - len(sent))]
                encoded_sents.append(sent)
        feed_dict = {
            self.input_nodes["targets"]: np.array(encoded_sents)
        }
        outputs = self.sess.run(self.output_nodes, feed_dict=feed_dict)
        return_outputs = {
            "logits": np.squeeze(outputs[0], axis=(2, 3)),
            "loss": outputs[1]["training"],
            "encoded_seqs_no_pad": encoded_seqs_no_pad
        }
        if relevant_subsequences is not None:
            for i, rss in enumerate(relevant_subsequences):
                encoded_subseq = self.encoder.encode(rss)
                positions = find_sub_list(encoded_subseq, encoded_sents[i])
                misaligned_prefix_length = 0
                # Drop leading tokens until the subsequence aligns with the
                # tokenization of the full sequence.
                while positions is None:
                    misaligned_prefix_length += 1
                    encoded_subseq = encoded_subseq[1:]
                    positions = find_sub_list(encoded_subseq, encoded_sents[i])
                start, end = positions[-1]
                relevant_logits = return_outputs["logits"][i][start:end]
                log_probs = log_softmax(relevant_logits, axis=1)
                gold_log_probs = [lp[index] for index, lp in zip(encoded_subseq, log_probs)]
                return_outputs["subseq_log_loss"] = -1 * np.mean(gold_log_probs)
                return_outputs["misaligned_prefix_length"] = misaligned_prefix_length
        return return_outputs

    def close(self):
        self.sess.close()


def find_sub_list(sl, l):
    """Find a sub-list, so as to be able to compute the ppl of a sub-string."""
    sll = len(sl)
    matches = []
    for ind in (i for i, e in enumerate(l) if e == sl[0]):
        if l[ind:ind + sll] == sl:
            matches.append(
                (ind, ind + sll)
            )
    if matches:
        return matches


def zero_dropout(hparams):
    hparams.input_dropout = 0.0
    hparams.dropout = 0.0
    hparams.relu_dropout = 0.0
    hparams.attention_dropout = 0.0
    hparams.layer_prepostprocess_dropout = 0.0
    return hparams


def log_variables(name, var_names):
    tf.logging.info("%s (%d total): %s", name, len(var_names),
                    random.sample(list(var_names), min(len(var_names), 5)))


def init_from_checkpoint(checkpoint_path,
                         checkpoint_prefix=None,
                         variable_prefix=None,
                         target_variables=None):
    """Initializes all of the variables using `checkpoint_path`."""
    tf.logging.info("Loading variables from %s", checkpoint_path)
    checkpoint_variables = {
        name: name for name, _ in tf.train.list_variables(checkpoint_path)
        if "Adafactor" not in name
    }
    if target_variables is None:
        target_variables = tf.trainable_variables()
    target_variables = {var.name.split(":")[0]: var for var in target_variables}

    if checkpoint_prefix is not None:
        checkpoint_variables = {
            checkpoint_prefix + "/" + name: varname
            for name, varname in checkpoint_variables.items()
        }
    if variable_prefix is not None:
        target_variables = {
            variable_prefix + "/" + name: var
            for name, var in target_variables.items()
        }

    checkpoint_var_names = set(checkpoint_variables.keys())
    target_var_names = set(target_variables.keys())
    intersected_var_names = target_var_names & checkpoint_var_names
    assignment_map = {
        checkpoint_variables[name]: target_variables[name]
        for name in intersected_var_names
    }
    tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
    log_variables("Loaded variables", intersected_var_names)
    log_variables("Uninitialized variables", target_var_names - checkpoint_var_names)
    log_variables("Unused variables", checkpoint_var_names - target_var_names)
| [
"[email protected]"
]
| |
64e7542df83df9bd0d6edf9f81dd3c5add9aef71 | 0800aac473cbb94f3ac263c202979498c326cf18 | /법인세_총설.py | a437c75324c85c0332211d27ad24fe8df470b893 | []
| no_license | DanielHennyKwon/TAX_LIM_JEONG | 8f12e072c044cd17646f196c17b51d1e0cae179e | a263b4e90f0ac78500382047bf7ae72380213ca8 | refs/heads/master | 2023-06-16T10:50:55.111407 | 2021-07-11T02:59:50 | 2021-07-11T02:59:50 | 384,847,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,432 | py | 
# -*- coding: utf-8 -*-
# 2018-12-24 권달현
# Corporate income tax ("법인세") overview: builds a wx tree of tax topics from
# the nested dicts exported by the modules imported below.
import 결산의확정, 신고납부절차, 기한후신고, 수정신고, 경정청구, 법인의분류, 세금의종류, 실질과세, 소액주주, 대주주, 중소기업, 이월과세, 과세이연, 세무조정, 소득처분, 법인세비용, 세액계산_구조, 세무조정_흐름도

_ = {
    "결산의 확정": 결산의확정.결산의확정,
    "법인세의 신고납부절차": 신고납부절차.법인세,
    "기한후신고": 기한후신고.법인세,
    "수정신고": 수정신고._,
    "경정청구": 경정청구._,
    "법인세법상 법인의 분류": 법인의분류.법인세,
    "법인세의 종류": 세금의종류.법인세,
    "실질과세": 실질과세.법인세,
    "소액주주": 소액주주.법인세,
    "대주주": 대주주.법인세,
    "중소기업": 중소기업._,
    "이월과세": 이월과세.법인세,
    "과세이연": 과세이연.법인세,
    "세무조정 흐름도": 세무조정_흐름도.법인세,
    "세무조정": 세무조정.법인세,
    "소득처분": 소득처분.법인세,
    "법인의 각 사업연도소득과 과세표준 및 세액계산의 구조": 세액계산_구조.법인세,
    "법인세비용": 법인세비용.법인세,
}
#___________________________________________________
제목 = '법인세 총설'  # window title: "Corporate Income Tax: Overview"
tax = _

import wx


class MyFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, parent=None, title=제목)
        self.SetSize(420, 320 * 2)
        self.mainPanel = wx.Panel(self)
        self.expandButton = wx.Button(self.mainPanel, label='펼침')  # "Expand"
        self.tree = wx.TreeCtrl(self.mainPanel)
        root = self.tree.AddRoot(제목)
        # Populate the tree from the nested dicts (up to eight levels deep).
        for i in tax:
            ii = self.tree.AppendItem(root, i)
            for j in tax[i]:
                jj = self.tree.AppendItem(ii, j)
                for k in tax[i][j]:
                    kk = self.tree.AppendItem(jj, k)
                    for m in tax[i][j][k]:
                        mm = self.tree.AppendItem(kk, m)
                        for n in tax[i][j][k][m]:
                            nn = self.tree.AppendItem(mm, n)
                            for p in tax[i][j][k][m][n]:
                                pp = self.tree.AppendItem(nn, p)
                                for q in tax[i][j][k][m][n][p]:
                                    qq = self.tree.AppendItem(pp, q)
                                    for r in tax[i][j][k][m][n][p][q]:
                                        rr = self.tree.AppendItem(qq, r)
        self.staticText = wx.TextCtrl(self.mainPanel, style=wx.TE_MULTILINE)
        self.vtBoxSizer = wx.BoxSizer(wx.VERTICAL)
        self.vtBoxSizer.Add(self.expandButton, 0, wx.EXPAND | wx.ALL, 5)
        self.vtBoxSizer.Add(self.tree, 5, wx.EXPAND | wx.ALL, 5)
        self.vtBoxSizer.Add(self.staticText, 0, wx.EXPAND | wx.ALL, 5)
        self.mainPanel.SetSizer(self.vtBoxSizer)
        self.Bind(wx.EVT_BUTTON, self.OnExpandButton, self.expandButton)
        self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnNodeSelected, self.tree)

    def OnExpandButton(self, e):
        self.tree.ExpandAll()

    def OnNodeSelected(self, e):
        selected = self.tree.GetSelection()
        self.staticText.SetLabel(self.tree.GetItemText(selected))
        self.mainPanel.Layout()


if __name__ == '__main__':
    app = wx.App()
    frame = MyFrame()
    frame.Show()
    app.MainLoop()
#___________________________________________________
| [
"[email protected]"
]
| |
0e2e19efd181694bd82a31e6ea9bd4fd1ccb7faf | 248d20fa6c37afc1501b47398451bf15dc8f0165 | /ryosuke/chapter04/knock38.py | 5e72a2bf383a6cd35a7e272d4ed196b6769cd017 | []
| no_license | tmu-nlp/100knock2016 | 20e9efd4698f59918aa850ba40163906f13dcb7f | d5f7a76286cb95bb374ff98bc0c9db3c796d113d | refs/heads/master | 2021-01-21T02:20:57.121371 | 2016-08-03T01:49:38 | 2016-08-03T01:49:38 | 55,942,482 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | 
from knock30 import get_sentences
from collections import Counter
import matplotlib.pyplot as plt

vocab = Counter()
for sentence in get_sentences():
    vocab += Counter(m['surface'] for m in sentence)

names, freqs = zip(*vocab.most_common())
plt.hist(freqs, bins=len(set(freqs)))
plt.show()
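
# Note (assumption based on the earlier chapters of these 100-knock
# exercises): get_sentences() from knock30 yields each sentence as a list of
# morpheme dicts whose 'surface' key holds the surface form tallied above.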
| [
"[email protected]"
]
|