Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
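Each row below follows this schema: the raw source file sits in the content column and the last three columns are statistics derived from it. As a minimal sketch of how a row might be consumed, assuming the rows are exported as JSON Lines under the hypothetical file name rows.jsonl and that the derived columns use the obvious definitions (neither the storage format nor the exact statistic definitions are stated by this dump):

import json

# "rows.jsonl" and the statistic definitions below are assumptions for illustration only.
with open("rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        source = row["content"]              # the raw file text
        lines = source.splitlines() or [""]
        avg_line_length = sum(len(l) for l in lines) / len(lines)
        max_line_length = max(len(l) for l in lines)
        alphanum_fraction = sum(c.isalnum() for c in source) / max(len(source), 1)
        print(row["hexsha"], row["max_stars_repo_name"], row["size"],
              round(avg_line_length, 6), max_line_length, round(alphanum_fraction, 6))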
hexsha: b935f042bd8e2f0c715ab9c85da37dd8b7d47c59 | size: 1,358 | ext: py | lang: Python
max_stars: path=queries.py, repo=shan-x/wp2pelican, head=9a843fcaeb065ddafba40695584e34188938db84, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_issues: path=queries.py, repo=shan-x/wp2pelican, head=9a843fcaeb065ddafba40695584e34188938db84, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=queries.py, repo=shan-x/wp2pelican, head=9a843fcaeb065ddafba40695584e34188938db84, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
#!/usr/bin/python3
# coding: utf-8
get_posts = """
query GET_POSTS($first: Int, $after: String) {
posts(first: $first, after: $after) {
pageInfo {
hasNextPage
endCursor
}
edges {
node {
title
date
modified
slug
content
categories {
nodes {
name
}
}
tags {
nodes {
name
}
}
}
}
}
}
"""
get_pages = """
query GET_PAGES($first: Int, $after: String) {
pages(first: $first, after: $after) {
pageInfo {
hasNextPage
endCursor
}
edges {
node {
title
date
modified
slug
content
}
}
}
}
"""
get_menu_id = """
{ menus {
nodes {
name
id
}
}
}
"""
get_menu = """
query GET_MENU($id: ID!) {
menu(id: $id) {
name
slug
menuItems {
nodes {
label
url
childItems {
nodes {
label
url
}
}
}
}
}
}
"""
avg_line_length: 16.166667 | max_line_length: 46 | alphanum_fraction: 0.318115
hexsha: 234d7a31dd4a5b6a1a6e727a6d5b552ddab0756d | size: 2,896 | ext: py | lang: Python
max_stars: path=tests/extmod/utimeq1.py, repo=sebastien-riou/micropython, head=116c15842fd48ddb77b0bc016341d936a0756573, licenses=["MIT"], count=13,648, event_min=2015-01-01T01:34:51.000Z, event_max=2022-03-31T16:19:53.000Z
max_issues: path=tests/extmod/utimeq1.py, repo=sebastien-riou/micropython, head=116c15842fd48ddb77b0bc016341d936a0756573, licenses=["MIT"], count=7,092, event_min=2015-01-01T07:59:11.000Z, event_max=2022-03-31T23:52:18.000Z
max_forks: path=tests/extmod/utimeq1.py, repo=sebastien-riou/micropython, head=116c15842fd48ddb77b0bc016341d936a0756573, licenses=["MIT"], count=4,942, event_min=2015-01-02T11:48:50.000Z, event_max=2022-03-31T19:57:10.000Z
content:
# Test for utimeq module which implements task queue with support for
# wraparound time (utime.ticks_ms() style).
try:
from utime import ticks_add, ticks_diff
from utimeq import utimeq
except ImportError:
print("SKIP")
raise SystemExit
DEBUG = 0
MAX = ticks_add(0, -1)
MODULO_HALF = MAX // 2 + 1
if DEBUG:
def dprint(*v):
print(*v)
else:
def dprint(*v):
pass
# Try not to crash on invalid data
h = utimeq(10)
try:
h.push(1)
assert False
except TypeError:
pass
try:
h.pop(1)
assert False
except IndexError:
pass
# unsupported unary op
try:
~h
assert False
except TypeError:
pass
# pushing on full queue
h = utimeq(1)
h.push(1, 0, 0)
try:
h.push(2, 0, 0)
assert False
except IndexError:
pass
# popping into invalid type
try:
h.pop([])
assert False
except TypeError:
pass
# length
assert len(h) == 1
# peektime
assert h.peektime() == 1
# peektime with empty queue
try:
utimeq(1).peektime()
assert False
except IndexError:
pass
def pop_all(h):
l = []
while h:
item = [0, 0, 0]
h.pop(item)
# print("!", item)
l.append(tuple(item))
dprint(l)
return l
def add(h, v):
h.push(v, 0, 0)
dprint("-----")
# h.dump()
dprint("-----")
h = utimeq(10)
add(h, 0)
add(h, MAX)
add(h, MAX - 1)
add(h, 101)
add(h, 100)
add(h, MAX - 2)
dprint(h)
l = pop_all(h)
for i in range(len(l) - 1):
diff = ticks_diff(l[i + 1][0], l[i][0])
assert diff > 0
def edge_case(edge, offset):
h = utimeq(10)
add(h, ticks_add(0, offset))
add(h, ticks_add(edge, offset))
dprint(h)
l = pop_all(h)
diff = ticks_diff(l[1][0], l[0][0])
dprint(diff, diff > 0)
return diff
dprint("===")
diff = edge_case(MODULO_HALF - 1, 0)
assert diff == MODULO_HALF - 1
assert edge_case(MODULO_HALF - 1, 100) == diff
assert edge_case(MODULO_HALF - 1, -100) == diff
# We expect diff to be always positive, per the definition of heappop() which should return
# the smallest value.
# This is the edge case where this invariant breaks, due to asymmetry of two's-complement
# range - there's one more negative integer than positive, so heappushing values like below
# will then make ticks_diff() return the minimum negative value. We could make heappop
# return them in a different order, but ticks_diff() result would be the same. Conclusion:
# never add to a heap values where (a - b) == MODULO_HALF (and which are >= MODULO_HALF
# ticks apart in real time of course).
dprint("===")
diff = edge_case(MODULO_HALF, 0)
assert diff == -MODULO_HALF
assert edge_case(MODULO_HALF, 100) == diff
assert edge_case(MODULO_HALF, -100) == diff
dprint("===")
diff = edge_case(MODULO_HALF + 1, 0)
assert diff == MODULO_HALF - 1
assert edge_case(MODULO_HALF + 1, 100) == diff
assert edge_case(MODULO_HALF + 1, -100) == diff
print("OK")
avg_line_length: 19.567568 | max_line_length: 91 | alphanum_fraction: 0.642956
hexsha: b0143f54e3b68a1fccd5d707258f8aca97d8446a | size: 3,924 | ext: py | lang: Python
max_stars: path=main.py, repo=ZhangPHEngr/Kalman-in-MOT, head=d915645cdba32c42957bcf5bc0357273435a880c, licenses=["Apache-2.0"], count=54, event_min=2021-09-21T09:04:42.000Z, event_max=2022-03-31T06:08:22.000Z
max_issues: path=main.py, repo=ZhangPHEngr/Kalman-in-MOT, head=d915645cdba32c42957bcf5bc0357273435a880c, licenses=["Apache-2.0"], count=2, event_min=2021-11-14T19:05:42.000Z, event_max=2021-12-20T11:20:10.000Z
max_forks: path=main.py, repo=ZhangPHEngr/Kalman-in-MOT, head=d915645cdba32c42957bcf5bc0357273435a880c, licenses=["Apache-2.0"], count=23, event_min=2021-09-27T06:50:19.000Z, event_max=2022-03-17T07:59:46.000Z
content:
# -*- coding: utf-8 -*-
"""
@Project: kalman-filter-in-single-object-tracking
@File : main.py
@Author : Zhang P.H
@Date : 2021/9/20
@Desc :
"""
import cv2
import numpy as np
import const
import utils
import measure
from kalman import Kalman
# -------------------------------- Kalman filter parameters ---------------------------------------
# State transition matrix A: propagates the state from the previous time step to the current one
A = np.array([[1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]])
# Control input matrix B
B = None
# Process noise covariance matrix Q, p(w)~N(0,Q); the noise models real-world uncertainty.
# In tracking, process noise comes from the uncertainty of the target's motion (sudden acceleration, braking, turns, etc.)
Q = np.eye(A.shape[0]) * 0.1
# Observation (measurement) matrix H
H = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
# Measurement noise covariance matrix R, p(v)~N(0,R)
# Measurement noise comes from lost or overlapping detection boxes, etc.
R = np.eye(H.shape[0]) * 1
# Initialization of the state estimate covariance matrix P
P = np.eye(A.shape[0])
# -------------------------------------------------------------------------------
def main():
    # 1. Load the video and the target position measurements
    cap = cv2.VideoCapture(const.VIDEO_PATH)  # the video is read alongside to make the demo easier to present
meas_list_all = measure.load_measurement(const.FILE_DIR)
sz = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') # opencv3.0
video_writer = cv2.VideoWriter(const.VIDEO_OUTPUT_PATH, fourcc, const.FPS, sz, True)
    # 2. Filter frame by frame
    state_list = []  # per-frame target state info, stored as Kalman objects
frame_cnt = 1
for meas_list_frame in meas_list_all:
        # ------------------------------------------ load the current frame ------------------------------------
ret, frame = cap.read()
if not ret:
break
# ---------------------------------------Kalman Filter for multi-objects-------------------
        # Predict
for target in state_list:
target.predict()
        # Associate states with measurements
mea_list = [utils.box2meas(mea) for mea in meas_list_frame]
state_rem_list, mea_rem_list, match_list = Kalman.association(state_list, mea_list)
        # Update states that were not matched; delete them once termination is triggered
state_del = list()
for idx in state_rem_list:
status, _, _ = state_list[idx].update()
if not status:
state_del.append(idx)
state_list = [state_list[i] for i in range(len(state_list)) if i not in state_del]
        # Measurements that were not matched start new tracks as newborn targets
for idx in mea_rem_list:
state_list.append(Kalman(A, B, H, Q, R, utils.mea2state(mea_list[idx]), P))
        # ----------------------------------------------- Visualization -----------------------------------
        # Draw all measurements (mea) on the frame
for mea in meas_list_frame:
cv2.rectangle(frame, tuple(mea[:2]), tuple(mea[2:]), const.COLOR_MEA, thickness=1)
        # Draw all states on the frame
for kalman in state_list:
pos = utils.state2box(kalman.X_posterior)
cv2.rectangle(frame, tuple(pos[:2]), tuple(pos[2:]), const.COLOR_STA, thickness=2)
        # Draw the match relations
for item in match_list:
cv2.line(frame, tuple(item[0][:2]), tuple(item[1][:2]), const.COLOR_MATCH, 3)
        # Draw the tracks
for kalman in state_list:
tracks_list = kalman.track
for idx in range(len(tracks_list) - 1):
last_frame = tracks_list[idx]
cur_frame = tracks_list[idx + 1]
# print(last_frame, cur_frame)
cv2.line(frame, last_frame, cur_frame, kalman.track_color, 2)
cv2.putText(frame, str(frame_cnt), (0, 50), color=const.RED, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.5)
cv2.imshow('Demo', frame)
cv2.imwrite("./image/{}.jpg".format(frame_cnt), frame)
video_writer.write(frame)
        cv2.waitKey(100)  # hold the frame for 100 ms before moving on
frame_cnt += 1
cap.release()
cv2.destroyAllWindows()
video_writer.release()
if __name__ == '__main__':
main()
avg_line_length: 35.351351 | max_line_length: 118 | alphanum_fraction: 0.522681
hexsha: 9a0ec4fae2558f2c73543fb6d90c69955bfefbf3 | size: 4,540 | ext: py | lang: Python
max_stars: path=starthinker_ui/recipe/views.py, repo=dvandra/starthinker, head=07a8c1f8bf3c7493b1833d54ca0acc9305a04bc9, licenses=["Apache-2.0"], count=1, event_min=2019-07-02T18:25:25.000Z, event_max=2019-07-02T18:25:25.000Z
max_issues: path=starthinker_ui/recipe/views.py, repo=dvandra/starthinker, head=07a8c1f8bf3c7493b1833d54ca0acc9305a04bc9, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=starthinker_ui/recipe/views.py, repo=dvandra/starthinker, head=07a8c1f8bf3c7493b1833d54ca0acc9305a04bc9, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
# -*- coding: utf-8 -*-
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from __future__ import unicode_literals
import json
from django.shortcuts import render
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from starthinker_ui.account.decorators import permission_admin
from starthinker_ui.recipe.forms_script import ScriptForm
from starthinker_ui.recipe.models import Recipe
def recipe_list(request):
if request.user.is_authenticated():
recipes = request.user.recipe_set.all() if request.user.is_authenticated() else []
else:
recipes = []
return render(request, "recipe/recipe_list.html", { 'recipes':recipes })
@permission_admin()
def recipe_edit(request, pk=None):
recipe = request.user.recipe_set.get(pk=pk) if pk else None
if request.method == 'POST':
form_script = ScriptForm(recipe, request.user, request.POST)
if form_script.is_valid():
form_script.save()
messages.success(request, 'Recipe updated.')
return HttpResponseRedirect(form_script.instance.link_edit())
else:
print 'ERRORS', form_script.get_errors()
messages.error(request, 'Recipe Script Errors: %s' % form_script.get_errors())
else:
form_script = ScriptForm(recipe, request.user, scripts=request.GET.get('scripts', ''))
return render(request, "recipe/recipe_edit.html", { 'form_script':form_script })
@permission_admin()
def recipe_delete(request, pk=None):
request.user.recipe_set.filter(pk=pk).delete()
messages.success(request, 'Recipe deleted.')
return HttpResponseRedirect('/')
@permission_admin()
def recipe_run(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
if recipe.is_running():
messages.success(request, 'Recipe dispatched, will run once in progress task completes.')
else:
messages.success(request, 'Recipe dispatched, give it a few minutes to start.')
recipe.force()
except Recipe.DoesNotExist, e:
messages.error(request, str(e))
return HttpResponseRedirect('/recipe/edit/%s/' % pk)
@permission_admin()
def recipe_cancel(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
if recipe.is_running():
messages.success(request, 'Recipe cancelled, active task will stop shortly.')
else:
messages.success(request, 'Recipe cancelled, no tasks are running.')
recipe.cancel()
except Recipe.DoesNotExist, e:
messages.error(request, str(e))
return HttpResponseRedirect('/recipe/edit/%s/' % pk)
@csrf_exempt
def recipe_start(request):
try:
recipe = Recipe.objects.get(reference=request.POST.get('reference', 'invalid'))
if recipe.is_running():
response = HttpResponse('RECIPE INTERRUPTED')
else:
response = HttpResponse('RECIPE STARTED')
recipe.force()
except Recipe.DoesNotExist, e:
response = HttpResponseNotFound('RECIPE NOT FOUND')
return response
@csrf_exempt
def recipe_stop(request):
try:
recipe = Recipe.objects.get(reference=request.POST.get('reference', 'invalid'))
if recipe.is_running():
response = HttpResponse('RECIPE INTERRUPTED')
else:
response = HttpResponse('RECIPE STOPPED')
recipe.cancel()
except Recipe.DoesNotExist, e:
response = HttpResponseNotFound('RECIPE NOT FOUND')
return response
@permission_admin()
def recipe_download(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
data = recipe.get_json(credentials=False)
response = HttpResponse(json.dumps(data, indent=2), content_type='application/json')
response['Content-Disposition'] = 'attachment; filename=recipe_%s.json' % recipe.uid()
return response
except Exception, e:
messages.error(request, str(e))
return HttpResponseRedirect('/recipe/edit/%s/' % pk)
avg_line_length: 33.880597 | max_line_length: 95 | alphanum_fraction: 0.705727
hexsha: d216986abff147689f2114ccee44377092381792 | size: 11,275 | ext: py | lang: Python
max_stars: path=analyze.py, repo=jang-chinseok/Visualizing-voice-data, head=f246cd2ab97dc1114b8c16d37761bc2e7657b45f, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_issues: path=analyze.py, repo=jang-chinseok/Visualizing-voice-data, head=f246cd2ab97dc1114b8c16d37761bc2e7657b45f, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=analyze.py, repo=jang-chinseok/Visualizing-voice-data, head=f246cd2ab97dc1114b8c16d37761bc2e7657b45f, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
# visualisation tools for mimic2
import matplotlib.pyplot as plt
from statistics import stdev, mode, mean, median
from statistics import StatisticsError
import argparse
import glob
import os
import csv
import copy
import seaborn as sns
import random
from text.cmudict import CMUDict
from jamo import h2j, j2hcj
from text import cleaners
from korean_romanizer.romanizer import Romanizer
import matplotlib.font_manager as fm
# import hangulize  # wanted to try hangulize (github.com/hangulize/hangulize), but could not get it applied.
'''from g2pk import G2p''' # wanted to use g2pk, but gave up because pip kept breaking during installation.
fl = fm.FontProperties(fname="C:\WINDOWS\Fonts\malgun.ttf").get_name() # Hangul jamo are not rendered correctly in matplotlib because of a font mismatch,
plt.rc('font',family=fl) # so this selects a matching font to fix the display.
def get_audio_seconds(frames):
return (frames*12.5)/1000
def append_data_statistics(meta_data):
# get data statistics
for char_cnt in meta_data:
data = meta_data[char_cnt]["data"]
audio_len_list = [d["audio_len"] for d in data]
mean_audio_len = mean(audio_len_list)
try:
mode_audio_list = [round(d["audio_len"], 2) for d in data]
mode_audio_len = mode(mode_audio_list)
except StatisticsError:
mode_audio_len = audio_len_list[0]
median_audio_len = median(audio_len_list)
try:
std = stdev(
d["audio_len"] for d in data
)
except:
std = 0
meta_data[char_cnt]["mean"] = mean_audio_len
meta_data[char_cnt]["median"] = median_audio_len
meta_data[char_cnt]["mode"] = mode_audio_len
meta_data[char_cnt]["std"] = std
return meta_data
def process_meta_data(path):
meta_data = {}
# load meta data
with open(path, 'r',encoding='utf-8') as f:
data = csv.reader(f, delimiter='|')
for row in data:
frames = int(row[2])
utt = row[3]
audio_len = get_audio_seconds(frames)
char_count = len(utt)
if not meta_data.get(char_count):
meta_data[char_count] = {
"data": []
}
meta_data[char_count]["data"].append(
{
"utt": utt,
"frames": frames,
"audio_len": audio_len,
"row": "{}|{}|{}|{}".format(row[0], row[1], row[2], row[3])
}
)
meta_data = append_data_statistics(meta_data)
return meta_data
def get_data_points(meta_data):
x = [char_cnt for char_cnt in meta_data]
y_avg = [meta_data[d]['mean'] for d in meta_data]
y_mode = [meta_data[d]['mode'] for d in meta_data]
y_median = [meta_data[d]['median'] for d in meta_data]
y_std = [meta_data[d]['std'] for d in meta_data]
y_num_samples = [len(meta_data[d]['data']) for d in meta_data]
return {
"x": x,
"y_avg": y_avg,
"y_mode": y_mode,
"y_median": y_median,
"y_std": y_std,
"y_num_samples": y_num_samples
}
def save_training(file_path, meta_data):
rows = []
for char_cnt in meta_data:
data = meta_data[char_cnt]['data']
for d in data:
rows.append(d['row'] + "\n")
random.shuffle(rows)
with open(file_path, 'w+') as f:
for row in rows:
f.write(row)
def plot(meta_data, save_path=None):
save = False
if save_path:
save = True
graph_data = get_data_points(meta_data)
x = graph_data['x']
y_avg = graph_data['y_avg']
y_std = graph_data['y_std']
y_mode = graph_data['y_mode']
y_median = graph_data['y_median']
y_num_samples = graph_data['y_num_samples']
plt.figure()
plt.plot(x, y_avg, 'ro')
plt.xlabel("character lengths", fontsize=30)
plt.ylabel("avg seconds", fontsize=30)
if save:
name = "char_len_vs_avg_secs"
plt.savefig(os.path.join(save_path, name))
plt.figure()
plt.plot(x, y_mode, 'ro')
plt.xlabel("character lengths", fontsize=30)
plt.ylabel("mode seconds", fontsize=30)
if save:
name = "char_len_vs_mode_secs"
plt.savefig(os.path.join(save_path, name))
plt.figure()
plt.plot(x, y_median, 'ro')
plt.xlabel("character lengths", fontsize=30)
plt.ylabel("median seconds", fontsize=30)
if save:
name = "char_len_vs_med_secs"
plt.savefig(os.path.join(save_path, name))
plt.figure()
plt.plot(x, y_std, 'ro')
plt.xlabel("character lengths", fontsize=30)
plt.ylabel("standard deviation", fontsize=30)
if save:
name = "char_len_vs_std"
plt.savefig(os.path.join(save_path, name))
plt.figure()
plt.plot(x, y_num_samples, 'ro')
plt.xlabel("character lengths", fontsize=30)
plt.ylabel("number of samples", fontsize=30)
if save:
name = "char_len_vs_num_samples"
plt.savefig(os.path.join(save_path, name))
def convert_phonemes_Symbols(word): # maps onset (initial) and medial (vowel) jamo to their pronunciation symbols.
    if word == 'ㅂ': # the jamo library splits a syllable into onset/medial/coda, and the later code stores that split as a list;
        return 'p0' # this function is applied only to list indices 0 and 1 (onset and medial).
elif word == 'ㅍ':
return 'ph'
elif word == 'ㅃ':
return 'pp'
elif word == 'ㄷ':
return 't0'
elif word == 'ㅌ':
return 'th'
elif word == 'ㄸ':
return 'tt'
elif word == 'ㄱ':
return 'k0'
elif word == 'ㅋ':
return 'kh'
elif word == 'ㄲ':
return 'kk'
elif word == 'ㅅ':
return 's0'
elif word == 'ㅆ':
return 'ss'
elif word == 'ㅎ':
return 'h0'
elif word == 'ㅈ':
return 'c0'
elif word == 'ㅊ':
return 'ch'
elif word == 'ㅉ':
return 'cc'
elif word == 'ㅁ':
return 'mm'
elif word == 'ㄴ':
return 'nn'
elif word == 'ㄹ':
return 'rr'
    # vowels from here on
elif word == 'ㅣ':
return 'ii'
elif word == 'ㅔ':
return 'ee'
elif word == 'ㅐ':
return 'qq'
elif word == 'ㅏ':
return 'aa'
elif word == 'ㅡ':
return 'xx'
elif word == 'ㅓ':
return 'vv'
elif word == 'ㅜ':
return 'uu'
elif word == 'ㅗ':
return 'oo'
elif word == 'ㅖ':
return 'ye'
elif word == 'ㅒ':
return 'yq'
elif word == 'ㅑ':
return 'ya'
elif word == 'ㅕ':
return 'yv'
elif word == 'ㅠ':
return 'yu'
elif word == 'ㅛ':
return 'yo'
elif word == 'ㅟ':
return 'wi'
elif word == 'ㅚ':
return 'wo'
elif word == 'ㅙ':
return 'wq'
elif word == 'ㅞ':
return 'we'
elif word == 'ㅘ':
return 'wa'
elif word == 'ㅝ':
return 'wv'
elif word == 'ㅢ':
return 'xi'
def convert_phonemes_Symbols_coda(word): # the function above handled onset and medial, so list index 2 (when present) is the coda; applied only when it exists.
    # coda pronunciations below
if word == 'ㅂ':
return 'pf'
elif word == 'ㅍ':
return 'ph'
elif word == 'ㄷ':
return 'tf'
elif word == 'ㅌ':
return 'th'
elif word == 'ㄱ':
return 'kf'
elif word == 'ㅋ':
return 'kh'
elif word == 'ㄲ':
return 'kk'
elif word == 'ㅅ':
return 's0'
elif word == 'ㅆ':
return 'ss'
elif word == 'ㅎ':
return 'h0'
elif word == 'ㅈ':
return 'c0'
elif word == 'ㅊ':
return 'ch'
elif word == 'ㅁ':
return 'mf'
elif word == 'ㄴ':
return 'nf'
elif word == 'ㅇ':
return 'ng'
elif word == 'ㄹ':
return 'll'
elif word == 'ㄳ':
return 'ks'
elif word == 'ㄵ':
return 'nc'
elif word == 'ㄶ':
return 'nh'
elif word == 'ㄺ':
return 'lk'
elif word == 'ㄻ':
return 'lm'
elif word == 'ㄼ':
return 'lb'
elif word == 'ㄽ':
return 'ls'
elif word == 'ㄾ':
return 'lt'
elif word == 'ㄿ':
return 'lp'
elif word == 'ㅀ':
return 'lh'
elif word == 'ㅄ':
return 'ps'
def plot_phonemes(train_path, cmu_dict_path, save_path):
cmudict = CMUDict(cmu_dict_path)
phonemes = {}
with open(train_path, 'r', encoding='utf-8') as f:
data = csv.reader(f, delimiter='|')
phonemes["None"] = 0
for row in data:
            words = row[3].split() # index 3 of each row holds the sentence, so extract that field.
            for word in words: # split the extracted sentence on whitespace and process it word by word.
                '''parse = G2p(word)''' # would convert the word to its actual pronunciation if G2p had imported and installed correctly.
                word=list(word) # turn the word into a list so it can be processed one character at a time.
for i in word :
                    pho = j2hcj(h2j(i)) # decompose each character into onset/medial/coda with jamo and keep the result.
if pho:
indie = list(pho)
                        if indie[0]!= '.'and indie[0] != '?' and indie[0] != '!' and indie[0] != ',': # skip punctuation marks.
                            indie[0]=convert_phonemes_Symbols(indie[0]) # map onset and medial with the matching function defined above.
indie[1]=convert_phonemes_Symbols(indie[1])
if len(indie)==3:
                                indie[2]=convert_phonemes_Symbols_coda(indie[2]) # a list of length 3 means a coda exists; only then convert the coda pronunciation.
for nemes in indie:
                                if phonemes.get(nemes): # store the converted pronunciations in the dictionary.
phonemes[nemes] += 1
print('nemes : ',nemes)
else:
phonemes[nemes] = 1
else:
phonemes["None"] += 1
x, y = [], []
for key in phonemes:
if key != '.'and key != '?' and key != '!' and key != ',':
x.append(key)
y.append(phonemes[key])
plt.figure()
plt.rcParams["figure.figsize"] = (50, 20)
plot = sns.barplot(x, y)
if save_path:
fig = plot.get_figure()
fig.savefig(os.path.join(save_path, "phoneme_dist"))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_file_path', required=True,
help='this is the path to the train.txt file that the preprocess.py script creates'
)
parser.add_argument(
'--save_to', help='path to save charts of data to'
)
parser.add_argument(
'--cmu_dict_path', help='give cmudict-0.7b to see phoneme distribution'
)
args = parser.parse_args()
meta_data = process_meta_data(args.train_file_path)
plt.rcParams["figure.figsize"] = (10, 5)
plot(meta_data, save_path=args.save_to)
if args.cmu_dict_path:
plt.rcParams["figure.figsize"] = (30, 10)
plot_phonemes(args.train_file_path, args.cmu_dict_path, args.save_to)
plt.show()
if __name__ == '__main__':
main()
avg_line_length: 29.671053 | max_line_length: 145 | alphanum_fraction: 0.519557
hexsha: 7ea5d0f7e5e3b1959ee822ecfab0a3b216efcbb2 | size: 10,357 | ext: py | lang: Python
max_stars: path=ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/tunnelheadtrafficendpoint_399e6e14fa13954b413c4572ebd3725e.py, repo=Vibaswan/ixnetwork_restpy, head=239fedc7050890746cbabd71ea1e91c68d9e5cad, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/tunnelheadtrafficendpoint_399e6e14fa13954b413c4572ebd3725e.py, repo=Vibaswan/ixnetwork_restpy, head=239fedc7050890746cbabd71ea1e91c68d9e5cad, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/tunnelheadtrafficendpoint_399e6e14fa13954b413c4572ebd3725e.py, repo=Vibaswan/ixnetwork_restpy, head=239fedc7050890746cbabd71ea1e91c68d9e5cad, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class TunnelHeadTrafficEndPoint(Base):
"""The tunnelHeadTrafficEndPoint helps to configure the IP addresses to be used in the Source IP field in traffic to be sent over the LSPs originating from this Head Range.
The TunnelHeadTrafficEndPoint class encapsulates a list of tunnelHeadTrafficEndPoint resources that are managed by the user.
A list of resources can be retrieved from the server using the TunnelHeadTrafficEndPoint.find() method.
The list can be managed by using the TunnelHeadTrafficEndPoint.add() and TunnelHeadTrafficEndPoint.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'tunnelHeadTrafficEndPoint'
_SDM_ATT_MAP = {
'EndPointType': 'endPointType',
'InsertExplicitTrafficItem': 'insertExplicitTrafficItem',
'InsertIpv6ExplicitNull': 'insertIpv6ExplicitNull',
'IpCount': 'ipCount',
'IpStart': 'ipStart',
}
def __init__(self, parent):
super(TunnelHeadTrafficEndPoint, self).__init__(parent)
@property
def EndPointType(self):
"""
Returns
-------
- str(ipv4 | ipv6 | 17 | 18): IPv4/IPv6 address. It has the same values as of IP Type for traffic item in parent Tail Range.
"""
return self._get_attribute(self._SDM_ATT_MAP['EndPointType'])
@EndPointType.setter
def EndPointType(self, value):
self._set_attribute(self._SDM_ATT_MAP['EndPointType'], value)
@property
def InsertExplicitTrafficItem(self):
"""DEPRECATED
Returns
-------
- bool: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['InsertExplicitTrafficItem'])
@InsertExplicitTrafficItem.setter
def InsertExplicitTrafficItem(self, value):
self._set_attribute(self._SDM_ATT_MAP['InsertExplicitTrafficItem'], value)
@property
def InsertIpv6ExplicitNull(self):
"""
Returns
-------
- bool: This causes an IPv6 Explicit NULL to be inserted as the innermost label in addition to learned label when trying to generate IPv6 traffic over the IPv4 LSP. The purpose of this is to route the traffic to the IPv6 Protocol Stack at the egress for routing towards the IPv6 destination.
"""
return self._get_attribute(self._SDM_ATT_MAP['InsertIpv6ExplicitNull'])
@InsertIpv6ExplicitNull.setter
def InsertIpv6ExplicitNull(self, value):
self._set_attribute(self._SDM_ATT_MAP['InsertIpv6ExplicitNull'], value)
@property
def IpCount(self):
"""
Returns
-------
- number: Allows value greater than or equal to Tunnel Head IP Count (1 by default). This can be used to simulate traffic from multiple source endpoints to be sent over the LSPs originated from the Head Range.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpCount'])
@IpCount.setter
def IpCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['IpCount'], value)
@property
def IpStart(self):
"""
Returns
-------
- str: The Source IP address, one of IPv4 or IPv6, to be used for traffic to be sent over LSPs from the Head End Point.
"""
return self._get_attribute(self._SDM_ATT_MAP['IpStart'])
@IpStart.setter
def IpStart(self, value):
self._set_attribute(self._SDM_ATT_MAP['IpStart'], value)
def update(self, EndPointType=None, InsertExplicitTrafficItem=None, InsertIpv6ExplicitNull=None, IpCount=None, IpStart=None):
"""Updates tunnelHeadTrafficEndPoint resource on the server.
Args
----
- EndPointType (str(ipv4 | ipv6 | 17 | 18)): IPv4/IPv6 address. It has the same values as of IP Type for traffic item in parent Tail Range.
- InsertExplicitTrafficItem (bool): NOT DEFINED
- InsertIpv6ExplicitNull (bool): This causes an IPv6 Explicit NULL to be inserted as the innermost label in addition to learned label when trying to generate IPv6 traffic over the IPv4 LSP. The purpose of this is to route the traffic to the IPv6 Protocol Stack at the egress for routing towards the IPv6 destination.
- IpCount (number): Allows value greater than or equal to Tunnel Head IP Count (1 by default). This can be used to simulate traffic from multiple source endpoints to be sent over the LSPs originated from the Head Range.
- IpStart (str): The Source IP address, one of IPv4 or IPv6, to be used for traffic to be sent over LSPs from the Head End Point.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, EndPointType=None, InsertExplicitTrafficItem=None, InsertIpv6ExplicitNull=None, IpCount=None, IpStart=None):
"""Adds a new tunnelHeadTrafficEndPoint resource on the server and adds it to the container.
Args
----
- EndPointType (str(ipv4 | ipv6 | 17 | 18)): IPv4/IPv6 address. It has the same values as of IP Type for traffic item in parent Tail Range.
- InsertExplicitTrafficItem (bool): NOT DEFINED
- InsertIpv6ExplicitNull (bool): This causes an IPv6 Explicit NULL to be inserted as the innermost label in addition to learned label when trying to generate IPv6 traffic over the IPv4 LSP. The purpose of this is to route the traffic to the IPv6 Protocol Stack at the egress for routing towards the IPv6 destination.
- IpCount (number): Allows value greater than or equal to Tunnel Head IP Count (1 by default). This can be used to simulate traffic from multiple source endpoints to be sent over the LSPs originated from the Head Range.
- IpStart (str): The Source IP address, one of IPv4 or IPv6, to be used for traffic to be sent over LSPs from the Head End Point.
Returns
-------
- self: This instance with all currently retrieved tunnelHeadTrafficEndPoint resources using find and the newly added tunnelHeadTrafficEndPoint resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained tunnelHeadTrafficEndPoint resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, EndPointType=None, InsertExplicitTrafficItem=None, InsertIpv6ExplicitNull=None, IpCount=None, IpStart=None):
"""Finds and retrieves tunnelHeadTrafficEndPoint resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve tunnelHeadTrafficEndPoint resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all tunnelHeadTrafficEndPoint resources from the server.
Args
----
- EndPointType (str(ipv4 | ipv6 | 17 | 18)): IPv4/IPv6 address. It has the same values as of IP Type for traffic item in parent Tail Range.
- InsertExplicitTrafficItem (bool): NOT DEFINED
- InsertIpv6ExplicitNull (bool): This causes an IPv6 Explicit NULL to be inserted as the innermost label in addition to learned label when trying to generate IPv6 traffic over the IPv4 LSP. The purpose of this is to route the traffic to the IPv6 Protocol Stack at the egress for routing towards the IPv6 destination.
- IpCount (number): Allows value greater than or equal to Tunnel Head IP Count (1 by default). This can be used to simulate traffic from multiple source endpoints to be sent over the LSPs originated from the Head Range.
- IpStart (str): The Source IP address, one of IPv4 or IPv6, to be used for traffic to be sent over LSPs from the Head End Point.
Returns
-------
- self: This instance with matching tunnelHeadTrafficEndPoint resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of tunnelHeadTrafficEndPoint data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the tunnelHeadTrafficEndPoint resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
avg_line_length: 52.841837 | max_line_length: 324 | alphanum_fraction: 0.709182
hexsha: 8d52e6fbfa93bf559238b1af7e46e36743f36310 | size: 6,194 | ext: py | lang: Python
max_stars: path=blackmamba/lib/rope/base/oi/type_hinting/providers/docstrings.py, repo=oz90210/blackmamba, head=65c82c8e99028d6fbb57098ce82d0a394df215a0, licenses=["MIT"], count=463, event_min=2015-01-15T08:17:42.000Z, event_max=2022-03-28T15:10:20.000Z
max_issues: path=blackmamba/lib/rope/base/oi/type_hinting/providers/docstrings.py, repo=oz90210/blackmamba, head=65c82c8e99028d6fbb57098ce82d0a394df215a0, licenses=["MIT"], count=52, event_min=2015-01-06T02:43:59.000Z, event_max=2022-03-14T11:15:21.000Z
max_forks: path=blackmamba/lib/rope/base/oi/type_hinting/providers/docstrings.py, repo=oz90210/blackmamba, head=65c82c8e99028d6fbb57098ce82d0a394df215a0, licenses=["MIT"], count=249, event_min=2015-01-07T22:49:49.000Z, event_max=2022-03-18T02:32:06.000Z
content:
"""
Hinting the type using docstring of class/function.
It's an irreplaceable thing if you are using Dependency Injection with passive class:
http://www.martinfowler.com/articles/injection.html
Some code extracted (or based on code) from:
https://github.com/davidhalter/jedi/blob/b489019f5bd5750051122b94cc767df47751ecb7/jedi/evaluate/docstrings.py
Thanks to @davidhalter for this utils under MIT License.
Similar solutions:
- https://www.jetbrains.com/pycharm/help/type-hinting-in-pycharm.html
- https://www.python.org/dev/peps/pep-0484/#type-comments
- http://www.pydev.org/manual_adv_type_hints.html
- https://jedi.readthedocs.org/en/latest/docs/features.html#type-hinting
Discussions:
- https://groups.google.com/d/topic/rope-dev/JlAzmZ83K1M/discussion
- https://groups.google.com/d/topic/rope-dev/LCFNN98vckI/discussion
"""
import re
from rope.base.oi.type_hinting import utils
from rope.base.oi.type_hinting.providers import interfaces
class ParamProvider(interfaces.IParamProvider):
def __init__(self, docstring_parser, resolver):
"""
:type docstring_parser: rope.base.oi.type_hinting.providers.docstrings.IParamParser
:type resolver: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
"""
self._parse_docstring = docstring_parser
self._resolve = resolver
def __call__(self, pyfunc, param_name):
"""
:type pyfunc: rope.base.pyobjectsdef.PyFunction
:type param_name: str
:rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
"""
type_strs = self._parse_docstring(pyfunc.get_doc(), param_name)
if type_strs:
return self._resolve(type_strs[0], pyfunc)
class ReturnProvider(interfaces.IReturnProvider):
def __init__(self, docstring_parser, resolver):
"""
:type docstring_parser: rope.base.oi.type_hinting.providers.docstrings.IReturnParser
:type resolver: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
"""
self._parse_docstring = docstring_parser
self._resolve = resolver
def __call__(self, pyfunc):
"""
:type pyfunc: rope.base.pyobjectsdef.PyFunction
:rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
"""
type_strs = self._parse_docstring(pyfunc.get_doc())
if type_strs:
return self._resolve(type_strs[0], pyfunc)
class AssignmentProvider(interfaces.IAssignmentProvider):
def __init__(self, docstring_parser, resolver):
"""
:type docstring_parser: rope.base.oi.type_hinting.providers.docstrings.IParamParser
:type resolver: rope.base.oi.type_hinting.resolvers.interfaces.IResolver
"""
self._parse_docstring = docstring_parser
self._resolve = resolver
def __call__(self, pyname):
"""
:type pyname: rope.base.pynamesdef.AssignedName
:rtype: rope.base.pyobjects.PyDefinedObject | rope.base.pyobjects.PyObject or None
"""
try:
pyclass, attr_name = utils.get_class_with_attr_name(pyname)
except TypeError:
return
else:
type_strs = self._parse_docstring(pyclass.get_doc(), attr_name)
if type_strs:
return self._resolve(type_strs[0], pyclass)
class IParamParser(object):
def __call__(self, docstring, param_name):
"""
:type docstring: str
:type param_name: str
"""
class IReturnParser(object):
def __call__(self, docstring):
"""
:type docstring: str
"""
class DocstringParamParser(IParamParser):
DOCSTRING_PARAM_PATTERNS = [
r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
r'\s*:param\s+(\w+)\s+%s:[^\n]+', # Sphinx param with type
r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc
]
def __init__(self):
self._strip_rst_role = RSTRoleStrip()
def __call__(self, docstring, param_name):
"""Search `docstring` for type(-s) of `param_name`.
>>> DocstringParamParser()(':type param: int', 'param')
['int']
>>> DocstringParamParser()('@type param: int', 'param')
['int']
>>> DocstringParamParser()(':type param: :class:`threading.Thread`', 'param')
['threading.Thread']
>>> bool(DocstringParamParser()('no document', 'param'))
False
>>> DocstringParamParser()(':param int param: some description', 'param')
['int']
"""
if not docstring:
return []
patterns = [re.compile(p % re.escape(param_name))
for p in self.DOCSTRING_PARAM_PATTERNS]
for pattern in patterns:
match = pattern.search(docstring)
if match:
return [self._strip_rst_role(match.group(1))]
return []
class DocstringReturnParser(IReturnParser):
DOCSTRING_RETURN_PATTERNS = [
re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc
]
def __init__(self):
self._strip_rst_role = RSTRoleStrip()
def __call__(self, docstring):
if not docstring:
return []
for p in self.DOCSTRING_RETURN_PATTERNS:
match = p.search(docstring)
if match:
return [self._strip_rst_role(match.group(1))]
return []
class RSTRoleStrip(object):
RST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
def __call__(self, type_str):
"""
Strip off the part looks like a ReST role in `type_str`.
>>> RSTRoleStrip()(':class:`ClassName`') # strip off :class:
'ClassName'
>>> RSTRoleStrip()(':py:obj:`module.Object`') # works with domain
'module.Object'
>>> RSTRoleStrip()('ClassName') # do nothing when not ReST role
'ClassName'
See also:
http://sphinx-doc.org/domains.html#cross-referencing-python-objects
"""
match = self.RST_ROLE_PATTERN.match(type_str)
if match:
return match.group(1)
else:
return type_str
avg_line_length: 31.927835 | max_line_length: 109 | alphanum_fraction: 0.633193
hexsha: d951d27415fa7fb0a19cb64d651ff1c8f6d9d13c | size: 223 | ext: py | lang: Python
max_stars: path=buildbot/osuosl/master/config/__init__.py, repo=antiagainst/llvm-zorg, head=a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918, licenses=["Apache-2.0"], count=2, event_min=2021-12-06T10:53:27.000Z, event_max=2021-12-06T11:01:09.000Z
max_issues: path=buildbot/osuosl/master/config/__init__.py, repo=antiagainst/llvm-zorg, head=a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=buildbot/osuosl/master/config/__init__.py, repo=antiagainst/llvm-zorg, head=a5b58cdd800d0d45b1bdd1f7fe058db6acbfd918, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
# Load local options.
import os
import ConfigParser
options = ConfigParser.RawConfigParser()
options.read(os.path.join(os.path.dirname(__file__), 'local.cfg'))
import builders
import schedulers
import slaves
import status
avg_line_length: 20.272727 | max_line_length: 66 | alphanum_fraction: 0.807175
hexsha: f14dd5e97932e63e82b61d3cdefaf863983a11e2 | size: 3,070 | ext: py | lang: Python
max_stars: path=scripts/tar2db.py, repo=c0mix/firmadyne, head=137f25e02ecc47f5b3c4dd512e0194658dbab929, licenses=["MIT"], count=1, event_min=2019-03-19T06:30:58.000Z, event_max=2019-03-19T06:30:58.000Z
max_issues: path=scripts/tar2db.py, repo=c0mix/firmadyne, head=137f25e02ecc47f5b3c4dd512e0194658dbab929, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=scripts/tar2db.py, repo=c0mix/firmadyne, head=137f25e02ecc47f5b3c4dd512e0194658dbab929, licenses=["MIT"], count=1, event_min=2020-05-18T08:32:24.000Z, event_max=2020-05-18T08:32:24.000Z
content:
#!/usr/bin/env python
import tarfile
import getopt
import sys
import re
import hashlib
import psycopg2
def getFileHashes(infile):
t = tarfile.open(infile)
files = list()
links = list()
for f in t.getmembers():
if f.isfile():
# we use f.name[1:] to get rid of the . at the beginning of the path
files.append((f.name[1:], hashlib.md5(t.extractfile(f).read()).hexdigest(),
f.uid, f.gid, f.mode))
elif f.issym():
links.append((f.name[1:], f.linkpath))
return (files, links)
def getOids(objs, cur):
# hashes ... all the hashes in the tar file
hashes = [x[1] for x in objs]
hashes_str = ",".join(["""'%s'""" % x for x in hashes])
query = """SELECT id,hash FROM object WHERE hash IN (%s)"""
cur.execute(query % hashes_str)
res = [(int(x), y) for (x, y) in cur.fetchall()]
existingHashes = [x[1] for x in res]
missingHashes = set(hashes).difference(set(existingHashes))
newObjs = createObjects(missingHashes, cur)
res += newObjs
result = dict([(y, x) for (x, y) in res])
return result
def createObjects(hashes, cur):
query = """INSERT INTO object (hash) VALUES (%(hash)s) RETURNING id"""
res = list()
for h in set(hashes):
cur.execute(query, {'hash':h})
oid = int(cur.fetchone()[0])
res.append((oid, h))
return res
def insertObjectToImage(iid, files2oids, links, cur):
query = """INSERT INTO object_to_image (iid, oid, filename, regular_file, uid, gid, permissions) VALUES (%(iid)s, %(oid)s, %(filename)s, %(regular_file)s, %(uid)s, %(gid)s, %(mode)s)"""
cur.executemany(query, [{'iid': iid, 'oid' : x[1], 'filename' : x[0][0],
'regular_file' : True, 'uid' : x[0][1],
'gid' : x[0][2], 'mode' : x[0][3]} \
for x in files2oids])
cur.executemany(query, [{'iid': iid, 'oid' : 1, 'filename' : x[0],
'regular_file' : False, 'uid' : None,
'gid' : None, 'mode' : None} \
for x in links])
def process(iid, infile):
dbh = psycopg2.connect(database="firmware", user="firmadyne",
password="firmadyne", host="127.0.0.1")
cur = dbh.cursor()
(files, links) = getFileHashes(infile)
oids = getOids(files, cur)
fdict = dict([(h, (filename, uid, gid, mode)) \
for (filename, h, uid, gid, mode) in files])
file2oid = [(fdict[h], oid) for (h, oid) in oids.iteritems()]
insertObjectToImage(iid, file2oid, links, cur)
dbh.commit()
dbh.close()
def main():
infile = iid = None
opts, argv = getopt.getopt(sys.argv[1:], "f:i:")
for k, v in opts:
if k == '-i':
iid = int(v)
if k == '-f':
infile = v
if infile and not iid:
m = re.search(r"(\d+)\.tar\.gz", infile)
if m:
iid = int(m.group(1))
process(iid, infile)
if __name__ == "__main__":
main()
avg_line_length: 30.39604 | max_line_length: 189 | alphanum_fraction: 0.537459
hexsha: d57298a3c86957435ec1afa28686f1ac72e3aae0 | size: 391 | ext: py | lang: Python
max_stars: path=wizards-django-rest/wizards/wizards/wsgi.py, repo=tomyy4/wizards-project, head=be70ee3321427a6ea6991ebda7dafc638b4cfbab, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=wizards-django-rest/wizards/wizards/wsgi.py, repo=tomyy4/wizards-project, head=be70ee3321427a6ea6991ebda7dafc638b4cfbab, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=wizards-django-rest/wizards/wizards/wsgi.py, repo=tomyy4/wizards-project, head=be70ee3321427a6ea6991ebda7dafc638b4cfbab, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
"""
WSGI config for wizards project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wizards.settings')
application = get_wsgi_application()
avg_line_length: 23 | max_line_length: 78 | alphanum_fraction: 0.785166
hexsha: 0979bffe1112b0856c2bb778b672c70e0ffd62bc | size: 466 | ext: py | lang: Python
max_stars: path=hc/api/migrations/0030_check_last_ping_body.py, repo=opsct/healthchecks, head=069bc9b735c0473aed9946104ab85238d065bea1, licenses=["BSD-3-Clause"], count=null, event_min=null, event_max=null
max_issues: path=hc/api/migrations/0030_check_last_ping_body.py, repo=opsct/healthchecks, head=069bc9b735c0473aed9946104ab85238d065bea1, licenses=["BSD-3-Clause"], count=5, event_min=2021-04-08T21:56:59.000Z, event_max=2022-02-10T12:51:07.000Z
max_forks: path=hc/api/migrations/0030_check_last_ping_body.py, repo=fictional-tribble/healthchecks, head=3a8056f0e22a685ec2223c7d2363c0c131fb80d3, licenses=["BSD-3-Clause"], count=1, event_min=2021-05-21T19:50:52.000Z, event_max=2021-05-21T19:50:52.000Z
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-08 18:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0029_auto_20170507_1251'),
]
operations = [
migrations.AddField(
model_name='check',
name='last_ping_body',
field=models.CharField(blank=True, max_length=1000),
),
]
avg_line_length: 22.190476 | max_line_length: 64 | alphanum_fraction: 0.622318
hexsha: dc08694c686320ff8514efca28d0bafb6bdbbb88 | size: 7,781 | ext: py | lang: Python
max_stars: path=tests/test_acid.py, repo=sdjespersen/hszinc, head=975a37360518925c9532046c4464d79e49830c37, licenses=["BSD-2-Clause"], count=null, event_min=null, event_max=null
max_issues: path=tests/test_acid.py, repo=sdjespersen/hszinc, head=975a37360518925c9532046c4464d79e49830c37, licenses=["BSD-2-Clause"], count=null, event_min=null, event_max=null
max_forks: path=tests/test_acid.py, repo=sdjespersen/hszinc, head=975a37360518925c9532046c4464d79e49830c37, licenses=["BSD-2-Clause"], count=null, event_min=null, event_max=null
content:
# -*- coding: utf-8 -*-
# Zinc dumping and parsing module
# (C) 2016 VRT Systems
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import hszinc
from .pint_enable import to_pint
import datetime
import pytz
import random
import string
import six
STR_CHARSET = string.ascii_letters + string.digits + '\n\r\t\f\b'
def gen_random_const():
return random.choice([True,False,None,hszinc.MARKER,hszinc.REMOVE])
def gen_random_ref():
# Generate a randomised reference.
name = gen_random_str(charset=\
string.ascii_letters + string.digits\
+ '_:-.~')
if random.choice([True,False]):
value = gen_random_str()
else:
value = None
return hszinc.Ref(name, value)
def gen_random_bin():
# Generate a randomized binary
return hszinc.Bin(random.choice([
'text/plain',
'text/html',
'text/zinc',
'application/json',
'application/octet-stream',
'image/png',
'image/jpeg',
]))
def gen_random_uri():
return hszinc.Uri(gen_random_str())
def gen_random_str(min_length=1, max_length=20,charset=STR_CHARSET):
# Generate a random 20-character string
return ''.join([random.choice(charset) for c in range(0,
random.randint(min_length, max_length))])
def gen_random_date():
# This might generate an invalid date, we keep trying until we get one.
while True:
try:
return datetime.date(random.randint(1,3000),
random.randint(1,12), random.randint(1,31))
except ValueError:
pass
def gen_random_time():
return datetime.time(random.randint(0,23), random.randint(0,59),
random.randint(0,59), random.randint(0,999999))
def gen_random_date_time():
# Pick a random timezone
tz_name = random.choice(list(hszinc.zoneinfo.get_tz_map().keys()))
tz = hszinc.zoneinfo.timezone(tz_name)
return tz.localize(datetime.datetime.combine(
gen_random_date(), gen_random_time()))
def gen_random_coordinate():
return hszinc.Coordinate(\
gen_random_num(360)-180.0,
gen_random_num(360)-180.0)
def gen_random_num(scale=1000,digits=2):
return round(random.random()*scale, digits)
def gen_random_quantity():
return hszinc.Quantity(gen_random_num(),
to_pint('percent'))
RANDOM_TYPES = [
gen_random_const, gen_random_ref, gen_random_bin, gen_random_uri,
gen_random_str, gen_random_date, gen_random_time, gen_random_date_time,
gen_random_coordinate, gen_random_num, gen_random_quantity
]
def gen_random_scalar():
return random.choice(RANDOM_TYPES)()
def gen_random_name(existing=None):
while True:
meta = random.choice(string.ascii_lowercase) \
+ gen_random_str(min_length=0, max_length=7, \
charset=string.ascii_letters + string.digits)
if (existing is None) or (meta not in existing):
return meta
def gen_random_meta():
meta = hszinc.MetadataObject()
names = set()
for n in range(0, random.randint(1,5)):
name = gen_random_name(existing=names)
value = gen_random_scalar()
meta[name] = value
return meta
def dump_grid(g):
print ('Version: %s' % g.version)
print ('Metadata:')
for k, v in g.metadata.items():
print (' %s = %r' % (k, v))
print ('Columns:')
for c, meta in g.column.items():
print (' %s:' % c)
for k, v in g.column[c].items():
print (' %s = %r' % (k, v))
print ('Rows:')
for row in g:
print ('---')
for c, v in row.items():
print (' %s = %r' % (c, v))
def approx_check(v1, v2):
# Check types match
if not (isinstance(v1, six.string_types) \
and isinstance(v2, six.string_types)):
assert type(v1) == type(v2), '%s != %s' % (type(v1), type(v2))
if isinstance(v1, datetime.time):
assert v1.replace(microsecond=0) == v2.replace(microsecond=0)
approx_check(v1.microsecond, v2.microsecond)
elif isinstance(v1, datetime.datetime):
assert v1.tzinfo == v2.tzinfo
assert v1.date() == v2.date()
approx_check(v1.time(), v2.time())
elif isinstance(v1, hszinc.Quantity):
assert v1.unit == v2.unit
approx_check(v1.value, v2.value)
elif isinstance(v1, hszinc.Coordinate):
approx_check(v1.latitude, v2.latitude)
approx_check(v1.longitude, v2.longitude)
elif isinstance(v1, float):
assert abs(v1 - v2) < 0.000001
else:
assert v1 == v2, '%r != %r' % (v1, v2)
def try_dump_parse():
# Generate a randomised grid of values and try parsing it back.
ref_grid = hszinc.Grid()
ref_grid.metadata.extend(gen_random_meta())
# Randomised columns
for n in range(0, random.randint(1,5)):
col_name = gen_random_name(existing=ref_grid.column)
if random.choice([True,False]):
ref_grid.column[col_name] = gen_random_meta()
else:
ref_grid.column[col_name] = {}
# Randomised rows
for n in range(0, random.randint(0,20)):
row = {}
for c in ref_grid.column.keys():
if random.choice([True,False]):
row[c] = gen_random_scalar()
ref_grid.append(row)
try:
# Dump the randomised grid to a string
grid_str = hszinc.dump(ref_grid)
except:
# Dump some detail about the grid
print ('Failed to dump grid.')
dump_grid(ref_grid)
raise
# Parse the grid string
try:
grid_list = hszinc.parse(grid_str)
except:
print ('Failed to parse dumped grid')
dump_grid(ref_grid)
print ('--- Parsed string ---')
print (grid_str)
raise
assert len(grid_list) == 1
parsed_grid = grid_list.pop(0)
# Check metadata matches
try:
assert list(ref_grid.metadata.keys()) \
== list(parsed_grid.metadata.keys())
for key in ref_grid.metadata.keys():
approx_check(ref_grid.metadata[key], parsed_grid.metadata[key])
except:
print ('Mismatch in metadata')
print ('Reference grid')
dump_grid(ref_grid)
print ('Parsed grid')
dump_grid(parsed_grid)
raise
try:
# Check column matches
assert list(ref_grid.column.keys()) \
== list(parsed_grid.column.keys())
except:
print ('Mismatch in column')
print ('Reference grid')
dump_grid(ref_grid)
print ('Parsed grid')
dump_grid(parsed_grid)
raise
for col in ref_grid.column.keys():
try:
for key in ref_grid.column[col].keys():
approx_check(ref_grid.column[col][key], \
parsed_grid.column[col][key])
except:
print ('Mismatch in metadata for column %s' % col)
print ('Reference: %r' % ref_grid.column[col])
print ('Parsed: %r' % parsed_grid.column[col])
raise
try:
# Check row matches
assert len(ref_grid) == len(parsed_grid)
except:
print ('Mismatch in row count')
print ('Reference grid')
dump_grid(ref_grid)
print ('Parsed grid')
dump_grid(parsed_grid)
for (ref_row, parsed_row) in zip(ref_grid, parsed_grid):
try:
for col in ref_grid.column.keys():
approx_check(ref_row.get(col), parsed_row.get(col))
except:
print ('Mismatch in row')
print ('Reference:')
print (ref_row)
print ('Parsed:')
print (parsed_row)
raise
def test_loopback():
for trial in range(0, 10):
try_dump_parse()
avg_line_length: 30.633858 | max_line_length: 75 | alphanum_fraction: 0.600565
hexsha: 04a0cd2c6a105845feac3b9ccccd17cc8882d3df | size: 5,925 | ext: py | lang: Python
max_stars: path=PoseDet/keypoints_nms.py, repo=IIGROUP/PoseDet-Fast-Multi-Person-Pose-Estimation-Using-Pose-Embedding, head=edb1b50cb697e7eea576cd1d69d5c3c097234e58, licenses=["MIT"], count=10, event_min=2021-07-31T06:25:06.000Z, event_max=2022-02-10T07:49:59.000Z
max_issues: path=PoseDet/keypoints_nms.py, repo=IIGROUP/PoseDet-Fast-Multi-Person-Pose-Estimation-Using-Pose-Embedding, head=edb1b50cb697e7eea576cd1d69d5c3c097234e58, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=PoseDet/keypoints_nms.py, repo=IIGROUP/PoseDet-Fast-Multi-Person-Pose-Estimation-Using-Pose-Embedding, head=edb1b50cb697e7eea576cd1d69d5c3c097234e58, licenses=["MIT"], count=1, event_min=2021-11-11T07:02:34.000Z, event_max=2021-11-11T07:02:34.000Z
content:
import torch
import numpy as np  # np.ndarray is referenced in oks_nms below but numpy was never imported
# from mmdet.ops.nms import batched_nms
# from mmdet.ops.nms import nms_ext
import time
def keypoints_nms(multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None,
multi_poses=None,
num_points=9,
):
    # multi_poses: Tensor[N, num_points*3+1]; the trailing 1 is the score channel
num_classes = multi_scores.size(1) - 1
pointsets = multi_poses[:, None].expand(-1, num_classes, num_points*3)
scores = multi_scores[:, :-1]
valid_mask = scores > score_thr
pointsets = pointsets[valid_mask]
scores = scores[valid_mask]
labels = valid_mask.nonzero()[:, 1]
if pointsets.numel() == 0:
pointsets = multi_poses.new_zeros((0, num_points*3 + 1))
labels = pointsets.new_zeros((0, ), dtype=torch.long)
return pointsets, labels
dets, keep = oks_nms(
torch.cat([pointsets, scores[:, None]], -1), iou_thr=nms_cfg['iou_thr'],num_points=num_points)
if max_num > 0:
dets = dets[:max_num]
keep = keep[:max_num]
    # dets: Tensor[N, num_points*3 + 1(score)]
return dets, labels[keep]
def oks_nms(dets, iou_thr, device_id=None,num_points=17):
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else f'cuda:{device_id}'
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError('dets must be either a Tensor or numpy array, '
f'but got {type(dets)}')
# execute cpu or cuda nms
if dets_th.shape[0] == 0:
inds = dets_th.new_zeros(0, dtype=torch.long)
else:
# torch.cuda.synchronize()
# t1 = time.time()
inds = _oks_nms(dets_th, iou_thr, num_points)
# import os
# import numpy as np
# dets_np = dets_th.detach().cpu().numpy()
# for i in range(501):
# path = './debug_img2/%d.npy'%i
# if not os.path.exists(path):
# np.save(path, dets_np)
# break
# inds = _oks_fast_nms(dets_th, iou_thr)
# torch.cuda.synchronize()
# t2 = time.time()
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def _oks_nms(dets, thresh, num_points):
if num_points==17:
sigmas = torch.tensor([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89], device=dets.device)/10.0
pointsets = dets[:,:-1]
pointsets = pointsets.view((pointsets.size()[0], -1, 3))
pointsets = pointsets[:,:,:2]
elif num_points==15:
sigmas = torch.tensor([.79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .79, .79], device=dets.device)/10.0
        pointsets = dets[:,:-4] # the last point is not used in the OKS computation
pointsets = pointsets.view((pointsets.size()[0], -1, 3))
pointsets = pointsets[:,:,:2]
vars = (sigmas * 2)**2
vars = vars.unsqueeze(0).unsqueeze(0) #[1, 1, 17]
w_all = torch.max(pointsets[:,:,0], dim=1)[0] - torch.min(pointsets[:,:,0], dim=1)[0]
h_all = torch.max(pointsets[:,:,1], dim=1)[0] - torch.min(pointsets[:,:,1], dim=1)[0]
areas = w_all*h_all
areas = areas.clamp(32*32)
areas = (areas.unsqueeze(0)+areas.unsqueeze(1))/2
areas = areas.unsqueeze(-1) #[points_num, points_num, 1]
distance = ((pointsets.unsqueeze(0) - pointsets.unsqueeze(1))**2).sum(dim=-1) # [m, m, points_num]
oks = torch.exp(-distance/vars/areas).mean(dim=-1)
scores = dets[:,-1]
keep = []
index = scores.sort(descending=True)[1]
while index.size()[0] >0:
        i = index[0] # the first element is always the highest-scoring one, so keep it directly
keep.append(i)
if index.size()[0] == 1:
break
oks_selected = torch.index_select(oks[i], 0, index)
idx = torch.where(oks_selected<=thresh)[0]
index = index[idx]
keep = torch.stack(keep)
return keep
def _matrix_oks_nms(dets, thresh, num_points):
if num_points==17:
sigmas = torch.tensor([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89], device=dets.device)/10.0
pointsets = dets[:,:-1]
pointsets = pointsets.view((pointsets.size()[0], -1, 3))
pointsets = pointsets[:,:,:2]
elif num_points==15:
sigmas = torch.tensor([.79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .79, .79], device=dets.device)/10.0
        pointsets = dets[:,:-4] # the last point is not used in the OKS computation
pointsets = pointsets.view((pointsets.size()[0], -1, 3))
pointsets = pointsets[:,:,:2]
vars = (sigmas * 2)**2
vars = vars.unsqueeze(0).unsqueeze(0) #[1, 1, 17]
w_all = torch.max(pointsets[:,:,0], dim=1)[0] - torch.min(pointsets[:,:,0], dim=1)[0]
h_all = torch.max(pointsets[:,:,1], dim=1)[0] - torch.min(pointsets[:,:,1], dim=1)[0]
areas = w_all*h_all
areas = areas.clamp(32*32)
areas = (areas.unsqueeze(0)+areas.unsqueeze(1))/2
areas = areas.unsqueeze(-1) #[points_num, points_num, 1]
distance = ((pointsets.unsqueeze(0) - pointsets.unsqueeze(1))**2).sum(dim=-1) # [m, m, points_num]
oks = torch.exp(-distance/vars/areas).mean(dim=-1)
scores = dets[:,-1]
keep = []
index = scores.sort(descending=True)[1]
while index.size()[0] >0:
        i = index[0]  # the first index is always the highest-scoring detection, so keep it directly
keep.append(i)
if index.size()[0] == 1:
break
oks_selected = torch.index_select(oks[i], 0, index)
idx = torch.where(oks_selected<=thresh)[0]
index = index[idx]
keep = torch.stack(keep)
return keep
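# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): runs oks_nms on a
# batch of random COCO-style detections.  The assumed layout is
# [N, num_points*3 + 1]: (x, y, visibility) per keypoint followed by a single
# confidence score column; the tensor values below are synthetic.
if __name__ == '__main__':
    num_dets = 8
    fake_kpts = torch.rand(num_dets, 17, 3) * 100    # random keypoint triplets
    fake_scores = torch.rand(num_dets, 1)            # one confidence per detection
    fake_dets = torch.cat([fake_kpts.reshape(num_dets, -1), fake_scores], dim=1)
    kept_dets, kept_inds = oks_nms(fake_dets, iou_thr=0.9, num_points=17)
    print(kept_dets.shape, kept_inds)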
| 34.447674 | 142 | 0.566414 |
75b8c0d85ddf99c515c96fcc7a1bd13c32a99b6d | 5,663 | py | Python | echopype/convert/set_groups_base.py | imranmaj/echopype | b2b51334fad78086f0ccb52cd4ddba4ab4ecf40c | ["Apache-2.0"] | null | null | null | echopype/convert/set_groups_base.py | imranmaj/echopype | b2b51334fad78086f0ccb52cd4ddba4ab4ecf40c | ["Apache-2.0"] | null | null | null | echopype/convert/set_groups_base.py | imranmaj/echopype | b2b51334fad78086f0ccb52cd4ddba4ab4ecf40c | ["Apache-2.0"] | null | null | null |
import pynmea2
from datetime import datetime as dt
import xarray as xr
import numpy as np
import zarr
from _echopype_version import version as ECHOPYPE_VERSION
COMPRESSION_SETTINGS = {
'netcdf4': {'zlib': True, 'complevel': 4},
'zarr': {'compressor': zarr.Blosc(cname='zstd', clevel=3, shuffle=2)}
}
DEFAULT_CHUNK_SIZE = {
'range_bin': 25000,
'ping_time': 2500
}
class SetGroupsBase:
"""Base class for saving groups to netcdf or zarr from echosounder data files.
"""
def __init__(self, parser_obj, input_file, output_path, sonar_model=None,
engine='zarr', compress=True, overwrite=True, params=None):
self.parser_obj = parser_obj # parser object ParseEK60/ParseAZFP/etc...
        self.sonar_model = sonar_model  # allows sonar models other than AZFP/EK60/EK80 to still be saved
self.input_file = input_file
self.output_path = output_path
self.engine = engine
self.compress = compress
self.overwrite = overwrite
self.ui_param = params
if not self.compress:
self.compression_settings = None
else:
self.compression_settings = COMPRESSION_SETTINGS[self.engine]
# TODO: change the set_XXX methods to return a dataset to be saved in the overarching save method
def set_toplevel(self, sonar_model, date_created=None) -> xr.Dataset:
"""Set the top-level group.
"""
# Collect variables
tl_dict = {'conventions': 'CF-1.7, SONAR-netCDF4-1.0, ACDD-1.3',
'keywords': sonar_model,
'sonar_convention_authority': 'ICES',
'sonar_convention_name': 'SONAR-netCDF4',
'sonar_convention_version': '1.0',
'summary': '',
'title': '',
'date_created': np.datetime_as_string(date_created, 's') + 'Z',
'survey_name': self.ui_param['survey_name']}
# Save
ds = xr.Dataset()
ds = ds.assign_attrs(tl_dict)
return ds
def set_provenance(self) -> xr.Dataset:
"""Set the Provenance group.
"""
# Collect variables
prov_dict = {'conversion_software_name': 'echopype',
'conversion_software_version': ECHOPYPE_VERSION,
'conversion_time': dt.utcnow().isoformat(timespec='seconds') + 'Z', # use UTC time
'src_filenames': self.input_file}
# Save
ds = xr.Dataset()
ds = ds.assign_attrs(prov_dict)
return ds
def set_env(self) -> xr.Dataset:
"""Set the Environment group.
"""
pass
def set_sonar(self) -> xr.Dataset:
"""Set the Sonar group.
"""
pass
def set_beam(self) -> xr.Dataset:
"""Set the Beam group.
"""
pass
def set_platform(self) -> xr.Dataset:
"""Set the Platform group.
"""
pass
def set_nmea(self) -> xr.Dataset:
"""Set the Platform/NMEA group.
"""
# Save nan if nmea data is not encoded in the raw file
if len(self.parser_obj.nmea['nmea_string']) != 0:
# Convert np.datetime64 numbers to seconds since 1900-01-01
# due to xarray.to_netcdf() error on encoding np.datetime64 objects directly
time = (self.parser_obj.nmea['timestamp'] -
np.datetime64('1900-01-01T00:00:00')) / np.timedelta64(1, 's')
raw_nmea = self.parser_obj.nmea['nmea_string']
else:
time = [np.nan]
raw_nmea = [np.nan]
ds = xr.Dataset(
{'NMEA_datagram': (['location_time'], raw_nmea,
{'long_name': 'NMEA datagram'})
},
coords={'location_time': (['location_time'], time,
{'axis': 'T',
'calendar': 'gregorian',
'long_name': 'Timestamps for NMEA datagrams',
'standard_name': 'time',
'units': 'seconds since 1900-01-01'})},
attrs={'description': 'All NMEA sensor datagrams'})
return ds
def set_vendor(self) -> xr.Dataset:
"""Set the Vendor group.
"""
pass
# TODO: move this to be part of parser as it is not a "set" operation
def _parse_NMEA(self):
"""Get the lat and lon values from the raw nmea data"""
messages = [string[3:6] for string in self.parser_obj.nmea['nmea_string']]
idx_loc = np.argwhere(np.isin(messages,
self.ui_param['nmea_gps_sentence'])).squeeze()
if idx_loc.size == 1: # in case of only 1 matching message
idx_loc = np.expand_dims(idx_loc, axis=0)
nmea_msg = [pynmea2.parse(self.parser_obj.nmea['nmea_string'][x]) for x in idx_loc]
lat = np.array([x.latitude if hasattr(x, 'latitude') else np.nan
for x in nmea_msg]) if nmea_msg else [np.nan]
lon = np.array([x.longitude if hasattr(x, 'longitude') else np.nan
for x in nmea_msg]) if nmea_msg else [np.nan]
msg_type = np.array([x.sentence_type if hasattr(x, 'sentence_type') else np.nan
for x in nmea_msg]) if nmea_msg else [np.nan]
location_time = (np.array(self.parser_obj.nmea['timestamp'])[idx_loc] -
np.datetime64('1900-01-01T00:00:00')) / np.timedelta64(1, 's') if nmea_msg else [np.nan]
return location_time, msg_type, lat, lon
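# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): shows how a concrete
# converter might subclass SetGroupsBase and build the top-level and
# Provenance datasets.  The parser object, file names and survey name below
# are hypothetical placeholders, not real echopype objects.
if __name__ == '__main__':
    class _DummySetGroups(SetGroupsBase):
        """Minimal subclass; the set_* hooks are left as the base no-ops."""
    grp = _DummySetGroups(
        parser_obj=None,                  # assumed: no parser needed for these two groups
        input_file='example.raw',         # hypothetical input file
        output_path='example.zarr',       # hypothetical output path
        sonar_model='EK60',
        params={'survey_name': 'demo survey'},
    )
    print(grp.set_toplevel('EK60', date_created=np.datetime64('2021-01-01')))
    print(grp.set_provenance())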
| 39.880282 | 113 | 0.562246 |
8d03b354b2b75149947d322255d7e6efa648ca13 | 139 | py | Python | ders2/ders2.py | mustafacavusoglu/Python-openCV | 5d7c9ef883b427ba60e3d83671ddea9f16b01ac9 | ["MIT"] | null | null | null | ders2/ders2.py | mustafacavusoglu/Python-openCV | 5d7c9ef883b427ba60e3d83671ddea9f16b01ac9 | ["MIT"] | null | null | null | ders2/ders2.py | mustafacavusoglu/Python-openCV | 5d7c9ef883b427ba60e3d83671ddea9f16b01ac9 | ["MIT"] | null | null | null |
import cv2
import numpy as np
# Load the image from disk, display it in a window, and wait for a key press
# before destroying the window.
resim = cv2.imread("ati.JPG")
cv2.imshow("resim", resim)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 12.636364 | 30 | 0.676259 |
94b29f2f664176dc085b1d485dbf0c68b0b1763e | 14,890 | py | Python | apps/data_cube_manager/tasks.py | SANSA-DESA/data_cube_ui | 478a9bcce376a1bb347426df9e6520ddbafe907c | ["Apache-2.0"] | null | null | null | apps/data_cube_manager/tasks.py | SANSA-DESA/data_cube_ui | 478a9bcce376a1bb347426df9e6520ddbafe907c | ["Apache-2.0"] | null | null | null | apps/data_cube_manager/tasks.py | SANSA-DESA/data_cube_ui | 478a9bcce376a1bb347426df9e6520ddbafe907c | ["Apache-2.0"] | null | null | null |
from django.conf import settings
from django.db import connections
from django.forms.models import model_to_dict
from django.db.models import Q
import celery
from celery.task import task
from celery import chain, group, chord
from celery.utils.log import get_task_logger
from celery.decorators import periodic_task
from celery.task.schedules import crontab
from datacube.index import index_connect
from datacube.executor import SerialExecutor, get_executor
from datacube.config import LocalConfig
from datacube.scripts import ingest
import uuid
import os
import configparser
from glob import glob
import shutil
from apps.data_cube_manager.models import (Dataset, DatasetType, DatasetSource, DatasetLocation, IngestionRequest,
IngestionDetails)
from apps.data_cube_manager.templates.bulk_downloader import base_downloader_script, static_script
from data_cube_ui.utils_sansa_desa import SansaDesaDataAccessApi
logger = get_task_logger(__name__)
class IngestionBase(celery.Task):
"""Serves as a base class for ingestion tasks"""
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Onfailure call for celery tasks
all tasks should have a kwarg 'ingestion_request_id' that can be used to 'get' the model
from the app.
"""
request_id = kwargs.get('ingestion_request_id')
try:
request = IngestionRequest.objects.get(pk=request_id)
request.update_status(
"ERROR",
"There was an unhandled exception during ingestion. Did you change the src_varname of any measurement?")
delete_ingestion_request.delay(ingestion_request_id=request_id)
cmd = "dropdb -U dc_user -h {} {}".format(settings.MASTER_NODE, request.get_database_name())
os.system(cmd)
except IngestionRequest.DoesNotExist:
pass
def on_success(self, retval, task_id, args, kwargs):
""""""
pass
@periodic_task(
name="data_cube_manager.get_data_cube_details",
#run_every=(30.0),
run_every=(crontab(hour=0, minute=0)),
ignore_result=True)
def update_data_cube_details(ingested_only=True):
dataset_types = DatasetType.objects.using('agdc').filter(
Q(definition__has_keys=['managed']) & Q(definition__has_keys=['measurements']))
dc = SansaDesaDataAccessApi()
for dataset_type in dataset_types:
ingestion_details, created = IngestionDetails.objects.get_or_create(
dataset_type_ref=dataset_type.id,
product=dataset_type.name,
platform=dataset_type.metadata['platform']['code'])
ingestion_details.update_with_query_metadata(dc.get_datacube_metadata(dataset_type.name))
dc.close()
@task(name="data_cube_manager.run_ingestion")
def run_ingestion(ingestion_definition):
"""Kick off the standard system database ingestion process using a user defined configuration
Args:
ingestion_definition: dict representing a Data Cube ingestion def produced using the utils func.
Returns:
The primary key of the new dataset type.
"""
conf_path = settings.DATACUBE_CONFIG_PATH
index = index_connect(local_config=LocalConfig.find([conf_path]))
source_type, output_type = ingest.make_output_type(index, ingestion_definition)
ingestion_work.delay(output_type, source_type, ingestion_definition)
index.close()
return output_type.id
@task(name="data_cube_manager.ingestion_work")
def ingestion_work(output_type, source_type, ingestion_definition):
"""Run the ingestion process for a user defined configuration
Args:
output_type, source_type: types produced by ingest.make_output_type
ingestion_definition: dict representing a Data Cube ingestion def produced using the utils func.
"""
conf_path = settings.DATACUBE_CONFIG_PATH
index = index_connect(local_config=LocalConfig.find([conf_path]))
tasks = ingest.create_task_list(index, output_type, None, source_type, ingestion_definition)
# this is a dry run
# paths = [ingest.get_filename(ingestion_definition, task['tile_index'], task['tile'].sources) for task in tasks]
# ingest.check_existing_files(paths)
# this actually ingests stuff
successful, failed = ingest.process_tasks(index, ingestion_definition, source_type, output_type, tasks, 3200,
get_executor(None, None))
index.close()
return 0
@task(name="data_cube_manager.ingestion_on_demand", base=IngestionBase, queue="data_cube_manager")
def ingestion_on_demand(ingestion_request_id=None):
"""Kick off the ingestion on demand/active subset process
Creates a Celery canvas that handles the full ingestion process.
Args:
ingestion_request_id: pk of a models.IngestionRequest obj.
"""
ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)
ingestion_request.update_status("WAIT", "Creating base Data Cube database...")
ingestion_pipeline = (init_db.si(ingestion_request_id=ingestion_request_id) |
add_source_datasets.si(ingestion_request_id=ingestion_request_id) |
ingest_subset.si(ingestion_request_id=ingestion_request_id) |
prepare_output.si(ingestion_request_id=ingestion_request_id))()
@task(name="data_cube_manager.init_db", base=IngestionBase, queue="data_cube_manager")
def init_db(ingestion_request_id=None):
"""Creates a new database and initializes it with the standard agdc schema
    Creates a new database named after the user via a psql call and uses the agdc api
    to initialize the schema.
"""
ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)
cmd = "createdb -U dc_user -h {} {}".format(settings.MASTER_NODE, ingestion_request.get_database_name())
os.system(cmd)
config = get_config(ingestion_request.get_database_name())
index = index_connect(local_config=config, validate_connection=False)
try:
index.init_db(with_default_types=True, with_permissions=True)
index.metadata_types.check_field_indexes(allow_table_lock=True, rebuild_indexes=True, rebuild_views=True)
except:
index.close()
raise
index.close()
@task(name="data_cube_manager.add_source_datasets", base=IngestionBase, queue="data_cube_manager")
def add_source_datasets(ingestion_request_id=None):
"""Populate the newly created database with source datasets that match the criteria
Searches for datasets using the search criteria found on the IngestionRequest model and populates
the newly created database with the new data. The dataset type's id is reset to 0 to prevent collisions in
the agdc script.
A dataset type, datasets, dataset_locations, and dataset_sources are added to the new database.
"""
ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)
ingestion_request.update_status("WAIT", "Populating database with source datasets...")
config = get_config(ingestion_request.get_database_name())
index = index_connect(local_config=config, validate_connection=True)
dataset_type = DatasetType.objects.using('agdc').get(id=ingestion_request.dataset_type_ref)
filtering_options = {
key: getattr(ingestion_request, key)
for key in [
'dataset_type_ref', 'start_date', 'end_date', 'latitude_min', 'latitude_max', 'longitude_min',
'longitude_max'
]
}
datasets = list(Dataset.filter_datasets(filtering_options))
dataset_locations = DatasetLocation.objects.using('agdc').filter(dataset_ref__in=datasets)
dataset_sources = DatasetSource.objects.using('agdc').filter(dataset_ref__in=datasets)
def create_source_dataset_models(dataset_sources, dataset_type_index=0):
source_datasets = Dataset.objects.using('agdc').filter(
pk__in=dataset_sources.values_list('source_dataset_ref', flat=True))
source_dataset_type = DatasetType.objects.using('agdc').get(id=source_datasets[0].dataset_type_ref.id)
source_dataset_locations = DatasetLocation.objects.using('agdc').filter(dataset_ref__in=source_datasets)
source_dataset_sources = DatasetSource.objects.using('agdc').filter(dataset_ref__in=source_datasets)
if source_dataset_sources.exists():
dataset_type_index = create_source_dataset_models(
source_dataset_sources, dataset_type_index=dataset_type_index)
source_dataset_type.id = dataset_type_index
source_dataset_type.save(using=ingestion_request.get_database_name())
for dataset in source_datasets:
dataset.dataset_type_ref_id = source_dataset_type.id
Dataset.objects.using(ingestion_request.get_database_name()).bulk_create(source_datasets)
DatasetLocation.objects.using(ingestion_request.get_database_name()).bulk_create(source_dataset_locations)
DatasetSource.objects.using(ingestion_request.get_database_name()).bulk_create(source_dataset_sources)
return dataset_type_index + 1
create_db(ingestion_request.get_database_name())
dataset_type_index = create_source_dataset_models(dataset_sources) if dataset_sources else 0
dataset_type.id = dataset_type_index
dataset_type.save(using=ingestion_request.get_database_name())
for dataset in datasets:
dataset.dataset_type_ref_id = dataset_type.id
Dataset.objects.using(ingestion_request.get_database_name()).bulk_create(datasets)
DatasetLocation.objects.using(ingestion_request.get_database_name()).bulk_create(dataset_locations)
DatasetSource.objects.using(ingestion_request.get_database_name()).bulk_create(dataset_sources)
cmd = "psql -U dc_user -h {} {} -c \"ALTER SEQUENCE agdc.dataset_type_id_seq RESTART WITH {};\"".format(
settings.MASTER_NODE, ingestion_request.get_database_name(), dataset_type_index + 1)
os.system(cmd)
close_db(ingestion_request.get_database_name())
index.close()
@task(name="data_cube_manager.ingest_subset", base=IngestionBase, queue="data_cube_manager", throws=(SystemExit,))
def ingest_subset(ingestion_request_id=None):
"""Run the ingestion process on the new database
Open a connection to the new database and run ingestion based on the
ingestion configuration found on the IngestionRequest model.
"""
ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)
config = get_config(ingestion_request.get_database_name())
index = index_connect(local_config=config, validate_connection=True)
    # This is done because of something the agdc ingest script does internally: https://github.com/opendatacube/datacube-core/blob/develop/datacube/scripts/ingest.py#L168
ingestion_request.ingestion_definition['filename'] = "ceos_data_cube_sample.yaml"
try:
# source_type, output_type = ingest.make_output_type(index, ingestion_request.ingestion_definition)
source_type = index.products.get_by_name(ingestion_request.ingestion_definition['source_type'])
output_type = index.products.add(
ingest.morph_dataset_type(source_type, ingestion_request.ingestion_definition), allow_table_lock=True)
tasks = list(
ingest.create_task_list(index, output_type, None, source_type, ingestion_request.ingestion_definition))
ingestion_request.total_storage_units = len(tasks)
ingestion_request.update_status("WAIT", "Starting the ingestion process...")
executor = SerialExecutor()
successful, failed = ingest.process_tasks(index, ingestion_request.ingestion_definition, source_type,
output_type, tasks, 3200, executor)
except:
index.close()
raise
index.close()
@task(name="data_cube_manager.prepare_output", base=IngestionBase, queue="data_cube_manager")
def prepare_output(ingestion_request_id=None):
"""Dump the database and perform cleanup functions
    Dumps the database, drops it, and creates the bulk download script.
"""
ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)
ingestion_request.update_status("WAIT", "Creating output products...")
cmd = "pg_dump -U dc_user -h {} -n agdc {} > {}".format(settings.MASTER_NODE,
ingestion_request.get_database_name(),
ingestion_request.get_database_dump_path())
os.system(cmd)
cmd = "dropdb -U dc_user -h {} {}".format(settings.MASTER_NODE, ingestion_request.get_database_name())
os.system(cmd)
ingestion_request.download_script_path = ingestion_request.get_base_data_path() + "/bulk_downloader.py"
with open(ingestion_request.download_script_path, "w+") as downloader:
file_list = ",".join('"{}"'.format(path) for path in glob(ingestion_request.get_base_data_path() + '/*.nc'))
download_script = base_downloader_script.format(
file_list=file_list,
database_dump_file=ingestion_request.get_database_dump_path(),
base_host=settings.BASE_HOST,
base_data_path=ingestion_request.get_base_data_path()) + static_script
downloader.write(download_script)
ingestion_request.update_status("OK", "Please follow the directions on the right side panel to download your cube.")
@task(name="data_cube_manager.delete_ingestion_request", base=IngestionBase, queue="data_cube_manager")
def delete_ingestion_request(ingestion_request_id=None):
"""Delete an existing ingestion request before proceeding with a new one"""
ingestion_request = IngestionRequest.objects.get(pk=ingestion_request_id)
try:
cmd = "dropdb -U dc_user -h {} {}".format(settings.MASTER_NODE, ingestion_request.get_database_name())
os.system(cmd)
shutil.rmtree(ingestion_request.get_base_data_path())
except:
pass
def get_config(username):
config = configparser.ConfigParser()
config['datacube'] = {
'db_password': settings.DATABASES['default']['PASSWORD'],
'db_connection_timeout': '60',
'db_username': settings.DATABASES['default']['USER'],
'db_database': username,
'db_hostname': settings.MASTER_NODE
}
return LocalConfig(config)
def create_db(username):
connections.databases[username] = {
'ENGINE': 'django.db.backends.postgresql',
'OPTIONS': {
'options': '-c search_path=agdc'
},
'NAME': username,
'USER': settings.DATABASES['default']['USER'],
'PASSWORD': settings.DATABASES['default']['PASSWORD'],
'HOST': settings.MASTER_NODE
}
def close_db(username):
connections[username].close()
connections.databases.pop(username)
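# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): ingestion_on_demand
# chains *immutable* signatures (.si), so each task in the canvas receives
# only its own ingestion_request_id keyword and ignores the previous task's
# return value.  The sketch below reproduces that shape with two throwaway
# tasks on a local Celery app; the app name and request id are assumptions.
if __name__ == '__main__':
    _demo_app = celery.Celery('data_cube_manager_demo')
    @_demo_app.task
    def _step_one(ingestion_request_id=None):
        return 'step one for request %s' % ingestion_request_id
    @_demo_app.task
    def _step_two(ingestion_request_id=None):
        return 'step two for request %s' % ingestion_request_id
    # Same canvas shape as ingestion_on_demand: the order of .si() calls defines the workflow.
    demo_pipeline = chain(_step_one.si(ingestion_request_id=42),
                          _step_two.si(ingestion_request_id=42))
    print(demo_pipeline)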
| 41.943662 | 162 | 0.72908 |
f8dd786f51eb0a70196617958958a7d5dc29b8c5
| 1,804 |
py
|
Python
|
src/senor_octopus/sinks/tuya.py
|
betodealmeida/senor-octopus
|
362d4cb8d5ac7fea620c2b4d46e807bb614a59bd
|
[
"MIT"
] | 7 |
2021-03-24T18:29:21.000Z
|
2021-11-15T21:13:25.000Z
|
src/senor_octopus/sinks/tuya.py
|
betodealmeida/senor-octopus
|
362d4cb8d5ac7fea620c2b4d46e807bb614a59bd
|
[
"MIT"
] | null | null | null |
src/senor_octopus/sinks/tuya.py
|
betodealmeida/senor-octopus
|
362d4cb8d5ac7fea620c2b4d46e807bb614a59bd
|
[
"MIT"
] | null | null | null |
import logging
from functools import lru_cache
from senor_octopus.types import Stream
from tuyapy import TuyaApi
from typing_extensions import Literal
_logger = logging.getLogger(__name__)
@lru_cache(maxsize=None)
def authenticate(email: str, password: str, country: str, application: str) -> TuyaApi:
_logger.debug("Authenticating")
api = TuyaApi()
api.init(email, password, country, application)
return api
async def tuya(
stream: Stream,
device: str,
email: str,
password: str,
country: str = "1",
    application: Literal["smart_life", "tuya"] = "smart_life",  # or "tuya"
) -> None:
"""
Send commands to a Tuya/Smart Life device.
Currently this plugin supports sending on and off events, but
it can be easily modified to support changing the color of a
lightbulb.
Parameters
----------
stream
The incoming stream of events
device
The name of the device to be controlled
email
The email of the account
password
The password of the account
country
Country telephone code
application
The application code, either "tuya" or "smart_life"
"""
api = authenticate(email, password, country, application)
devices = {d.name(): d for d in api.get_all_devices()}
if device not in devices:
valid = ", ".join(f'"{name}"' for name in devices)
_logger.error('Device "%s" not found. Available devices: %s', device, valid)
return
async for event in stream: # pragma: no cover
_logger.debug(event)
if event["value"].lower() == "on":
devices[device].turn_on()
elif event["value"].lower() == "off":
devices[device].turn_off()
else:
_logger.warning("Unknown value: %s", event["value"])
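# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): drives the sink with
# a tiny in-memory stream.  The device name and credentials are placeholders;
# with real values this would authenticate against the Tuya cloud and toggle
# the named device on and then off.
if __name__ == "__main__":  # pragma: no cover
    import asyncio
    async def _demo_stream():
        yield {"value": "on"}
        yield {"value": "off"}
    asyncio.run(
        tuya(
            _demo_stream(),
            device="Living room lamp",  # assumed device name
            email="user@example.com",   # placeholder credentials
            password="secret",
            application="smart_life",
        )
    )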
| 28.1875 | 87 | 0.641353 |
4c9a4cdb13935e71ddf10bf2cb9785bf9c5e89dd
| 20,614 |
py
|
Python
|
research/object_detection/python-tkinter-gui-master/GUITools/GraphicsWidgets/CoordinateSpace.py
|
r08in279/Traffic-Management-Using-Drones
|
15fdba219cef04b3cb59a68901a8064c3795d8e3
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/python-tkinter-gui-master/GUITools/GraphicsWidgets/CoordinateSpace.py
|
r08in279/Traffic-Management-Using-Drones
|
15fdba219cef04b3cb59a68901a8064c3795d8e3
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/python-tkinter-gui-master/GUITools/GraphicsWidgets/CoordinateSpace.py
|
r08in279/Traffic-Management-Using-Drones
|
15fdba219cef04b3cb59a68901a8064c3795d8e3
|
[
"Apache-2.0"
] | null | null | null |
from .Shared import *
from .Canvas3D import Canvas3D as Canvas3D
from .ActionBuffer import ActionBuffer as ActionBuffer
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
class CoordinateSpace(Canvas3D):
shapeFlattener=LinAlg.Flattener(endtype='gen',dontExpand=(Shape,str))
addFlattener=LinAlg.Flattener(endtype='gen',dontExpand=(Shape,MultiShape,str))
def __init__(self,root=None,basisvectors=((1,0,0),(0,1,0),(0,0,1)),
name=None,unitscaling=1,zrotation=20,**kwargs):
self.frame=tk.Frame(root)
if not 'bg' in kwargs:
kwargs['bg']='grey95'
super().__init__(self.frame,**kwargs)
        self.zrot=zrotation
self.xdims=(0,0)
self.ydims=(0,0)
self.zdims=(0,0)
#Setting the axes, etc.
self.unitscale=unitscaling
self.scale*=unitscaling
pD=0
P=Point(0,0,0,priority=pD)
P.radius=P.radius/self.unitscale
self.axes=MultiShape(
P,
Line((0,0,0),self.basis[0] ,priority=pD),
Line((0,0,0),self.basis[0]*-1,dash=2,priority=pD),
Line((0,0,0),self.basis[1] ,priority=pD),
Line((0,0,0),self.basis[1]*-1,dash=2,priority=pD),
Line((0,0,0),self.basis[2] ,priority=pD),
Line((0,0,0),self.basis[2]*-1,dash=2,priority=pD),
)
self.axes.shapes=self.axes.Sorted()
#Object container intializations
self.objectmap={}
self.axesFlag=True
self.eventpoints=[]
self.actionBuffer=ActionBuffer(maxlen=35)
self.selected=MultiShape(name='Selected')
self.drawpoints=MultiShape(name='Drawing Points')
self.objects=MultiShape(self.selected,self.drawpoints)
#Interactivity bindings
self.baseaction=tk.StringVar()
self.baseaction.set('rotate')
self.actionloop=['rotate','scale','shift']
self.bind('<Button-1>',lambda e:self.focus_set())
self.bind('<Command-i>',lambda *e:self.Interact())
self.bind('<Command-r>',lambda *e:self.ResetPosition())
self.bind('<Configure>',lambda *e:self.Draw(draw_all=True))
def mulamb():
            i=self.actionloop.index(self.baseaction.get())
i=(i+1)%len(self.actionloop)
self.baseaction.set(self.actionloop[i])
self.bind('<Tab>',lambda *e:mulamb())
self.bind('<B1-Motion>',
lambda e:(
self.LineAction(e,action=self.baseaction.get()),self.Draw(quality='fast'))
)
self.bind('<Alt-B1-Motion>',lambda e:(
self.LineAction(e,action=self.baseaction.get(),rounding=True),self.Draw(quality='fast'))
)
for x in ('Up','Down','Right','Left'):
self.bind('<{}>'.format(x),lambda e:(self.ArrowAction(e,action=self.baseaction.get()),self.Draw()))
self.arrowspeed=1
self.bind('<Control-B1-Motion>',
lambda e:(self.LineAction(e,action='scale'),self.Draw())
)
        def keyset(event):
            # Map the single-key shortcuts to their base actions.
            actions = {'r': 'rotate', 's': 'shift', 'z': 'scale'}
            if event.keysym in actions:
                self.baseaction.set(actions[event.keysym])
self.bind('<Control-Key>',lambda e:keyset(e))
self.bind('<Shift-B1-Motion>',lambda e:(self.LineAction(e,action='shift'),self.Draw(quality='fast')))
self.bind('<ButtonRelease-1>',self.EndAction)
self.bind('<Command-g>',lambda e:self.GroupSelected())
self.bind('<Shift-Button-1>',lambda e:(self.focus_set(),self.Select(e),self.Draw()))
self.bind('<Command-z>',lambda e:(self.actionBuffer.undo(),self.Draw()))
self.bind('<Command-y>',lambda e:(self.actionBuffer.redo(),self.Draw()))
self.selRec=None
self.bind('<Control-Shift-Button-1>',lambda e:(self.SelectConnected(e),self.Draw()))
#Making it so the frame it's packed in configures correctly
self.grid(row=0,column=0,sticky='nsew')
self.frame.grid_columnconfigure(0,weight=1)
self.frame.grid_rowconfigure(0,weight=1)
for x in ('pack','grid','place'):
for ext in ('','_forget'):
setattr(self,x+ext,getattr(self.frame,x+ext))
for y in ('row','column'):
name='grid_{}configure'.format(y)
setattr(self,name,getattr(self.frame,name))
#--
@staticmethod
def priority_function(ob):
ret=max((p[1]+p[2] for p in ob.points))
return ret+ob.priority
#-------------------------------------------------------------------------
def Draw(self,quality='good',draw_all=True):
self.objectmap={}
## Create Objects
if draw_all:
self.delete('all')
use=self.objects
use.Shade(self,1,mode='outline')
use[0].Shade(self,1,color='yellow',mode='outline')
if self.axesFlag:
## self.Dimensions(True)
            minz,maxz=self.zdims
depth=abs(maxz-minz)
bound=min(1-depth/1000,.85)
m=max(maxz,bound)
S=self.axes*1000
i=zip(S,S.Draw(self,draw_all=draw_all))
a,l=next(i)
af=True
##MultiShape.Draw returns an iterator
D=use.Draw(self,quality=quality,draw_all=draw_all)
for o,s in D:
self.objectmap[o]=s
while af:
if self.priority_function(s)<self.priority_function(a):
break
## self.lift(a)
else:
self.lift(o)
try:
a,l=next(i)
except StopIteration:
af=False
for x in i:
pass
else:
D=use.Draw(self,quality=quality,draw_all=draw_all)
for o,s in D:
self.objectmap[o]=s
## self.delete('moved')
#-------------------------------------------------------------------------
def AddObjects(self,*objects,dimensioncall=True,buffer=True):
for ob in self.addFlattener.flatten(objects):
if not ob in self.objects:
self.objects.append(ob)
ob.multishapes.pop(-1)
if dimensioncall:
mx,MX=self.xdims
my,MY=self.ydims
mz,MZ=self.zdims
xs,ys,zs=ob.points.rowiter()
for x in xs:
if x<mx:
mx=x
elif x>MX:
MX=x
self.xdims=(mx,MX)
for y in ys:
if y<my:
my=y
elif y>MY:
MY=y
self.ydims=(my,MY)
for z in zs:
if z<mz:
mz=z
elif z>MZ:
MZ=z
self.zdims=(mz,MZ)
if buffer:
u=lambda a:self.Delete(*a,buffer=False)
r=lambda a:self.AddObjects(*a,buffer=False)
self.actionBuffer.append(self.actionBuffer.Action(objects,u,r))
#-------------------------------------------------------------------------
def Create(self,objecttype,points,*otherargs,**kwargs):
args='({})'.format(','.join((str(p) for p in points)))
otherargs=tuple(x for x in otherargs if x)
if otherargs:
args+=',{}'.format(','.join((str(x) for x in otherargs)))
try:
O=eval('{}({},parent=self,**kwargs)'.format(objecttype,args))
except:
print(args)
raise
self.AddObjects(O)
self.Refresh()
return O
#--
@staticmethod
def recurseRemove(shape,tryob):
def recurseRemove(shape,tryob):
try:
tryob.remove(shape)
return True
except ValueError:
for x in tryob:
if recurseRemove(shape,x):
break
else:
return False
except AttributeError:
return False
return recurseRemove(shape,tryob)
#-------------------------------------------------------------------------
def Delete(self,*objects,buffer=True):
readd=[]
for ob in objects:
if self.recurseRemove(ob,self.objects):
readd.append(ob)
ob.Delete()
if buffer:
u=lambda a:self.AddObjects(*a,buffer=False)
r=lambda a:self.Delete(*a,buffer=False)
self.actionBuffer.append(self.actionBuffer.Action(readd,u,r))
#-------------------------------------------------------------------------
def Dimensions(self,reset=False):
obs=(o.points for o in self.objects.flatshapes)
if reset:
mx=my=mz=MX=MY=MZ=0
for pset in obs:
xs,ys,zs=pset.rowiter()
for x in xs:
if x<mx:
mx=x
elif x>MX:
MX=x
self.xdims=(mx,MX)
for y in ys:
if y<my:
my=y
elif y>MY:
MY=y
self.ydims=(my,MY)
for z in zs:
if z<mz:
mz=z
elif z>MZ:
MZ=z
self.zdims=(mz,MZ)
return (self.xdims,self.ydims,self.zdims)
#-------------------------------------------------------------------------
def Shade(self,ob,degree,**kwargs):
try:
            ob.Shade(self,degree,**kwargs)
except AttributeError:
raise Exception("Can't Shade this shit")
#-------------------------------------------------------------------------
def Clear(self,reset=False):
self.selected=MultiShape(name='Selected')
self.drawpoints=MultiShape(name='Drawing Points')
self.objects=MultiShape(self.selected,self.drawpoints)
if reset:
self.ResetPosition()
self.xdims=(0,0)
self.ydims=(0,0)
self.zdims=(0,0)
self.Refresh()
#-------------------------------------------------------------------------
def Refresh(self):
self.Draw()
#-------------------------------------------------------------------------
def Shift(self,x=0,y=0,z=0,moveto=False,wrap=True):
v=vector(x,y,z)
if not moveto:
self.origin.Shift(*v)
else:
c=vector(self.Center())
v=v-c
self.origin.Shift(*v)
if wrap:
c=self.Center()
if abs(self.origin[0])>c[0]:
self.origin.Shift(x=-2*self.origin[0])
if abs(self.origin[1])>c[1]:
self.origin.Shift(y=-2*self.origin[1])
self.Dimensions(True)
#--
def Deselect(self,*objects):
if isinstance(objects[0],str):
if objects[0].lower()=='all':
objects=tuple(self.selected)
readd=[]
for x in objects:
self.recurseRemove(x,self.selected)
tf=False
for m in x.multishapes:
tf=True
m.append(x)
if tf:
continue
readd.append(x)
self.AddObjects(readd,buffer=False)
#-------------------------------------------------------------------------
def Select(self,*objects):
selobs=self.shapeFlattener.flatten(self.selected)
objects=self.shapeFlattener.flatten(objects)
def addprocess(ob):
nonlocal objects,selobs
if ob in selobs:
self.Deselect(ob)
else:
self.recurseRemove(ob,self.objects)
self.selected.append(ob)
ob.multishapes.pop(-1)
for O in objects:
if isinstance(O,tk.Event):
ob=self.find_withtag('current')
if ob:
try:
ob=self.objectmap[ob[0]]
except:
pass
else:
addprocess(ob)
else:
self.Deselect('all')
else:
addprocess(O)
#-------------------------------------------------------------------------
def SelectGroup(self,*objects):
flatten=self.shapeFlattener.flatten
selobs=flatten(self.selected)
objects=flatten(objects)
def addprocess(ob,objects=objects,selobs=selobs):
if ob in selobs:
for gob in self.selected:
if gob==ob:
self.selected.remove(gob)
## for m in ob.multishapes:
## addprocess(flatten(m))
else:
if any(ob.multishapes):
for m in ob.multishapes:
for ob in flatten(m):
self.selected.append(ob)
else:
self.selected.append(ob)
for O in objects:
if isinstance(O,tk.Event):
ob=self.find_withtag('current')
if ob:
ob=self.objectmap[ob[0]]
addprocess(ob)
else:
self.Deselect('all')
else:
addprocess(O)
SelectConnected=SelectGroup
#-------------------------------------------------------------------------
def Scale(self,degree,mode='mult'):
if mode=='add':
self.scale+=degree
else:
self.scale*=degree
if self.scale<.25:
self.scale=.25
#-------------------------------------------------------------------------
def Rotate(self,x=0,y=0,z=0,mode='degree',withbasis=False,moveconnected=False):
moveflag=moveconnected
for q,ab in zip((x,y,z),('x','y','z')):
if q!=0:
R=LinAlg.RotationMatrix(q,mode=mode,about=ab)
if not self.selected:
obs=self.objects
else:
moveflag=True
obs=self.selected
for shape in obs:
shape.MatrixTransformation(R,moveconnected=moveflag)
#--
def ArrowAction(self,event,action='rotate'):
moveFlag=False
if action=='select':
pass
else:
x=0;y=0;z=0
arr=event.keysym
if arr=='Up':
y+=-self.arrowspeed
elif arr=='Down':
y+=self.arrowspeed
elif arr=='Left':
x+=self.arrowspeed
elif arr=='Right':
x+=-self.arrowspeed
else:
return None
if action=='rotate':
self.Rotate(-y,x,z,moveconnected=moveFlag)
elif action=='scale':
self.Scale(y/5,mode='add')
elif action=='shift':
self.Shift(*(q/self.scale for q in (x,y,z)))
elif action=='drag':
if self.selected:
moveFlag=True
obs=self.selected
else:
obs=self.objects
obs.Shift(*(-q/self.scale for q in (x,y,z)),moveconnected=moveFlag)
#-------------------------------------------------------------------------
def LineAction(self,event,action='rotate',rounding=False):
from math import cos
moveFlag=True
if len(self.objects)>20:
self.shading=False
if len(self.eventpoints)<2:
p=(event.x,event.y)
self.eventpoints.append(p)
elif len(self.eventpoints)==2:
p1,p2=[vector(p) for p in self.eventpoints]
v=p1-p2
if action!='select':
if action=='rotate':
## l=v.__mag__()
## a1=v.angle(vector(1,0));a2=v.angle(vector(0,1))
## m=max(a1,a2)
a2,a1=v
if rounding:
a2,a1=(round(x,0) for x in (a2,a1))
self.Rotate(a1,a2,0,moveconnected=moveFlag)
elif action=='shift':
v*=2
v.append(0)
v[1]*=-1
self.Shift(*v/self.scale)
elif action=='scale':
self.Scale(v[1],mode='add')
elif action=='drag':
u=v;u[0]=-u[0]
if self.selected:
obs=self.selected
moveFlag=True
else:
obs=self.objects
obs.Shift(*(u/self.scale),moveconnected=moveFlag)
self.eventpoints=[]
else:
if self.selRec:
self.selRec.points[1]=self.ClickCoordinates(self.eventpoints.pop(1))
else:
self.selRec=Rectangle(
(self.ClickCoordinates(e) for e in self.eventpoints),
priority=100,dash=2
)
self.drawpoints.append(self.selRec)
else:
self.eventpoints=self.eventpoints[:2]
#-------------------------------------------------------------------------
def CurrentCallback(self,event):
try:
ob=self.find_withtag('current')[0]
except IndexError:
pass
else:
ob=self.objectmap[ob]
ob.callback()
#-------------------------------------------------------------------------
def EndAction(self,event):
self.eventpoints.append((event.x,event.y))
if self.selRec:
try:
self.Delete(self.selRec)
except ValueError:
self.selRec=None
else:
self.Deselect('all')
es=self.find_overlapping(*(flatten(self.eventpoints,endtype=list)[:4]))
for x in es:
try:
O=self.objectmap[x]
except KeyError:
continue
if not O==self.selRec:
self.Select(O)
self.selRec=None
self.eventpoints=[]
self.shading=True
self.Draw()
#-------------------------------------------------------------------------
def Interact(self):
from .ExtraWidgets import Interactor
Interactor(self)
#-------------------------------------------------------------------------
def ResetPosition(self):
self.basis=[vector(v) for v in self.initialbasis]
self.mat=matrix(self.basis)
self.inv=self.mat.inverse()
self.origin=Point(0,0,0)
self.scale=self.unitscale
self.Draw()
#-------------------------------------------------------------------------
def GroupSelected(self):
self.GroupShapes('selected')
#-------------------------------------------------------------------------
def GroupShapes(self,*shapes,bound=None):
shapes=list(self.shapeFlattener.flatten(shapes))
s=shapes[0]
if s=='all':
shapes=self.shapeFlattener(self.objects)
elif s=='selected':
shapes=self.shapeFlattener(self.selected)
self.Deselect('all')
self.Draw()
shapes=list(shapes)
M=MultiShape(shapes,parent=self,boundob=bound)
for shape in shapes:
if not self.recurseRemove(shape,self.objects):
print(shape)
self.AddObjects(M)
#-------------------------------------------------------------------------
def __iter__(self):
return iter(self.objects)
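#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# Hedged usage sketch (not part of the original module): builds a space inside
# a bare Tk root and adds two primitives.  Point and Line come from the
# star-import of .Shared above; the constructor arguments shown are guesses at
# reasonable values, not documented defaults.
if __name__=='__main__':
    root=tk.Tk()
    space=CoordinateSpace(root,unitscaling=50)
    space.pack(fill='both',expand=True)
    space.AddObjects(
        Point(1,1,1),
        Line((0,0,0),(1,1,1)),
    )
    space.Refresh()
    root.mainloop()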
| 37.008977 | 111 | 0.438391 |
3106a027add2c1389e00ac568ef70311d60697c5
| 26,378 |
py
|
Python
|
tests/mobly/controllers/android_device_lib/services/logcat_test.py
|
northwhisper/mobly
|
2af367e486d8aa0e0a587d87367ea101df3030ab
|
[
"Apache-2.0"
] | null | null | null |
tests/mobly/controllers/android_device_lib/services/logcat_test.py
|
northwhisper/mobly
|
2af367e486d8aa0e0a587d87367ea101df3030ab
|
[
"Apache-2.0"
] | null | null | null |
tests/mobly/controllers/android_device_lib/services/logcat_test.py
|
northwhisper/mobly
|
2af367e486d8aa0e0a587d87367ea101df3030ab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import mock
import os
import shutil
import tempfile
from future.tests.base import unittest
from mobly import utils
from mobly import runtime_test_info
from mobly.controllers import android_device
from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib.services import logcat
from tests.lib import mock_android_device
# The expected result of the cat adb operation.
MOCK_ADB_LOGCAT_CAT_RESULT = [
'02-29 14:02:21.456 4454 Something\n',
'02-29 14:02:21.789 4454 Something again\n'
]
# A mocked piece of adb logcat output.
MOCK_ADB_LOGCAT = (u'02-29 14:02:19.123 4454 Nothing\n'
u'%s'
u'02-29 14:02:22.123 4454 Something again and again\n'
) % u''.join(MOCK_ADB_LOGCAT_CAT_RESULT)
# The expected result of the cat adb operation.
MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT = [
'02-29 14:02:21.456 4454 Something \u901a\n',
'02-29 14:02:21.789 4454 Something again\n'
]
# A mocked piece of adb logcat output.
MOCK_ADB_UNICODE_LOGCAT = (
u'02-29 14:02:19.123 4454 Nothing\n'
u'%s'
u'02-29 14:02:22.123 4454 Something again and again\n'
) % u''.join(MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT)
# Mock start and end time of the adb cat.
MOCK_ADB_LOGCAT_BEGIN_TIME = '02-29 14:02:20.123'
MOCK_ADB_LOGCAT_END_TIME = '02-29 14:02:22.000'
# Mock AdbError for missing logpersist scripts
MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR = adb.AdbError(
'logpersist.stop --clear', b'',
'/system/bin/sh: logpersist.stop: not found', 0)
MOCK_LOGPERSIST_START_MISSING_ADB_ERROR = adb.AdbError(
'logpersist.start --clear', b'',
b'/system/bin/sh: logpersist.stop: not found', 0)
class LogcatTest(unittest.TestCase):
"""Tests for Logcat service and its integration with AndroidDevice."""
def setUp(self):
# Set log_path to logging since mobly logger setup is not called.
if not hasattr(logging, 'log_path'):
setattr(logging, 'log_path', '/tmp/logs')
# Creates a temp dir to be used by tests in this test class.
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
"""Removes the temp dir.
"""
shutil.rmtree(self.tmp_dir)
def AssertFileContains(self, content, file_path):
with open(file_path, 'r') as f:
output = f.read()
self.assertIn(content, output)
def AssertFileDoesNotContain(self, content, file_path):
with open(file_path, 'r') as f:
output = f.read()
self.assertNotIn(content, output)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_start_and_stop(self, stop_proc_mock, start_proc_mock,
create_dir_mock, FastbootProxy, MockAdbProxy):
"""Verifies the steps of collecting adb logcat on an AndroidDevice
object, including various function calls and the expected behaviors of
the calls.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
# Verify start did the correct operations.
self.assertTrue(logcat_service._adb_logcat_process)
expected_log_path = os.path.join(logging.log_path,
'AndroidDevice%s' % ad.serial,
'adblog,fakemodel,%s.txt' % ad.serial)
create_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
adb_cmd = '"adb" -s %s logcat -v threadtime >> %s'
start_proc_mock.assert_called_with(
adb_cmd % (ad.serial, '"%s"' % expected_log_path), shell=True)
self.assertEqual(logcat_service.adb_logcat_file_path,
expected_log_path)
expected_msg = (
'Logcat thread is already running, cannot start another'
' one.')
# Expect error if start is called back to back.
with self.assertRaisesRegex(logcat.Error, expected_msg):
logcat_service.start()
# Verify stop did the correct operations.
logcat_service.stop()
stop_proc_mock.assert_called_with('process')
self.assertIsNone(logcat_service._adb_logcat_process)
self.assertEqual(logcat_service.adb_logcat_file_path,
expected_log_path)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_update_config(self, stop_proc_mock, start_proc_mock,
create_dir_mock, FastbootProxy, MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
logcat_service.stop()
new_log_params = '-a -b -c'
new_file_path = 'some/path/log.txt'
new_config = logcat.Config(logcat_params=new_log_params,
output_file_path=new_file_path)
logcat_service.update_config(new_config)
logcat_service.start()
self.assertTrue(logcat_service._adb_logcat_process)
create_dir_mock.assert_has_calls([mock.call('some/path')])
expected_adb_cmd = ('"adb" -s 1 logcat -v threadtime -a -b -c >> '
'"some/path/log.txt"')
start_proc_mock.assert_called_with(expected_adb_cmd, shell=True)
self.assertEqual(logcat_service.adb_logcat_file_path,
'some/path/log.txt')
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_update_config_while_running(self, stop_proc_mock, start_proc_mock,
create_dir_mock, FastbootProxy,
MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
new_config = logcat.Config(logcat_params='-blah',
output_file_path='some/path/file.txt')
with self.assertRaisesRegex(
logcat.Error,
'Logcat thread is already running, cannot start another one'):
logcat_service.update_config(new_config)
self.assertTrue(logcat_service.is_alive)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch(
'mobly.controllers.android_device_lib.services.logcat.Logcat.clear_adb_log',
return_value=mock_android_device.MockAdbProxy('1'))
def test_pause_and_resume(self, clear_adb_mock, stop_proc_mock,
start_proc_mock, create_dir_mock, FastbootProxy,
MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad, logcat.Config(clear_log=True))
logcat_service.start()
clear_adb_mock.assert_called_once_with()
self.assertTrue(logcat_service.is_alive)
logcat_service.pause()
self.assertFalse(logcat_service.is_alive)
stop_proc_mock.assert_called_with('process')
self.assertIsNone(logcat_service._adb_logcat_process)
clear_adb_mock.reset_mock()
logcat_service.resume()
self.assertTrue(logcat_service.is_alive)
clear_adb_mock.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch(
'mobly.controllers.android_device_lib.services.logcat.Logcat.clear_adb_log',
return_value=mock_android_device.MockAdbProxy('1'))
def test_logcat_service_create_excerpt(self, clear_adb_mock,
stop_proc_mock, start_proc_mock,
FastbootProxy, MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service.start()
FILE_CONTENT = 'Some log.\n'
with open(logcat_service.adb_logcat_file_path, 'w') as f:
f.write(FILE_CONTENT)
test_output_dir = os.path.join(self.tmp_dir, 'test_foo')
mock_record = mock.MagicMock()
mock_record.begin_time = 123
test_run_info = runtime_test_info.RuntimeTestInfo(
'test_foo', test_output_dir, mock_record)
logcat_service.create_per_test_excerpt(test_run_info)
expected_path1 = os.path.join(test_output_dir, 'test_foo-123',
'adblog,fakemodel,1.txt')
self.assertTrue(os.path.exists(expected_path1))
self.AssertFileContains(FILE_CONTENT, expected_path1)
self.assertFalse(os.path.exists(logcat_service.adb_logcat_file_path))
# Generate some new logs and do another excerpt.
FILE_CONTENT = 'Some more logs!!!\n'
with open(logcat_service.adb_logcat_file_path, 'w') as f:
f.write(FILE_CONTENT)
test_output_dir = os.path.join(self.tmp_dir, 'test_bar')
mock_record = mock.MagicMock()
mock_record.begin_time = 456
test_run_info = runtime_test_info.RuntimeTestInfo(
'test_bar', test_output_dir, mock_record)
logcat_service.create_per_test_excerpt(test_run_info)
expected_path2 = os.path.join(test_output_dir, 'test_bar-456',
'adblog,fakemodel,1.txt')
self.assertTrue(os.path.exists(expected_path2))
self.AssertFileContains(FILE_CONTENT, expected_path2)
self.AssertFileDoesNotContain(FILE_CONTENT, expected_path1)
self.assertFalse(os.path.exists(logcat_service.adb_logcat_file_path))
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.create_dir')
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
def test_take_logcat_with_extra_params(self, stop_proc_mock,
start_proc_mock, create_dir_mock,
FastbootProxy, MockAdbProxy):
"""Verifies the steps of collecting adb logcat on an AndroidDevice
object, including various function calls and the expected behaviors of
the calls.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
configs = logcat.Config()
configs.logcat_params = '-b radio'
logcat_service = logcat.Logcat(ad, configs)
logcat_service.start()
# Verify start did the correct operations.
self.assertTrue(logcat_service._adb_logcat_process)
expected_log_path = os.path.join(logging.log_path,
'AndroidDevice%s' % ad.serial,
'adblog,fakemodel,%s.txt' % ad.serial)
create_dir_mock.assert_called_with(os.path.dirname(expected_log_path))
adb_cmd = '"adb" -s %s logcat -v threadtime -b radio >> %s'
start_proc_mock.assert_called_with(
adb_cmd % (ad.serial, '"%s"' % expected_log_path), shell=True)
self.assertEqual(logcat_service.adb_logcat_file_path,
expected_log_path)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test_instantiation(self, MockFastboot, MockAdbProxy):
"""Verifies the AndroidDevice object's basic attributes are correctly
set after instantiation.
"""
mock_serial = 1
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
self.assertIsNone(logcat_service._adb_logcat_process)
self.assertIsNone(logcat_service.adb_logcat_file_path)
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch('mobly.logger.get_log_line_timestamp',
return_value=MOCK_ADB_LOGCAT_END_TIME)
def test_cat_adb_log(self, mock_timestamp_getter, stop_proc_mock,
start_proc_mock, FastbootProxy, MockAdbProxy):
"""Verifies that AndroidDevice.cat_adb_log loads the correct adb log
file, locates the correct adb log lines within the given time range,
and writes the lines to the correct output file.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
# Direct the log path of the ad to a temp dir to avoid racing.
logcat_service._ad._log_path = self.tmp_dir
# Expect error if attempted to cat adb log before starting adb logcat.
expected_msg = ('.* Attempting to cat adb log when none'
' has been collected.')
with self.assertRaisesRegex(logcat.Error, expected_msg):
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
logcat_service.start()
utils.create_dir(ad.log_path)
mock_adb_log_path = os.path.join(
ad.log_path, 'adblog,%s,%s.txt' % (ad.model, ad.serial))
with io.open(mock_adb_log_path, 'w', encoding='utf-8') as f:
f.write(MOCK_ADB_LOGCAT)
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
cat_file_path = os.path.join(
ad.log_path, 'AdbLogExcerpts',
('some_test,02-29 14-02-20.123,%s,%s.txt') % (ad.model, ad.serial))
with io.open(cat_file_path, 'r', encoding='utf-8') as f:
actual_cat = f.read()
self.assertEqual(actual_cat, ''.join(MOCK_ADB_LOGCAT_CAT_RESULT))
# Stops adb logcat.
logcat_service.stop()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock_android_device.MockAdbProxy('1'))
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
@mock.patch('mobly.utils.start_standing_subprocess',
return_value='process')
@mock.patch('mobly.utils.stop_standing_subprocess')
@mock.patch('mobly.logger.get_log_line_timestamp',
return_value=MOCK_ADB_LOGCAT_END_TIME)
def test_cat_adb_log_with_unicode(self, mock_timestamp_getter,
stop_proc_mock, start_proc_mock,
FastbootProxy, MockAdbProxy):
"""Verifies that AndroidDevice.cat_adb_log loads the correct adb log
file, locates the correct adb log lines within the given time range,
and writes the lines to the correct output file.
"""
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
# Direct the log path of the ad to a temp dir to avoid racing.
logcat_service._ad._log_path = self.tmp_dir
# Expect error if attempted to cat adb log before starting adb logcat.
expected_msg = ('.* Attempting to cat adb log when none'
' has been collected.')
with self.assertRaisesRegex(logcat.Error, expected_msg):
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
logcat_service.start()
utils.create_dir(ad.log_path)
mock_adb_log_path = os.path.join(
ad.log_path, 'adblog,%s,%s.txt' % (ad.model, ad.serial))
with io.open(mock_adb_log_path, 'w', encoding='utf-8') as f:
f.write(MOCK_ADB_UNICODE_LOGCAT)
logcat_service.cat_adb_log('some_test', MOCK_ADB_LOGCAT_BEGIN_TIME)
cat_file_path = os.path.join(
ad.log_path, 'AdbLogExcerpts',
('some_test,02-29 14-02-20.123,%s,%s.txt') % (ad.model, ad.serial))
with io.open(cat_file_path, 'r', encoding='utf-8') as f:
actual_cat = f.read()
self.assertEqual(actual_cat,
''.join(MOCK_ADB_UNICODE_LOGCAT_CAT_RESULT))
# Stops adb logcat.
logcat_service.stop()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_logpersist(self, MockFastboot,
MockAdbProxy):
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': True,
'logpersist.stop': True,
}[command]
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_has_calls([
mock.call('logpersist.stop --clear'),
mock.call('logpersist.start'),
])
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_user_build_device(self, MockFastboot,
MockAdbProxy):
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'user',
'ro.debuggable': '0',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': True,
'logpersist.stop': True,
}[command]
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_missing_all_logpersist(
self, MockFastboot, MockAdbProxy):
def adb_shell_helper(command):
if command == 'logpersist.start':
raise MOCK_LOGPERSIST_START_MISSING_ADB_ERROR
elif command == 'logpersist.stop --clear':
raise MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR
else:
return b''
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': False,
'logpersist.stop': False,
}[command]
mock_adb_proxy.shell.side_effect = adb_shell_helper
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_missing_logpersist_stop(
self, MockFastboot, MockAdbProxy):
def adb_shell_helper(command):
if command == 'logpersist.stop --clear':
raise MOCK_LOGPERSIST_STOP_MISSING_ADB_ERROR
else:
return b''
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': True,
'logpersist.stop': False,
}[command]
mock_adb_proxy.shell.side_effect = adb_shell_helper
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_has_calls([
mock.call('logpersist.stop --clear'),
])
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy',
return_value=mock.MagicMock())
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test__enable_logpersist_with_missing_logpersist_start(
self, MockFastboot, MockAdbProxy):
def adb_shell_helper(command):
if command == 'logpersist.start':
raise MOCK_LOGPERSIST_START_MISSING_ADB_ERROR
else:
return b''
mock_serial = '1'
mock_adb_proxy = MockAdbProxy.return_value
mock_adb_proxy.getprops.return_value = {
'ro.build.id': 'AB42',
'ro.build.type': 'userdebug',
'ro.debuggable': '1',
}
mock_adb_proxy.has_shell_command.side_effect = lambda command: {
'logpersist.start': False,
'logpersist.stop': True,
}[command]
mock_adb_proxy.shell.side_effect = adb_shell_helper
ad = android_device.AndroidDevice(serial=mock_serial)
logcat_service = logcat.Logcat(ad)
logcat_service._enable_logpersist()
mock_adb_proxy.shell.assert_not_called()
@mock.patch('mobly.controllers.android_device_lib.adb.AdbProxy')
@mock.patch('mobly.controllers.android_device_lib.fastboot.FastbootProxy',
return_value=mock_android_device.MockFastbootProxy('1'))
def test_clear_adb_log(self, MockFastboot, MockAdbProxy):
mock_serial = '1'
ad = android_device.AndroidDevice(serial=mock_serial)
ad.adb.logcat = mock.MagicMock()
ad.adb.logcat.side_effect = adb.AdbError(
cmd='cmd',
stdout=b'',
stderr=b'failed to clear "main" log',
ret_code=1)
logcat_service = logcat.Logcat(ad)
logcat_service.clear_adb_log()
if __name__ == '__main__':
unittest.main()
| 47.872958 | 84 | 0.656797 |
7fa9d500226dd0bafe2837075d705ecfe3c22fe5
| 4,770 |
py
|
Python
|
src/dxtbx/format/FormatHDF5Lambda.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | null | null | null |
src/dxtbx/format/FormatHDF5Lambda.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | null | null | null |
src/dxtbx/format/FormatHDF5Lambda.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Experimental format for the X-Spectrum LAMBDA detector
http://www.x-spectrum.de/
"""
import sys
import h5py
from cctbx.eltbx import attenuation_coefficient
from scitbx import matrix
from scitbx.array_family import flex
from dxtbx.format.Format import Format
from dxtbx.format.FormatHDF5 import FormatHDF5
from dxtbx.model import ParallaxCorrectedPxMmStrategy
class FormatHDF5Lambda(FormatHDF5):
@staticmethod
def understand(image_file):
try:
tag = FormatHDF5.open_file(image_file, "rb").read(8)
except OSError:
return False
# check that this is a HDF5 file (should not have got here if not
# anyway...)
if tag != "\211HDF\r\n\032\n":
return False
with h5py.File(image_file, "r") as h5_handle:
try:
desc = h5_handle["entry/instrument/detector/description"]
except KeyError:
return False
if "Lambda" in desc[()][0]:
return True
return False
def _start(self):
self._h5_handle = h5py.File(self.get_image_file(), "r")
def _goniometer(self):
"""Dummy goniometer - assume vertical (EMBl-HH P14)"""
return self._goniometer_factory.known_axis((0, 1, 0))
def _detector(self):
"""Partly dummy detector"""
# Get the detector geometry
entry = self._h5_handle["entry"]
instrument = entry["instrument"]
detector = instrument["detector"]
# Initialise detector frame - origin at 0,0,0
fast = matrix.col((1.0, 0.0, 0.0))
slow = matrix.col((0.0, 1.0, 0.0))
orig = matrix.col((0.0, 0.0, 0.0))
# Get the pixel and image size
pixel_size = (
1.0e-3 * detector["x_pixel_size"].value,
1.0e-3 * detector["y_pixel_size"].value,
)
layout = detector["layout"].value[0].split("X")
image_size = int(layout[0]), int(layout[1])
trusted_range = (-1, detector["saturation_value"][0])
thickness = float(detector["sensor_thickness"].value) / 1000.0
material = str(detector["sensor_material"].value[0])
# Make the detector
detector = self._detector_factory.make_detector(
"PAD",
fast,
slow,
orig,
pixel_size,
image_size,
trusted_range,
name="Panel",
thickness=thickness,
material=material,
)
# At the moment, beam is a dummy object because wavelength is not set in
# the header. Therefore, the px<-->mm strategy will generally be
# incorrect. Set it anyway, to override later.
beam = self._beam()
wavelength = beam.get_wavelength()
# this will fail for undefined composite materials
table = attenuation_coefficient.get_table(material)
# mu_at_angstrom returns cm^-1, but need mu in mm^-1
mu = table.mu_at_angstrom(wavelength) / 10.0
for panel in detector:
panel.set_mu(mu)
panel.set_px_mm_strategy(ParallaxCorrectedPxMmStrategy(mu, thickness))
return detector
def _beam(self):
"""Dummy beam"""
wavelength = 1.0
return self._beam_factory.make_simple_beam(wavelength)
def _scan(self):
"""Dummy scan"""
entry = self._h5_handle["entry"]
nframes = int(entry["instrument/detector/collection/frame_numbers"].value)
image_range = (1, nframes)
exposure_times = 0.0
oscillation = (0, 1)
epochs = [0] * nframes
return self._sequence_factory.make_scan(
image_range, exposure_times, oscillation, epochs, deg=True
)
def get_num_images(self):
detector = self._h5_handle["entry/instrument/detector"]
data = detector["data"]
return data.shape[0]
def get_goniometer(self, index=None):
return Format.get_goniometer(self)
def get_detector(self, index=None):
return Format.get_detector(self)
def get_beam(self, index=None):
return Format.get_beam(self)
def get_sequence(self, index=None):
if index is None:
return Format.get_sequence(self)
else:
scan = Format.get_sequence(self)
return scan[index]
def get_raw_data(self, index):
detector = self._h5_handle["entry/instrument/detector"]
data = detector["data"]
im = data[index, :, :].astype("int32") # convert from int16
return flex.int(im)
def get_image_file(self, index=None):
return Format.get_image_file(self)
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(FormatHDF5Lambda.understand(arg))
| 29.444444 | 82 | 0.60608 |
00f527388f7a6ee2f39ab62c0aacff3b8d31dc60
| 552 |
py
|
Python
|
DjangoSignUP/MySignUpApp/migrations/0001_initial.py
|
RATHINAMOORTHY/Django_SignUp_Project
|
294661ac5809bbd37d5bd02dae6305976e202d93
|
[
"MIT"
] | null | null | null |
DjangoSignUP/MySignUpApp/migrations/0001_initial.py
|
RATHINAMOORTHY/Django_SignUp_Project
|
294661ac5809bbd37d5bd02dae6305976e202d93
|
[
"MIT"
] | null | null | null |
DjangoSignUP/MySignUpApp/migrations/0001_initial.py
|
RATHINAMOORTHY/Django_SignUp_Project
|
294661ac5809bbd37d5bd02dae6305976e202d93
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-06-29 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='UsersModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
],
),
]
| 24 | 114 | 0.574275 |
44b97173ddbd57a0714e402c942529880955945c
| 1,621 |
py
|
Python
|
userbot/modules/covid.py
|
NaomiFutaba01/One4uBot
|
3a390d31170ffa368b6e4b3e7a2256ae9ead9291
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/covid.py
|
NaomiFutaba01/One4uBot
|
3a390d31170ffa368b6e4b3e7a2256ae9ead9291
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/covid.py
|
NaomiFutaba01/One4uBot
|
3a390d31170ffa368b6e4b3e7a2256ae9ead9291
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 |
2020-03-18T06:04:39.000Z
|
2020-04-08T13:14:46.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Port to UserBot by @MoveAngel
from datetime import datetime
from covid import Covid
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern="^.covid (.*)")
async def corona(event):
await event.edit("`Processing...`")
country = event.pattern_match.group(1)
covid = Covid(source="worldometers")
country_data = covid.get_status_by_country_name(country)
if country_data:
output_text = f"`Confirmed : {country_data['confirmed']}`\n"
output_text += f"`Active : {country_data['active']}`\n"
output_text += f"`Deaths : {country_data['deaths']}`\n"
output_text += f"`Recovered : {country_data['recovered']}`\n"
output_text += f"`New Cases : {country_data['new_cases']}`\n"
output_text += f"`New Deaths : {country_data['new_deaths']}`\n"
output_text += f"`Critical : {country_data['critical']}`\n"
output_text += f"`Total Tests : {country_data['total_tests']}`\n\n"
output_text += f"Data provided by [Worldometer](https://www.worldometers.info/coronavirus/country/{country})"
else:
output_text = "No information yet about this country!"
await event.edit(f"Corona Virus Info in {country}:\n\n{output_text}")
CMD_HELP.update({
    "covid":
    ".covid <country>"
    "\nUsage: Get information about COVID-19 data for your country.\n"
})
| 42.657895 | 117 | 0.664405 |
edccc69a9917b0607e75af22f25182fae2b91bd0
| 525 |
py
|
Python
|
reader/migrations/0016_auto_20180312_1423.py
|
Kadantte/comicake
|
205f327b920cbfdec86e57034d98a42f335d64ee
|
[
"ISC"
] | 18 |
2018-05-01T04:56:37.000Z
|
2022-03-27T16:11:57.000Z
|
reader/migrations/0016_auto_20180312_1423.py
|
Kadantte/comicake
|
205f327b920cbfdec86e57034d98a42f335d64ee
|
[
"ISC"
] | 32 |
2018-06-08T06:29:35.000Z
|
2022-03-11T23:32:21.000Z
|
reader/migrations/0016_auto_20180312_1423.py
|
Kadantte/comicake
|
205f327b920cbfdec86e57034d98a42f335d64ee
|
[
"ISC"
] | 13 |
2018-04-25T20:14:01.000Z
|
2021-12-12T10:02:05.000Z
|
# Generated by Django 2.0.2 on 2018-03-12 21:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reader', '0015_auto_20180312_1418'),
]
operations = [
migrations.RemoveField(
model_name='chapter',
name='slug',
),
migrations.AlterField(
model_name='comic',
name='slug',
field=models.SlugField(help_text='Changing this will break URLs', unique=True),
),
]
| 22.826087 | 91 | 0.580952 |
03a37cee0c04c89b65fcd8ee86583889a4914747
| 1,788 |
py
|
Python
|
galaxy/main/migrations/0133_collection_tasks.py
|
bmclaughlin/galaxy
|
3f57e3684c27cb88d45881eaec16dc3095ac4e6d
|
[
"Apache-2.0"
] | 904 |
2016-10-11T13:35:19.000Z
|
2022-03-25T09:29:09.000Z
|
galaxy/main/migrations/0133_collection_tasks.py
|
bmclaughlin/galaxy
|
3f57e3684c27cb88d45881eaec16dc3095ac4e6d
|
[
"Apache-2.0"
] | 1,866 |
2016-10-15T21:28:09.000Z
|
2022-03-29T18:09:20.000Z
|
galaxy/main/migrations/0133_collection_tasks.py
|
bmclaughlin/galaxy
|
3f57e3684c27cb88d45881eaec16dc3095ac4e6d
|
[
"Apache-2.0"
] | 368 |
2016-10-11T13:44:08.000Z
|
2022-03-30T02:23:12.000Z
|
from django.contrib.postgres import fields as psql_fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('pulp_app', '0002_task_name'),
('main', '0132_update_collecion_scores'),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('params', psql_fields.JSONField(null=True)),
('result', psql_fields.JSONField(null=True)),
],
),
migrations.CreateModel(
name='CollectionImport',
fields=[
('task_ptr', models.OneToOneField(
auto_created=True,
on_delete=models.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to='main.Task')),
('name', models.CharField(max_length=64)),
('version', models.CharField(max_length=64)),
('messages', psql_fields.JSONField(default=list)),
('lint_records', psql_fields.JSONField(default=list)),
('namespace', models.ForeignKey(
on_delete=models.CASCADE,
to='main.Namespace')),
],
bases=('main.task',),
),
migrations.AddField(
model_name='task',
name='pulp_task',
field=models.OneToOneField(
on_delete=models.CASCADE,
related_name='galaxy_task',
to='pulp_app.Task'),
),
]
| 34.384615 | 70 | 0.506711 |
122cd7f483a80960c9a23fc31e89de467e20a4ed
| 3,969 |
py
|
Python
|
tensorflow/python/saved_model/function_serialization.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/saved_model/function_serialization.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/saved_model/function_serialization.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for serializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import saved_object_graph_pb2
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.saved_model import nested_structure_coder
def _serialize_function_spec(function_spec, coder):
"""Serialize a FunctionSpec object into its proto representation."""
if function_spec.is_method and not function_spec.fullargspec.args:
raise NotImplementedError(
"Missing support to serialize a method function without a named "
"'self' argument.")
proto = saved_object_graph_pb2.FunctionSpec()
proto.fullargspec.CopyFrom(coder.encode_structure(function_spec.fullargspec))
proto.is_method = function_spec.is_method
proto.input_signature.CopyFrom(
coder.encode_structure(function_spec.input_signature))
return proto
def serialize_concrete_function(concrete_function, node_ids, coder):
"""Build a SavedConcreteFunction."""
bound_inputs = []
try:
for capture in concrete_function.captured_inputs:
bound_inputs.append(node_ids[capture])
except KeyError:
raise KeyError(
"Failed to add concrete function %s to object based saved model as it "
"captures tensor %s which is unsupported or not reachable from root. "
"One reason could be that a stateful object or a variable that the "
"function depends on is not assigned to an attribute of the serialized "
"trackable object "
"(see SaveTest.test_captures_unreachable_variable)."
% (concrete_function.name, capture))
concrete_function_proto = saved_object_graph_pb2.SavedConcreteFunction()
structured_outputs = func_graph_module.convert_structure_to_signature(
concrete_function.structured_outputs)
concrete_function_proto.canonicalized_input_signature.CopyFrom(
coder.encode_structure(concrete_function.structured_input_signature))
concrete_function_proto.output_signature.CopyFrom(
coder.encode_structure(structured_outputs))
concrete_function_proto.bound_inputs.extend(bound_inputs)
return concrete_function_proto
def serialize_bare_concrete_function(concrete_function):
"""Build a SavedBareConcreteFunction."""
# pylint: disable=protected-access
return saved_object_graph_pb2.SavedBareConcreteFunction(
concrete_function_name=concrete_function.name,
allowed_positional_arguments=concrete_function._num_positional_args,
argument_keywords=concrete_function._arg_keywords)
# pylint: enable=protected-access
def serialize_function(function):
"""Build a SavedFunction proto."""
coder = nested_structure_coder.StructureCoder()
proto = saved_object_graph_pb2.SavedFunction()
function_spec_proto = _serialize_function_spec(function.function_spec, coder)
proto.function_spec.CopyFrom(function_spec_proto)
all_concrete_functions = \
function._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
for concrete_function in all_concrete_functions:
proto.concrete_functions.append(concrete_function.name)
return proto
| 45.102273 | 100 | 0.761401 |
c78d22d99bd240f72adde91eba6de9755771dab0
| 655 |
py
|
Python
|
users/permissions.py
|
LucasSRocha/CashBack_GB
|
184f124bfcbd0c18472980a88bd95c0652a86e2f
|
[
"MIT"
] | null | null | null |
users/permissions.py
|
LucasSRocha/CashBack_GB
|
184f124bfcbd0c18472980a88bd95c0652a86e2f
|
[
"MIT"
] | 1 |
2020-06-30T07:44:53.000Z
|
2020-06-30T07:44:53.000Z
|
users/permissions.py
|
LucasSRocha/CashBack_GB
|
184f124bfcbd0c18472980a88bd95c0652a86e2f
|
[
"MIT"
] | null | null | null |
from rest_framework.permissions import BasePermission
class IsAdminOrSelf(BasePermission):
"""
Allow access to admin users or the user himself.
"""
def has_object_permission(self, request, view, obj):
        if request.user and (request.user.is_staff or request.user.is_superuser):
return True
elif request.user and type(obj) == type(request.user) and obj == request.user:
return True
return False
class AnonCreateUser(BasePermission):
def has_permission(self, request, view):
if request.user.is_authenticated or view.action == "create":
return True
return False
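# Illustrative wiring only (not part of the original module): one plausible way
# to combine these permission classes on a DRF viewset. The viewset name below
# is hypothetical.
#
# class UserViewSet(viewsets.ModelViewSet):
#     permission_classes = [AnonCreateUser, IsAdminOrSelf]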
| 29.772727 | 86 | 0.676336 |
fa15f4707b92e2a18f65cb988016b110d1325537
| 4,736 |
py
|
Python
|
scripts/viralrecall_qc.py
|
Achuan-2/ncldv_recover
|
c975a93c8781ef54414c6f06f3647b7b53b0b6f2
|
[
"MIT"
] | 1 |
2022-03-29T14:09:19.000Z
|
2022-03-29T14:09:19.000Z
|
scripts/viralrecall_qc.py
|
Achuan-2/ncldv_recover
|
c975a93c8781ef54414c6f06f3647b7b53b0b6f2
|
[
"MIT"
] | null | null | null |
scripts/viralrecall_qc.py
|
Achuan-2/ncldv_recover
|
c975a93c8781ef54414c6f06f3647b7b53b0b6f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
viralrecall QC criteria
* Remove contaminated contigs: when filtering the bin.fa file, keep only contigs with score > 0 & num_viralhits >= 3 & contig_length >= 5000
* After removing contaminated contigs, keep only bins whose total size exceeds 100 kb & mean score > 1 & that have at least 3 marker gene hits or an mcp gene
"""
import pandas as pd
import os.path
import argparse
def main(project, output):
infer_NCLDV_num = 0
table = []
bin_dirs = traversalDir_FirstDir(f"{project}")
for bin_dir in bin_dirs:
        # per-bin level
bin_path = f"{project}/{bin_dir}/{bin_dir}"
summary_table = f"{bin_path}.summary.tsv"
origin_fasta = f"{bin_path}.fa"
        output_path = f"{output}/{bin_dir}"
df = pd.read_table(summary_table)
filter_df = df[(df["score"] > 0) & (df["num_viralhits"]
>= 3) & (df["contig_length"] >= 5000)]
filter_contig = len(df)-len(filter_df)
filter_id = filter_df["replicon"]
fasta_dict = read_fasta(origin_fasta)
filtered_dict = {key: value for key,
value in fasta_dict.items() if key in list(filter_id)}
        # compute the total length and the mean score
length_sum = 0
for seq in filtered_dict.values():
length_sum += len(seq)
mean_score = filter_df["score"].mean()
        # filter bins: a bin passes only if its total length exceeds 100 kb and its mean score is above 1
maker_set = set(filter_df["markerhits"])
if '-' in maker_set:
maker_set.remove('-')
marker_list = []
for i in maker_set:
marker = i.split(':')[0]
if marker not in marker_list:
marker_list.append(marker)
flag = ("mcp" in marker_list) | (len(marker_list) >= 3)
if length_sum > 100000 and mean_score > 1 and flag:
print("1")
infer_NCLDV_num += 1
row = {'bin': bin_dir, 'before_contig_num': df.shape[0], 'before_length': df["contig_length"].sum(), 'before_mean_score': df["score"].mean(),
'filter_contig_num': filter_contig, 'contig_num': filter_df.shape[0], 'length': length_sum, 'mean_score': mean_score}
table.append(row)
            # if the bin is considered an NCLDV, produce the score pdf, the stats tsv and the filtered fasta sequences
mkdir(output_path)
os.system(
f"ln -s {bin_path}.pdf {output_path}/{bin_dir}.pdf ")
filter_df.to_csv(
f"{output_path}/{bin_dir}.summray_filter.tsv", sep="\t", index=False)
write_fasta(f"{output_path}/{bin_dir}_filtered.fa", filtered_dict)
print(f"共筛选出{infer_NCLDV_num}个NCLDV!")
infer_NCLDV_df = pd.DataFrame(table)
if not infer_NCLDV_df.empty:
infer_NCLDV_df = infer_NCLDV_df.sort_values(
by="mean_score", ascending=False)
infer_NCLDV_df.to_csv(
f"{output}/infer.tsv", sep="\t", index=False)
# helper function; path is the directory to scan
def traversalDir_FirstDir(path):
    # list used to store the results
path_list = []
    # check whether the path exists
if (os.path.exists(path)):
        # get all files and directories under this path
files = os.listdir(path)
for file in files:
            # build the full path of each entry
m = os.path.join(path, file)
            # check whether this path is a directory
if (os.path.isdir(m)):
h = os.path.split(m)
path_list.append(h[1])
return sorted(path_list)
def read_fasta(filename):
dict = {}
with open(filename, 'r') as fasta_f:
for line in fasta_f:
if line.startswith('>'):
name = line.rstrip()[1:]
dict[name] = ''
else:
                dict[name] += line.rstrip() # read the whole fasta file into a dict
return dict
def write_fasta(output, fasta_dict):
with open(output, 'w') as fasta_f:
        for key in fasta_dict.keys(): # write out the selected sequence names and sequences
fasta_f.write(">"+key + '\n')
fasta_f.write(fasta_dict[key] + '\n')
def mkdir(path):
folder = os.path.exists(path)
    if not folder: # create the directory if it does not already exist
        os.makedirs(path) # makedirs also creates intermediate directories when needed
if __name__=="__main__":
    # 0. parse the arguments
    # create an ArgumentParser object to hold the argument definitions
    description = """
    viralrecall QC criteria
    * Remove contaminated contigs: when filtering the bin.fa file, keep only contigs with score > 0 & num_viralhits >= 3 & contig_length >= 5000
    * After removing contaminated contigs, keep only bins whose total size exceeds 100 kb & mean score > 1 & that have at least 3 marker gene hits or an mcp gene
    """
parser = argparse.ArgumentParser(
description=description, formatter_class=argparse.RawTextHelpFormatter)
    # add_argument() declares the command-line options to parse
parser.add_argument(
'--input', '-i', type=str, help="input_dir: viralrecall result dir", required=True)
parser.add_argument(
'--output', '-o', type=str, help="output_dir: filtered output dir", required=True)
    args = parser.parse_args() # parse the input arguments into the args namespace
work_dir = args.input
output = args.output
    # 1. run the main function
main(work_dir,output)
| 34.318841 | 153 | 0.600929 |
ee63b91cf836019ec4abb6e6d7214d27c2db4ec1
| 2,086 |
py
|
Python
|
api/base/urls.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | null | null | null |
api/base/urls.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | null | null | null |
api/base/urls.py
|
DanielSBrown/osf.io
|
98dda2ac237377197acacce78274bc0a4ce8f303
|
[
"Apache-2.0"
] | null | null | null |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from settings import API_BASE
from . import views
base_pattern = '^{}'.format(API_BASE)
urlpatterns = [
url(base_pattern,
include(
[
url(r'^$', views.root, name='root'),
url(r'^applications/', include('api.applications.urls', namespace='applications')),
url(r'^addons/', include('api.addons.urls', namespace='addons')),
url(r'^comments/', include('api.comments.urls', namespace='comments')),
url(r'^nodes/', include('api.nodes.urls', namespace='nodes')),
url(r'^registrations/', include('api.registrations.urls', namespace='registrations')),
url(r'^metaschemas/', include('api.metaschemas.urls', namespace='metaschemas')),
url(r'^users/', include('api.users.urls', namespace='users')),
url(r'^tokens/', include('api.tokens.urls', namespace='tokens')),
url(r'^logs/', include('api.logs.urls', namespace='logs')),
url(r'^files/', include('api.files.urls', namespace='files')),
url(r'^docs/', include('rest_framework_swagger.urls')),
url(r'^institutions/', include('api.institutions.urls', namespace='institutions')),
url(r'^collections/', include('api.collections.urls', namespace='collections')),
url(r'^guids/', include('api.guids.urls', namespace='guids')),
url(r'^licenses/', include('api.licenses.urls', namespace='licenses')),
url(r'^wikis/', include('api.wikis.urls', namespace='wikis')),
url(r'^identifiers/', include('api.identifiers.urls', namespace='identifiers')),
],
)
),
url(r'^$', RedirectView.as_view(pattern_name=views.root), name='redirect-to-root')
]
urlpatterns += static('/static/', document_root=settings.STATIC_ROOT)
handler404 = views.error_404
| 48.511628 | 102 | 0.603068 |
8c5a3c0bc6e668510a5bfdc237733505091f31f9
| 1,542 |
py
|
Python
|
pychron/dvc/tasks/__init__.py
|
WiscAr/pychron
|
8d335d53ba7a5fc70760d9a7cb60540ad169ae84
|
[
"Apache-2.0"
] | 1 |
2019-02-27T21:57:44.000Z
|
2019-02-27T21:57:44.000Z
|
pychron/dvc/tasks/__init__.py
|
WiscAr/pychron
|
8d335d53ba7a5fc70760d9a7cb60540ad169ae84
|
[
"Apache-2.0"
] | 80 |
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/dvc/tasks/__init__.py
|
AGESLDEO/pychron
|
1a81e05d9fba43b797f335ceff6837c016633bcf
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
import os
from git import Repo
from pychron.dvc import repository_path
from pychron.paths import paths
def list_local_repos():
for i in os.listdir(paths.repository_dataset_dir):
if i.startswith('.'):
continue
elif i.startswith('~'):
continue
d = repository_path(i)
if os.path.isdir(d):
gd = os.path.join(d, '.git')
if os.path.isdir(gd):
r = Repo(d)
yield i, r.active_branch.name
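# Illustrative usage sketch (not in the original file); assumes
# paths.repository_dataset_dir points at an existing directory of git repositories:
#
# for name, branch in list_local_repos():
#     print(name, branch)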
# ============= EOF =============================================
| 32.125 | 81 | 0.533722 |
5f80e255ef2cd9511e9341aa620d4d715a8dec11
| 8,248 |
py
|
Python
|
shap/datasets.py
|
llja0112/shap
|
00631d3f2bf8eb045ca3f3bf0f972b2c796adab6
|
[
"MIT"
] | 2 |
2019-01-18T14:59:11.000Z
|
2020-01-02T21:09:31.000Z
|
shap/datasets.py
|
Pacmed/shap
|
cc90a0b17f62a5a446a7b631cbc123a04d143e15
|
[
"MIT"
] | null | null | null |
shap/datasets.py
|
Pacmed/shap
|
cc90a0b17f62a5a446a7b631cbc123a04d143e15
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import sklearn.datasets
import os
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
github_data_url = "https://github.com/slundberg/shap/raw/master/data/"
def imagenet50(display=False, resolution=224):
""" This is a set of 50 images representative of ImageNet images.
This dataset was collected by randomly finding a working ImageNet link and then pasting the
original ImageNet image into Google image search restricted to images licensed for reuse. A
similar image (now with rights to reuse) was downloaded as a rough replacment for the original
ImageNet image. The point is to have a random sample of ImageNet for use as a background
distribution for explaining models trained on ImageNet data.
Note that because the images are only rough replacements the labels might no longer be correct.
"""
prefix = github_data_url + "imagenet50_"
X = np.load(cache(prefix + "%sx%s.npy" % (resolution, resolution))).astype(np.float32)
y = np.loadtxt(cache(prefix + "labels.csv"))
return X, y
def boston(display=False):
""" Return the boston housing data in a nice package. """
d = sklearn.datasets.load_boston()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target # pylint: disable=E1101
def imdb(display=False):
    """ Return the classic IMDB sentiment analysis training data in a nice package.
Full data is at: http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
Paper to cite when using the data is: http://www.aclweb.org/anthology/P11-1015
"""
with open(cache(github_data_url + "imdb_train.txt")) as f:
data = f.readlines()
y = np.ones(25000, dtype=np.bool)
y[:12500] = 0
return data, y
def communitiesandcrime(display=False):
    """ Predict total number of non-violent crimes per 100K population.
This dataset is from the classic UCI Machine Learning repository:
https://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized
"""
raw_data = pd.read_csv(
cache(github_data_url + "CommViolPredUnnormalizedData.txt"),
na_values="?"
)
# find the indices where the total violent crimes are known
valid_inds = np.where(np.invert(np.isnan(raw_data.iloc[:,-2])))[0]
y = np.array(raw_data.iloc[valid_inds,-2], dtype=np.float)
# extract the predictive features and remove columns with missing values
X = raw_data.iloc[valid_inds,5:-18]
valid_cols = np.where(np.isnan(X.values).sum(0) == 0)[0]
X = X.iloc[:,valid_cols]
return X, y
def diabetes(display=False):
    """ Return the diabetes data in a nice package. """
d = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
return df, d.target # pylint: disable=E1101
def iris(display=False):
""" Return the classic iris data in a nice package. """
d = sklearn.datasets.load_iris()
df = pd.DataFrame(data=d.data, columns=d.feature_names) # pylint: disable=E1101
if display:
return df, [d.target_names[v] for v in d.target] # pylint: disable=E1101
else:
return df, d.target # pylint: disable=E1101
def adult(display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_data = pd.read_csv(
cache(github_data_url + "adult.data"),
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
data = raw_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
data["Target"] = data["Target"] == " >50K"
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
data[k] = np.array([rcode[v.strip()] for v in data[k]])
else:
data[k] = data[k].cat.codes
if display:
return raw_data.drop(["Education", "Target", "fnlwgt"], axis=1), data["Target"].values
else:
return data.drop(["Target", "fnlwgt"], axis=1), data["Target"].values
def nhanesi(display=False):
    """ A nicely packaged version of NHANES I data with survival times as labels.
"""
X = pd.read_csv(cache(github_data_url + "NHANESI_subset_X.csv"))
y = pd.read_csv(cache(github_data_url + "NHANESI_subset_y.csv"))["y"]
if display:
X_display = X.copy()
X_display["Sex"] = ["Male" if v == 1 else "Female" for v in X["Sex"]]
return X_display, np.array(y)
else:
return X, np.array(y)
def cric(display=False):
""" A nicely packaged version of CRIC data with progression to ESRD within 4 years as the label.
"""
X = pd.read_csv(cache(github_data_url + "CRIC_time_4yearESRD_X.csv"))
y = np.loadtxt(cache(github_data_url + "CRIC_time_4yearESRD_y.csv"))
if display:
X_display = X.copy()
return X_display, y
else:
return X, y
def corrgroups60(display=False):
""" A simulated dataset with tight correlations among distinct groups of features.
"""
# set a constant seed
old_seed = np.random.seed()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
    # set one coefficient from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
# build a correlation matrix with groups of 3 tightly correlated features
C = np.eye(M)
for i in range(0,30,3):
C[i,i+1] = C[i+1,i] = 0.99
C[i,i+2] = C[i+2,i] = 0.99
C[i+1,i+2] = C[i+2,i+1] = 0.99
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X_centered = X_start - X_start.mean(0)
Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0]
W = np.linalg.cholesky(np.linalg.inv(Sigma)).T
X_white = np.matmul(X_centered, W.T)
assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6 # ensure this decorrelates the data
# create the final data
X_final = np.matmul(X_white, np.linalg.cholesky(C).T)
X = X_final
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random seed
np.random.seed(old_seed)
return pd.DataFrame(X), y
def independentlinear60(display=False):
    """ A simulated dataset with independent features.
"""
# set a constant seed
old_seed = np.random.seed()
np.random.seed(0)
# generate dataset with known correlation
N = 1000
M = 60
# set one coefficent from each group of 3 to 1
beta = np.zeros(M)
beta[0:30:3] = 1
f = lambda X: np.matmul(X, beta)
# Make sure the sample correlation is a perfect match
X_start = np.random.randn(N, M)
X = X_start - X_start.mean(0)
y = f(X) + np.random.randn(N) * 1e-2
# restore the previous numpy random seed
np.random.seed(old_seed)
return pd.DataFrame(X), y
def a1a():
""" A sparse dataset in scipy csr matrix format.
"""
return sklearn.datasets.load_svmlight_file(cache(github_data_url + 'a1a.svmlight'))
def cache(url, file_name=None):
if file_name is None:
file_name = os.path.basename(url)
data_dir = os.path.join(os.path.dirname(__file__), "cached_data")
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
file_path = os.path.join(data_dir, file_name)
if not os.path.isfile(file_path):
urlretrieve(url, file_path)
return file_path
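# Illustrative usage sketch (not part of the original module): each loader
# returns a (features, labels) pair, and cache() downloads a file once and
# reuses the local copy on later calls.
#
# X, y = adult()                                  # assumes network access on first use
# local_path = cache(github_data_url + "adult.data")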
| 33.803279 | 123 | 0.647066 |
6cad0041a4b3c9444b7d4259871485d4608aea64
| 13,363 |
py
|
Python
|
examples/moleculardynamics/freenrg/freenrg.py
|
radical-cybertools/BigJobAsync
|
190540c61dd00dc538ece189ba858ce9bbc0d5ae
|
[
"MIT"
] | null | null | null |
examples/moleculardynamics/freenrg/freenrg.py
|
radical-cybertools/BigJobAsync
|
190540c61dd00dc538ece189ba858ce9bbc0d5ae
|
[
"MIT"
] | null | null | null |
examples/moleculardynamics/freenrg/freenrg.py
|
radical-cybertools/BigJobAsync
|
190540c61dd00dc538ece189ba858ce9bbc0d5ae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""This example illustrates how to run free energy calculations with Amber
MMPBSA.py for N replicas.
"""
__author__ = "Ole Weidner"
__email__ = "[email protected]"
__copyright__ = "Copyright 2013-2014, The RADICAL Project at Rutgers"
__license__ = "MIT"
import os, sys, uuid
import urllib
import optparse
import bigjobasync
from kernel import KERNEL
# ----------------------------------------------------------------------------
#
def resource_cb(origin, old_state, new_state):
"""Resource callback function: writes resource allocation state
changes to STDERR.
"""
msg = " * Resource '%s' state changed from '%s' to '%s'.\n" % \
(str(origin), old_state, new_state)
sys.stderr.write(msg)
if new_state == bigjobasync.FAILED:
# Print the log and exit if big job has failed
for entry in origin.log:
print " * LOG: %s" % entry
sys.stderr.write(" * EXITING.\n")
sys.exit(-1)
# ----------------------------------------------------------------------------
#
def task_cb(origin, old_state, new_state):
"""Task callback function: writes task state changes to STDERR
"""
msg = " * Task %s state changed from '%s' to '%s'.\n" % \
(str(origin), old_state, new_state)
sys.stderr.write(msg)
if new_state == bigjobasync.FAILED:
# Print the log entry if task has failed to run
for entry in origin.log:
print " LOG: %s" % entry
# ----------------------------------------------------------------------------
#
def run_workload(config, workload):
"""Runs the FE tasks defined in `workload`.
"""
resource_name = config['resource']
username = config['username']
workdir = config['workdir']
allocation = config['allocation']
numtasks = len(workload)
longest_runtime = 0
for task in workload:
if int(task['runtime']) > longest_runtime:
longest_runtime = int(task['runtime'])
############################################################
# The resource allocation
cluster = bigjobasync.Resource(
name = resource_name,
resource = bigjobasync.RESOURCES[resource_name],
username = username,
runtime = longest_runtime,
cores = numtasks,
workdir = workdir,
project_id = allocation
)
cluster.register_callbacks(resource_cb)
cluster.allocate(terminate_on_empty_queue=True)
############################################################
# The workload
tasknum = 0
all_tasks = []
for task in workload:
tasknum += 1
input_nmode = task["nmode"]
input_com = task["com"]
input_rec = task["rec"]
input_lig = task["lig"]
input_traj = task["traj"]
kernelcfg = KERNEL["MMPBSA"]["resources"][resource_name]
mmpbsa_task = bigjobasync.Task(
name = "MMPBSA-fe-task-%s" % tasknum,
cores = 1,
environment = kernelcfg["environment"],
executable = "/bin/bash",
arguments = ["-l", "-c", "\"%s && %s -i %s -cp %s -rp %s -lp %s -y %s \"" % \
(kernelcfg["pre_execution"],
kernelcfg["executable"],
os.path.basename(input_nmode),
os.path.basename(input_com),
os.path.basename(input_rec),
os.path.basename(input_lig),
os.path.basename(input_traj)
)],
input = [
{
"mode" : bigjobasync.LINK,
"origin" : bigjobasync.REMOTE,
"origin_path" : input_nmode,
},
{
"mode" : bigjobasync.LINK,
"origin" : bigjobasync.REMOTE,
"origin_path" : input_com,
},
{
"mode" : bigjobasync.LINK,
"origin" : bigjobasync.REMOTE,
"origin_path" : input_rec,
},
{
"mode" : bigjobasync.LINK,
"origin" : bigjobasync.REMOTE,
"origin_path" : input_lig,
},
{
"mode" : bigjobasync.LINK,
"origin" : bigjobasync.REMOTE,
"origin_path" : input_traj,
},
],
output = [
{
"mode" : bigjobasync.COPY,
"origin_path" : "FINAL_RESULTS_MMPBSA.dat",
"destination" : bigjobasync.LOCAL,
"destination_path" : "./traj-%s-FINAL_RESULTS_MMPBSA.dat" % tasknum
}
]
)
mmpbsa_task.register_callbacks(task_cb)
all_tasks.append(mmpbsa_task)
cluster.schedule_tasks(all_tasks)
cluster.wait()
print "DONE -- All trajectories have been processed."
return 0
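# Illustrative input shapes only (not part of the original script); the key
# names below are taken from the dictionary lookups in run_workload(), while
# the actual values and paths are made up:
#
# CONFIG = {'resource': '<a name defined in bigjobasync.RESOURCES>',
#           'username': 'myuser', 'workdir': '/scratch/myuser',
#           'allocation': 'ALLOC-123'}
# WORKLOAD = [{'runtime': 60,
#              'nmode': '/data/nmode.5h.py', 'com': '/data/com.top.2',
#              'rec': '/data/rec.top.2', 'lig': '/data/lig.top',
#              'traj': '/data/rep1.traj'}]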
# ----------------------------------------------------------------------------
#
def run_test_job(config):
"""Runs a single FE test job.
"""
resource_name = config['resource']
username = config['username']
workdir = config['workdir']
allocation = config['allocation']
# Download the sample data from MDStack server
sampledata = {
"nmode.5h.py" : "http://repex2.tacc.utexas.edu/cybertools/sampledata/MMBPSA/nmode.5h.py",
"com.top.2" : "http://repex2.tacc.utexas.edu/cybertools/sampledata/MMBPSA/com.top.2",
"rec.top.2" : "http://repex2.tacc.utexas.edu/cybertools/sampledata/MMBPSA/rec.top.2",
"lig.top" : "http://repex2.tacc.utexas.edu/cybertools/sampledata/MMBPSA/lig.top",
"rep1.traj" : "http://repex2.tacc.utexas.edu/cybertools/sampledata/MMBPSA/trajectories/rep1.traj"
}
try:
for key, val in sampledata.iteritems():
print " * Downloading sample input data %s" % val
urllib.urlretrieve(val, key)
except Exception, ex:
print "ERROR - Couldn't download sample data: %s" % str(ex)
return 1
############################################################
# The resource allocation
cluster = bigjobasync.Resource(
name = resource_name,
resource = bigjobasync.RESOURCES[resource_name],
username = username,
runtime = 60,
cores = 16,
workdir = workdir,
project_id = allocation
)
cluster.register_callbacks(resource_cb)
cluster.allocate(terminate_on_empty_queue=True)
############################################################
# The test task
output_file = "./MMPBSA-test-task-%s" % str(uuid.uuid4())
kernelcfg = KERNEL["MMPBSA"]["resources"][resource_name]
mmpbsa_test_task = bigjobasync.Task(
name = "MMPBSA-fe-test-task",
cores = 1,
environment = kernelcfg["environment"],
executable = "/bin/bash",
arguments = ["-l", "-c", "\"%s && %s -i nmode.5h.py -cp com.top.2 -rp rec.top.2 -lp lig.top -y rep1.traj \"" % \
(kernelcfg["pre_execution"], kernelcfg["executable"])],
input = [
{
"mode" : bigjobasync.COPY,
"origin" : bigjobasync.LOCAL,
"origin_path" : "/%s/nmode.5h.py" % os.getcwd(),
},
{
"mode" : bigjobasync.COPY,
"origin" : bigjobasync.LOCAL,
"origin_path" : "/%s/com.top.2" % os.getcwd(),
},
{
"mode" : bigjobasync.COPY,
"origin" : bigjobasync.LOCAL,
"origin_path" : "/%s/rec.top.2" % os.getcwd(),
},
{
"mode" : bigjobasync.COPY,
"origin" : bigjobasync.LOCAL,
"origin_path" : "/%s/lig.top" % os.getcwd(),
},
{
"mode" : bigjobasync.COPY,
"origin" : bigjobasync.LOCAL,
"origin_path" : "/%s/rep1.traj" % os.getcwd(),
},
],
output = [
{
"mode" : bigjobasync.COPY,
"origin_path" : "STDOUT" ,
"destination" : bigjobasync.LOCAL,
"destination_path" : output_file,
"trasfer_if_failed" : True
}
]
)
mmpbsa_test_task.register_callbacks(task_cb)
cluster.schedule_tasks([mmpbsa_test_task])
cluster.wait()
try:
with open(output_file, 'r') as content_file:
content = content_file.read()
print content
os.remove(output_file)
for key, val in sampledata.iteritems():
os.remove("./%s" % key)
except Exception:
pass
return 0
# ----------------------------------------------------------------------------
#
def run_sanity_check(config):
"""Runs a simple job that performs some sanity tests, determines
AMBER version, etc.
"""
resource_name = config['resource']
username = config['username']
workdir = config['workdir']
allocation = config['allocation']
############################################################
# The resource allocation
cluster = bigjobasync.Resource(
name = resource_name,
resource = bigjobasync.RESOURCES[resource_name],
username = username,
runtime = 5,
cores = 16,
workdir = workdir,
project_id = allocation
)
cluster.register_callbacks(resource_cb)
cluster.allocate(terminate_on_empty_queue=True)
############################################################
# The test task
output_file = "./MMPBSA-test-task-%s.OUT" % str(uuid.uuid4())
kernelcfg = KERNEL["MMPBSA"]["resources"][resource_name]
mmpbsa_check_task = bigjobasync.Task(
name = "MMPBSA-check-task",
cores = 1,
environment = kernelcfg["environment"],
executable = "/bin/bash",
arguments = ["-l", "-c", "\"%s && echo -n MMPBSA path: && which %s && echo -n MMPBSA version: && %s --version\"" % \
(kernelcfg["pre_execution"], kernelcfg["executable"], kernelcfg["executable"]) ],
output = [
{
"mode" : bigjobasync.COPY,
"origin_path" : "STDOUT" ,
"destination" : bigjobasync.LOCAL,
"destination_path" : output_file,
"trasfer_if_failed" : False
}
]
)
mmpbsa_check_task.register_callbacks(task_cb)
cluster.schedule_tasks([mmpbsa_check_task])
cluster.wait()
try:
with open(output_file, 'r') as content_file:
content = content_file.read()
print content
os.remove(output_file)
except Exception:
pass
return 0
# ----------------------------------------------------------------------------
#
if __name__ == "__main__":
usage = "usage: %prog --config [--checkenv, --testjob, --workload]"
parser = optparse.OptionParser(usage=usage)
parser.add_option('--checkenv',
dest='checkenv',
action="store_true",
help='Launches a test job to check the execution environment.')
parser.add_option('--testjob',
dest='testjob',
action="store_true",
help='Launches a test job with a single FE calculation.')
parser.add_option('-c', '--config',
metavar='CONFIG',
dest='config',
help='The machine / resource configuration file. (REQUIRED)')
parser.add_option('-w', '--workload',
metavar='WORKLOAD',
dest='workload',
help='Launches the FE tasks defined in the provided WORKLOAD file.')
# PARSE THE CMD LINE OPTIONS
(options, args) = parser.parse_args()
if options.config is None:
parser.error("You must define a configuration (-c/--config). Try --help for help.")
config = __import__(options.config.split(".")[0])
from config import CONFIG
if options.checkenv is True:
# RUN THE CHECK ENVIRONMENT JOB
result = run_sanity_check(config=CONFIG)
sys.exit(result)
elif options.testjob is True:
# RUN THE FE TEST JOB
result = run_test_job(config=CONFIG)
sys.exit(result)
elif options.workload is not None:
# RUN A WORKLOAD
workload = __import__(options.workload.split(".")[0])
from workload import WORKLOAD
result = run_workload(config=CONFIG, workload=WORKLOAD)
sys.exit(result)
else:
# ERROR - INVALID PARAMETERS
parser.error("You must run either --checkenv, --testjob or --workload. Try --help for help.")
sys.exit(1)
| 33.916244 | 126 | 0.493826 |
a3d0951ead94f48cac70dc2278c36104561bd701
| 13,194 |
py
|
Python
|
src/systems/coref_system.py
|
norikinishida/CoreferenceResolution
|
5334cc4953d76ed6358bf8124df54803569f09ac
|
[
"Apache-2.0"
] | null | null | null |
src/systems/coref_system.py
|
norikinishida/CoreferenceResolution
|
5334cc4953d76ed6358bf8124df54803569f09ac
|
[
"Apache-2.0"
] | null | null | null |
src/systems/coref_system.py
|
norikinishida/CoreferenceResolution
|
5334cc4953d76ed6358bf8124df54803569f09ac
|
[
"Apache-2.0"
] | null | null | null |
import random
import numpy as np
import torch
import utils
import models
class CorefSystem:
def __init__(self, device, config):
"""
Parameters
----------
device: str
config: utils.Config
"""
self.device = device
self.config = config
# Initialize model
if self.config["model_name"] == "joshi2020":
self.model = models.Joshi2020(device=device,
config=config)
else:
raise Exception("Invalid model_name %s" % self.config["model_name"])
# Show parameter shapes
utils.writelog("Model parameters:")
for name, param in self.model.named_parameters():
utils.writelog("%s: %s" % (name, tuple(param.shape)))
def load_model(self, path):
"""
Parameters
----------
path: str
"""
self.model.load_state_dict(torch.load(path, map_location=torch.device("cpu")), strict=False)
def save_model(self, path):
"""
Parameters
----------
path: str
"""
torch.save(self.model.state_dict(), path)
def to_gpu(self, device):
"""
Parameters
----------
device: str
"""
self.model.to(device)
def compute_loss(self, data):
"""
Parameters
----------
data: utils.DataInstance
Returns
-------
torch.Tensor
"""
# Tensorize inputs
# data_gpu = [x.to(self.device) for x in data] # old code
input_ids = data.input_ids
input_mask = data.input_mask
speaker_ids = data.speaker_ids
segment_len = data.segment_len
genre = data.genre
sentence_map = data.sentence_map
is_training = data.is_training
gold_starts = data.gold_starts
gold_ends = data.gold_ends
gold_mention_cluster_map = data.gold_mention_cluster_map
if len(data.segments) > self.config["truncation_size"]:
input_ids, input_mask, speaker_ids, segment_len, genre, sentence_map, \
is_training, gold_starts, gold_ends, gold_mention_cluster_map \
= self.truncate_example(input_ids=input_ids,
input_mask=input_mask,
speaker_ids=speaker_ids,
segment_len=segment_len,
genre=genre,
sentence_map=sentence_map,
is_training=is_training,
gold_starts=gold_starts,
gold_ends=gold_ends,
gold_mention_cluster_map=gold_mention_cluster_map)
input_ids = torch.tensor(input_ids, dtype=torch.long, device=self.device)
input_mask = torch.tensor(input_mask, dtype=torch.long, device=self.device)
speaker_ids = torch.tensor(speaker_ids, dtype=torch.long, device=self.device)
segment_len = torch.tensor(segment_len, dtype=torch.long, device=self.device)
genre = torch.tensor(genre, dtype=torch.long, device=self.device)
sentence_map = torch.tensor(sentence_map, dtype=torch.long, device=self.device)
is_training = torch.tensor(is_training, dtype=torch.bool, device=self.device)
gold_starts = torch.tensor(gold_starts, dtype=torch.long, device=self.device)
gold_ends = torch.tensor(gold_ends, dtype=torch.long, device=self.device)
gold_mention_cluster_map = torch.tensor(gold_mention_cluster_map, dtype=torch.long, device=self.device)
# Switch to training mode
self.model.train()
# Forward
_, loss = self.model.forward(input_ids=input_ids,
input_mask=input_mask,
speaker_ids=speaker_ids,
segment_len=segment_len,
genre=genre,
sentence_map=sentence_map,
is_training=is_training,
gold_starts=gold_starts,
gold_ends=gold_ends,
gold_mention_cluster_map=gold_mention_cluster_map)
return loss
def predict(self, data, evaluator=None, gold_clusters=None):
"""
Parameters
----------
data: utils.DataInstance
evaluator: CorefEvaluator, default None
gold_clusters: list[list[(int, int)]], default None
Returns
-------
list[list[(int, int)]]
CorefEvaluator or None
"""
if evaluator is None or gold_clusters is None:
assert evaluator is None and gold_clusters is None
# Tensorize inputs
input_ids = torch.tensor(data.input_ids, dtype=torch.long, device=self.device)
input_mask = torch.tensor(data.input_mask, dtype=torch.long, device=self.device)
speaker_ids = torch.tensor(data.speaker_ids, dtype=torch.long, device=self.device)
segment_len = torch.tensor(data.segment_len, dtype=torch.long, device=self.device)
genre = torch.tensor(data.genre, dtype=torch.long, device=self.device)
sentence_map = torch.tensor(data.sentence_map, dtype=torch.long, device=self.device)
is_training = torch.tensor(data.is_training, dtype=torch.bool, device=self.device)
# Tensorize targets
# gold_starts = torch.tensor(data.gold_starts, dtype=torch.long, device=self.device)
# gold_ends = torch.tensor(data.gold_ends, dtype=torch.long, device=self.device)
# gold_mention_cluster_map = torch.tensor(data.gold_mention_cluster_map, dtype=torch.long, device=self.device)
# Switch to inference mode
self.model.eval()
# Forward
(span_starts, span_ends, antecedent_indices, antecedent_scores), _ \
= self.model.forward(input_ids=input_ids,
input_mask=input_mask,
speaker_ids=speaker_ids,
segment_len=segment_len,
genre=genre,
sentence_map=sentence_map,
is_training=is_training,
gold_starts=None,
gold_ends=None,
gold_mention_cluster_map=None)
span_starts = span_starts.tolist()
span_ends = span_ends.tolist()
antecedent_indices = antecedent_indices.tolist()
antecedent_scores = antecedent_scores.tolist()
# Get predicted antecedents
predicted_antecedents = self.get_predicted_antecedents(antecedent_indices=antecedent_indices, antecedent_scores=antecedent_scores)
# Get clusters
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(
span_starts=span_starts,
span_ends=span_ends,
predicted_antecedents=predicted_antecedents)
if evaluator is None:
return predicted_clusters, None
# Update evaluator
# mention_to_predicted = {m: predicted_clusters[cluster_idx] for m, cluster_idx in mention_to_cluster_id.items()}
gold_clusters = [tuple(tuple(m) for m in cluster) for cluster in gold_clusters]
mention_to_gold = {m: cluster for cluster in gold_clusters for m in cluster}
evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
return predicted_clusters, evaluator
def get_predicted_antecedents(self, antecedent_indices, antecedent_scores):
"""
Parameters
----------
antecedent_indices: list[list[int]]
shape (n_top_spans, n_ant_spans)
antecedent_scores: list[list[float]]
shape (n_top_spans, 1 + n_ant_spans)
Returns
-------
list[int]
"""
predicted_antecedents = []
for i, idx in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if idx < 0:
# The dummy antecedent is selected.
# Since the coreference score to the dummy antecedent is always set to zero,
# the coreference scores to the non-dummy candidates are all negative.
predicted_antecedents.append(-1)
else:
# The maximum antecedent score is positive,
# and the selected antecedent is not dummy.
predicted_antecedents.append(antecedent_indices[i][idx])
return predicted_antecedents
def get_predicted_clusters(self, span_starts, span_ends, predicted_antecedents):
"""
Parameters
----------
span_starts: list[int]
span_ends: list[int]
predicted_antecedents: list[int]
Returns
-------
list[list[(int, int)]]
dict[(int, int), int]
"""
# Get predicted clusters
predicted_clusters = [] # list[list[(int, int)]]
mention_to_cluster_id = {} # dict[(int, int), int]
for mention_i, antecedent_i in enumerate(predicted_antecedents):
# No coreference
if antecedent_i < 0:
continue
# Check whether the coreference is valid
assert antecedent_i < mention_i, f'antecedent (index {antecedent_i}) must appear earlier than span (index {mention_i})'
# Add antecedent to cluster (if the antecedent is chosen for the first time)
antecedent = (int(span_starts[antecedent_i]), int(span_ends[antecedent_i])) # Antecedent span
antecedent_cluster_id = mention_to_cluster_id.get(antecedent, -1) # Cluster ID
if antecedent_cluster_id == -1:
# Add a new cluster
antecedent_cluster_id = len(predicted_clusters) # New cluster ID
predicted_clusters.append([antecedent]) # Add antecedent to cluster
mention_to_cluster_id[antecedent] = antecedent_cluster_id
else:
# This (antecedent) span is already selected as an antecedent of the previous mention(s)
pass
# Add mention to cluster
mention = (int(span_starts[mention_i]), int(span_ends[mention_i])) # Mention span
assert not mention in predicted_clusters[antecedent_cluster_id]
assert not mention in mention_to_cluster_id
predicted_clusters[antecedent_cluster_id].append(mention) # Add mention to cluster
mention_to_cluster_id[mention] = antecedent_cluster_id
predicted_clusters = [tuple(c) for c in predicted_clusters]
mention_to_predicted = {m: predicted_clusters[cluster_idx] for m, cluster_idx in mention_to_cluster_id.items()}
return predicted_clusters, mention_to_predicted
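    # Illustrative example (not part of the original code): with
    # span_starts=[0, 5], span_ends=[1, 6] and predicted_antecedents=[-1, 0],
    # the second span is linked back to the first, so get_predicted_clusters
    # returns predicted_clusters=[((0, 1), (5, 6))] and maps both mentions to
    # that cluster in mention_to_predicted.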
def truncate_example(self,
input_ids,
input_mask,
speaker_ids,
segment_len,
genre,
sentence_map,
is_training,
gold_starts,
gold_ends,
gold_mention_cluster_map,
segment_offset=None):
truncation_size = self.config["truncation_size"]
num_segments = input_ids.shape[0]
assert num_segments > truncation_size
# Get offsets
if segment_offset is None:
segment_offset = random.randint(0, num_segments - truncation_size) # Random!
word_offset = segment_len[:segment_offset].sum()
num_words = segment_len[segment_offset: segment_offset + truncation_size].sum()
# Extract continuous segments
input_ids = input_ids[segment_offset: segment_offset + truncation_size, :]
input_mask = input_mask[segment_offset: segment_offset + truncation_size, :]
speaker_ids = speaker_ids[segment_offset: segment_offset + truncation_size, :]
segment_len = segment_len[segment_offset: segment_offset + truncation_size]
sentence_map = sentence_map[word_offset: word_offset + num_words]
# Get gold spans within the window
gold_spans = (gold_starts < word_offset + num_words) & (gold_ends >= word_offset)
gold_starts = gold_starts[gold_spans] - word_offset # Adjust token indices
gold_ends = gold_ends[gold_spans] - word_offset # Adjust token indices
gold_mention_cluster_map = gold_mention_cluster_map[gold_spans]
return input_ids, input_mask, speaker_ids, segment_len, genre, sentence_map, \
is_training, gold_starts, gold_ends, gold_mention_cluster_map
| 43.117647 | 138 | 0.585797 |
dd4d1cf2a47e3ca66f99e6881664cabc51487712
| 3,008 |
py
|
Python
|
python/tests/test_convert.py
|
rommelDB/custrings
|
7fd21afb7af300d06aeb21553ea49508fd79b828
|
[
"Apache-2.0"
] | null | null | null |
python/tests/test_convert.py
|
rommelDB/custrings
|
7fd21afb7af300d06aeb21553ea49508fd79b828
|
[
"Apache-2.0"
] | null | null | null |
python/tests/test_convert.py
|
rommelDB/custrings
|
7fd21afb7af300d06aeb21553ea49508fd79b828
|
[
"Apache-2.0"
] | null | null | null |
#
import nvstrings
import numpy as np
#
from librmm_cffi import librmm as rmm
from librmm_cffi import librmm_config as rmm_cfg
rmm_cfg.use_pool_allocator = True
rmm.initialize()
#
s = nvstrings.to_device(["1234","5678","90",None,"-876","543.2","-0.12",".55","-.002","","de","abc123","123abc"])
print(s)
#
print(".stoi():",s.stoi())
arr = np.arange(s.size(),dtype=np.int32)
d_arr = rmm.to_device(arr)
s.stoi(d_arr.device_ctypes_pointer.value)
print(".stoi(devptr):",d_arr.copy_to_host())
#
print(".stof():",s.stof())
arr = np.arange(s.size(),dtype=np.float32)
d_arr = rmm.to_device(arr)
s.stof(d_arr.device_ctypes_pointer.value)
print(".stof(devptr):",d_arr.copy_to_host())
#
print(".hash():",s.hash())
arr = np.arange(s.size(),dtype=np.uint32)
d_arr = rmm.to_device(arr)
s.hash(d_arr.device_ctypes_pointer.value)
print(".hash(devptr):",d_arr.copy_to_host())
#
s = nvstrings.to_device(['1234567890', 'de', '1.75', '-34', '+9.8', '7¼', 'x³', '2³', '12⅝','','\t\r\n '])
print(s)
arr = np.arange(s.size(),dtype=np.byte)
d_arr = rmm.to_device(arr)
#
print(".isalnum():",s.isalnum())
s.isalnum(d_arr.device_ctypes_pointer.value)
print(".isalnum(devptr):",d_arr.copy_to_host())
#
print(".isalpha():",s.isalpha())
s.isalpha(d_arr.device_ctypes_pointer.value)
print(".isalpha(devptr):",d_arr.copy_to_host())
#
print(".isdigit():",s.isdigit())
s.isdigit(d_arr.device_ctypes_pointer.value)
print(".isdigit(devptr):",d_arr.copy_to_host())
#
print(".isdecimal():",s.isdecimal())
s.isdecimal(d_arr.device_ctypes_pointer.value)
print(".isdecimal(devptr):",d_arr.copy_to_host())
#
print(".isspace():",s.isspace())
s.isspace(d_arr.device_ctypes_pointer.value)
print(".isspace(devptr):",d_arr.copy_to_host())
#
print(".isnumeric():",s.isnumeric())
s.isnumeric(d_arr.device_ctypes_pointer.value)
print(".isnumeric(devptr):",d_arr.copy_to_host())
s = nvstrings.to_device(["1234","ABCDEF","1A2","cafe"])
print(s)
print(".htoi()",s.htoi())
arr = np.arange(s.size(),dtype=np.uint32)
d_arr = rmm.to_device(arr)
s.htoi(d_arr.device_ctypes_pointer.value)
print(".htoi(devptr)",d_arr.copy_to_host())
print("itos():",nvstrings.itos(d_arr))
nulls = np.empty(int(s.size()/8)+1, dtype=np.int8)
nulls[0] = 11
arr = d_arr.copy_to_host()
print("itos(nulls=\\b1011):",nvstrings.itos(arr,nulls=nulls))
s = nvstrings.to_device(["192.168.0.1","10.0.0.1",None,"","hello"])
print(s)
print(".ip2int()",s.ip2int())
print("int2ip()",nvstrings.int2ip(s.ip2int()))
s = nvstrings.to_device(["2019-03-20T12:34:56.789Z","2020-02-29"])
print(s)
print(".timestamp2int()",s.timestamp2int())
print(".timestamp2int(ms)",s.timestamp2int(units='milliseconds'))
print("int2timestamp()",nvstrings.int2timestamp([1553085296,1582934400]))
print("int2timestamp(ms)",nvstrings.int2timestamp([1553085296789,1582934400000],units='milliseconds'))
s = nvstrings.to_device(["true","false",None,"","true"])
print(s)
print(".to_booleans()",s.to_booleans(true="true"))
print("from_booleans",nvstrings.from_booleans([True,False,False,True],nulls=[11]))
s = None
| 29.490196 | 113 | 0.705785 |
4384fe9174f31515876ad5ee91f09d87cac9b5dd
| 1,333 |
py
|
Python
|
src/advertisingapp/forms.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
src/advertisingapp/forms.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
src/advertisingapp/forms.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
from dal import autocomplete
from django import forms
from .models import Campaign, Banner
class CampaignForm(forms.ModelForm):
class Meta:
model = Campaign
fields = ('__all__')
# class AssettForm(forms.ModelForm):
# campaign = forms.ModelChoiceField(
# queryset=Assett.objects.all(),
# widget=autocomplete.ModelSelect2(url='campaign-autocomplete',
# attrs={
# # Set some placeholder
# 'data-placeholder': 'Autocomplete ...',
# # Only trigger autocompletion after 3 characters have been typed
# # 'data-minimum-input-length': 3,
# },
# )
# )
# class Meta:
# model = Assett
# fields = ('__all__')
# class BannerForm(forms.ModelForm):
# campaign = forms.ModelChoiceField(
# queryset=Assett.objects.all(),
# widget=autocomplete.ModelSelect2(
# url='campaign-autocomplete',
# attrs={
# # Set some placeholder
# 'data-placeholder': 'Autocomplete ...',
# # Only trigger autocompletion after 3 characters have been typed
# # 'data-minimum-input-length': 3,
# },
# )
# )
# class Meta:
# model = Banner
# fields = ('__all__')
| 28.361702 | 82 | 0.549137 |
7bd858bfafab19e41d73a903ffea26c9c2c1d48e
| 3,330 |
py
|
Python
|
subcmds/cherry_pick.py
|
kimi98-2020/git-repo
|
63df80ccefe364c6794e049f98e2db5ce61bb623
|
[
"Apache-2.0"
] | 168 |
2017-07-05T04:26:54.000Z
|
2022-03-27T06:34:29.000Z
|
subcmds/cherry_pick.py
|
kimi98-2020/git-repo
|
63df80ccefe364c6794e049f98e2db5ce61bb623
|
[
"Apache-2.0"
] | 2 |
2020-09-06T17:25:29.000Z
|
2021-04-16T09:55:20.000Z
|
subcmds/cherry_pick.py
|
kimi98-2020/git-repo
|
63df80ccefe364c6794e049f98e2db5ce61bb623
|
[
"Apache-2.0"
] | 107 |
2020-02-02T14:54:37.000Z
|
2022-03-22T07:40:47.000Z
|
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from command import Command
from git_command import GitCommand
CHANGE_ID_RE = re.compile(r'^\s*Change-Id: I([0-9a-f]{40})\s*$')
class CherryPick(Command):
COMMON = True
helpSummary = "Cherry-pick a change."
helpUsage = """
%prog <sha1>
"""
helpDescription = """
'%prog' cherry-picks a change from one branch to another.
The change id will be updated, and a reference to the old
change id will be added.
"""
def ValidateOptions(self, opt, args):
if len(args) != 1:
self.Usage()
def Execute(self, opt, args):
reference = args[0]
p = GitCommand(None,
['rev-parse', '--verify', reference],
capture_stdout=True,
capture_stderr=True)
if p.Wait() != 0:
print(p.stderr, file=sys.stderr)
sys.exit(1)
sha1 = p.stdout.strip()
p = GitCommand(None, ['cat-file', 'commit', sha1], capture_stdout=True)
if p.Wait() != 0:
print("error: Failed to retrieve old commit message", file=sys.stderr)
sys.exit(1)
old_msg = self._StripHeader(p.stdout)
p = GitCommand(None,
['cherry-pick', sha1],
capture_stdout=True,
capture_stderr=True)
status = p.Wait()
print(p.stdout, file=sys.stdout)
print(p.stderr, file=sys.stderr)
if status == 0:
# The cherry-pick was applied correctly. We just need to edit the
# commit message.
new_msg = self._Reformat(old_msg, sha1)
p = GitCommand(None, ['commit', '--amend', '-F', '-'],
input=new_msg,
capture_stdout=True,
capture_stderr=True)
if p.Wait() != 0:
print("error: Failed to update commit message", file=sys.stderr)
sys.exit(1)
else:
print('NOTE: When committing (please see above) and editing the commit '
'message, please remove the old Change-Id-line and add:')
print(self._GetReference(sha1), file=sys.stderr)
print(file=sys.stderr)
def _IsChangeId(self, line):
return CHANGE_ID_RE.match(line)
def _GetReference(self, sha1):
return "(cherry picked from commit %s)" % sha1
def _StripHeader(self, commit_msg):
lines = commit_msg.splitlines()
return "\n".join(lines[lines.index("") + 1:])
def _Reformat(self, old_msg, sha1):
new_msg = []
for line in old_msg.splitlines():
if not self._IsChangeId(line):
new_msg.append(line)
# Add a blank line between the message and the change id/reference
try:
if new_msg[-1].strip() != "":
new_msg.append("")
except IndexError:
pass
new_msg.append(self._GetReference(sha1))
return "\n".join(new_msg)
| 30 | 78 | 0.633033 |
c67f21576460156119984b01313da4e5236a5478
| 384 |
py
|
Python
|
src/tools/PdfTools.py
|
Potato-DiGua/Tools
|
1e0dc6c115f665560cf4e254f35a9b8e1d4e0d02
|
[
"MIT"
] | null | null | null |
src/tools/PdfTools.py
|
Potato-DiGua/Tools
|
1e0dc6c115f665560cf4e254f35a9b8e1d4e0d02
|
[
"MIT"
] | null | null | null |
src/tools/PdfTools.py
|
Potato-DiGua/Tools
|
1e0dc6c115f665560cf4e254f35a9b8e1d4e0d02
|
[
"MIT"
] | null | null | null |
from typing import List
from PyPDF2 import PdfFileMerger
def merge_pdf(pdf_path: List[str], output_path: str):
files = [item for item in pdf_path if str(item).endswith(".pdf")]
file_merger = PdfFileMerger()
for pdf in files:
file_merger.append(pdf) # 合并pdf文件
file_merger.write(output_path)
print("[" + ",".join(pdf_path) + "]" + "->" + output_path)
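# Editorial usage sketch for merge_pdf (the file paths below are illustrative assumptions,
# not part of the original module):
# merge_pdf(["/tmp/chapter1.pdf", "/tmp/chapter2.pdf"], "/tmp/book.pdf")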
| 24 | 69 | 0.664063 |
ec7373cbc58562d98818bb149e6302c820b3c181
| 2,345 |
py
|
Python
|
starthinker_airflow/dags/sheets_copy_dag.py
|
RMStanford/starthinker
|
0a2df38bebb9d089bf91b6df01598d11a354eed3
|
[
"Apache-2.0"
] | null | null | null |
starthinker_airflow/dags/sheets_copy_dag.py
|
RMStanford/starthinker
|
0a2df38bebb9d089bf91b6df01598d11a354eed3
|
[
"Apache-2.0"
] | null | null | null |
starthinker_airflow/dags/sheets_copy_dag.py
|
RMStanford/starthinker
|
0a2df38bebb9d089bf91b6df01598d11a354eed3
|
[
"Apache-2.0"
] | null | null | null |
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
Sheet Copy
Copy tab from a sheet to a sheet.
Provide the full edit URL for both sheets.
Provide the tab name for both sheets.
The tab will only be copied if it does not already exist.
'''
from starthinker_airflow.factory import DAG_Factory
USER_CONN_ID = "google_cloud_default" # The connection to use for user authentication.
GCP_CONN_ID = "" # The connection to use for service authentication.
INPUTS = {
'from_sheet': '',
'from_tab': '',
'to_sheet': '',
'to_tab': '',
}
TASKS = [
{
'sheets': {
'auth': 'user',
'template': {
'sheet': {
'field': {
'name': 'from_sheet',
'kind': 'string',
'order': 1,
'default': ''
}
},
'tab': {
'field': {
'name': 'from_tab',
'kind': 'string',
'order': 2,
'default': ''
}
}
},
'sheet': {
'field': {
'name': 'to_sheet',
'kind': 'string',
'order': 3,
'default': ''
}
},
'tab': {
'field': {
'name': 'to_tab',
'kind': 'string',
'order': 4,
'default': ''
}
}
}
}
]
DAG_FACTORY = DAG_Factory('sheets_copy', { 'tasks':TASKS }, INPUTS)
DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)
DAG = DAG_FACTORY.execute()
if __name__ == "__main__":
DAG_FACTORY.print_commandline()
| 10.330396 | 86 | 0.495096 |
65a7e2d26539ac65bff28008bed6eb5d7ded799f
| 6,039 |
py
|
Python
|
micron/extraFunctions.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 6 |
2015-11-26T15:03:38.000Z
|
2020-10-05T14:08:54.000Z
|
micron/extraFunctions.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 7 |
2015-12-09T06:44:34.000Z
|
2021-12-14T15:51:28.000Z
|
micron/extraFunctions.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 3 |
2016-07-25T10:43:21.000Z
|
2021-12-07T14:12:47.000Z
|
#!/usr/bin/env python3
import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
from PyQt5 import QtCore, QtGui, QtWidgets
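# Editorial usage sketch for query_yes_no above (the prompt text is illustrative only):
# if query_yes_no("Proceed with the measurement?", default="no"):
#     print("continuing...")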
def moveToCentre(QtObj, host = None):
# https://stackoverflow.com/a/42326134/3211506
if host is None:
host = QtObj.parentWidget()
if host:
hostRect = host.frameGeometry()
QtObj.move(hostRect.center() - QtObj.rect().center())
else:
screenGeometry = QtWidgets.QDesktopWidget().availableGeometry()
try:
ObjWidth = QtObj.width()
ObjHeight = QtObj.height()
except TypeError as e:
ObjWidth = QtObj.width
ObjHeight = QtObj.height
_x = (screenGeometry.width() - ObjWidth) / 2;
_y = (screenGeometry.height() - ObjHeight) / 2;
QtObj.move(_x, _y);
# Rewritten play function
# We enforce PyAudio
# https://github.com/jiaaro/pydub/pull/421
# NOTE: REMOVE IF PR IS MERGED AND DEPLOYED
from pydub.utils import make_chunks
from pydub.playback import play as pydub_play
def _play_with_pyaudio(seg):
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(seg.sample_width),
channels=seg.channels,
rate=seg.frame_rate,
output=True)
# Just in case there were any exceptions/interrupts, we release the resource
# So as not to raise OSError: Device Unavailable should play() be used again
try:
# break audio into half-second chunks (to allows keyboard interrupts)
for chunk in make_chunks(seg, 500):
stream.write(chunk._data)
finally:
stream.stop_stream()
stream.close()
p.terminate()
def play(audio_segment):
try:
_play_with_pyaudio(audio_segment)
return
except ImportError:
pass
else:
return
pydub_play(audio_segment)
# https://stackoverflow.com/a/325528/3211506
import threading, time, ctypes, inspect
def _async_raise(tid, exctype):
    '''Raises an exception in the thread with id tid'''
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
raise SystemError("PyThreadState_SetAsyncExc failed")
class ThreadWithExc(threading.Thread):
    '''A thread class that supports raising an exception in the thread from
another thread.
'''
def _get_my_tid(self):
"""determines this (self's) thread id
CAREFUL : this function is executed in the context of the caller
thread, to get the identity of the thread represented by this
instance.
"""
if not self.isAlive():
raise threading.ThreadError("the thread is not active")
# do we have it cached?
if hasattr(self, "_thread_id"):
return self._thread_id
# no, look for it in the _active dict
for tid, tobj in threading._active.items():
if tobj is self:
self._thread_id = tid
return tid
# TODO: in python 2.6, there's a simpler way to do : self.ident
raise AssertionError("could not determine the thread's id")
def raiseExc(self, exctype):
"""Raises the given exception type in the context of this thread.
If the thread is busy in a system call (time.sleep(),
socket.accept(), ...), the exception is simply ignored.
If you are sure that your exception should terminate the thread,
one way to ensure that it works is:
t = ThreadWithExc( ... )
...
t.raiseExc( SomeException )
while t.isAlive():
time.sleep( 0.1 )
t.raiseExc( SomeException )
If the exception is to be caught by the thread, you need a way to
check that your thread has caught it.
CAREFUL : this function is executed in the context of the
        caller thread, to raise an exception in the context of the
thread represented by this instance.
"""
if not hasattr(self, 'my_tid'):
self.my_tid = self._get_my_tid()
_async_raise( self.my_tid, exctype )
def terminate(self):
if self.isAlive() and (not hasattr(self, 'terminateRequested') or not self.terminateRequested):
self.terminateRequested = True
self.raiseExc(SystemExit)
self.join()
class DoneObject():
def __init__(self):
pass
def __repr__(self):
return "<Done Object>"
| 32.643243 | 103 | 0.60871 |
a041bc558de06f3da834984e9cc22b002b5a2c8e
| 2,749 |
py
|
Python
|
python2.7/site-packages/twisted/words/test/test_jabberjid.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 4 |
2020-10-31T19:52:05.000Z
|
2021-09-22T11:39:27.000Z
|
python2.7/site-packages/twisted/words/test/test_jabberjid.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | null | null | null |
python2.7/site-packages/twisted/words/test/test_jabberjid.py
|
84KaliPleXon3/sslstrip-hsts-openwrt
|
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
|
[
"MIT"
] | 2 |
2020-02-27T08:28:35.000Z
|
2020-09-13T12:39:26.000Z
|
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.protocols.jabber import jid
class JIDParsingTest(unittest.TestCase):
def testParse(self):
# Basic forms
self.assertEquals(jid.parse("user@host/resource"),
("user", "host", "resource"))
self.assertEquals(jid.parse("user@host"),
("user", "host", None))
self.assertEquals(jid.parse("host"),
(None, "host", None))
self.assertEquals(jid.parse("host/resource"),
(None, "host", "resource"))
# More interesting forms
self.assertEquals(jid.parse("foo/bar@baz"),
(None, "foo", "bar@baz"))
self.assertEquals(jid.parse("boo@foo/bar@baz"),
("boo", "foo", "bar@baz"))
self.assertEquals(jid.parse("boo@foo/bar/baz"),
("boo", "foo", "bar/baz"))
self.assertEquals(jid.parse("boo/foo@bar@baz"),
(None, "boo", "foo@bar@baz"))
self.assertEquals(jid.parse("boo/foo/bar"),
(None, "boo", "foo/bar"))
self.assertEquals(jid.parse("boo//foo"),
(None, "boo", "/foo"))
def testInvalid(self):
# No host
try:
jid.parse("user@")
assert 0
except jid.InvalidFormat:
assert 1
# Double @@
try:
jid.parse("user@@host")
assert 0
except jid.InvalidFormat:
assert 1
# Multiple @
try:
jid.parse("user@host@host")
assert 0
except jid.InvalidFormat:
assert 1
def testPrep(self):
# case map of user
self.assertEquals(jid.prep("UsEr", "host", "resource"),
("user", "host", "resource"))
# case map of host
self.assertEquals(jid.prep("user", "hoST", "resource"),
("user", "host", "resource"))
# no case map of resource
self.assertNotEquals(jid.prep("user", "host", "Resource"),
("user", "host", "resource"))
class JIDClassTest(unittest.TestCase):
def testBasic(self):
j = jid.internJID("user@host")
self.assertEquals(j.userhost(), "user@host")
self.assertEquals(j.user, "user")
self.assertEquals(j.host, "host")
self.assertEquals(j.resource, None)
j2 = jid.internJID("user@host")
self.assertEquals(id(j), id(j2))
j_uhj = j.userhostJID()
self.assertEquals(id(j), id(j_uhj))
| 33.52439 | 66 | 0.504547 |
b0dd2218187a5459d1e744274981259433bfa75c
| 1,522 |
py
|
Python
|
ch2/plotmap.py
|
gili-Katagiri/ep-PML2nd
|
e32ac199a87b10d63d510860ef5f687254b3e0c5
|
[
"MIT"
] | null | null | null |
ch2/plotmap.py
|
gili-Katagiri/ep-PML2nd
|
e32ac199a87b10d63d510860ef5f687254b3e0c5
|
[
"MIT"
] | null | null | null |
ch2/plotmap.py
|
gili-Katagiri/ep-PML2nd
|
e32ac199a87b10d63d510860ef5f687254b3e0c5
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, ax, resolution=0.02):
markers = ('s','x','o','^','v')
colors = ('red','blue','lightgreen','gray','cyan')
cmap = ListedColormap( colors[:len(np.unique(y))] )
x1_min, x1_max = X[:,0].min()-1, X[:,0].max()+1
x2_min, x2_max = X[:,1].min()-1, X[:,1].max()+1
xx1, xx2 = np.meshgrid( np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution) )
Z = classifier.predict(np.array( [xx1.ravel(), xx2.ravel()]).T )
Z = Z.reshape(xx1.shape)
ax.contourf( xx1, xx2, Z, alpha=0.3, cmap=cmap)
ax.set_xlim(xx1.min(), xx1.max())
ax.set_ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
ax.scatter(x=X[y==cl, 0], y=X[y==cl, 1],
alpha=0.8, c=colors[idx],
marker=markers[idx],
label=cl,
edgecolor='black')
if __name__=='__main__':
from perceptron import Perceptron
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
dtpath = Path(__file__, '../iris.data').resolve(strict=True)
df = pd.read_csv(str(dtpath), header=None)
y = df.iloc[:100,4].values
y = np.where(y=='Iris-setosa', -1, 1)
X = df.iloc[:100,[0,2]].values
ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)
fig = plt.figure()
ax = fig.add_subplot()
plot_decision_regions(X, y, ppn, ax)
plt.show()
| 29.843137 | 106 | 0.581472 |
f1a380e7e75e569a0c60e09a52694223ae2b5da8
| 2,742 |
py
|
Python
|
keystone/routers.py
|
ioram7/keystone
|
81b5ad22cc8b85d622a332e62e5c4cb63155b654
|
[
"Apache-2.0"
] | 6 |
2016-08-06T09:00:17.000Z
|
2021-10-21T23:12:47.000Z
|
keystone/routers.py
|
ioram7/keystone
|
81b5ad22cc8b85d622a332e62e5c4cb63155b654
|
[
"Apache-2.0"
] | 1 |
2021-02-23T10:29:49.000Z
|
2021-02-23T10:29:49.000Z
|
keystone/routers.py
|
ioram7/keystone
|
81b5ad22cc8b85d622a332e62e5c4cb63155b654
|
[
"Apache-2.0"
] | 10 |
2016-04-25T20:10:06.000Z
|
2021-06-10T15:14:19.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The only types of routers in this file should be ``ComposingRouters``.
The routers for the backends should be in the backend-specific router modules.
For example, the ``ComposableRouter`` for ``identity`` belongs in::
keystone.identity.routers
"""
from keystone.common import wsgi
from keystone import controllers
class Extension(wsgi.ComposableRouter):
def __init__(self, is_admin=True):
if is_admin:
self.controller = controllers.AdminExtensions()
else:
self.controller = controllers.PublicExtensions()
def add_routes(self, mapper):
extensions_controller = self.controller
mapper.connect('/extensions',
controller=extensions_controller,
action='get_extensions_info',
conditions=dict(method=['GET']))
mapper.connect('/extensions/{extension_alias}',
controller=extensions_controller,
action='get_extension_info',
conditions=dict(method=['GET']))
class VersionV2(wsgi.ComposableRouter):
def __init__(self, description):
self.description = description
def add_routes(self, mapper):
version_controller = controllers.Version(self.description)
mapper.connect('/',
controller=version_controller,
action='get_version_v2')
class VersionV3(wsgi.ComposableRouter):
def __init__(self, description):
self.description = description
def add_routes(self, mapper):
version_controller = controllers.Version(self.description)
mapper.connect('/',
controller=version_controller,
action='get_version_v3')
class Versions(wsgi.ComposableRouter):
def __init__(self, description):
self.description = description
def add_routes(self, mapper):
version_controller = controllers.Version(self.description)
mapper.connect('/',
controller=version_controller,
action='get_versions')
| 33.851852 | 78 | 0.66229 |
9367c0382549c7a02f49b4a0cc5b2d4e93cdf2e3
| 13,084 |
py
|
Python
|
tv-script-generation/problem_unittests.py
|
sunsided/DLND
|
cf2f449e262dadba798bb35cb3d717d1d8a18d59
|
[
"MIT"
] | null | null | null |
tv-script-generation/problem_unittests.py
|
sunsided/DLND
|
cf2f449e262dadba798bb35cb3d717d1d8a18d59
|
[
"MIT"
] | null | null | null |
tv-script-generation/problem_unittests.py
|
sunsided/DLND
|
cf2f449e262dadba798bb35cb3d717d1d8a18d59
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
def _print_success_message():
print('Tests Passed')
def test_create_lookup_tables(create_lookup_tables):
with tf.Graph().as_default():
test_text = '''
Moe_Szyslak Moe's Tavern Where the elite meet to drink
Bart_Simpson Eh yeah hello is Mike there Last name Rotch
Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
Homer_Simpson I got my problems Moe Give me another one
Moe_Szyslak Homer hey you should not drink to forget your problems
Barney_Gumble Yeah you should only drink to enhance your social skills'''
test_text = test_text.lower()
test_text = test_text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(test_text)
# Check types
assert isinstance(vocab_to_int, dict),\
'vocab_to_int is not a dictionary.'
assert isinstance(int_to_vocab, dict),\
'int_to_vocab is not a dictionary.'
# Compare lengths of dicts
assert len(vocab_to_int) == len(int_to_vocab),\
'Length of vocab_to_int and int_to_vocab don\'t match. ' \
'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))
# Make sure the dicts have the same words
vocab_to_int_word_set = set(vocab_to_int.keys())
int_to_vocab_word_set = set(int_to_vocab.values())
assert not (vocab_to_int_word_set - int_to_vocab_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)
assert not (int_to_vocab_word_set - vocab_to_int_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)
# Make sure the dicts have the same word ids
vocab_to_int_word_id_set = set(vocab_to_int.values())
int_to_vocab_word_id_set = set(int_to_vocab.keys())
assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)
assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)
# Make sure the dicts make the same lookup
missmatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]
assert not missmatches,\
'Found {} missmatche(s). First missmatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(
len(missmatches),
*missmatches[0])
assert len(vocab_to_int) > len(set(test_text))/2,\
'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int))
_print_success_message()
def test_get_batches(get_batches):
with tf.Graph().as_default():
test_batch_size = 128
test_seq_length = 5
test_int_text = list(range(1000*test_seq_length))
batches = get_batches(test_int_text, test_batch_size, test_seq_length)
# Check type
assert isinstance(batches, np.ndarray),\
'Batches is not a Numpy array'
# Check shape
assert batches.shape == (7, 2, 128, 5),\
'Batches returned wrong shape. Found {}'.format(batches.shape)
for x in range(batches.shape[2]):
assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\
'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])
assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\
'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])
last_seq_target = (test_batch_size-1) * 35 + 31
last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))
last_seq[-1] = batches[0,0,0,0]
assert np.array_equal(batches[-1,1,-1], last_seq),\
'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)
_print_success_message()
def test_tokenize(token_lookup):
with tf.Graph().as_default():
symbols = set(['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n'])
token_dict = token_lookup()
# Check type
assert isinstance(token_dict, dict), \
'Returned type is {}.'.format(type(token_dict))
# Check symbols
missing_symbols = symbols - set(token_dict.keys())
unknown_symbols = set(token_dict.keys()) - symbols
assert not missing_symbols, \
'Missing symbols: {}'.format(missing_symbols)
assert not unknown_symbols, \
'Unknown symbols: {}'.format(unknown_symbols)
# Check values type
bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]
assert not bad_value_type,\
'Found token as {} type.'.format(bad_value_type[0])
# Check for spaces
key_has_spaces = [k for k in token_dict.keys() if ' ' in k]
val_has_spaces = [val for val in token_dict.values() if ' ' in val]
assert not key_has_spaces,\
'The key "{}" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])
assert not val_has_spaces,\
'The value "{}" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])
# Check for symbols in values
symbol_val = ()
for symbol in symbols:
for val in token_dict.values():
if symbol in val:
symbol_val = (symbol, val)
assert not symbol_val,\
'Don\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)
_print_success_message()
def test_get_inputs(get_inputs):
with tf.Graph().as_default():
input_data, targets, lr = get_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
# Check rank
input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())
targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())
lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())
assert input_rank == 2,\
'Input has wrong rank. Rank {} found.'.format(input_rank)
assert targets_rank == 2,\
'Targets has wrong rank. Rank {} found.'.format(targets_rank)
assert lr_rank == 0,\
'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)
_print_success_message()
def test_get_init_cell(get_init_cell):
with tf.Graph().as_default():
test_batch_size_ph = tf.placeholder(tf.int32, [])
test_rnn_size = 256
cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)
# Check type
assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
'Cell is wrong type. Found {} type'.format(type(cell))
# Check for name attribute
assert hasattr(init_state, 'name'),\
'Initial state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
# Check name
assert init_state.name == 'initial_state:0',\
'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)
_print_success_message()
def test_get_embed(get_embed):
with tf.Graph().as_default():
embed_shape = [50, 5, 256]
test_input_data = tf.placeholder(tf.int32, embed_shape[:2])
test_vocab_size = 27
test_embed_dim = embed_shape[2]
embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)
# Check shape
assert embed.shape == embed_shape,\
'Wrong shape. Found shape {}'.format(embed.shape)
_print_success_message()
def test_build_rnn(build_rnn):
with tf.Graph().as_default():
test_rnn_size = 256
test_rnn_layer_size = 2
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
outputs, final_state = build_rnn(test_cell, test_inputs)
# Check name
assert hasattr(final_state, 'name'),\
'Final state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
assert final_state.name == 'final_state:0',\
'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)
# Check shape
assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_build_nn(build_nn):
with tf.Graph().as_default():
test_input_data_shape = [None, 5] # HACK: used to be [128, 5]
test_input_data = tf.placeholder(tf.int32, test_input_data_shape)
test_rnn_size = 256
test_embed_dim = 300
test_rnn_layer_size = 2
test_vocab_size = 27
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)
# Check name
assert hasattr(final_state, 'name'), \
'Final state doesn\'t have the "name" attribute. Are you using build_rnn?'
assert final_state.name == 'final_state:0', \
'Final state doesn\'t have the correct name. Found the name {}. Are you using build_rnn?'.format(final_state.name)
# Check Shape
assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \
'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_get_tensors(get_tensors):
test_graph = tf.Graph()
with test_graph.as_default():
test_input = tf.placeholder(tf.int32, name='input')
test_initial_state = tf.placeholder(tf.int32, name='initial_state')
test_final_state = tf.placeholder(tf.int32, name='final_state')
test_probs = tf.placeholder(tf.float32, name='probs')
input_text, initial_state, final_state, probs = get_tensors(test_graph)
# Check correct tensor
assert input_text == test_input,\
'Test input is wrong tensor'
assert initial_state == test_initial_state, \
'Initial state is wrong tensor'
assert final_state == test_final_state, \
'Final state is wrong tensor'
assert probs == test_probs, \
'Probabilities is wrong tensor'
_print_success_message()
def test_pick_word(pick_word):
with tf.Graph().as_default():
test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])
test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}
pred_word = pick_word(test_probabilities, test_int_to_vocab)
# Check type
assert isinstance(pred_word, str),\
'Predicted word is wrong type. Found {} type.'.format(type(pred_word))
# Check word is from vocab
assert pred_word in test_int_to_vocab.values(),\
'Predicted word not found in int_to_vocab.'
_print_success_message()
| 41.801917 | 153 | 0.648731 |
a83245f90aea384ca5e6853be54bd8442f565da4
| 1,973 |
py
|
Python
|
ros2_control_demo_robot/launch/test_rrbot_description.launch.py
|
nuclearsandwich/ros2_control_demos
|
ecb96f53fef4b2185383207311cd3162be1ccd5b
|
[
"Apache-2.0"
] | null | null | null |
ros2_control_demo_robot/launch/test_rrbot_description.launch.py
|
nuclearsandwich/ros2_control_demos
|
ecb96f53fef4b2185383207311cd3162be1ccd5b
|
[
"Apache-2.0"
] | null | null | null |
ros2_control_demo_robot/launch/test_rrbot_description.launch.py
|
nuclearsandwich/ros2_control_demos
|
ecb96f53fef4b2185383207311cd3162be1ccd5b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Stogl Robotics Consulting UG (haftungsbeschränkt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
import xacro
def generate_launch_description():
# Get URDF via xacro
robot_description_path = os.path.join(
get_package_share_directory("ros2_control_demo_robot"),
"description",
"rrbot_system_position_only.urdf.xacro",
)
robot_description_config = xacro.process_file(robot_description_path)
robot_description = {"robot_description": robot_description_config.toxml()}
rviz_config_file = os.path.join(
get_package_share_directory("ros2_control_demo_robot"), "rviz", "rrbot.rviz"
)
joint_state_publisher_node = Node(
package="joint_state_publisher_gui",
executable="joint_state_publisher_gui",
)
robot_state_publisher_node = Node(
package="robot_state_publisher",
executable="robot_state_publisher",
output="both",
parameters=[robot_description],
)
rviz_node = Node(
package="rviz2",
executable="rviz2",
name="rviz2",
output="log",
arguments=["-d", rviz_config_file],
)
return LaunchDescription(
[
joint_state_publisher_node,
robot_state_publisher_node,
rviz_node,
]
)
| 30.353846 | 84 | 0.708059 |
ff543d1bb7eed8a3689e9febf67760b97d77e162
| 4,866 |
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/latest/get_virtual_appliance_site.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/latest/get_virtual_appliance_site.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/latest/get_virtual_appliance_site.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualApplianceSiteResult',
'AwaitableGetVirtualApplianceSiteResult',
'get_virtual_appliance_site',
]
@pulumi.output_type
class GetVirtualApplianceSiteResult:
"""
Virtual Appliance Site resource.
"""
def __init__(__self__, address_prefix=None, etag=None, name=None, o365_policy=None, provisioning_state=None, type=None):
if address_prefix and not isinstance(address_prefix, str):
raise TypeError("Expected argument 'address_prefix' to be a str")
pulumi.set(__self__, "address_prefix", address_prefix)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if o365_policy and not isinstance(o365_policy, dict):
raise TypeError("Expected argument 'o365_policy' to be a dict")
pulumi.set(__self__, "o365_policy", o365_policy)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[str]:
"""
Address Prefix.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the virtual appliance site.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="o365Policy")
def o365_policy(self) -> Optional['outputs.Office365PolicyPropertiesResponse']:
"""
Office 365 Policy.
"""
return pulumi.get(self, "o365_policy")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Site type.
"""
return pulumi.get(self, "type")
class AwaitableGetVirtualApplianceSiteResult(GetVirtualApplianceSiteResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualApplianceSiteResult(
address_prefix=self.address_prefix,
etag=self.etag,
name=self.name,
o365_policy=self.o365_policy,
provisioning_state=self.provisioning_state,
type=self.type)
def get_virtual_appliance_site(network_virtual_appliance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
site_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualApplianceSiteResult:
"""
Use this data source to access information about an existing resource.
:param str network_virtual_appliance_name: The name of the Network Virtual Appliance.
:param str resource_group_name: The name of the resource group.
:param str site_name: The name of the site.
"""
__args__ = dict()
__args__['networkVirtualApplianceName'] = network_virtual_appliance_name
__args__['resourceGroupName'] = resource_group_name
__args__['siteName'] = site_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/latest:getVirtualApplianceSite', __args__, opts=opts, typ=GetVirtualApplianceSiteResult).value
return AwaitableGetVirtualApplianceSiteResult(
address_prefix=__ret__.address_prefix,
etag=__ret__.etag,
name=__ret__.name,
o365_policy=__ret__.o365_policy,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
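# Editorial usage sketch (the resource names below are placeholders, not real resources):
# site = get_virtual_appliance_site(
#     network_virtual_appliance_name="example-nva",
#     resource_group_name="example-rg",
#     site_name="example-site")
# pulumi.export("siteAddressPrefix", site.address_prefix)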
| 36.313433 | 153 | 0.660912 |
5839ffafa4d2911e3f30bffe975b40a341b3a353
| 4,422 |
py
|
Python
|
weibo_api/weibo/article.py
|
ludawei/weibo_api
|
d64589d707181b002128f035b1e24e433dcb79e7
|
[
"MIT"
] | null | null | null |
weibo_api/weibo/article.py
|
ludawei/weibo_api
|
d64589d707181b002128f035b1e24e433dcb79e7
|
[
"MIT"
] | null | null | null |
weibo_api/weibo/article.py
|
ludawei/weibo_api
|
d64589d707181b002128f035b1e24e433dcb79e7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import math
from ..utils.normal import normal_attr
from ..utils.streaming import streaming
from .base import Base
from ..config.urls import (
ARTICLE_DETAIL_URL,
ARTICLE_LIST_URL)
class Article(Base):
"""
    Headline article
"""
def __init__(self, aid, cache, session):
super(Article, self).__init__(aid, cache, session)
def _build_url(self):
return ARTICLE_DETAIL_URL.format(id=self._id)
@property
@streaming()
def config(self):
return None
@property
@normal_attr(name_in_json='article')
def content(self):
"""
        Article content as HTML
:return:
"""
return None
@property
def title(self):
"""
        Article title
:return:
"""
return self.config.title
@property
def id(self):
"""
        Article ID
:return:
"""
return self.config.id
@property
def attitudes_count(self):
"""
        Number of likes
:return:
"""
return self.config.attitudes_count
@property
def author_name(self):
"""
        Name of the article's author
:return:
"""
return self.config.author_name
@property
def author_uid(self):
"""
        UID of the article's author
:return:
"""
return self.config.author_uid
@property
def author(self):
"""
        The article's author
:return:
"""
from ..weibo.people import People
return People(self.author_uid, None, self._session)
@property
def image(self):
"""
        Header image URL
:return:
"""
return self.config.image
@property
def read_count(self):
"""
        Read count
:return:
"""
return self.config.read_count
@property
def reposts_count(self):
"""
        Repost count
:return:
"""
return self.config.reposts_count
class Articles(Base):
"""
    All articles
"""
def __init__(self, uid, cache, session):
super(Articles, self).__init__(uid, cache, session)
self._page_num = 1
def _build_url(self):
return ARTICLE_LIST_URL.format(id=self._id, page_num=self._page_num)
@property
@streaming()
def data(self):
return None
@property
def _cards(self):
return self.data.cards
@property
def _cardlistInfo(self):
return self.data.cardlistInfo
@property
def total(self):
"""
        Total number of articles
:return:
"""
return self._cardlistInfo.total
@property
def _pages(self):
"""
        Total number of pages of articles
:return:
"""
return int(math.ceil(self.total / 10))
def page(self, page_num=1):
"""
        Fetch the articles on a given page; by default only the first page is fetched.
        :param page_num: page number
:return:
"""
from ..weibo.people import People
from ..weibo.status import Status
self.refresh()
self._page_num = page_num
for card in filter(lambda x: hasattr(x, 'mblog'), self._cards):
mblog = card.mblog
            # This "article" is in fact also a status; its content may contain an article link
            # TODO: later, parse the real article link out of the content and extract the headline article
article = Status(mblog.id, None, self._session)
article.text = mblog.raw_data().get('text')
article.created_at = mblog.raw_data().get('created_at')
            article.source = mblog.raw_data().get('source')
article.thumbnail_pic = mblog.raw_data().get('thumbnail_pic')
article.bmiddle_pic = mblog.raw_data().get('bmiddle_pic')
article.original_pic = mblog.raw_data().get('original_pic')
article.is_paid = mblog.raw_data().get('is_paid')
article.user = People(mblog.user.id, None, self._session)
article.pic_urls = [pic.get('url') for pic in mblog.raw_data().get('pics', [])]
yield article
def page_from_to(self, from_page, to_page):
"""
        Fetch all article statuses from page from_page through page to_page.
        :param from_page: int, start page
        :param to_page: int, end page
:return:
"""
for page_num in range(from_page, to_page + 1):
for article in self.page(page_num):
yield article
def all(self):
"""
        Fetch all of the user's articles
:return:
"""
return self.page_from_to(1, self._pages + 1)
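# Editorial usage sketch for the Articles class (uid and session are assumed to come
# from the caller; names are illustrative only):
# articles = Articles(uid, None, session)
# for status in articles.page(1):   # first page only
#     print(status.text)
# for status in articles.all():     # every page
#     pass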
| 22.676923 | 91 | 0.550204 |
5c7e873ce454aaee5367592647adef76bdd196ad
| 8,721 |
py
|
Python
|
app/MgrListener/MgrListener.py
|
rgrr/smartmeshsdk
|
a95f3e4d9e2254d59d326428fef8c77319cd4373
|
[
"BSD-3-Clause"
] | 29 |
2015-02-17T14:22:14.000Z
|
2021-02-19T06:01:10.000Z
|
app/MgrListener/MgrListener.py
|
rgrr/smartmeshsdk
|
a95f3e4d9e2254d59d326428fef8c77319cd4373
|
[
"BSD-3-Clause"
] | 17 |
2017-02-10T09:43:13.000Z
|
2017-09-09T05:46:49.000Z
|
app/MgrListener/MgrListener.py
|
rgrr/smartmeshsdk
|
a95f3e4d9e2254d59d326428fef8c77319cd4373
|
[
"BSD-3-Clause"
] | 35 |
2015-07-10T18:58:15.000Z
|
2022-03-20T08:56:25.000Z
|
#!/usr/bin/python
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..', '..','libs'))
sys.path.insert(0, os.path.join(here, '..', '..','external_libs'))
#============================ verify installation =============================
from SmartMeshSDK.utils import SmsdkInstallVerifier
(goodToGo,reason) = SmsdkInstallVerifier.verifyComponents(
[
SmsdkInstallVerifier.PYTHON,
SmsdkInstallVerifier.PYSERIAL,
]
)
if not goodToGo:
print "Your installation does not allow this application to run:\n"
print reason
raw_input("Press any button to exit")
sys.exit(1)
#============================ imports =========================================
import threading
from optparse import OptionParser
from SmartMeshSDK.utils import AppUtils, \
FormatUtils
from SmartMeshSDK.ApiDefinition import IpMgrDefinition
from SmartMeshSDK.IpMgrConnectorMux import IpMgrConnectorMux, \
IpMgrSubscribe
from dustUI import dustWindow, \
dustFrameConnection, \
dustFrameTable
#============================ logging =========================================
# local
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('App')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
# global
AppUtils.configureLogging()
#============================ defines =========================================
UPDATEPERIOD = 500 # in ms
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9900
#============================ body ============================================
##
# \addtogroup MgrListener
# \{
#
class notifClient(object):
def __init__(self, connector, disconnectedCallback):
# store params
self.connector = connector
self.disconnectedCallback = disconnectedCallback
# variables
self.data = []
self.dataLock = threading.Lock()
# subscriber
self.subscriber = IpMgrSubscribe.IpMgrSubscribe(self.connector)
self.subscriber.start()
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA,
IpMgrSubscribe.IpMgrSubscribe.NOTIFIPDATA,
],
fun = self._notifCallback,
isRlbl = False,
)
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.NOTIFEVENT,
IpMgrSubscribe.IpMgrSubscribe.NOTIFLOG,
IpMgrSubscribe.IpMgrSubscribe.NOTIFHEALTHREPORT,
],
fun = self._notifCallback,
isRlbl = True,
)
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.ERROR,
IpMgrSubscribe.IpMgrSubscribe.FINISH,
],
fun = self.disconnectedCallback,
isRlbl = True,
)
#======================== public ==========================================
def getData(self):
self.dataLock.acquire()
returnVal = self.data[:]
self.dataLock.release()
return returnVal
def disconnect(self):
self.connector.disconnect()
#======================== private =========================================
def _notifCallback(self, notifName, notifParams):
self.dataLock.acquire()
# find notifName row
found=False
for row in self.data:
if row[0]==notifName:
found=True
break
# create row if needed
if not found:
self.data.append([notifName,0])
row = self.data[-1]
# increment counter
row[1] += 1
self.dataLock.release()
class notifGui(object):
def __init__(self):
# variables
self.guiLock = threading.Lock()
self.apiDef = IpMgrDefinition.IpMgrDefinition()
self.notifClientHandler = None
# create window
self.window = dustWindow.dustWindow('MgrListener',
self._windowCb_close)
# add a connection frame
self.connectionFrame = dustFrameConnection.dustFrameConnection(
self.window,
self.guiLock,
self._connectionFrameCb_connected,
frameName="manager connection",
row=0,column=0)
self.connectionFrame.apiLoaded(self.apiDef)
self.connectionFrame.show()
# add a table frame
self.tableFrame = dustFrameTable.dustFrameTable(self.window,
self.guiLock,
frameName="received notifications",
row=1,column=0)
self.tableFrame.show()
#======================== public ==========================================
def start(self, connect_params):
# TODO: how to use connect_params?
'''
This command instructs the GUI to start executing and reacting to
user interactions. It never returns and should therefore be the last
command called.
'''
try:
self.window.mainloop()
except SystemExit:
sys.exit()
#======================== private =========================================
def _windowCb_close(self):
if self.notifClientHandler:
self.notifClientHandler.disconnect()
def _connectionFrameCb_connected(self,connector):
'''
\brief Called when the connectionFrame has connected.
'''
# store the connector
self.connector = connector
# schedule the GUI to update itself in UPDATEPERIOD ms
self.tableFrame.after(UPDATEPERIOD,self._updateTable)
# start a notification client
self.notifClientHandler = notifClient(
self.connector,
self._connectionFrameCb_disconnected
)
def _connectionFrameCb_disconnected(self,notifName,notifParams):
'''
\brief Called when the connectionFrame has disconnected.
'''
# update the GUI
self.connectionFrame.updateGuiDisconnected()
# delete the connector
if self.connector:
self.connector.disconnect()
self.connector = None
def _updateTable(self):
# get the data
dataToPlot = self.notifClientHandler.getData()
# update the frame
self.tableFrame.update(dataToPlot)
# schedule the next update
self.tableFrame.after(UPDATEPERIOD,self._updateTable)
#============================ main ============================================
def main(connect_params):
notifGuiHandler = notifGui()
notifGuiHandler.start(connect_params)
if __name__ == '__main__':
# Parse the command line
parser = OptionParser("usage: %prog [options]", version="%prog 1.0")
parser.add_option("--host", dest="host",
default=DEFAULT_HOST,
help="Mux host to connect to")
parser.add_option("-p", "--port", dest="port",
default=DEFAULT_PORT,
help="Mux port to connect to")
(options, args) = parser.parse_args()
connect_params = {
'host': options.host,
'port': int(options.port),
}
main(connect_params)
##
# end of MgrListener
# \}
#
| 32.785714 | 81 | 0.469212 |
7125f29a5ac916ff8405e45ce77f524e31bf2fbb
| 5,655 |
py
|
Python
|
main_ann_ae.py
|
cmranieri/FullySpikingVAE
|
c59e7e704f55246dd2e9891964259d2e611917fa
|
[
"MIT"
] | 14 |
2021-10-05T06:28:06.000Z
|
2022-03-29T08:41:50.000Z
|
main_ann_ae.py
|
cmranieri/FullySpikingVAE
|
c59e7e704f55246dd2e9891964259d2e611917fa
|
[
"MIT"
] | null | null | null |
main_ann_ae.py
|
cmranieri/FullySpikingVAE
|
c59e7e704f55246dd2e9891964259d2e611917fa
|
[
"MIT"
] | 4 |
2021-10-05T08:57:45.000Z
|
2022-03-27T21:36:56.000Z
|
import os
import os.path
import numpy as np
import logging
import argparse
import pycuda.driver as cuda
import torch
import torchvision
from torch.nn.utils import clip_grad_norm_
from torch.nn.utils import clip_grad_value_
from torch.utils.tensorboard import SummaryWriter
from utils import AverageMeter
from utils import aboutCudaDevices
from datasets import load_dataset_ann
import models.ann_ae as ann_ae
max_accuracy = 0
min_loss = 1000
def train(network, trainloader, opti, epoch):
loss_meter = AverageMeter()
network = network.train()
for batch_idx, (real_img, label) in enumerate(trainloader):
opti.zero_grad()
real_img = real_img.to(device)
recons, latent = network(real_img)
loss = network.loss_function(recons, real_img)
loss.backward()
opti.step()
loss_meter.update(loss.detach().cpu().item())
print(f'Train[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}')
if batch_idx == len(trainloader)-1:
os.makedirs(f'checkpoint/{args.name}/imgs/train/', exist_ok=True)
torchvision.utils.save_image((real_img+1)/2, f'checkpoint/{args.name}/imgs/train/epoch{epoch}_input.png')
torchvision.utils.save_image((recons+1)/2, f'checkpoint/{args.name}/imgs/train/epoch{epoch}_recons.png')
writer.add_images('Train/input_img', (real_img+1)/2, epoch)
writer.add_images('Train/recons_img', (recons+1)/2, epoch)
logging.info(f"Train [{epoch}] Loss: {loss_meter.avg}")
writer.add_scalar('Train/loss', loss_meter.avg, epoch)
return loss_meter.avg
def test(network, trainloader, epoch):
loss_meter = AverageMeter()
network = network.eval()
with torch.no_grad():
for batch_idx, (real_img, label) in enumerate(trainloader):
real_img = real_img.to(device)
#normalized_img = normalized_img.to(device)
recons, latent = network(real_img)
loss = network.loss_function(recons, real_img)
loss_meter.update(loss.detach().cpu().item())
print(f'Test[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}')
if batch_idx == len(trainloader)-1:
os.makedirs(f'checkpoint/{args.name}/imgs/test/', exist_ok=True)
torchvision.utils.save_image((real_img+1)/2, f'checkpoint/{args.name}/imgs/test/epoch{epoch}_input.png')
torchvision.utils.save_image((recons+1)/2, f'checkpoint/{args.name}/imgs/test/epoch{epoch}_recons.png')
writer.add_images('Test/input_img', (real_img+1)/2, epoch)
writer.add_images('Test/recons_img', (recons+1)/2, epoch)
logging.info(f"Test [{epoch}] Loss: {loss_meter.avg}")
writer.add_scalar('Test/loss', loss_meter.avg, epoch)
return loss_meter.avg
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('name', type=str)
parser.add_argument('-dataset', type=str, required=True)
parser.add_argument('-batch_size', type=int, default=250)
parser.add_argument('-latent_dim', type=int, default=128)
parser.add_argument('-checkpoint', action='store', dest='checkpoint', help='The path of checkpoint, if use checkpoint')
parser.add_argument('-device', type=int, default=0)
try:
args = parser.parse_args()
except:
parser.print_help()
exit(0)
if args.device is None:
device = torch.device("cuda:0")
else:
device = torch.device(f"cuda:{args.device}")
if args.dataset.lower() == 'mnist':
train_loader, test_loader = load_dataset_ann.load_mnist(args.batch_size)
in_channels = 1
net = ann_ae.AE(in_channels, args.latent_dim)
elif args.dataset.lower() == 'fashion':
train_loader, test_loader = load_dataset_ann.load_fashionmnist(args.batch_size)
in_channels = 1
net = ann_ae.AE(in_channels, args.latent_dim)
elif args.dataset.lower() == 'celeba':
train_loader, test_loader = load_dataset_ann.load_celeba(args.batch_size)
in_channels = 3
net = ann_ae.AELarge(in_channels, args.latent_dim)
elif args.dataset.lower() == 'cifar10':
train_loader, test_loader = load_dataset_ann.load_cifar10(args.batch_size)
in_channels = 3
net = ann_ae.AE(in_channels, args.latent_dim)
else:
raise ValueError("invalid dataset")
net = net.to(device)
os.makedirs(f'checkpoint/{args.name}', exist_ok=True)
writer = SummaryWriter(log_dir=f'checkpoint/{args.name}/tb')
logging.basicConfig(filename=f'checkpoint/{args.name}/{args.name}.log', level=logging.INFO)
logging.info(args)
if torch.cuda.is_available():
cuda.init()
c_device = aboutCudaDevices()
print(c_device.info())
print("selected device: ", args.device)
else:
raise Exception("only support gpu")
if args.checkpoint is not None:
checkpoint_path = args.checkpoint
checkpoint = torch.load(checkpoint_path)
net.load_state_dict(checkpoint)
optimizer = torch.optim.AdamW(net.parameters(), lr=0.001, betas=(0.9, 0.999))
best_loss = 1e8
max_epoch = 150
for e in range(max_epoch):
train_loss = train(net, train_loader, optimizer, e)
test_loss = test(net, test_loader, e)
torch.save(net.state_dict(), f'checkpoint/{args.name}/checkpoint.pth')
if test_loss < best_loss:
best_loss = test_loss
torch.save(net.state_dict(), f'checkpoint/{args.name}/best.pth')
writer.close()
| 36.483871 | 123 | 0.662069 |
a71f702db5ae949deaecd3a7e99b5f37c48944c2
| 22,805 |
py
|
Python
|
scripts/tvm_cli/tvm_cli.py
|
Interplai/modelzoo
|
45829048b708b9fb9c6d8d13912ae44f184710de
|
[
"Apache-2.0"
] | null | null | null |
scripts/tvm_cli/tvm_cli.py
|
Interplai/modelzoo
|
45829048b708b9fb9c6d8d13912ae44f184710de
|
[
"Apache-2.0"
] | null | null | null |
scripts/tvm_cli/tvm_cli.py
|
Interplai/modelzoo
|
45829048b708b9fb9c6d8d13912ae44f184710de
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
#
# Copyright (c) 2020-2021, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import sys
from os import path
import importlib
import onnx
import yaml
import tvm
import tvm.relay.testing.tf as tf_testing
import tvm.contrib.graph_runtime as runtime
from tvm import relay
from tvm import autotvm
from tvm.contrib import cc
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
from jinja2 import Environment, FileSystemLoader
import pytest
import tensorflow as tf
import numpy as np
OUTPUT_NETWORK_MODULE_FILENAME = "deploy_lib.so"
OUTPUT_NETWORK_GRAPH_FILENAME = "deploy_graph.json"
OUTPUT_NETWORK_PARAM_FILENAME = "deploy_param.params"
OUTPUT_CONFIG_FILENAME = "inference_engine_tvm_config.hpp"
TARGETS_DEVICES = {
'llvm':'kDLCPU',
'cuda':'kDLGPU',
'opencl':'kDLOpenCL',
'vulkan':'kDLVulkan',
}
GPU_TARGETS = ['cuda', 'opencl', 'vulkan']
def yaml_processing(config, info):
'''Utility function: definition.yaml file processing'''
with open(config, 'r') as yml_file:
yaml_dict = yaml.safe_load(yml_file)
# Get path of model file
info['model'] = yaml_dict['network']['filename']
if not info['model'].startswith('/'):
yaml_file_dir = path.dirname(yml_file.name)
info['model'] = path.join(yaml_file_dir, info['model'])
# Get list of input names and shapes from .yaml file
info['input_list'] = yaml_dict['network_parameters']['input_nodes']
info['input_dict'] = {} # Used to compile the model
for input_elem in info['input_list']:
info['input_dict'][str(input_elem['name'])] = input_elem['shape']
# Get input data type
info['input_data_type'] = yaml_dict['network_parameters']['datatype']
if info['input_data_type'] == 'float32':
info['dtype_code'] = 'kDLFloat'
info['dtype_bits'] = 32
elif info['input_data_type'] == 'int8':
info['dtype_code'] = 'kDLInt'
info['dtype_bits'] = 8
else:
raise Exception('Specified input data type not supported')
# Get list of output names and shapes from .yaml file
info['output_list'] = yaml_dict['network_parameters']['output_nodes']
info['output_names'] = [] # Used to compile the model
for output_elem in info['output_list']:
info['output_names'].append(str(output_elem['name']))
return info
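# Sketch of the definition.yaml layout that yaml_processing() expects. The keys are exactly the ones
# read above; the concrete file name, node names and shapes are illustrative assumptions:
#   network:
#     filename: model.onnx
#   network_parameters:
#     datatype: float32            # or int8
#     input_nodes:
#       - name: input_1
#         shape: [1, 3, 224, 224]
#     output_nodes:
#       - name: output_1
#         shape: [1, 1000]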
def get_network(info):
'''Utility function to load the model'''
if info['model'].endswith('.onnx'):
onnx_model = onnx.load(info['model'])
mod, params = relay.frontend.from_onnx(onnx_model, info['input_dict'])
elif info['model'].endswith('.pb'):
with tf.compat.v1.Session() as sess:
with tf.io.gfile.GFile(info['model'], 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
input_map = {}
for index, (name, shape) in enumerate(
info['input_dict'].items()):
tf_new_image = tf.compat.v1.placeholder(
shape=[1 if x == -1 else x for x in shape],
dtype=info['input_data_type'],
name=name)
input_map["input:"+str(index)] = tf_new_image
tf.import_graph_def(graph_def,
name='',
input_map = input_map)
graph_def = sess.graph.as_graph_def()
graph_def = tf_testing.ProcessGraphDefParam(graph_def)
input_shape_dict = {'DecodeJpeg/contents': info['input_list']}
mod, params = relay.frontend.from_tensorflow(
graph_def,
shape=input_shape_dict,
outputs=info['output_names'])
else:
raise Exception('Model file format not supported')
# Transform data layout to what is expected by CUDA hardware, i.e. NCHW.
# The same code is used in the llvm case too, as this allows for a simpler
# handling of AutoTVM tuning. For tuning on x86, the NCHWc layout would be
# the best choice, but TVM doesn't fully support it yet
if info['target'] in GPU_TARGETS:
desired_layouts = {'nn.conv2d': ['NCHW', 'default']}
elif info['target'].startswith('llvm'):
desired_layouts = {'nn.conv2d': ['NCHW', 'default']}
else:
raise Exception('Target not supported')
seq = tvm.transform.Sequential(
[relay.transform.RemoveUnusedFunctions(),
relay.transform.ConvertLayout(desired_layouts)])
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
return mod, params
def compilation_preprocess(args):
'''
This function checks the command-line arguments and reads the necessary info
from the .yaml file, it's used when the compile option is selected
'''
# 'info' is the output dictionary
info = {}
info['lanes'] = args.lanes
info['device_type'] = TARGETS_DEVICES[args.target]
info['device_id'] = args.device_id
info['target'] = args.target
info['cross_compile'] = args.cross_compile
info['autotvm_log'] = args.autotvm_log
info['header_extension'] = '.h' if args.autoware_version == 'ai' else '.hpp'
info = yaml_processing(args.config, info)
# Define the root directory and check if the specified output_path exists.
# If not the corresponding directories are created.
# Note: if output_path has not been specified by the user,
# default to the 'filename' field from the .yaml file
if args.output_path:
info['output_path'] = args.output_path
if not path.isdir(info['output_path']):
os.makedirs(info['output_path'])
# Starting from the config file directory, take 4 levels of parent
# directory as the namespace in the case of the model zoo these 4 levels
# correspond to <task area>/<autonomous driving task>/<model name>/<model
# variant name>.
model_dir = path.abspath(path.dirname(args.config))
namespaces = model_dir.split(path.sep)
if len(namespaces) < 4:
info['namespace'] = model_dir
info['network_name'] = model_dir
else:
info['namespace'] = path.sep.join(namespaces[-4:])
info['network_name'] = namespaces[-2]
return info
def compile_model(info):
'''This functions compiles the model'''
mod, params = get_network(info)
# Set compilation params
if info['cross_compile']:
if info['target'] in GPU_TARGETS:
raise Exception(info['target'] + ' cross-compilation not supported yet')
info['target'] += ' -mtriple=aarch64-linux-gnu'
# Compile model
if info['autotvm_log'] is not None:
if info['target'].startswith('llvm'):
cm = autotvm.apply_graph_best(info['autotvm_log'])
elif info['target'] in GPU_TARGETS:
cm = autotvm.apply_history_best(info['autotvm_log'])
with cm:
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod,
target=info['target'],
params=params)
else:
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod,
target=info['target'],
params=params)
# Write the compiled model to files
output_model_path = path.join(info['output_path'],
OUTPUT_NETWORK_MODULE_FILENAME)
output_graph_path = path.join(info['output_path'],
OUTPUT_NETWORK_GRAPH_FILENAME)
output_param_path = path.join(info['output_path'],
OUTPUT_NETWORK_PARAM_FILENAME)
print('Writing library to', output_model_path)
if info['cross_compile']:
lib.export_library(output_model_path,
cc.cross_compiler(
compile_func='/usr/bin/clang',
options=['--target=aarch64-linux-gnu',
'-march=armv8-a',
'-mfpu=NEON']))
else:
lib.export_library(output_model_path)
print('Writing graph to', output_graph_path)
with open(output_graph_path, 'w') as graph_file:
graph_file.write(graph)
print('Writing weights to', output_param_path)
with open(output_param_path, 'wb') as param_file:
param_file.write(relay.save_param_dict(params))
def generate_config_file(info):
'''This function generates the config .hpp file'''
# Setup jinja template and write the config file
root = path.dirname(path.abspath(__file__))
templates_dir = path.join(root, 'templates')
env = Environment( loader = FileSystemLoader(templates_dir),
keep_trailing_newline = True )
template = env.get_template(OUTPUT_CONFIG_FILENAME + ".jinja2")
filename = path.join(info['output_path'], OUTPUT_CONFIG_FILENAME)
print('Writing pipeline configuration to', filename)
with open(filename, 'w') as fh:
fh.write(template.render(
namespace = info['namespace'],
header_extension = info['header_extension'],
network_name = info['network_name'],
network_backend = info['target'],
network_module_path = path.join('.',
OUTPUT_NETWORK_MODULE_FILENAME),
network_graph_path = path.join('.',
OUTPUT_NETWORK_GRAPH_FILENAME),
network_params_path = path.join('.',
OUTPUT_NETWORK_PARAM_FILENAME),
tvm_dtype_code = info['dtype_code'],
tvm_dtype_bits = info['dtype_bits'],
tvm_dtype_lanes = info['lanes'],
tvm_device_type = info['device_type'],
tvm_device_id = info['device_id'],
input_list = info['input_list'],
output_list = info['output_list']
))
def tuning_preprocess(args):
'''
This function checks the command-line arguments and reads the necessary info
from the .yaml file, it's used when the tune option is selected
'''
# 'info' is the output dictionary
info = {}
info['tuner'] = args.tuner
info['n_trial'] = args.n_trial
info['early_stopping'] = args.early_stopping
info['evaluate_inference_time'] = args.evaluate_inference_time
info = yaml_processing(args.config, info)
# Define the root directory and check if the specified output_path exists.
# If not the corresponding directories are created.
# Note: if output_path has not been specified by the user,
# default to the 'filename' field from the .yaml file
if args.output_path:
info['output_path'] = args.output_path
if not path.isdir(info['output_path']):
os.makedirs(info['output_path'])
# Import the AutoTVM config file
sys.path.append(os.path.dirname(os.path.abspath(args.autotvm_config)))
autotvm_config_file = os.path.basename(args.autotvm_config)
info['cfg'] = importlib.import_module(autotvm_config_file[:-3])
return info
def tune_model(info):
'''This function performs the tuning of a model'''
def tune_kernels(
tasks,
tuning_opt
):
        # tuning_opt is the configuration taken from the AutoTVM config module and is read by key
        # elsewhere in this function, so use key access here as well
        tuner = tuning_opt['tuner']
        n_trial = tuning_opt['n_trial']
        early_stopping = tuning_opt['early_stopping']
# Overwrite AutoTVM_config contents if the user provides the
# corresponding arguments
if info['tuner'] is not None:
tuner = info['tuner']
if info['n_trial'] is not None:
n_trial = info['n_trial']
if info['early_stopping'] is not None:
early_stopping = info['early_stopping']
for i, tsk in enumerate(reversed(tasks)):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
# create tuner
if tuner in ('xgb', 'xgb-rank'):
tuner_obj = XGBTuner(tsk, loss_type="rank")
elif tuner == "ga":
tuner_obj = GATuner(tsk, pop_size=100)
elif tuner == "random":
tuner_obj = RandomTuner(tsk)
elif tuner == "gridsearch":
tuner_obj = GridSearchTuner(tsk)
else:
raise ValueError("Invalid tuner: " + tuner)
# do tuning
tsk_trial = min(n_trial, len(tsk.config_space))
tuner_obj.tune(
n_trial=tsk_trial,
early_stopping=early_stopping,
                measure_option=tuning_opt['measure_option'],
callbacks=[
autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
autotvm.callback.log_to_file(path.join(
info['output_path'],
                        tuning_opt['log_filename'])),
],
)
# Use graph tuner to achieve graph level optimal schedules
# Set use_DP=False if it takes too long to finish.
def tune_graph(graph,
records,
opt_sch_file,
min_exec_graph_tuner,
use_DP=True):
target_op = [
relay.op.get('nn.conv2d'),
]
Tuner = DPTuner if use_DP else PBQPTuner
executor = Tuner(graph,
{name:[1 if x == -1 else x for x in shape]
for (name,shape) in info['input_dict'].items()},
records,
target_op,
info['target'])
executor.benchmark_layout_transform(min_exec_num=min_exec_graph_tuner)
executor.run()
executor.write_opt_sch2record_file(path.join(info['output_path'],
opt_sch_file))
tuning_opt = info['cfg'].tuning_options
info['target'] = tuning_opt['target']
mod, params = get_network(info)
# extract workloads from relay program
print("Extract tasks...")
tasks = autotvm.task.extract_from_program(
mod["main"],
target=info['target'],
params=params,
ops=(relay.op.get("nn.conv2d"),))
# run tuning tasks
print("Tuning...")
    tune_kernels(tasks, tuning_opt)  # pass the config itself; tune_kernels reads its entries by key
if info['target'].startswith('llvm'):
opt_sch_file = tuning_opt['log_filename'][:-4] + '_graph_opt.log'
tune_graph(
mod['main'],
path.join(info['output_path'], tuning_opt['log_filename']),
path.join(info['output_path'], opt_sch_file),
tuning_opt['min_exec_graph_tuner'])
if info['target'] in GPU_TARGETS:
print("The .log file has been saved in " +
path.join(info['output_path'], tuning_opt['log_filename']))
elif info['target'].startswith('llvm'):
print("The .log file has been saved in " +
path.join(info['output_path'], opt_sch_file))
if info['evaluate_inference_time']:
if info['target'] in GPU_TARGETS:
cm = autotvm.apply_history_best(
path.join(info['output_path'],
tuning_opt['log_filename']))
elif info['target'].startswith('llvm'):
cm = autotvm.apply_graph_best(
path.join(info['output_path'], opt_sch_file))
# compile
with cm:
print("Compile...")
with tvm.transform.PassContext(opt_level=3):
lib = relay.build_module.build(mod,
target=info['target'],
params=params)
# load parameters
if info['target'] in GPU_TARGETS:
            ctx = tvm.context(info['target'], info.get('device_id', 0))  # device_id is only set in the compile flow; default to 0 when tuning
elif info['target'].startswith('llvm'):
ctx = tvm.cpu()
module = runtime.GraphModule(lib["default"](ctx))
for name, shape in info['input_dict'].items():
data_tvm = tvm.nd.array(
(np.random.uniform(
size=[1 if x == -1 else x for x in shape]))
.astype(info['input_data_type']))
module.set_input(name, data_tvm)
# evaluate
print("Evaluate inference time cost...")
ftimer = module.module.time_evaluator("run",
ctx,
number=10,
repeat=60)
prof_res = np.array(ftimer().results) * 1000
print("Mean inference time (std dev): %.2f ms (%.2f ms)"
% (np.mean(prof_res), np.std(prof_res)))
if __name__ == '__main__':
import argparse
def compile():
'''Compiles a model using TVM'''
parser = argparse.ArgumentParser(
description='Compile a model using TVM',
usage='''tvm_cli compile [<args>]''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument('--config',
help='Path to .yaml config file (input)',
required=True)
requiredNamed.add_argument('--output_path',
help='Path where network module, '
'network graph and network parameters '
'will be stored',
required=True)
targets = list(TARGETS_DEVICES)
parser.add_argument('--target',
help='Compilation target',
choices=targets,
default=targets[0])
parser.add_argument('--device_id',
help='Device ID',
type=int,
default=0)
parser.add_argument('--lanes',
help='Number of lanes',
type=int,
default=1)
parser.add_argument('--cross_compile',
help='Cross compile for ArmV8a with NEON',
action='store_true',
default=False)
parser.add_argument('--autotvm_log',
help='Path to an autotvm .log file, can speed up '
'inference')
parser.add_argument('--autoware_version',
help='Targeted Autoware version',
choices=['ai', 'auto'],
default='auto')
parsed_args = parser.parse_args(sys.argv[2:])
# The dictionary 'info' contains all the information provided by the user
# and the information found in the .yaml file
try:
info = compilation_preprocess(parsed_args)
compile_model(info)
generate_config_file(info)
except Exception as e:
print('Exception: '+ str(e))
return 1
return 0
def tune():
'''Tunes a model using AutoTVM'''
parser = argparse.ArgumentParser(
description='Tune a model using AutoTVM',
usage='''tvm_cli tune [<args>]''')
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument('--config',
help='Path to .yaml config file (input)',
required=True)
requiredNamed.add_argument('--output_path',
help='Path where the output .log file will '
'be stored',
required=True)
requiredNamed.add_argument('--autotvm_config',
help='Path to an autotvm config file, see '
'AutoTVM_config_example.py',
required=True)
parser.add_argument('--tuner',
help='Specify the tuner to be used, overrides '
'--autotvm_config contents',
choices=['xgb', 'xgb-rank', 'ga', 'random',
'gridsearch'])
parser.add_argument('--n_trial',
help='Maximum number of configurations to try, '
'overrides --autotvm_config contents.',
type=int)
parser.add_argument('--early_stopping',
help='Early stop the tuning when not finding '
'better configs in this number of trials, '
'overrides --autotvm_config contents',
type=int)
parser.add_argument('--evaluate_inference_time',
help='Set to perform an inference time evaluation '
'after the tuning phase',
action='store_true',
default=False)
parsed_args = parser.parse_args(sys.argv[2:])
# The dictionary 'info' contains all the information provided by the
# user and the information found in the .yaml file
info = tuning_preprocess(parsed_args)
tune_model(info)
def test():
'''Launches the validation script'''
parser = argparse.ArgumentParser(
description='Launch the validation script',
usage='''tvm_cli test [-h]''')
parser.parse_args(sys.argv[2:])
return pytest.main(['-v'])
main_parser = argparse.ArgumentParser(
description='Compile model and configuration file (TVM)',
usage='''<command> [<args>]
Commands:
compile Compile a model using TVM
tune Tune a model using AutoTVM
test Launch the validation script''')
main_parser.add_argument('command', help='Subcommand to run')
main_parsed_args = main_parser.parse_args(sys.argv[1:2])
if main_parsed_args.command not in locals():
print('Unrecognized command')
main_parser.print_help()
exit(1)
# Invoke method with same name as the argument passed
exit(locals()[main_parsed_args.command]())
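    # Example invocations (paths are placeholders; the flags are the ones defined above):
    #   ./tvm_cli.py compile --config path/to/definition.yaml --output_path output_dir --target llvm
    #   ./tvm_cli.py tune --config path/to/definition.yaml --output_path output_dir --autotvm_config AutoTVM_config_example.py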
| 41.998158 | 84 | 0.562114 |
ef8a00fa155a261fcb3abb7f68a1244711abed6e
| 10,340 |
py
|
Python
|
experiments/gps-position-precision/main.py
|
aerospaceresearch/thebeacon
|
b5ce284633696563eea9ce6d9e52a119a0261a09
|
[
"MIT"
] | null | null | null |
experiments/gps-position-precision/main.py
|
aerospaceresearch/thebeacon
|
b5ce284633696563eea9ce6d9e52a119a0261a09
|
[
"MIT"
] | null | null | null |
experiments/gps-position-precision/main.py
|
aerospaceresearch/thebeacon
|
b5ce284633696563eea9ce6d9e52a119a0261a09
|
[
"MIT"
] | null | null | null |
import time
import datetime
import numpy as np
import matplotlib.pylab as plt
def transform_coordinate_geo2earthcentered(long, lat, altitude):
radius = altitude + 6371000.0
x = radius * np.cos(lat * np.pi/180) * np.sin(long * np.pi/180)
y = radius * np.cos(lat * np.pi/180) * np.cos(long * np.pi/180)
z = radius * np.sin(lat * np.pi/180)
return x,y,z
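# Usage sketch: spherical-Earth approximation (6371 km mean radius plus altitude); arguments are
# (longitude, latitude, altitude in metres) and the sample values below are illustrative only:
#   x, y, z = transform_coordinate_geo2earthcentered(9.2356, 48.777, 240.0)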
def markers(f, number, lon, lat):
content = "jsMaps.api.marker(map,{position: {lat: "+str(lat)+",lng: "+str(lon)+"}, title: 'Marker No "+str(number)+"',draggable: false})\n"
f.write(content)
def mapcenter(f, lon, lat):
content = " var map = jsMaps.api.init(\n" \
" '#map',\n"\
" 'native',\n"\
" {\n"\
" center: {\n"\
" latitude: "+ str(np.mean(lat)) +",\n"\
" longitude: "+ str(np.mean(lon)) +"\n"\
" },\n"\
" zoom: 18,\n"\
" mouse_scroll: true,\n"\
" zoom_control: true,\n"\
" map_type: true\n"\
" },tiles\n"\
" );\n"
f.write(content)
def locations(f, number, colour, lon, lat):
content = " var polyLine"+str(number)+" = [\n"
f.write(content)
for i in range(len(lon)):
#print(i, lat[i], lon[i])
if i < len(lon) - 1:
content = " {lat: " + str(lat[i]) + ",lng: " + str(lon[i]) + "},\n"
f.write(content)
if i == len(lon) - 1:
content = " {lat: " + str(lat[i]) + ",lng: " + str(lon[i]) + "}\n"
f.write(content)
content = " ];\n"\
" \n"\
" jsMaps.api.polyLine(map,{\n"\
" path: polyLine"+str(number)+",\n"\
" strokeColor: '"+colour+"',\n"\
" strokeOpacity: 0.2,\n"\
" strokeWeight: 0.6,\n"\
" draggable: true,\n"\
" editable: false\n"\
" });\n\n"
content = content + " jsMaps.api.circle(map,{\n"\
" center: {lat: " + str(np.mean(lat)) + ", lng: " + str(np.mean(lon)) + "},\n"\
" radius: 2,\n"\
" strokeColor: '"+colour+"',\n"\
" strokeOpacity: 0.8,\n"\
" strokeWeight: 2,\n"\
" fillColor: '#000000',\n"\
" fillOpacity: 0.35,\n"\
" editable: false,\n"\
" draggable: false\n"\
" });\n"
f.write(content)
def map(file):
timestamp = []
latitude = []
longitude = []
altitude = []
satellites = []
#with open(file) as f:
import os
import zipfile
with zipfile.ZipFile(file) as z:
for filename in z.namelist():
if not os.path.isdir(filename):
# read the file
with z.open(filename) as f:
tmp1 = ""
tmp2 = ""
for line in f:
if str(line).find("$GPGGA") > -1:
tmp1 = str(line).split(",")
if str(line).find("$GPRMC") > -1:
tmp2 = str(line).split(",")
if len(tmp1) > 0 and len(tmp2) > 0 and str(line).find("$GPRMC") > -1:
if tmp1[1] == tmp2[1] and len(tmp1[2]) > 0:
lat = 1.0
if tmp1[3] == "S":
lat = -1.0
lon = 1.0
if tmp1[5] == "W":
lon = -1.0
s = tmp1[1][0:2]+":"+tmp1[1][2:4]+":"+tmp1[1][4:]+" "+tmp2[9][0:2]+"-"+tmp2[9][2:4]+"-"+tmp2[9][4:6]
unixtime = time.mktime(datetime.datetime.strptime(s, "%H:%M:%S.%f %d-%m-%y").timetuple())
timestamp.append(unixtime)
latitude.append(lat * float(tmp1[2][0:2]) + float(tmp1[2][2:])/60.0)
longitude.append(lon * float(tmp1[4][0:3]) + float(tmp1[4][3:])/60.0)
altitude.append(float(tmp1[9]))
satellites.append(float(tmp1[7]))
'''
print(unixtime,
lat * float(tmp1[2][0:2]) + float(tmp1[2][2:])/60.0,
lon * float(tmp1[4][0:3]) + float(tmp1[4][3:])/60.0,
tmp1[9],
"fix", tmp1[6],
"sats", tmp1[7],
"hdop", tmp1[8])
'''
return timestamp, longitude, latitude, altitude, satellites
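# NMEA fields used by map() after splitting each sentence on ',':
#   $GPGGA: [1] UTC time, [2]/[3] latitude + N/S, [4]/[5] longitude + E/W, [6] fix quality,
#           [7] satellites in view, [8] HDOP, [9] altitude
#   $GPRMC: [1] UTC time, [9] date (ddmmyy), combined with the GPGGA time to build the unix timestamp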
import os
folders = ['input/2/different/',
'input/1/different/',
'input/2/same/',
'input/1/same/']
colour = ["#FF0000",
"#FFFF00",
"#00FF00",
"#FF00FF"]
name = ["1: shack window",
"1: roof1 free sky",
"2: roof2 window",
"2: roof2 free sky"]
origin = [[48.77701, 9.23558, 236.0],
[48.777042, 9.23548, 242.0],
[48.7770, 9.2356, 242.0],
[48.7770, 9.2356, 241.0]]
sats_count = []
distance_count = []
longitude = []
latitude = []
unixtime = []
satellites = []
altitude = []
distance = []
distance_to_origin = []
for i in range(len(folders)):
tmp1 = []
tmp2 = []
for j in range(20):
tmp1.append(0)
tmp2.append(0)
sats_count.append(tmp1)
distance_count.append(tmp2)
for folder in range(len(folders)):
lon = []
lat = []
tim = []
alt = []
sats = []
for path, dirs, files in os.walk(folders[folder]):
# sort dirs and files
dirs.sort()
files.sort()
for file in files:
print(file)
out = map(path+file)
for position in range(len(out[0])):
lon.append(out[1][position])
lat.append(out[2][position])
tim.append(out[0][position])
alt.append(out[3][position])
sats.append(out[4][position])
print("test", len(out[0]), len(out[1]), len(out[2]))
longitude.append(lon)
latitude.append(lat)
unixtime.append(tim)
altitude.append(alt)
satellites.append(sats)
dist = []
xm,ym,zm = transform_coordinate_geo2earthcentered(np.mean(lon), np.mean(lat), np.mean(alt))
for i in range(len(lat)):
x,y,z = transform_coordinate_geo2earthcentered(lon[i], lat[i], alt[i])
dist.append(((x-xm)**2 + (y-ym)**2 + (z-zm)**2)**0.5)
distance.append(dist)
dist_origin = []
xm,ym,zm = transform_coordinate_geo2earthcentered(origin[folder][1], origin[folder][0], origin[folder][2])
for i in range(len(lat)):
x,y,z = transform_coordinate_geo2earthcentered(lon[i], lat[i], alt[i])
dist_origin.append(((x-xm)**2 + (y-ym)**2 + (z-zm)**2)**0.5)
distance_to_origin.append(dist_origin)
for sat in range(len(sats)):
sats_count[folder][int(sats[sat])] = sats_count[folder][int(sats[sat])] + 1
distance_count[folder][int(sats[sat])] = distance_count[folder][int(sats[sat])] + dist[sat]
for folder in range(len(folders)):
satellite_distribution = np.divide(sats_count[folder], np.sum(sats_count[folder]))
    print("for", folder,
          "lat, long, alt", np.mean(latitude[folder]), np.mean(longitude[folder]), np.mean(altitude[folder]),
          "average distance to center", np.mean(distance[folder]),
          "average distance to origin", np.mean(distance_to_origin[folder]),
          "average number of visible GPS satellites for", np.mean(satellites[folder]),
          "average satellite distribution", np.mean(satellite_distribution), np.argmax(satellite_distribution))
for folder in range(len(folders)):
plt.plot(unixtime[folder], distance_to_origin[folder], "-", label=name[folder])
plt.title("distance to origin over time")
plt.xlabel("unixtime [s]")
plt.ylabel("distance [m]")
plt.legend()
plt.savefig('result_distance_to_origin.png')
#plt.show()
plt.clf()
for folder in range(len(folders)):
plt.plot(unixtime[folder], distance[folder], "-", label=name[folder])
plt.title("distance to center over time")
plt.xlabel("unixtime [s]")
plt.ylabel("distance [m]")
plt.legend()
plt.savefig("result_distance_to_center.png")
#plt.show()
plt.clf()
for folder in range(len(folders)):
satellite_distribution = np.divide(sats_count[folder], np.sum(sats_count[folder]))
plt.plot(satellite_distribution*100.0, "o-", label=name[folder])
plt.title("distribution of visible GPS Satellites")
plt.xlabel("visible GPS satellites [-]")
plt.ylabel("distribution of GPS Satellites [%]")
plt.legend()
plt.savefig("result_distribution_of_visible_gps_satellites.png")
#plt.show()
plt.clf()
for folder in range(len(folders)):
print(folder, sats_count[folder])
print(folder, distance_count[folder])
plt.plot(np.divide(distance_count[folder], sats_count[folder]), "o-", label=name[folder])
plt.title("distance to center per visible GPS satellites")
plt.xlabel("visible GPS satellites [-]")
plt.ylabel("distance/satellites [m]")
plt.legend()
plt.savefig("result_distance_to_center_per_visible_gps_satellites.png")
#plt.show()
plt.clf()
f = open("result_locations.html", "w")
with open('template_markers.html') as fp:
for line in fp:
if line.find("XXXmapcenter") > -1:
mapcenter(f, longitude[0], latitude[0])
elif line.find("XXXlocations") > -1:
for folder in range(len(folders)):
markers(f, folder, origin[folder][1], origin[folder][0])
else:
f.write(line)
f.close()
f = open("result_gps_log.html", "w")
with open('template_markers.html') as fp:
for line in fp:
if line.find("XXXmapcenter") > -1:
mapcenter(f, longitude[0], latitude[0])
elif line.find("XXXlocations") > -1:
for folder in range(len(folders)):
locations(f, folder, colour[folder], longitude[folder][::2], latitude[folder][::2])
markers(f, folder, origin[folder][1], origin[folder][0])
else:
f.write(line)
f.close()
| 32.618297 | 143 | 0.511992 |
fd4cae1f49f088edf395ca54bed1f1a8695bcde8
| 11,190 |
py
|
Python
|
bitmovin_api_sdk/models/prewarmed_encoder_pool.py
|
bitmovin/bitmovin-api-sdk-python
|
5a85147669c84b8ca411cf2d4dbdddc92d85bbe7
|
[
"MIT"
] | 11 |
2019-07-03T10:41:16.000Z
|
2022-02-25T21:48:06.000Z
|
bitmovin_api_sdk/models/prewarmed_encoder_pool.py
|
bitmovin/bitmovin-api-sdk-python
|
5a85147669c84b8ca411cf2d4dbdddc92d85bbe7
|
[
"MIT"
] | 8 |
2019-11-23T00:01:25.000Z
|
2021-04-29T12:30:31.000Z
|
bitmovin_api_sdk/models/prewarmed_encoder_pool.py
|
bitmovin/bitmovin-api-sdk-python
|
5a85147669c84b8ca411cf2d4dbdddc92d85bbe7
|
[
"MIT"
] | 13 |
2020-01-02T14:58:18.000Z
|
2022-03-26T12:10:30.000Z
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.bitmovin_resource import BitmovinResource
from bitmovin_api_sdk.models.cloud_region import CloudRegion
from bitmovin_api_sdk.models.prewarmed_encoder_disk_size import PrewarmedEncoderDiskSize
from bitmovin_api_sdk.models.prewarmed_encoder_pool_status import PrewarmedEncoderPoolStatus
import pprint
import six
class PrewarmedEncoderPool(BitmovinResource):
@poscheck_model
def __init__(self,
id_=None,
name=None,
description=None,
created_at=None,
modified_at=None,
custom_data=None,
encoder_version=None,
cloud_region=None,
infrastructure_id=None,
disk_size=None,
target_pool_size=None,
status=None):
# type: (string_types, string_types, string_types, datetime, datetime, dict, string_types, CloudRegion, string_types, PrewarmedEncoderDiskSize, int, PrewarmedEncoderPoolStatus) -> None
super(PrewarmedEncoderPool, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data)
self._encoder_version = None
self._cloud_region = None
self._infrastructure_id = None
self._disk_size = None
self._target_pool_size = None
self._status = None
self.discriminator = None
if encoder_version is not None:
self.encoder_version = encoder_version
if cloud_region is not None:
self.cloud_region = cloud_region
if infrastructure_id is not None:
self.infrastructure_id = infrastructure_id
if disk_size is not None:
self.disk_size = disk_size
if target_pool_size is not None:
self.target_pool_size = target_pool_size
if status is not None:
self.status = status
@property
def openapi_types(self):
types = {}
if hasattr(super(PrewarmedEncoderPool, self), 'openapi_types'):
types = getattr(super(PrewarmedEncoderPool, self), 'openapi_types')
types.update({
'encoder_version': 'string_types',
'cloud_region': 'CloudRegion',
'infrastructure_id': 'string_types',
'disk_size': 'PrewarmedEncoderDiskSize',
'target_pool_size': 'int',
'status': 'PrewarmedEncoderPoolStatus'
})
return types
@property
def attribute_map(self):
attributes = {}
if hasattr(super(PrewarmedEncoderPool, self), 'attribute_map'):
attributes = getattr(super(PrewarmedEncoderPool, self), 'attribute_map')
attributes.update({
'encoder_version': 'encoderVersion',
'cloud_region': 'cloudRegion',
'infrastructure_id': 'infrastructureId',
'disk_size': 'diskSize',
'target_pool_size': 'targetPoolSize',
'status': 'status'
})
return attributes
@property
def encoder_version(self):
# type: () -> string_types
"""Gets the encoder_version of this PrewarmedEncoderPool.
The encoder version which the pool's instances will be running (required)
:return: The encoder_version of this PrewarmedEncoderPool.
:rtype: string_types
"""
return self._encoder_version
@encoder_version.setter
def encoder_version(self, encoder_version):
# type: (string_types) -> None
"""Sets the encoder_version of this PrewarmedEncoderPool.
The encoder version which the pool's instances will be running (required)
:param encoder_version: The encoder_version of this PrewarmedEncoderPool.
:type: string_types
"""
if encoder_version is not None:
if not isinstance(encoder_version, string_types):
raise TypeError("Invalid type for `encoder_version`, type has to be `string_types`")
self._encoder_version = encoder_version
@property
def cloud_region(self):
# type: () -> CloudRegion
"""Gets the cloud_region of this PrewarmedEncoderPool.
The cloud region in which the pool's instances will be running. Must be a specific region (e.g. not 'AUTO', 'GOOGLE' or 'EUROPE') (required)
:return: The cloud_region of this PrewarmedEncoderPool.
:rtype: CloudRegion
"""
return self._cloud_region
@cloud_region.setter
def cloud_region(self, cloud_region):
# type: (CloudRegion) -> None
"""Sets the cloud_region of this PrewarmedEncoderPool.
The cloud region in which the pool's instances will be running. Must be a specific region (e.g. not 'AUTO', 'GOOGLE' or 'EUROPE') (required)
:param cloud_region: The cloud_region of this PrewarmedEncoderPool.
:type: CloudRegion
"""
if cloud_region is not None:
if not isinstance(cloud_region, CloudRegion):
raise TypeError("Invalid type for `cloud_region`, type has to be `CloudRegion`")
self._cloud_region = cloud_region
@property
def infrastructure_id(self):
# type: () -> string_types
"""Gets the infrastructure_id of this PrewarmedEncoderPool.
Define an external infrastructure to run the pool on.
:return: The infrastructure_id of this PrewarmedEncoderPool.
:rtype: string_types
"""
return self._infrastructure_id
@infrastructure_id.setter
def infrastructure_id(self, infrastructure_id):
# type: (string_types) -> None
"""Sets the infrastructure_id of this PrewarmedEncoderPool.
Define an external infrastructure to run the pool on.
:param infrastructure_id: The infrastructure_id of this PrewarmedEncoderPool.
:type: string_types
"""
if infrastructure_id is not None:
if not isinstance(infrastructure_id, string_types):
raise TypeError("Invalid type for `infrastructure_id`, type has to be `string_types`")
self._infrastructure_id = infrastructure_id
@property
def disk_size(self):
# type: () -> PrewarmedEncoderDiskSize
"""Gets the disk_size of this PrewarmedEncoderPool.
Disk size of the prewarmed instances in GB. Needs to be chosen depending on input file sizes and encoding features used. (required)
:return: The disk_size of this PrewarmedEncoderPool.
:rtype: PrewarmedEncoderDiskSize
"""
return self._disk_size
@disk_size.setter
def disk_size(self, disk_size):
# type: (PrewarmedEncoderDiskSize) -> None
"""Sets the disk_size of this PrewarmedEncoderPool.
Disk size of the prewarmed instances in GB. Needs to be chosen depending on input file sizes and encoding features used. (required)
:param disk_size: The disk_size of this PrewarmedEncoderPool.
:type: PrewarmedEncoderDiskSize
"""
if disk_size is not None:
if not isinstance(disk_size, PrewarmedEncoderDiskSize):
raise TypeError("Invalid type for `disk_size`, type has to be `PrewarmedEncoderDiskSize`")
self._disk_size = disk_size
@property
def target_pool_size(self):
# type: () -> int
"""Gets the target_pool_size of this PrewarmedEncoderPool.
Number of instances to keep prewarmed while the pool is running (required)
:return: The target_pool_size of this PrewarmedEncoderPool.
:rtype: int
"""
return self._target_pool_size
@target_pool_size.setter
def target_pool_size(self, target_pool_size):
# type: (int) -> None
"""Sets the target_pool_size of this PrewarmedEncoderPool.
Number of instances to keep prewarmed while the pool is running (required)
:param target_pool_size: The target_pool_size of this PrewarmedEncoderPool.
:type: int
"""
if target_pool_size is not None:
if target_pool_size is not None and target_pool_size < 1:
raise ValueError("Invalid value for `target_pool_size`, must be a value greater than or equal to `1`")
if not isinstance(target_pool_size, int):
raise TypeError("Invalid type for `target_pool_size`, type has to be `int`")
self._target_pool_size = target_pool_size
@property
def status(self):
# type: () -> PrewarmedEncoderPoolStatus
"""Gets the status of this PrewarmedEncoderPool.
Current status of the pool.
:return: The status of this PrewarmedEncoderPool.
:rtype: PrewarmedEncoderPoolStatus
"""
return self._status
@status.setter
def status(self, status):
# type: (PrewarmedEncoderPoolStatus) -> None
"""Sets the status of this PrewarmedEncoderPool.
Current status of the pool.
:param status: The status of this PrewarmedEncoderPool.
:type: PrewarmedEncoderPoolStatus
"""
if status is not None:
if not isinstance(status, PrewarmedEncoderPoolStatus):
raise TypeError("Invalid type for `status`, type has to be `PrewarmedEncoderPoolStatus`")
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
if hasattr(super(PrewarmedEncoderPool, self), "to_dict"):
result = super(PrewarmedEncoderPool, self).to_dict()
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PrewarmedEncoderPool):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
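# Minimal usage sketch; the field values are illustrative assumptions, not API defaults:
#   pool = PrewarmedEncoderPool(
#       encoder_version='STABLE',          # assumed version string
#       cloud_region=some_specific_region, # a concrete CloudRegion member, not 'AUTO', 'GOOGLE' or 'EUROPE'
#       disk_size=some_disk_size,          # a PrewarmedEncoderDiskSize member
#       target_pool_size=2)
#   payload = pool.to_dict()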
| 36.449511 | 192 | 0.644325 |
04f72bb46c5e989433037e9b7c2e33359066e43f
| 8,499 |
py
|
Python
|
starthinker_ui/recipe/views.py
|
wunderkennd/starthinker
|
ec66e02d26e5636a55ecb56803a7cec638629ace
|
[
"Apache-2.0"
] | null | null | null |
starthinker_ui/recipe/views.py
|
wunderkennd/starthinker
|
ec66e02d26e5636a55ecb56803a7cec638629ace
|
[
"Apache-2.0"
] | 1 |
2021-06-18T14:54:19.000Z
|
2021-06-18T14:54:19.000Z
|
starthinker_ui/recipe/views.py
|
isabella232/starthinker
|
d6bbecce5ef4a543fa2a19ce981c3381061e003a
|
[
"Apache-2.0"
] | null | null | null |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import json
import math
from django.shortcuts import render
from django.contrib import messages
from django.db import connection, transaction
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, HttpResponseNotFound
from django.conf import settings
from starthinker_ui.account.decorators import permission_admin
from starthinker_ui.recipe.forms_script import ScriptForm
from starthinker_ui.recipe.models import Recipe, utc_milliseconds
from starthinker_ui.recipe.colab import script_to_colab
from starthinker_ui.recipe.dag import script_to_dag
from starthinker_ui.recipe.log import log_manager_scale
from starthinker_ui.recipe.compute import group_instances_list, group_instances_resize
def recipe_list(request):
recipes = {
'running': [],
'paused': [],
'finished': [],
'errors': [],
'manual': []
}
if request.user.is_authenticated:
for recipe in request.user.recipe_set.all():
if recipe.manual:
recipes['manual'].append(recipe)
elif not recipe.active or recipe.get_log()['status'] == 'NEW':
recipes['paused'].append(recipe)
elif recipe.get_log()['status'] == 'FINISHED':
recipes['finished'].append(recipe)
elif recipe.get_log()['status'] == 'ERROR':
recipes['errors'].append(recipe)
else:
recipes['running'].append(recipe)
return render(request, 'recipe/recipe_list.html', {'recipes': recipes})
@permission_admin()
def recipe_edit(request, pk=None, manual=False):
if pk:
recipe = request.user.recipe_set.get(pk=pk)
manual = recipe.manual
else:
recipe = None
if request.method == 'POST':
form_script = ScriptForm(manual, recipe, request.user, request.POST)
if form_script.is_valid():
form_script.save()
messages.success(request, 'Recipe updated.')
if request.POST.get('save_and_run') == '1':
return recipe_run(request, form_script.instance.pk)
else:
return HttpResponseRedirect(form_script.instance.link_edit())
else:
messages.error(
request,
'Recipe Script Errors: %s' % ' '.join(form_script.get_errors())
)
else:
form_script = ScriptForm(
manual, recipe, request.user, scripts=request.GET.get('scripts', ''))
return render(request, 'recipe/recipe_edit.html', {
'form_script': form_script,
'manual': manual
})
@permission_admin()
def recipe_manual(request, pk=None):
return recipe_edit(request, pk=None, manual=True)
@permission_admin()
def recipe_delete(request, pk=None):
request.user.recipe_set.filter(pk=pk).delete()
messages.success(request, 'Recipe deleted.')
return HttpResponseRedirect('/')
@permission_admin()
def recipe_run(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
if recipe.is_running():
messages.success(
request,
          'Recipe dispatched, will run once the in-progress task completes.')
else:
messages.success(request,
'Recipe dispatched, give it a few minutes to start.')
recipe.force()
except Recipe.DoesNotExist as e:
messages.error(request, str(e))
return HttpResponseRedirect('/recipe/edit/%s/' % pk)
@permission_admin()
def recipe_cancel(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
if recipe.is_running():
messages.success(request,
'Recipe cancelled, active task will stop shortly.')
else:
messages.success(request, 'Recipe cancelled, no tasks are running.')
recipe.cancel()
except Recipe.DoesNotExist as e:
messages.error(request, str(e))
return HttpResponseRedirect('/recipe/edit/%s/' % pk)
@permission_admin()
def recipe_status(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
log = recipe.get_log()
log['report'] = render_to_string('recipe/log.html', {'log': log})
except Recipe.DoesNotExist:
log = {}
return JsonResponse(log)
@csrf_exempt
def recipe_start(request):
try:
recipe = Recipe.objects.get(
reference=request.POST.get('reference', 'invalid'))
if recipe.is_running():
response = HttpResponse('RECIPE INTERRUPTED', content_type='text/plain')
else:
response = HttpResponse('RECIPE STARTED', content_type='text/plain')
recipe.force()
except Recipe.DoesNotExist as e:
response = HttpResponseNotFound(
'RECIPE NOT FOUND', content_type='text/plain')
return response
@csrf_exempt
def recipe_stop(request):
try:
recipe = Recipe.objects.get(
reference=request.POST.get('reference', 'invalid'))
if recipe.is_running():
response = HttpResponse('RECIPE INTERRUPTED', content_type='text/plain')
else:
response = HttpResponse('RECIPE STOPPED', content_type='text/plain')
recipe.cancel()
except Recipe.DoesNotExist as e:
response = HttpResponseNotFound(
'RECIPE NOT FOUND', content_type='text/plain')
return response
@permission_admin()
def recipe_download(request, pk):
return render(request, 'recipe/download.html', {'recipe': pk})
@permission_admin()
def recipe_json(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
data = recipe.get_json(credentials=False)
response = HttpResponse(
json.dumps(data, indent=2), content_type='application/json')
response[
'Content-Disposition'] = 'attachment; filename=recipe_%s.json' % recipe.slug(
)
return response
except Exception as e:
recipe = None
messages.error(request, str(e))
return HttpResponseRedirect('/recipe/download/%s/' % pk)
@permission_admin()
def recipe_colab(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
data = script_to_colab(recipe.slug(), '', [],
recipe.get_json(credentials=False)['tasks'])
response = HttpResponse(data, content_type='application/vnd.jupyter')
response[
'Content-Disposition'] = 'attachment; filename=colab_%s.ipynb' % recipe.slug(
)
return response
except Exception as e:
messages.error(request, str(e))
raise (e)
return HttpResponseRedirect('/recipe/download/%s/' % pk)
@permission_admin()
def recipe_airflow(request, pk):
try:
recipe = request.user.recipe_set.get(pk=pk)
data = script_to_dag(recipe.slug(), recipe.name, '', [],
recipe.get_json(credentials=False)['tasks'])
response = HttpResponse(data, content_type='application/vnd.jupyter')
response[
'Content-Disposition'] = 'attachment; filename=airflow_%s.py' % recipe.slug(
)
return response
except Exception as e:
messages.error(request, str(e))
raise (e)
return HttpResponseRedirect('/recipe/download/%s/' % pk)
def autoscale(request):
scale = {
'jobs': 0,
'workers': {
'jobs': settings.WORKER_JOBS,
'max': settings.WORKER_MAX,
'existing': 0,
'required': 0
}
}
# get task and worker list
scale['jobs'] = Recipe.objects.filter(
active=True, job_utm__lt=utc_milliseconds()).exclude(job_utm=0).count()
scale['workers']['existing'] = 3 if request == 'TEST' else sum(
1 for instance in group_instances_list(('PROVISIONING', 'STAGING',
'RUNNING')))
scale['workers']['required'] = min(
settings.WORKER_MAX, math.ceil(scale['jobs'] / scale['workers']['jobs']))
if request != 'TEST' and scale['workers']['required'] > scale['workers'][
'existing']:
group_instances_resize(scale['workers']['required'])
# log the scaling operation
log_manager_scale(scale)
return JsonResponse(scale)
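# Shape of the JSON returned by autoscale() (numbers are illustrative; the worker 'jobs' and 'max'
# values come from settings.WORKER_JOBS and settings.WORKER_MAX):
#   {"jobs": 12, "workers": {"jobs": 5, "max": 10, "existing": 3, "required": 3}}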
| 31.594796 | 94 | 0.66902 |
d61bf370917c8dd0c1de1413977977bd1c67fbb2
| 3,747 |
py
|
Python
|
lib/galaxy/webapps/galaxy/api/roles.py
|
ramezrawas/galaxy-1
|
c03748dd49c060a68d07bce56eae33e0ba154414
|
[
"CC-BY-3.0"
] | 6 |
2018-11-03T22:43:35.000Z
|
2022-02-15T17:51:33.000Z
|
lib/galaxy/webapps/galaxy/api/roles.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 7 |
2016-12-07T22:19:37.000Z
|
2019-01-30T15:04:26.000Z
|
lib/galaxy/webapps/galaxy/api/roles.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 10 |
2017-04-10T21:40:22.000Z
|
2022-02-21T16:50:10.000Z
|
"""
API operations on Role objects.
"""
import logging
from sqlalchemy import false
from galaxy import web
from galaxy.web.base.controller import BaseAPIController, url_for
log = logging.getLogger( __name__ )
class RoleAPIController( BaseAPIController ):
@web.expose_api
def index( self, trans, **kwd ):
"""
GET /api/roles
Displays a collection (list) of roles.
"""
rval = []
for role in trans.sa_session.query( trans.app.model.Role ).filter( trans.app.model.Role.table.c.deleted == false() ):
if trans.user_is_admin() or trans.app.security_agent.ok_to_display( trans.user, role ):
item = role.to_dict( value_mapper={ 'id': trans.security.encode_id } )
encoded_id = trans.security.encode_id( role.id )
item['url'] = url_for( 'role', id=encoded_id )
rval.append( item )
return rval
@web.expose_api
def show( self, trans, id, **kwd ):
"""
GET /api/roles/{encoded_role_id}
Displays information about a role.
"""
role_id = id
try:
decoded_role_id = trans.security.decode_id( role_id )
except TypeError:
trans.response.status = 400
return "Malformed role id ( %s ) specified, unable to decode." % str( role_id )
try:
role = trans.sa_session.query( trans.app.model.Role ).get( decoded_role_id )
        except Exception:
role = None
if not role or not (trans.user_is_admin() or trans.app.security_agent.ok_to_display( trans.user, role )):
trans.response.status = 400
return "Invalid role id ( %s ) specified." % str( role_id )
item = role.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'role', id=role_id )
return item
@web.expose_api
def create( self, trans, payload, **kwd ):
"""
POST /api/roles
Creates a new role.
"""
if not trans.user_is_admin():
trans.response.status = 403
return "You are not authorized to create a new role."
name = payload.get( 'name', None )
description = payload.get( 'description', None )
if not name or not description:
trans.response.status = 400
return "Enter a valid name and a description"
if trans.sa_session.query( trans.app.model.Role ).filter( trans.app.model.Role.table.c.name == name ).first():
trans.response.status = 400
return "A role with that name already exists"
role_type = trans.app.model.Role.types.ADMIN # TODO: allow non-admins to create roles
role = trans.app.model.Role( name=name, description=description, type=role_type )
trans.sa_session.add( role )
user_ids = payload.get( 'user_ids', [] )
users = [ trans.sa_session.query( trans.model.User ).get( trans.security.decode_id( i ) ) for i in user_ids ]
group_ids = payload.get( 'group_ids', [] )
groups = [ trans.sa_session.query( trans.model.Group ).get( trans.security.decode_id( i ) ) for i in group_ids ]
# Create the UserRoleAssociations
for user in users:
trans.app.security_agent.associate_user_role( user, role )
# Create the GroupRoleAssociations
for group in groups:
trans.app.security_agent.associate_group_role( group, role )
trans.sa_session.flush()
encoded_id = trans.security.encode_id( role.id )
item = role.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'role', id=encoded_id )
return [ item ]
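# Endpoints exposed by this controller (paths and payload keys taken from the docstrings and code above):
#   GET  /api/roles                      -> index()
#   GET  /api/roles/{encoded_role_id}    -> show()
#   POST /api/roles                      -> create(), payload: name, description, optional user_ids / group_ids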
| 39.861702 | 125 | 0.611156 |
e8208e300a13a11bd6201515d77e60fcaf4eb0cb
| 10,551 |
py
|
Python
|
src/sentry/web/frontend/admin.py
|
E-LLP/sentry
|
83d97a0ca45cdaac1d5f3026058131a3aeae0068
|
[
"BSD-3-Clause"
] | 4 |
2016-03-16T07:21:36.000Z
|
2017-09-04T07:29:56.000Z
|
src/sentry/web/frontend/admin.py
|
mitsuhiko/sentry
|
cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/web/frontend/admin.py
|
mitsuhiko/sentry
|
cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.web.frontend.admin
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import functools
import logging
import sys
import uuid
from collections import defaultdict
import pkg_resources
import six
from django.conf import settings
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Count
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect
from sentry import options
from sentry.app import env
from sentry.models import Project, Team, User
from sentry.plugins import plugins
from sentry.utils.email import send_mail
from sentry.utils.http import absolute_uri
from sentry.utils.warnings import DeprecatedSettingWarning, seen_warnings
from sentry.web.decorators import requires_admin
from sentry.web.forms import (
ChangeUserForm, NewUserForm, RemoveUserForm, TestEmailForm
)
from sentry.web.helpers import render_to_response, render_to_string
def configure_plugin(request, slug):
plugin = plugins.get(slug)
if not plugin.has_site_conf():
return HttpResponseRedirect(reverse('sentry'))
view = plugin.configure(request=request)
if isinstance(view, HttpResponse):
return view
return render_to_response('sentry/admin/plugins/configure.html', {
'plugin': plugin,
'title': plugin.get_conf_title(),
'slug': plugin.slug,
'view': view,
}, request)
@requires_admin
def manage_projects(request):
project_list = Project.objects.filter(
status=0,
team__isnull=False,
).select_related('team')
project_query = request.GET.get('pquery')
if project_query:
project_list = project_list.filter(name__icontains=project_query)
sort = request.GET.get('sort')
if sort not in ('name', 'date'):
sort = 'date'
if sort == 'date':
order_by = '-date_added'
elif sort == 'name':
order_by = 'name'
project_list = project_list.order_by(order_by)
context = {
'project_list': project_list,
'project_query': project_query,
'sort': sort,
}
return render_to_response('sentry/admin/projects/list.html', context, request)
@requires_admin
def manage_users(request):
user_list = User.objects.all().order_by('-date_joined')
user_query = request.GET.get('uquery')
if user_query:
user_list = user_list.filter(email__icontains=user_query)
sort = request.GET.get('sort')
if sort not in ('name', 'joined', 'login'):
sort = 'joined'
if sort == 'joined':
order_by = '-date_joined'
elif sort == 'login':
order_by = '-last_login'
elif sort == 'name':
order_by = 'name'
user_list = user_list.order_by(order_by)
return render_to_response('sentry/admin/users/list.html', {
'user_list': user_list,
'user_query': user_query,
'sort': sort,
}, request)
@requires_admin
@transaction.atomic
@csrf_protect
def create_new_user(request):
if not request.is_superuser():
return HttpResponseRedirect(reverse('sentry'))
form = NewUserForm(request.POST or None, initial={
'send_welcome_mail': True,
'create_project': True,
})
if form.is_valid():
user = form.save(commit=False)
# create a random password
password = uuid.uuid4().hex
user.set_password(password)
user.save()
if form.cleaned_data['send_welcome_mail']:
context = {
'username': user.username,
'password': password,
'url': absolute_uri(reverse('sentry')),
}
body = render_to_string('sentry/emails/welcome_mail.txt', context, request)
try:
send_mail(
'%s Welcome to Sentry' % (options.get('mail.subject-prefix'),),
body, options.get('mail.from'), [user.email],
fail_silently=False
)
except Exception as e:
logger = logging.getLogger('sentry.mail.errors')
logger.exception(e)
return HttpResponseRedirect(reverse('sentry-admin-users'))
context = {
'form': form,
}
context.update(csrf(request))
return render_to_response('sentry/admin/users/new.html', context, request)
@requires_admin
@csrf_protect
def edit_user(request, user_id):
if not request.is_superuser():
return HttpResponseRedirect(reverse('sentry'))
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return HttpResponseRedirect(reverse('sentry-admin-users'))
form = ChangeUserForm(request.POST or None, instance=user)
if form.is_valid():
user = form.save()
return HttpResponseRedirect(reverse('sentry-admin-users'))
project_list = Project.objects.filter(
status=0,
organization__member_set__user=user,
).order_by('-date_added')
context = {
'form': form,
'the_user': user,
'project_list': project_list,
}
context.update(csrf(request))
return render_to_response('sentry/admin/users/edit.html', context, request)
@requires_admin
@csrf_protect
def remove_user(request, user_id):
if str(user_id) == str(request.user.id):
return HttpResponseRedirect(reverse('sentry-admin-users'))
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return HttpResponseRedirect(reverse('sentry-admin-users'))
form = RemoveUserForm(request.POST or None)
if form.is_valid():
if form.cleaned_data['removal_type'] == '2':
user.delete()
else:
User.objects.filter(pk=user.pk).update(is_active=False)
return HttpResponseRedirect(reverse('sentry-admin-users'))
context = csrf(request)
context.update({
'form': form,
'the_user': user,
})
return render_to_response('sentry/admin/users/remove.html', context, request)
@requires_admin
def list_user_projects(request, user_id):
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return HttpResponseRedirect(reverse('sentry-admin-users'))
project_list = Project.objects.filter(
status=0,
organization__member_set__user=user,
).order_by('-date_added')
context = {
'project_list': project_list,
'the_user': user,
}
return render_to_response('sentry/admin/users/list_projects.html', context, request)
@requires_admin
def manage_teams(request):
team_list = Team.objects.order_by('-date_added')
team_query = request.GET.get('tquery')
if team_query:
team_list = team_list.filter(name__icontains=team_query)
sort = request.GET.get('sort')
    if sort not in ('name', 'date', 'projects'):  # only values with a matching order_by branch below
sort = 'date'
if sort == 'date':
order_by = '-date_added'
elif sort == 'name':
order_by = 'name'
elif sort == 'projects':
order_by = '-num_projects'
team_list = team_list.annotate(
num_projects=Count('project'),
).order_by(order_by)
return render_to_response('sentry/admin/teams/list.html', {
'team_list': team_list,
'team_query': team_query,
'sort': sort,
}, request)
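# Query parameters accepted by the admin list views above (taken from the request.GET lookups):
#   ?pquery= / ?uquery= / ?tquery=  -- substring filters for projects, users and teams
#   ?sort=                          -- one of the values each view checks ('name', 'date', 'joined', 'login', 'projects')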
@requires_admin
def status_env(request):
reserved = ('PASSWORD', 'SECRET', 'KEY')
config = []
for k in sorted(dir(settings)):
v_repr = repr(getattr(settings, k))
if any(r.lower() in v_repr.lower() for r in reserved):
v_repr = '*' * 16
if any(r in k for r in reserved):
v_repr = '*' * 16
if k.startswith('_'):
continue
if k.upper() != k:
continue
config.append((k, v_repr))
return render_to_response('sentry/admin/status/env.html', {
'python_version': sys.version,
'config': config,
'environment': env.data,
}, request)
@requires_admin
def status_packages(request):
config = []
for k in sorted(dir(settings)):
if k == 'KEY':
continue
if k.startswith('_'):
continue
if k.upper() != k:
continue
config.append((k, getattr(settings, k)))
return render_to_response('sentry/admin/status/packages.html', {
'modules': sorted([(p.project_name, p.version) for p in pkg_resources.working_set]),
'extensions': [
(p.get_title(), '%s.%s' % (p.__module__, p.__class__.__name__))
for p in plugins.all(version=None)
],
}, request)
@requires_admin
def status_warnings(request):
groupings = {
DeprecatedSettingWarning: 'Deprecated Settings',
}
groups = defaultdict(list)
warnings = []
for warning in seen_warnings:
cls = type(warning)
if cls in groupings:
groups[cls].append(warning)
else:
warnings.append(warning)
sort_by_message = functools.partial(sorted, key=str)
return render_to_response(
'sentry/admin/status/warnings.html',
{
'groups': [(groupings[key], sort_by_message(values)) for key, values in groups.items()],
'warnings': sort_by_message(warnings),
},
request,
)
@requires_admin
@csrf_protect
def status_mail(request):
form = TestEmailForm(request.POST or None)
if form.is_valid():
body = """This email was sent as a request to test the Sentry outbound email configuration."""
try:
send_mail(
'%s Test Email' % (options.get('mail.subject-prefix'),),
body, options.get('mail.from'), [request.user.email],
fail_silently=False
)
except Exception as e:
form.errors['__all__'] = [six.text_type(e)]
return render_to_response('sentry/admin/status/mail.html', {
'form': form,
'mail_host': options.get('mail.host'),
'mail_password': bool(options.get('mail.password')),
'mail_username': options.get('mail.username'),
'mail_port': options.get('mail.port'),
'mail_use_tls': options.get('mail.use-tls'),
'mail_from': options.get('mail.from'),
'mail_list_namespace': options.get('mail.list-namespace'),
}, request)
| 28.439353 | 102 | 0.633968 |
f11a04e343309920e0f53ea619f31b5d7dba063b
| 5,281 |
py
|
Python
|
api/core/geo_coordinate_service.py
|
arunrapolu4491/court-interpreter-scheduling
|
17efcdf3a7fdd470c1991452a696a7bc640fd220
|
[
"Apache-2.0"
] | null | null | null |
api/core/geo_coordinate_service.py
|
arunrapolu4491/court-interpreter-scheduling
|
17efcdf3a7fdd470c1991452a696a7bc640fd220
|
[
"Apache-2.0"
] | null | null | null |
api/core/geo_coordinate_service.py
|
arunrapolu4491/court-interpreter-scheduling
|
17efcdf3a7fdd470c1991452a696a7bc640fd220
|
[
"Apache-2.0"
] | null | null | null |
import requests
import re
from fastapi import status, HTTPException
from core.config import settings
def get_geo(address, google_map):
if google_map == True:
url = settings.GOOGLE_MAP_URL.format(address)
else:
url = settings.OPENROAD_MAP_URL.format(address)
response = requests.get(url)
return response.json()
def get_name_of_province(abvr):
states={
'BC':'British Columbia',
'ON':'Ontario',
'ONT':'Ontario',
'QC':'Quebec',
'AB':'Alberta',
'SK':'Saskatchewan',
'MB':'Manitoba',
'NL':'Newfoundland and Labrador',
'PE':'Prince Edward Island',
'NS':'Nova Scotia',
'NB':'New Brunswick',
'YT':'Yukon',
'NT':'Northwest Territories',
'NU':'Nunavut',
'WA':'Washington',
}
if abvr == "WA":
country="USA"
else:
country="CANADA"
return states[abvr], country
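# Example (illustrative): get_name_of_province("BC") returns ("British Columbia", "CANADA"),
# while get_name_of_province("WA") returns ("Washington", "USA").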
def get_latitude_longitude_service(address_line1, address_line2, city, postal_code, province, google_map):
# google_map = True
# address = "BC, Canada"
# response = get_geo(address, google_map)
# # print(response)
# if response['status'] == 'REQUEST_DENIED':
# google_map = False
if address_line1 is None: address_line1 = ""
if address_line2 is None: address_line2 = ""
if city is None: city = ""
if postal_code is None: postal_code = ""
if province is None: province = ""
if len(province)<4:
province,country = get_name_of_province(province.upper())
else:
country="CANADA"
city = city.lower()
if city == "north van": city="north vancouver"
if city == "west Van": city="west vancouver"
if city == "new west": city="new westminster"
if city == "buranby": city="burnaby"
if city == "vacnouver": city="vancouver"
if city == "conrich rockyview county": city="conrich"
if city == "massett" : city="masset"
if city == "leech town" : city="leechtown"
if city == "hudsons hope" : city="hudson's hope"
if city == "kelowna" and "Sparwood" in address_line1: city="sparwood"
address_line1 = address_line1.replace("R.R.#", "")
address_line2 = address_line2.replace("R.R.#", "")
address_line1 = address_line1.replace("#", "no ")
address_line2 = address_line2.replace("#", "no ")
address_line = address_line1.lower() + ", " + address_line2.lower()
# print("____")
# print(address_line)
# Remove Bag 123 or Box 123
address_line = re.sub( "bag [0-9]+,", "", address_line)
address_line = re.sub( "box [0-9]+,", "", address_line)
address_line = re.sub( "bag [0-9]+", "", address_line)
address_line = re.sub( "box [0-9]+", "", address_line)
address_line = re.sub( "bag[0-9]+,", "", address_line)
address_line = re.sub( "box[0-9]+,", "", address_line)
address_line = re.sub( "bag[0-9]+", "", address_line)
address_line = re.sub( "box[0-9]+", "", address_line)
# Typos
address_line = re.sub( "yellowhwad", "yellowhead", address_line)
address_line = re.sub( "mirtle", "Murtle", address_line)
# print(address_line)
# address_line
address = f"{address_line}, {city}, {postal_code}, {province}, {country}"
found_locations = get_geo(address, google_map)
if google_map==True and found_locations['status'] == 'REQUEST_DENIED':
raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail=f"Please review the google map subscription.")
if len(found_locations)==0:
address = f"{address_line}, {city}, {province}, {country}"
found_locations = get_geo(address, google_map)
if len(found_locations)==0:
if "ave." in address_line or "avenue" in address_line:
address_line_tmp = re.sub( "ave\.", "st.", address_line)
address_line_tmp = re.sub( "avenue", "street", address_line_tmp)
else:
address_line_tmp = re.sub( "st\.", "ave.", address_line)
address_line_tmp = re.sub( "street", "avenue", address_line_tmp)
address = f"{address_line_tmp}, {city}, {province}, {country}"
found_locations = get_geo(address, google_map)
if len(found_locations)==0:
address_line = re.sub( "(?<!\S)\d+(?!\S)", "", address_line)
address_line = re.sub( "[0-9]+-[0-9]+", "", address_line)
# print("+++++++++++++++")
# print(address_line)
address = f"{address_line}, {city}, {province}, {country}"
found_locations = get_geo(address, google_map)
if len(found_locations)==0:
# print("======================")
# print(city)
address = f"{city}, {province}, {country}"
# print(address)
found_locations = get_geo(address, google_map)
# print(len(found_locations))
# print(found_locations)
# return len(found_locations)
if len(found_locations)==1:
return found_locations[0]["lat"], found_locations[0]["lon"]
else:
for found_location in found_locations:
if found_location['type'] == "administrative":
return found_location["lat"], found_location["lon"]
return found_locations[0]["lat"], found_locations[0]["lon"]
| 34.292208 | 130 | 0.603295 |
a2eb0946e7158ea57991509fec24df56300fa0bf
| 1,051 |
py
|
Python
|
cogs/jams.py
|
TDillman/wow_info_bot
|
85ee0f0604bfce3968b3401ecac30e6cacd1086b
|
[
"Apache-2.0"
] | null | null | null |
cogs/jams.py
|
TDillman/wow_info_bot
|
85ee0f0604bfce3968b3401ecac30e6cacd1086b
|
[
"Apache-2.0"
] | null | null | null |
cogs/jams.py
|
TDillman/wow_info_bot
|
85ee0f0604bfce3968b3401ecac30e6cacd1086b
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import yaml
from youtube_api import YouTubeDataAPI
from discord.ext import commands
if not os.path.isfile("config.yaml"):
sys.exit("'config.yaml' not found! Please add it and try again.")
else:
with open("config.yaml") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
yt = YouTubeDataAPI(config['youtube_api_key'])
class Jams(commands.Cog, name="jams"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="jams")
async def jams(self, ctx, *, args):
"""
Links a YouTube video with the requested search result
Usage: !jams Gaslight Anthem
"""
results = yt.search(args, max_results=5, order='relevance')
        if len(results) == 0:
            await ctx.channel.send(f"No results for {args}")
        else:
            # Send a link to the top-ranked search result
            youtube_video_url = f"https://www.youtube.com/watch?v={results[0]['video_id']}"
            await ctx.channel.send(youtube_video_url)
def setup(bot):
bot.add_cog(Jams(bot))
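# Illustrative note (assumption): with a discord.py-style bot, the cog defined above
# would typically be registered from the main entry point, e.g.
#   bot.load_extension("cogs.jams")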
| 27.657895 | 91 | 0.637488 |
bdd983761f955cdaafee9b59091ca36621b68934
| 27,633 |
py
|
Python
|
sdk/python/pulumi_digitalocean/certificate.py
|
yitsushi/pulumi-digitalocean
|
9d408e7e4a3bed2d9e7aa91a32e2f154706a3400
|
[
"ECL-2.0",
"Apache-2.0"
] | 53 |
2019-04-25T14:43:12.000Z
|
2022-03-14T15:51:44.000Z
|
sdk/python/pulumi_digitalocean/certificate.py
|
yitsushi/pulumi-digitalocean
|
9d408e7e4a3bed2d9e7aa91a32e2f154706a3400
|
[
"ECL-2.0",
"Apache-2.0"
] | 158 |
2019-04-15T21:47:18.000Z
|
2022-03-29T21:21:57.000Z
|
sdk/python/pulumi_digitalocean/certificate.py
|
yitsushi/pulumi-digitalocean
|
9d408e7e4a3bed2d9e7aa91a32e2f154706a3400
|
[
"ECL-2.0",
"Apache-2.0"
] | 10 |
2019-04-15T20:16:11.000Z
|
2021-05-28T19:08:32.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._enums import *
__all__ = ['CertificateArgs', 'Certificate']
@pulumi.input_type
class CertificateArgs:
def __init__(__self__, *,
certificate_chain: Optional[pulumi.Input[str]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
leaf_certificate: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'CertificateType']]] = None):
"""
The set of arguments for constructing a Certificate resource.
:param pulumi.Input[str] certificate_chain: The full PEM-formatted trust chain
between the certificate authority's certificate and your domain's TLS
certificate. Only valid when type is `custom`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: List of fully qualified domain names (FQDNs) for
which the certificate will be issued. The domains must be managed using
DigitalOcean's DNS. Only valid when type is `lets_encrypt`.
:param pulumi.Input[str] leaf_certificate: The contents of a PEM-formatted public
TLS certificate. Only valid when type is `custom`.
:param pulumi.Input[str] name: The name of the certificate for identification.
:param pulumi.Input[str] private_key: The contents of a PEM-formatted private-key
corresponding to the SSL certificate. Only valid when type is `custom`.
:param pulumi.Input[Union[str, 'CertificateType']] type: The type of certificate to provision. Can be either
`custom` or `lets_encrypt`. Defaults to `custom`.
"""
if certificate_chain is not None:
pulumi.set(__self__, "certificate_chain", certificate_chain)
if domains is not None:
pulumi.set(__self__, "domains", domains)
if leaf_certificate is not None:
pulumi.set(__self__, "leaf_certificate", leaf_certificate)
if name is not None:
pulumi.set(__self__, "name", name)
if private_key is not None:
pulumi.set(__self__, "private_key", private_key)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="certificateChain")
def certificate_chain(self) -> Optional[pulumi.Input[str]]:
"""
The full PEM-formatted trust chain
between the certificate authority's certificate and your domain's TLS
certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "certificate_chain")
@certificate_chain.setter
def certificate_chain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_chain", value)
@property
@pulumi.getter
def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of fully qualified domain names (FQDNs) for
which the certificate will be issued. The domains must be managed using
DigitalOcean's DNS. Only valid when type is `lets_encrypt`.
"""
return pulumi.get(self, "domains")
@domains.setter
def domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "domains", value)
@property
@pulumi.getter(name="leafCertificate")
def leaf_certificate(self) -> Optional[pulumi.Input[str]]:
"""
The contents of a PEM-formatted public
TLS certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "leaf_certificate")
@leaf_certificate.setter
def leaf_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "leaf_certificate", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the certificate for identification.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> Optional[pulumi.Input[str]]:
"""
The contents of a PEM-formatted private-key
corresponding to the SSL certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "private_key")
@private_key.setter
def private_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_key", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'CertificateType']]]:
"""
The type of certificate to provision. Can be either
`custom` or `lets_encrypt`. Defaults to `custom`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'CertificateType']]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class _CertificateState:
def __init__(__self__, *,
certificate_chain: Optional[pulumi.Input[str]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
leaf_certificate: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
not_after: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
sha1_fingerprint: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'CertificateType']]] = None,
uuid: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Certificate resources.
:param pulumi.Input[str] certificate_chain: The full PEM-formatted trust chain
between the certificate authority's certificate and your domain's TLS
certificate. Only valid when type is `custom`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: List of fully qualified domain names (FQDNs) for
which the certificate will be issued. The domains must be managed using
DigitalOcean's DNS. Only valid when type is `lets_encrypt`.
:param pulumi.Input[str] leaf_certificate: The contents of a PEM-formatted public
TLS certificate. Only valid when type is `custom`.
:param pulumi.Input[str] name: The name of the certificate for identification.
:param pulumi.Input[str] not_after: The expiration date of the certificate
:param pulumi.Input[str] private_key: The contents of a PEM-formatted private-key
corresponding to the SSL certificate. Only valid when type is `custom`.
:param pulumi.Input[str] sha1_fingerprint: The SHA-1 fingerprint of the certificate
:param pulumi.Input[Union[str, 'CertificateType']] type: The type of certificate to provision. Can be either
`custom` or `lets_encrypt`. Defaults to `custom`.
:param pulumi.Input[str] uuid: The UUID of the certificate
"""
if certificate_chain is not None:
pulumi.set(__self__, "certificate_chain", certificate_chain)
if domains is not None:
pulumi.set(__self__, "domains", domains)
if leaf_certificate is not None:
pulumi.set(__self__, "leaf_certificate", leaf_certificate)
if name is not None:
pulumi.set(__self__, "name", name)
if not_after is not None:
pulumi.set(__self__, "not_after", not_after)
if private_key is not None:
pulumi.set(__self__, "private_key", private_key)
if sha1_fingerprint is not None:
pulumi.set(__self__, "sha1_fingerprint", sha1_fingerprint)
if state is not None:
pulumi.set(__self__, "state", state)
if type is not None:
pulumi.set(__self__, "type", type)
if uuid is not None:
pulumi.set(__self__, "uuid", uuid)
@property
@pulumi.getter(name="certificateChain")
def certificate_chain(self) -> Optional[pulumi.Input[str]]:
"""
The full PEM-formatted trust chain
between the certificate authority's certificate and your domain's TLS
certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "certificate_chain")
@certificate_chain.setter
def certificate_chain(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "certificate_chain", value)
@property
@pulumi.getter
def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of fully qualified domain names (FQDNs) for
which the certificate will be issued. The domains must be managed using
DigitalOcean's DNS. Only valid when type is `lets_encrypt`.
"""
return pulumi.get(self, "domains")
@domains.setter
def domains(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "domains", value)
@property
@pulumi.getter(name="leafCertificate")
def leaf_certificate(self) -> Optional[pulumi.Input[str]]:
"""
The contents of a PEM-formatted public
TLS certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "leaf_certificate")
@leaf_certificate.setter
def leaf_certificate(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "leaf_certificate", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the certificate for identification.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notAfter")
def not_after(self) -> Optional[pulumi.Input[str]]:
"""
The expiration date of the certificate
"""
return pulumi.get(self, "not_after")
@not_after.setter
def not_after(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "not_after", value)
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> Optional[pulumi.Input[str]]:
"""
The contents of a PEM-formatted private-key
corresponding to the SSL certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "private_key")
@private_key.setter
def private_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_key", value)
@property
@pulumi.getter(name="sha1Fingerprint")
def sha1_fingerprint(self) -> Optional[pulumi.Input[str]]:
"""
The SHA-1 fingerprint of the certificate
"""
return pulumi.get(self, "sha1_fingerprint")
@sha1_fingerprint.setter
def sha1_fingerprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sha1_fingerprint", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'CertificateType']]]:
"""
The type of certificate to provision. Can be either
`custom` or `lets_encrypt`. Defaults to `custom`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'CertificateType']]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def uuid(self) -> Optional[pulumi.Input[str]]:
"""
The UUID of the certificate
"""
return pulumi.get(self, "uuid")
@uuid.setter
def uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uuid", value)
class Certificate(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate_chain: Optional[pulumi.Input[str]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
leaf_certificate: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'CertificateType']]] = None,
__props__=None):
"""
Provides a DigitalOcean Certificate resource that allows you to manage
certificates for configuring TLS termination in Load Balancers.
Certificates created with this resource can be referenced in your
Load Balancer configuration via their ID. The certificate can either
be a custom one provided by you or automatically generated one with
Let's Encrypt.
## Example Usage
### Custom Certificate
```python
import pulumi
import pulumi_digitalocean as digitalocean
cert = digitalocean.Certificate("cert",
type="custom",
private_key=(lambda path: open(path).read())("/Users/myuser/certs/privkey.pem"),
leaf_certificate=(lambda path: open(path).read())("/Users/myuser/certs/cert.pem"),
certificate_chain=(lambda path: open(path).read())("/Users/myuser/certs/fullchain.pem"))
```
### Let's Encrypt Certificate
```python
import pulumi
import pulumi_digitalocean as digitalocean
cert = digitalocean.Certificate("cert",
domains=["example.com"],
type="lets_encrypt")
```
### Use with Other Resources
Both custom and Let's Encrypt certificates can be used with other resources
including the `LoadBalancer` and `Cdn` resources.
```python
import pulumi
import pulumi_digitalocean as digitalocean
cert = digitalocean.Certificate("cert",
type="lets_encrypt",
domains=["example.com"])
# Create a new Load Balancer with TLS termination
public = digitalocean.LoadBalancer("public",
region="nyc3",
droplet_tag="backend",
forwarding_rules=[digitalocean.LoadBalancerForwardingRuleArgs(
entry_port=443,
entry_protocol="https",
target_port=80,
target_protocol="http",
certificate_name=cert.name,
)])
```
## Import
Certificates can be imported using the certificate `name`, e.g.
```sh
$ pulumi import digitalocean:index/certificate:Certificate mycertificate cert-01
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate_chain: The full PEM-formatted trust chain
between the certificate authority's certificate and your domain's TLS
certificate. Only valid when type is `custom`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: List of fully qualified domain names (FQDNs) for
which the certificate will be issued. The domains must be managed using
DigitalOcean's DNS. Only valid when type is `lets_encrypt`.
:param pulumi.Input[str] leaf_certificate: The contents of a PEM-formatted public
TLS certificate. Only valid when type is `custom`.
:param pulumi.Input[str] name: The name of the certificate for identification.
:param pulumi.Input[str] private_key: The contents of a PEM-formatted private-key
corresponding to the SSL certificate. Only valid when type is `custom`.
:param pulumi.Input[Union[str, 'CertificateType']] type: The type of certificate to provision. Can be either
`custom` or `lets_encrypt`. Defaults to `custom`.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[CertificateArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a DigitalOcean Certificate resource that allows you to manage
certificates for configuring TLS termination in Load Balancers.
Certificates created with this resource can be referenced in your
Load Balancer configuration via their ID. The certificate can either
be a custom one provided by you or automatically generated one with
Let's Encrypt.
## Example Usage
### Custom Certificate
```python
import pulumi
import pulumi_digitalocean as digitalocean
cert = digitalocean.Certificate("cert",
type="custom",
private_key=(lambda path: open(path).read())("/Users/myuser/certs/privkey.pem"),
leaf_certificate=(lambda path: open(path).read())("/Users/myuser/certs/cert.pem"),
certificate_chain=(lambda path: open(path).read())("/Users/myuser/certs/fullchain.pem"))
```
### Let's Encrypt Certificate
```python
import pulumi
import pulumi_digitalocean as digitalocean
cert = digitalocean.Certificate("cert",
domains=["example.com"],
type="lets_encrypt")
```
### Use with Other Resources
Both custom and Let's Encrypt certificates can be used with other resources
including the `LoadBalancer` and `Cdn` resources.
```python
import pulumi
import pulumi_digitalocean as digitalocean
cert = digitalocean.Certificate("cert",
type="lets_encrypt",
domains=["example.com"])
# Create a new Load Balancer with TLS termination
public = digitalocean.LoadBalancer("public",
region="nyc3",
droplet_tag="backend",
forwarding_rules=[digitalocean.LoadBalancerForwardingRuleArgs(
entry_port=443,
entry_protocol="https",
target_port=80,
target_protocol="http",
certificate_name=cert.name,
)])
```
## Import
Certificates can be imported using the certificate `name`, e.g.
```sh
$ pulumi import digitalocean:index/certificate:Certificate mycertificate cert-01
```
:param str resource_name: The name of the resource.
:param CertificateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CertificateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate_chain: Optional[pulumi.Input[str]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
leaf_certificate: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'CertificateType']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CertificateArgs.__new__(CertificateArgs)
__props__.__dict__["certificate_chain"] = certificate_chain
__props__.__dict__["domains"] = domains
__props__.__dict__["leaf_certificate"] = leaf_certificate
__props__.__dict__["name"] = name
__props__.__dict__["private_key"] = private_key
__props__.__dict__["type"] = type
__props__.__dict__["not_after"] = None
__props__.__dict__["sha1_fingerprint"] = None
__props__.__dict__["state"] = None
__props__.__dict__["uuid"] = None
super(Certificate, __self__).__init__(
'digitalocean:index/certificate:Certificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
certificate_chain: Optional[pulumi.Input[str]] = None,
domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
leaf_certificate: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
not_after: Optional[pulumi.Input[str]] = None,
private_key: Optional[pulumi.Input[str]] = None,
sha1_fingerprint: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[Union[str, 'CertificateType']]] = None,
uuid: Optional[pulumi.Input[str]] = None) -> 'Certificate':
"""
Get an existing Certificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate_chain: The full PEM-formatted trust chain
between the certificate authority's certificate and your domain's TLS
certificate. Only valid when type is `custom`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] domains: List of fully qualified domain names (FQDNs) for
which the certificate will be issued. The domains must be managed using
DigitalOcean's DNS. Only valid when type is `lets_encrypt`.
:param pulumi.Input[str] leaf_certificate: The contents of a PEM-formatted public
TLS certificate. Only valid when type is `custom`.
:param pulumi.Input[str] name: The name of the certificate for identification.
:param pulumi.Input[str] not_after: The expiration date of the certificate
:param pulumi.Input[str] private_key: The contents of a PEM-formatted private-key
corresponding to the SSL certificate. Only valid when type is `custom`.
:param pulumi.Input[str] sha1_fingerprint: The SHA-1 fingerprint of the certificate
:param pulumi.Input[Union[str, 'CertificateType']] type: The type of certificate to provision. Can be either
`custom` or `lets_encrypt`. Defaults to `custom`.
:param pulumi.Input[str] uuid: The UUID of the certificate
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _CertificateState.__new__(_CertificateState)
__props__.__dict__["certificate_chain"] = certificate_chain
__props__.__dict__["domains"] = domains
__props__.__dict__["leaf_certificate"] = leaf_certificate
__props__.__dict__["name"] = name
__props__.__dict__["not_after"] = not_after
__props__.__dict__["private_key"] = private_key
__props__.__dict__["sha1_fingerprint"] = sha1_fingerprint
__props__.__dict__["state"] = state
__props__.__dict__["type"] = type
__props__.__dict__["uuid"] = uuid
return Certificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="certificateChain")
def certificate_chain(self) -> pulumi.Output[Optional[str]]:
"""
The full PEM-formatted trust chain
between the certificate authority's certificate and your domain's TLS
certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "certificate_chain")
@property
@pulumi.getter
def domains(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of fully qualified domain names (FQDNs) for
which the certificate will be issued. The domains must be managed using
DigitalOcean's DNS. Only valid when type is `lets_encrypt`.
"""
return pulumi.get(self, "domains")
@property
@pulumi.getter(name="leafCertificate")
def leaf_certificate(self) -> pulumi.Output[Optional[str]]:
"""
The contents of a PEM-formatted public
TLS certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "leaf_certificate")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the certificate for identification.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notAfter")
def not_after(self) -> pulumi.Output[str]:
"""
The expiration date of the certificate
"""
return pulumi.get(self, "not_after")
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> pulumi.Output[Optional[str]]:
"""
The contents of a PEM-formatted private-key
corresponding to the SSL certificate. Only valid when type is `custom`.
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter(name="sha1Fingerprint")
def sha1_fingerprint(self) -> pulumi.Output[str]:
"""
The SHA-1 fingerprint of the certificate
"""
return pulumi.get(self, "sha1_fingerprint")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
return pulumi.get(self, "state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of certificate to provision. Can be either
`custom` or `lets_encrypt`. Defaults to `custom`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def uuid(self) -> pulumi.Output[str]:
"""
The UUID of the certificate
"""
return pulumi.get(self, "uuid")
| 41.931715 | 134 | 0.63388 |
58ad020800e4785ae1a23f7c1b192b77244aa272
| 62,315 |
py
|
Python
|
pysnmp-with-texts/DGS-3620-28SC-DC-L2MGMT-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8 |
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/DGS-3620-28SC-DC-L2MGMT-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4 |
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/DGS-3620-28SC-DC-L2MGMT-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module DGS-3620-28SC-DC-L2MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DGS-3620-28SC-DC-L2MGMT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:44:27 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint")
AgentNotifyLevel, = mibBuilder.importSymbols("DLINK-ID-REC-MIB", "AgentNotifyLevel")
dot1agCfmMdIndex, dot1agCfmMepIdentifier, dot1agCfmMaIndex = mibBuilder.importSymbols("IEEE8021-CFM-MIB", "dot1agCfmMdIndex", "dot1agCfmMepIdentifier", "dot1agCfmMaIndex")
swPortSecPortIndex, = mibBuilder.importSymbols("PORT-SECURITY-MIB", "swPortSecPortIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
IpAddress, TimeTicks, ModuleIdentity, Bits, Counter64, iso, Gauge32, Integer32, MibIdentifier, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Unsigned32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "TimeTicks", "ModuleIdentity", "Bits", "Counter64", "iso", "Gauge32", "Integer32", "MibIdentifier", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Unsigned32", "Counter32")
RowStatus, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention", "TruthValue")
dlink_Dgs3620Proj_Dgs3620_28SC_DC, = mibBuilder.importSymbols("SWDGS3620PRIMGMT-MIB", "dlink-Dgs3620Proj-Dgs3620-28SC-DC")
swL2MgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2))
if mibBuilder.loadTexts: swL2MgmtMIB.setLastUpdated('1011150000Z')
if mibBuilder.loadTexts: swL2MgmtMIB.setOrganization('D-Link Corp.')
if mibBuilder.loadTexts: swL2MgmtMIB.setContactInfo('http://support.dlink.com')
if mibBuilder.loadTexts: swL2MgmtMIB.setDescription('The Structure of Layer 2 Network Management Information.')
class MacAddress(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
fixedLength = 6
class VlanId(Integer32):
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 4094)
class PortList(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 127)
swL2DevMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1))
swL2VLANMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2))
swL2PortMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3))
swL2TrunkMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9))
swL2MirrorMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10))
swL2TrafficSegMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 14))
swL2MulticastFilterMode = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 17))
swL2MgmtMIBTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100))
class IANAifMauAutoNegCapBits(TextualConvention, Bits):
reference = '[IEEE802.3], Section 30.6.1.1.5'
description = 'This data type is used as the syntax of the swL2PortAutoNegCapabilityBits, swL2PortAutoNegCapAdvertisedBits, and swL2PortAutoNegCapReceivedBits objects in swL2PortAutoNegTable.'
status = 'current'
namedValues = NamedValues(("bOther", 0), ("b10baseT", 1), ("b10baseTFD", 2), ("b100baseT4", 3), ("b100baseTX", 4), ("b100baseTXFD", 5), ("b100baseT2", 6), ("b100baseT2FD", 7), ("bFdxPause", 8), ("bFdxAPause", 9), ("bFdxSPause", 10), ("bFdxBPause", 11), ("b1000baseX", 12), ("b1000baseXFD", 13), ("b1000baseT", 14), ("b1000baseTFD", 15))
swL2DevInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 1))
swDevInfoTotalNumOfPort = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swDevInfoTotalNumOfPort.setStatus('current')
if mibBuilder.loadTexts: swDevInfoTotalNumOfPort.setDescription('The number of ports within this switch. This value is the sum of the ports within this switch.')
swDevInfoNumOfPortInUse = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swDevInfoNumOfPortInUse.setStatus('current')
if mibBuilder.loadTexts: swDevInfoNumOfPortInUse.setDescription('The number of ports in this switch connected to the segment or the end stations.')
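# Illustrative sketch (not part of the generated MIB module): once this module is
# loaded, a scalar such as swDevInfoTotalNumOfPort could be read with pysnmp's
# high-level API; the agent address and community string below are placeholders.
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, getCmd)
# errInd, errStat, errIdx, varBinds = next(getCmd(
#     SnmpEngine(), CommunityData('public'), UdpTransportTarget(('192.0.2.1', 161)),
#     ContextData(),
#     ObjectType(ObjectIdentity('DGS-3620-28SC-DC-L2MGMT-MIB', 'swDevInfoTotalNumOfPort', 0))))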
swL2DevCtrl = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2))
swL2DevCtrlSnmpTrapState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlSnmpTrapState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlSnmpTrapState.setDescription('This object controls the SNMP trap status.')
swL2DevCtrlCleanAllStatisticCounter = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("active", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlCleanAllStatisticCounter.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCleanAllStatisticCounter.setDescription('When the object is set to active, all statistical counters will be cleared. If set to normal, no action will occur.')
swL2DevCtrlVlanIdOfFDBTbl = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 7), VlanId()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlVlanIdOfFDBTbl.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlVlanIdOfFDBTbl.setDescription('Indicates the VLAN ID to which the Dot1dTpFdbTable belongs; The default value is the DEFAULT_VLAN_ID of the system.')
swL2MACNotifyState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MACNotifyState.setStatus('current')
if mibBuilder.loadTexts: swL2MACNotifyState.setDescription('This object can enable or disable MAC Notification.')
swL2MACNotifyHistorySize = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 500))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MACNotifyHistorySize.setStatus('current')
if mibBuilder.loadTexts: swL2MACNotifyHistorySize.setDescription('This object indicates the history size of MAC addresses in the MAC Address table. The default value is 1.')
swL2MACNotifyInterval = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MACNotifyInterval.setStatus('current')
if mibBuilder.loadTexts: swL2MACNotifyInterval.setDescription('This object indicates the time interval, in seconds, that will trigger MAC notification messages.')
swL2DevCtrlAsymVlanState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlAsymVlanState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlAsymVlanState.setDescription('This object enables or disables asymmetric VLANs during the runtime of the system.')
swL2DevCtrlTelnet = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 14))
swL2DevCtrlTelnetState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 14, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlTelnetState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlTelnetState.setDescription('This object controls the Telnet status.')
swL2DevCtrlTelnetTcpPort = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 14, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlTelnetTcpPort.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlTelnetTcpPort.setDescription('This object designates TCP ports. When Telnet is disabled, this object is not accessible.')
swL2DevCtrlManagementVlanId = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 16), VlanId()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlManagementVlanId.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlManagementVlanId.setDescription('This object controls which previously created VLANs are included in the System IP Interface.')
swL2DevCtrlWeb = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 17))
swL2DevCtrlWebState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 17, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlWebState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlWebState.setDescription('This object controls the Web status.')
swL2DevCtrlWebTcpPort = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 17, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlWebTcpPort.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlWebTcpPort.setDescription('This object designates TCP ports. When Web is disabled, this object is not accessible.')
swL2DevCtrlLLDPState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlLLDPState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlLLDPState.setDescription('Specifies the state of the LLDP function. When this function is enabled, the switch can start to transmit LLDP packets and receive and process the LLDP packets. The specific function of each port will depend on the per port LLDP setting. For the advertisement of LLDP packets, the switch announces the information to its neighbor through ports. For receiving LLDP packets, the switch will learn the information from the LLDP packets advertised from the neighbor in the neighbor table. ')
swL2DevCtrlLLDPForwardMessageState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlLLDPForwardMessageState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlLLDPForwardMessageState.setDescription("When lldp is disabled and lldp forward_message's are enabled, the LLDP Data Unit packets received by the switch will be forwarded. ")
swL2DevCtrlIpAutoconfig = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlIpAutoconfig.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlIpAutoconfig.setDescription('This object controls the IP auto configuration state.')
swL2DevCtrlCFM = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21))
swL2DevCtrlCFMState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlCFMState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMState.setDescription('This object indicates the CFM global state.')
swL2DevCtrlCFMPortTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 2), )
if mibBuilder.loadTexts: swL2DevCtrlCFMPortTable.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMPortTable.setDescription('A table containing the CFM state of specified ports.')
swL2DevCtrlCFMPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 2, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2DevCtrlCFMPortIndex"))
if mibBuilder.loadTexts: swL2DevCtrlCFMPortEntry.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMPortEntry.setDescription('The entry of the CFM state on specified ports.')
swL2DevCtrlCFMPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: swL2DevCtrlCFMPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMPortIndex.setDescription('This object indicates the port number.')
swL2DevCtrlCFMPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlCFMPortState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMPortState.setDescription('This object indicates the CFM state by port.')
swL2DevCtrlCFMMaTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 3), )
if mibBuilder.loadTexts: swL2DevCtrlCFMMaTable.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMMaTable.setDescription('A table containing the CFM mode of specified MAs.')
swL2DevCtrlCFMMaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 3, 1), ).setIndexNames((0, "IEEE8021-CFM-MIB", "dot1agCfmMdIndex"), (0, "IEEE8021-CFM-MIB", "dot1agCfmMaIndex"))
if mibBuilder.loadTexts: swL2DevCtrlCFMMaEntry.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMMaEntry.setDescription('The entry of the CFM mode on specified MAs.')
swL2DevCtrlCFMMaMode = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("software", 1), ("hardware", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlCFMMaMode.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMMaMode.setDescription('This object indicates the CFM mode by MA.')
swL2DevCtrlCFMMepTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 4), )
if mibBuilder.loadTexts: swL2DevCtrlCFMMepTable.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMMepTable.setDescription('A table containing the CFM mode of specified MEPs.')
swL2DevCtrlCFMMepEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 4, 1), ).setIndexNames((0, "IEEE8021-CFM-MIB", "dot1agCfmMdIndex"), (0, "IEEE8021-CFM-MIB", "dot1agCfmMaIndex"), (0, "IEEE8021-CFM-MIB", "dot1agCfmMepIdentifier"))
if mibBuilder.loadTexts: swL2DevCtrlCFMMepEntry.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMMepEntry.setDescription('The entry of the CFM mode on specified MEPs.')
swL2DevCtrlCFMMepMode = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 21, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("software", 1), ("hardware", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2DevCtrlCFMMepMode.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlCFMMepMode.setDescription('This object indicates the CFM mode by MEP.')
swL2DevCtrlVLANTrunkState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 2, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevCtrlVLANTrunkState.setStatus('current')
if mibBuilder.loadTexts: swL2DevCtrlVLANTrunkState.setDescription('This indicates the global state of the VLAN trunking feature of the device.')
swL2DevAlarm = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 3))
swL2DevAlarmNewRoot = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevAlarmNewRoot.setStatus('current')
if mibBuilder.loadTexts: swL2DevAlarmNewRoot.setDescription('When the device has become the new root of the Spanning Tree, this object decides whether to send a new root trap.')
swL2DevAlarmTopologyChange = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevAlarmTopologyChange.setStatus('current')
if mibBuilder.loadTexts: swL2DevAlarmTopologyChange.setDescription('This object determines whether or not to send a trap message when the switch topology changes. If the object is enabled (3), the Topology Change trap is sent by the device when any of its configured ports transition from the Learning state to the Forwarding state, or from the Forwarding state to the Blocking state. For the same port transition, the device does not send the trap if this object value is disabled or in another state.')
swL2DevAlarmLinkChange = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 1, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2DevAlarmLinkChange.setStatus('current')
if mibBuilder.loadTexts: swL2DevAlarmLinkChange.setDescription('This object determines whether or not to send a trap message when the link changes. If the object is enabled (3), the Link Change trap is sent by the device when any of its port links change. The device does not send the trap if this object value is disabled or in another state.')
swL2VlanStaticTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 1), )
if mibBuilder.loadTexts: swL2VlanStaticTable.setStatus('current')
if mibBuilder.loadTexts: swL2VlanStaticTable.setDescription('A table containing static configuration information for each VLAN configured into the device by (local or network) management. All entries are permanent and will be restored after the device is reset.')
swL2VlanStaticEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 1, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2VlanIndex"))
if mibBuilder.loadTexts: swL2VlanStaticEntry.setStatus('current')
if mibBuilder.loadTexts: swL2VlanStaticEntry.setDescription('Static information for a VLAN configured into this device by (local or network) management.')
swL2VlanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 1, 1, 1), VlanId())
if mibBuilder.loadTexts: swL2VlanIndex.setStatus('current')
if mibBuilder.loadTexts: swL2VlanIndex.setDescription('The VLAN-ID or other identifier referring to this VLAN.')
swL2VLANAdvertisement = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2VLANAdvertisement.setStatus('current')
if mibBuilder.loadTexts: swL2VLANAdvertisement.setDescription('This object indicates if the advertisement is active or not.')
swL2PVIDAutoAssignmentState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PVIDAutoAssignmentState.setStatus('current')
if mibBuilder.loadTexts: swL2PVIDAutoAssignmentState.setDescription("This object controls the PVID auto assignment state. If 'Auto-assign PVID' is disabled, the PVID can only be changed by PVID configuration (user changes explicitly). The VLAN configuration will not automatically change the PVID. If 'Auto-assign PVID' is enabled, the PVID will be changed by PVID or VLAN configuration. When a user configures a port to VLAN X's untagged membership, this port's PVID will be updated with VLAN X. Using the VLAN list command, PVID is updated as the last item of the VLAN list. When a user removes a port from the untagged membership of the PVID's VLAN, the port's PVID will be assigned 'default VLAN'.")
swL2VlanPortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 3), )
if mibBuilder.loadTexts: swL2VlanPortInfoTable.setStatus('current')
if mibBuilder.loadTexts: swL2VlanPortInfoTable.setDescription('A table containing the VLAN and port role information of each port.')
swL2VlanPortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 3, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2VlanPortInfoPortIndex"), (0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2VlanPortInfoVid"))
if mibBuilder.loadTexts: swL2VlanPortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: swL2VlanPortInfoEntry.setDescription('The table entry of VLAN port information.')
swL2VlanPortInfoPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2VlanPortInfoPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2VlanPortInfoPortIndex.setDescription('The port index.')
swL2VlanPortInfoVid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 3, 1, 2), VlanId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2VlanPortInfoVid.setStatus('current')
if mibBuilder.loadTexts: swL2VlanPortInfoVid.setDescription('The VLAN ID assigned to a special port')
swL2VlanPortInfoPortRole = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("untagged", 2), ("tagged", 3), ("dynamic", 4), ("forbidden", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2VlanPortInfoPortRole.setStatus('current')
if mibBuilder.loadTexts: swL2VlanPortInfoPortRole.setDescription("The port role of a special port. When the role is 'other', it means this port does not belong to the VLAN.")
swL2NniGvrpBpduAddress = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 2, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dot1d", 1), ("dot1ad", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2NniGvrpBpduAddress.setStatus('current')
if mibBuilder.loadTexts: swL2NniGvrpBpduAddress.setDescription("Specifies the GVRP's BPDU MAC address of the NNI port with Q-in-Q status.")
swL2PortInfoTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1), )
if mibBuilder.loadTexts: swL2PortInfoTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoTable.setDescription('A table that contains information about every port.')
swL2PortInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2PortInfoPortIndex"), (0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2PortInfoMediumType"))
if mibBuilder.loadTexts: swL2PortInfoEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoEntry.setDescription('A list of information for each port of the device.')
swL2PortInfoPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoPortIndex.setDescription("This object indicates the module's port number.(1..Max port number in the module)")
swL2PortInfoMediumType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("copper", 1), ("fiber", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoMediumType.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoMediumType.setDescription('Indicates the medium type of the port number.')
swL2PortInfoUnitID = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoUnitID.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoUnitID.setDescription('Indicates the ID of the unit in the system.')
swL2PortInfoType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("portType-none", 0), ("portType-100Base-T", 2), ("portType-100Base-X", 3), ("portType-1000Base-T", 4), ("portType-1000Base-X", 5), ("portType-10GBase-R", 6), ("portType-10GBase-CX4", 7), ("portType-SIO", 8), ("portType-module-empty", 9), ("portType-user-last", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoType.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoType.setDescription('This object indicates the connector type of this port.')
swL2PortInfoLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("link-pass", 2), ("link-fail", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoLinkStatus.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoLinkStatus.setDescription('This object indicates the port link status.')
swL2PortInfoNwayStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))).clone(namedValues=NamedValues(("link-down", 0), ("full-10Mbps-8023x", 1), ("full-10Mbps-none", 2), ("half-10Mbps-backp", 3), ("half-10Mbps-none", 4), ("full-100Mbps-8023x", 5), ("full-100Mbps-none", 6), ("half-100Mbps-backp", 7), ("half-100Mbps-none", 8), ("full-1Gigabps-8023x", 9), ("full-1Gigabps-none", 10), ("half-1Gigabps-backp", 11), ("half-1Gigabps-none", 12), ("full-10Gigabps-8023x", 13), ("full-10Gigabps-none", 14), ("half-10Gigabps-8023x", 15), ("half-10Gigabps-none", 16), ("empty", 17), ("err-disabled", 18)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoNwayStatus.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoNwayStatus.setDescription('This object indicates the port speed and duplex mode.')
swL2PortInfoErrorDisabled = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("none", 0), ("storm", 1), ("stp-lbd", 2), ("ctp-lbd", 3), ("ddm", 4), ("bpdu-protection", 5), ("unknow", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortInfoErrorDisabled.setStatus('current')
if mibBuilder.loadTexts: swL2PortInfoErrorDisabled.setDescription('This object indicates the blocking type of this port.')
swL2PortCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2), )
if mibBuilder.loadTexts: swL2PortCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlTable.setDescription('A table that contains control information about every port.')
swL2PortCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2PortCtrlPortIndex"), (0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2PortCtrlMediumType"))
if mibBuilder.loadTexts: swL2PortCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlEntry.setDescription('A list of control information for each port on the device.')
swL2PortCtrlPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortCtrlPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlPortIndex.setDescription("This object indicates the module's port number.(1..Max port number in the module)")
swL2PortCtrlMediumType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("copper", 1), ("fiber", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortCtrlMediumType.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlMediumType.setDescription('Indicates the medium type of the port number.')
swL2PortCtrlUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortCtrlUnitIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlUnitIndex.setDescription('Indicates the ID of the unit in the device')
swL2PortCtrlAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlAdminState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlAdminState.setDescription('This object decides if the port is enabled or disabled.')
swL2PortCtrlNwayState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("nway-enabled", 2), ("nway-disabled-10Mbps-Half", 3), ("nway-disabled-10Mbps-Full", 4), ("nway-disabled-100Mbps-Half", 5), ("nway-disabled-100Mbps-Full", 6), ("nway-disabled-1Gigabps-Half", 7), ("nway-disabled-1Gigabps-Full", 8), ("nway-disabled-1Gigabps-Full-master", 9), ("nway-disabled-1Gigabps-Full-slave", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlNwayState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlNwayState.setDescription('Chose the port speed, duplex mode, and N-Way function mode.')
swL2PortCtrlFlowCtrlState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlFlowCtrlState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlFlowCtrlState.setDescription('The flow control mechanism is different between full duplex mode and half duplex mode. For half duplex mode, the jamming signal is asserted. For full duplex mode, the IEEE 802.3x flow control function sends PAUSE frames and receives PAUSE frames.')
swL2PortCtrlLearningState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlLearningState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlLearningState.setDescription('This object decides if the port is locked or not.')
swL2PortCtrlMACNotifyState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlMACNotifyState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlMACNotifyState.setDescription("This object sets each port's MAC notification state.")
swL2PortCtrlMDIXState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("auto", 1), ("normal", 2), ("cross", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlMDIXState.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlMDIXState.setDescription('This object configures the MDIX setting of the port.')
swL2PortCtrlJumboFrame = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCtrlJumboFrame.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlJumboFrame.setDescription("This object configures the switch's jumbo frame settings.")
swL2PortCtrlJumboFrameMaxSize = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortCtrlJumboFrameMaxSize.setStatus('current')
if mibBuilder.loadTexts: swL2PortCtrlJumboFrameMaxSize.setDescription('This object describes how many bytes the max jumbo frame is.')
swL2PortCounterCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 6), )
if mibBuilder.loadTexts: swL2PortCounterCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortCounterCtrlTable.setDescription('A table that is used to clear counter information about every port.')
swL2PortCounterCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 6, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2PortCounterCtrlPortIndex"))
if mibBuilder.loadTexts: swL2PortCounterCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortCounterCtrlEntry.setDescription('A list of entries used to clear the counter information for each port of the device.')
swL2PortCounterCtrlPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortCounterCtrlPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortCounterCtrlPortIndex.setDescription("This object indicates the module's port number.(1..Max port number in the module)")
swL2PortCounterClearCtrl = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("start", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortCounterClearCtrl.setStatus('current')
if mibBuilder.loadTexts: swL2PortCounterClearCtrl.setDescription('This object indicates whether to clear the counters for each port of the device or not.')
swL2PortJumboFrameCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 10), )
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlTable.setDescription("A table that contains information for each port's jumbo frame information.")
swL2PortJumboFrameCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 10, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2PortJumboFrameCtrlPortIndex"))
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlEntry.setDescription("A list of information for each port's jumbo frame of the device.")
swL2PortJumboFrameCtrlPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 10, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlPortIndex.setDescription("This object indicates the module's port number.(1..Max port number in the module)")
swL2PortJumboFrameCtrlPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 3, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlPortState.setStatus('current')
if mibBuilder.loadTexts: swL2PortJumboFrameCtrlPortState.setDescription("This object indicates if the port's jumbo frame is enabled or disabled.")
swL2TrunkMaxSupportedEntries = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkMaxSupportedEntries.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkMaxSupportedEntries.setDescription('Maximum number of entries in the trunk configuration table (swL2TrunkCtrlTable).')
swL2TrunkCurrentNumEntries = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkCurrentNumEntries.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkCurrentNumEntries.setDescription('Current active number of entries in the trunk configuration table.')
swL2TrunkCtrlTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3), )
if mibBuilder.loadTexts: swL2TrunkCtrlTable.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkCtrlTable.setDescription('This table specifies information about the logical port trunk groups.')
swL2TrunkCtrlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2TrunkIndex"))
if mibBuilder.loadTexts: swL2TrunkCtrlEntry.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkCtrlEntry.setDescription('A list of information about each logical port trunk group.')
swL2TrunkIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkIndex.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkIndex.setDescription('The index number of the logical port trunk group. The trunk group number depends on the existence of unit and module.')
swL2TrunkMasterPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2TrunkMasterPort.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkMasterPort.setDescription('This object indicates the master port number of the port trunk entry. When using Port Trunking, you cannot configure the other ports of the group except the master port. Their configuration must be the same as the master port (e.g. speed, duplex, enabled/disabled, flow control, and so on).')
swL2TrunkMember = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3, 1, 4), PortList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2TrunkMember.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkMember.setDescription('Indicates the number of ports included in this Trunk group. The trunk port number depends on the existence of the module. The maximum number of ports is 8 for one trunk group.')
swL2TrunkFloodingPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkFloodingPort.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkFloodingPort.setDescription('The object indicates the flooding port number of the port trunk entry. The first port of the Trunk group is implicitly configured to be the flooding port.')
swL2TrunkType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("static", 2), ("lacp", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2TrunkType.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkType.setDescription('This object indicates the type of trunk group. static: is a static trunk group lacp: is a LACP trunk group. ')
swL2TrunkState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 3, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2TrunkState.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkState.setDescription('This object indicates the status of this entry.')
swL2TrunkAlgorithm = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("other", 1), ("mac-source", 2), ("mac-destination", 3), ("mac-source-dest", 4), ("ip-source", 5), ("ip-destination", 6), ("ip-source-dest", 7), ("l4-source-port", 8), ("l4-destination-port", 9), ("l4-source-dest-port", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrunkAlgorithm.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkAlgorithm.setDescription('This object configures part of the packet examined by the switch when selecting the egress port for transmitting load-sharing data.')
swL2TrunkLACPPortTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 5), )
if mibBuilder.loadTexts: swL2TrunkLACPPortTable.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortTable.setDescription('This table specifies which ports are grouped together (this can be up to 8 ports) into a single logical link.')
swL2TrunkLACPPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 5, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2TrunkLACPPortIndex"))
if mibBuilder.loadTexts: swL2TrunkLACPPortEntry.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortEntry.setDescription('A list of information specifying which ports are grouped together (this can be up to 8 ports) into a single logical link.')
swL2TrunkLACPPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkLACPPortIndex.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortIndex.setDescription('The index of logical port LACP. ')
swL2TrunkLACPPortState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("passive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrunkLACPPortState.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkLACPPortState.setDescription('The state of a logical port LACP.')
swL2TrunkVLANTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 6), )
if mibBuilder.loadTexts: swL2TrunkVLANTable.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANTable.setDescription('This table is used to manage the VLAN trunking feature of the device.')
swL2TrunkVLANEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 6, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2TrunkVLANPort"))
if mibBuilder.loadTexts: swL2TrunkVLANEntry.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANEntry.setDescription('This object is used to configure the VLAN trunking settings for each port.')
swL2TrunkVLANPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrunkVLANPort.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANPort.setDescription('This object indicates the port being configured.')
swL2TrunkVLANState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 9, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrunkVLANState.setStatus('current')
if mibBuilder.loadTexts: swL2TrunkVLANState.setDescription('The state of the logical port VLAN trunk.')
swL2MirrorLogicTargetPort = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MirrorLogicTargetPort.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorLogicTargetPort.setDescription('This object indicates which switch port will sniff another port. A trunk port member cannot be configured as a target snooping port. The port number is the sequential (logical) number, which is also applied to the bridge MIB, etc.')
swL2MirrorPortSourceIngress = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 2), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MirrorPortSourceIngress.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorPortSourceIngress.setDescription('This represents the port where ingress packets will be sniffed.')
swL2MirrorPortSourceEgress = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 3), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MirrorPortSourceEgress.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorPortSourceEgress.setDescription('This represents the port where egress packets will be sniffed.')
swL2MirrorPortState = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MirrorPortState.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorPortState.setDescription('This object indicates the port mirroring state. other (1) - this entry is currently in use but the conditions under which it will remain so are different from each of the following values. disabled (2) - After writing this value to the object, the corresponding entry will be removed from the table. enabled (3) - This entry resides in the table.')
swL2MirrorGroupTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5), )
if mibBuilder.loadTexts: swL2MirrorGroupTable.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupTable.setDescription('This table specifies information about the Mirror group configuration.')
swL2MirrorGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2MirrorGroupID"))
if mibBuilder.loadTexts: swL2MirrorGroupEntry.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupEntry.setDescription('A list of information about each Mirror group configuration.')
swL2MirrorGroupID = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5, 1, 1), Integer32())
if mibBuilder.loadTexts: swL2MirrorGroupID.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupID.setDescription('This object indicates the mirror group. The range of this object is (1..n), the value of n depends on detail project. ')
swL2MirrorGroupRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2MirrorGroupRowStatus.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupRowStatus.setDescription('This object manages this mirror group entry.')
swL2MirrorGroupState = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2MirrorGroupState.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupState.setDescription('This object indicates the mirror group state.')
swL2MirrorGroupLogicTargetPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2MirrorGroupLogicTargetPort.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupLogicTargetPort.setDescription('This object indicates the mirror group target port.')
swL2MirrorGroupPortSourceIngress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5, 1, 5), PortList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2MirrorGroupPortSourceIngress.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupPortSourceIngress.setDescription('This object indicates the mirror group ingress source ports.')
swL2MirrorGroupPortSourceEngress = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 10, 5, 1, 6), PortList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: swL2MirrorGroupPortSourceEngress.setStatus('current')
if mibBuilder.loadTexts: swL2MirrorGroupPortSourceEngress.setDescription('This object indicates the mirror group engress source ports.')
swL2TrafficSegTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 14, 1), )
if mibBuilder.loadTexts: swL2TrafficSegTable.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegTable.setDescription('This table specifies that the port can just forward traffic to the specific port list.')
swL2TrafficSegEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 14, 1, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2TrafficSegPort"))
if mibBuilder.loadTexts: swL2TrafficSegEntry.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegEntry.setDescription('A list of information which specifies the port with its traffic forwarding list.')
swL2TrafficSegPort = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 14, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2TrafficSegPort.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegPort.setDescription('The port number of the logical port.')
swL2TrafficSegForwardPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 14, 1, 1, 2), PortList()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2TrafficSegForwardPorts.setStatus('current')
if mibBuilder.loadTexts: swL2TrafficSegForwardPorts.setDescription('The port list where a specific port can forward traffic.')
swL2MulticastFilterModeVlanTable = MibTable((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 17, 1), )
if mibBuilder.loadTexts: swL2MulticastFilterModeVlanTable.setStatus('current')
if mibBuilder.loadTexts: swL2MulticastFilterModeVlanTable.setDescription(' A table that contains information about the VLAN multicast filter mode.')
swL2MulticastFilterModeVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 17, 1, 1), ).setIndexNames((0, "DGS-3620-28SC-DC-L2MGMT-MIB", "swL2MulticastFilterVid"))
if mibBuilder.loadTexts: swL2MulticastFilterModeVlanEntry.setStatus('current')
if mibBuilder.loadTexts: swL2MulticastFilterModeVlanEntry.setDescription('A list of multicast filter mode information for each VLAN. ')
swL2MulticastFilterVid = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 17, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2MulticastFilterVid.setStatus('current')
if mibBuilder.loadTexts: swL2MulticastFilterVid.setDescription('Indicates the VID for each VLAN.')
swL2MulticastFilterVlanMode = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 17, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("forward-all-groups", 1), ("forward-unregistered-groups", 2), ("filter-unregistered-groups", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2MulticastFilterVlanMode.setStatus('current')
if mibBuilder.loadTexts: swL2MulticastFilterVlanMode.setDescription('Specifies the multicast filter mode for each VLAN.')
swL2Notify = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1))
swL2NotifyMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 1))
swL2NotifyPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 2))
swL2NotifFirmware = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 2, 0))
swL2macNotificationSeverity = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 1, 1), AgentNotifyLevel()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2macNotificationSeverity.setStatus('current')
if mibBuilder.loadTexts: swL2macNotificationSeverity.setDescription('Indicates the level of macNotification detection.')
swL2PortSecurityViolationSeverity = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 1, 2), AgentNotifyLevel()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: swL2PortSecurityViolationSeverity.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityViolationSeverity.setDescription('Indicates the level of PortSecurityViolation detection.')
swL2macNotification = NotificationType((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 2, 0, 1)).setObjects(("DGS-3620-28SC-DC-L2MGMT-MIB", "swL2macNotifyInfo"))
if mibBuilder.loadTexts: swL2macNotification.setStatus('current')
if mibBuilder.loadTexts: swL2macNotification.setDescription(' This trap indicates the MAC address variations in the address table . ')
swL2PortSecurityViolationTrap = NotificationType((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 2, 0, 2)).setObjects(("PORT-SECURITY-MIB", "swPortSecPortIndex"), ("DGS-3620-28SC-DC-L2MGMT-MIB", "swL2PortSecurityViolationMac"))
if mibBuilder.loadTexts: swL2PortSecurityViolationTrap.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityViolationTrap.setDescription('When the port_security trap is enabled, new MAC addresses that violate the pre-defined port security configuration will trigger trap messages to be sent out.')
swl2NotificationBidings = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 2, 1))
swL2macNotifyInfo = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 2, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: swL2macNotifyInfo.setStatus('current')
if mibBuilder.loadTexts: swL2macNotifyInfo.setDescription('This object indicates information about the last time the system rebooted.')
swL2PortSecurityViolationMac = MibScalar((1, 3, 6, 1, 4, 1, 171, 11, 118, 9, 2, 100, 1, 2, 1, 2), MacAddress()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: swL2PortSecurityViolationMac.setStatus('current')
if mibBuilder.loadTexts: swL2PortSecurityViolationMac.setDescription('This object indicates the MAC address that violated the port security configuration.')
mibBuilder.exportSymbols("DGS-3620-28SC-DC-L2MGMT-MIB", swL2TrunkVLANPort=swL2TrunkVLANPort, swL2MirrorGroupEntry=swL2MirrorGroupEntry, swL2DevCtrlTelnetTcpPort=swL2DevCtrlTelnetTcpPort, swL2DevAlarmTopologyChange=swL2DevAlarmTopologyChange, swL2DevCtrlLLDPForwardMessageState=swL2DevCtrlLLDPForwardMessageState, swDevInfoNumOfPortInUse=swDevInfoNumOfPortInUse, swL2TrunkLACPPortIndex=swL2TrunkLACPPortIndex, swL2PortMgmt=swL2PortMgmt, swL2MirrorGroupPortSourceEngress=swL2MirrorGroupPortSourceEngress, swL2PortCtrlEntry=swL2PortCtrlEntry, swL2PortInfoMediumType=swL2PortInfoMediumType, swL2VlanPortInfoPortRole=swL2VlanPortInfoPortRole, swL2TrunkAlgorithm=swL2TrunkAlgorithm, swL2PortJumboFrameCtrlEntry=swL2PortJumboFrameCtrlEntry, swL2PortSecurityViolationTrap=swL2PortSecurityViolationTrap, swL2VlanPortInfoTable=swL2VlanPortInfoTable, swL2macNotificationSeverity=swL2macNotificationSeverity, swL2MACNotifyState=swL2MACNotifyState, swL2MirrorPortSourceEgress=swL2MirrorPortSourceEgress, swL2TrunkCtrlTable=swL2TrunkCtrlTable, swL2MgmtMIB=swL2MgmtMIB, swL2TrunkFloodingPort=swL2TrunkFloodingPort, swL2PortJumboFrameCtrlPortIndex=swL2PortJumboFrameCtrlPortIndex, swL2TrafficSegEntry=swL2TrafficSegEntry, swL2macNotification=swL2macNotification, swL2DevCtrlManagementVlanId=swL2DevCtrlManagementVlanId, swL2PortCtrlUnitIndex=swL2PortCtrlUnitIndex, swL2PortCtrlJumboFrame=swL2PortCtrlJumboFrame, swL2DevCtrlCFMMaMode=swL2DevCtrlCFMMaMode, swL2DevCtrlCFMMaEntry=swL2DevCtrlCFMMaEntry, swL2PortCtrlMDIXState=swL2PortCtrlMDIXState, swL2VLANMgmt=swL2VLANMgmt, swL2DevCtrlCFMMepMode=swL2DevCtrlCFMMepMode, swL2TrunkLACPPortTable=swL2TrunkLACPPortTable, VlanId=VlanId, swL2PortInfoUnitID=swL2PortInfoUnitID, swL2DevCtrlWeb=swL2DevCtrlWeb, PYSNMP_MODULE_ID=swL2MgmtMIB, swL2PortJumboFrameCtrlPortState=swL2PortJumboFrameCtrlPortState, swL2MulticastFilterMode=swL2MulticastFilterMode, swL2DevCtrlSnmpTrapState=swL2DevCtrlSnmpTrapState, swL2DevCtrlCFMPortEntry=swL2DevCtrlCFMPortEntry, swL2MirrorGroupRowStatus=swL2MirrorGroupRowStatus, swL2PortCounterCtrlEntry=swL2PortCounterCtrlEntry, swL2MulticastFilterModeVlanTable=swL2MulticastFilterModeVlanTable, swL2MirrorMgmt=swL2MirrorMgmt, swL2TrunkMasterPort=swL2TrunkMasterPort, swL2MgmtMIBTraps=swL2MgmtMIBTraps, swL2DevAlarmLinkChange=swL2DevAlarmLinkChange, swL2MACNotifyInterval=swL2MACNotifyInterval, swL2VLANAdvertisement=swL2VLANAdvertisement, swL2PortInfoEntry=swL2PortInfoEntry, swL2DevInfo=swL2DevInfo, swl2NotificationBidings=swl2NotificationBidings, swL2MirrorGroupLogicTargetPort=swL2MirrorGroupLogicTargetPort, swL2MulticastFilterModeVlanEntry=swL2MulticastFilterModeVlanEntry, swL2PortCtrlNwayState=swL2PortCtrlNwayState, swL2TrunkCurrentNumEntries=swL2TrunkCurrentNumEntries, IANAifMauAutoNegCapBits=IANAifMauAutoNegCapBits, swL2NotifFirmware=swL2NotifFirmware, swL2TrafficSegForwardPorts=swL2TrafficSegForwardPorts, swL2DevCtrlCFMMaTable=swL2DevCtrlCFMMaTable, swL2PortInfoErrorDisabled=swL2PortInfoErrorDisabled, swL2PortCounterCtrlTable=swL2PortCounterCtrlTable, swL2DevAlarmNewRoot=swL2DevAlarmNewRoot, swL2DevCtrlTelnet=swL2DevCtrlTelnet, swL2PortCtrlTable=swL2PortCtrlTable, swL2MirrorPortState=swL2MirrorPortState, swL2MirrorGroupPortSourceIngress=swL2MirrorGroupPortSourceIngress, swL2PVIDAutoAssignmentState=swL2PVIDAutoAssignmentState, swL2PortCtrlLearningState=swL2PortCtrlLearningState, swL2DevCtrlWebTcpPort=swL2DevCtrlWebTcpPort, swL2PortInfoLinkStatus=swL2PortInfoLinkStatus, swL2MirrorGroupTable=swL2MirrorGroupTable, 
swL2DevCtrlCleanAllStatisticCounter=swL2DevCtrlCleanAllStatisticCounter, swL2NotifyMgmt=swL2NotifyMgmt, swL2DevCtrlCFMMepEntry=swL2DevCtrlCFMMepEntry, swL2DevCtrlLLDPState=swL2DevCtrlLLDPState, swL2DevCtrlIpAutoconfig=swL2DevCtrlIpAutoconfig, swL2TrafficSegPort=swL2TrafficSegPort, swL2TrunkState=swL2TrunkState, swL2DevMgmt=swL2DevMgmt, swL2TrunkMaxSupportedEntries=swL2TrunkMaxSupportedEntries, swL2PortCtrlJumboFrameMaxSize=swL2PortCtrlJumboFrameMaxSize, swL2VlanPortInfoVid=swL2VlanPortInfoVid, swL2PortCtrlMediumType=swL2PortCtrlMediumType, swL2TrunkMember=swL2TrunkMember, swL2MirrorGroupID=swL2MirrorGroupID, PortList=PortList, swL2PortCtrlPortIndex=swL2PortCtrlPortIndex, swL2PortInfoType=swL2PortInfoType, swL2PortCounterCtrlPortIndex=swL2PortCounterCtrlPortIndex, swL2MulticastFilterVlanMode=swL2MulticastFilterVlanMode, swL2DevCtrlCFMMepTable=swL2DevCtrlCFMMepTable, swL2PortInfoTable=swL2PortInfoTable, swL2DevCtrlCFMPortIndex=swL2DevCtrlCFMPortIndex, swL2MirrorPortSourceIngress=swL2MirrorPortSourceIngress, swL2VlanIndex=swL2VlanIndex, swL2DevCtrlVLANTrunkState=swL2DevCtrlVLANTrunkState, swL2DevAlarm=swL2DevAlarm, swL2MirrorLogicTargetPort=swL2MirrorLogicTargetPort, MacAddress=MacAddress, swL2TrunkVLANState=swL2TrunkVLANState, swL2TrunkMgmt=swL2TrunkMgmt, swL2NotifyPrefix=swL2NotifyPrefix, swL2DevCtrl=swL2DevCtrl, swL2TrunkIndex=swL2TrunkIndex, swL2PortInfoPortIndex=swL2PortInfoPortIndex, swL2VlanPortInfoPortIndex=swL2VlanPortInfoPortIndex, swL2DevCtrlCFM=swL2DevCtrlCFM, swL2DevCtrlTelnetState=swL2DevCtrlTelnetState, swL2MACNotifyHistorySize=swL2MACNotifyHistorySize, swL2VlanStaticTable=swL2VlanStaticTable, swL2TrafficSegTable=swL2TrafficSegTable, swL2VlanPortInfoEntry=swL2VlanPortInfoEntry, swL2VlanStaticEntry=swL2VlanStaticEntry, swL2NniGvrpBpduAddress=swL2NniGvrpBpduAddress, swL2DevCtrlWebState=swL2DevCtrlWebState, swL2TrunkLACPPortEntry=swL2TrunkLACPPortEntry, swL2PortCounterClearCtrl=swL2PortCounterClearCtrl, swL2Notify=swL2Notify, swL2DevCtrlVlanIdOfFDBTbl=swL2DevCtrlVlanIdOfFDBTbl, swL2TrunkVLANTable=swL2TrunkVLANTable, swL2DevCtrlAsymVlanState=swL2DevCtrlAsymVlanState, swL2DevCtrlCFMState=swL2DevCtrlCFMState, swL2PortSecurityViolationMac=swL2PortSecurityViolationMac, swL2PortCtrlMACNotifyState=swL2PortCtrlMACNotifyState, swL2macNotifyInfo=swL2macNotifyInfo, swL2DevCtrlCFMPortTable=swL2DevCtrlCFMPortTable, swL2PortJumboFrameCtrlTable=swL2PortJumboFrameCtrlTable, swL2TrunkType=swL2TrunkType, swL2PortInfoNwayStatus=swL2PortInfoNwayStatus, swL2MulticastFilterVid=swL2MulticastFilterVid, swL2PortCtrlAdminState=swL2PortCtrlAdminState, swL2TrunkLACPPortState=swL2TrunkLACPPortState, swL2PortCtrlFlowCtrlState=swL2PortCtrlFlowCtrlState, swL2TrafficSegMgmt=swL2TrafficSegMgmt, swL2PortSecurityViolationSeverity=swL2PortSecurityViolationSeverity, swL2MirrorGroupState=swL2MirrorGroupState, swL2TrunkCtrlEntry=swL2TrunkCtrlEntry, swDevInfoTotalNumOfPort=swDevInfoTotalNumOfPort, swL2TrunkVLANEntry=swL2TrunkVLANEntry, swL2DevCtrlCFMPortState=swL2DevCtrlCFMPortState)
| 150.883777 | 6,495 | 0.779186 |
de2c7228ca227f2e7c68dc59fd9e80f020d5efba
| 2,421 |
py
|
Python
|
script.module.streamlink.base/resources/lib/streamlink/plugins/vrtbe.py
|
bobbybark/tantrumrepo
|
1451c481254d3fedec9f430139d18db7312a9b1a
|
[
"Beerware"
] | 3 |
2020-03-03T13:21:44.000Z
|
2021-07-21T09:53:31.000Z
|
script.module.streamlink.base/resources/lib/streamlink/plugins/vrtbe.py
|
eggman19/tantrumrepo
|
1451c481254d3fedec9f430139d18db7312a9b1a
|
[
"Beerware"
] | null | null | null |
script.module.streamlink.base/resources/lib/streamlink/plugins/vrtbe.py
|
eggman19/tantrumrepo
|
1451c481254d3fedec9f430139d18db7312a9b1a
|
[
"Beerware"
] | 1 |
2018-08-30T20:04:34.000Z
|
2018-08-30T20:04:34.000Z
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import validate
from streamlink.stream import HDSStream
from streamlink.stream import HLSStream
from streamlink.utils import parse_json
_url_re = re.compile(r'''https?://www\.vrt\.be/vrtnu/(?:kanalen/(?P<channel>[^/]+)|\S+)''')
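# \173 and \175 are octal escapes for '{' and '}': this grabs the first inline JSON object embedded in the page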
_json_re = re.compile(r'''(\173[^\173\175]+\175)''')
API_LIVE = 'https://services.vrt.be/videoplayer/r/live.json'
API_VOD = 'https://mediazone.vrt.be/api/v1/{0}/assets/{1}'
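# Expected shape of the mediazone asset response: a list of target URLs, each with a delivery type (HDS/HLS/...) and a URL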
_stream_schema = validate.Schema({
'targetUrls': [
{
'type': validate.text,
'url': validate.text
},
],
})
class VRTbe(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_live_stream(self, channel):
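        # Live streams are published in live.json under a "vualto_<channel>" key that holds the HLS URL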
channel = 'vualto_{0}'.format(channel)
_live_json_re = re.compile(r'''"{0}":\s(\173[^\173\175]+\175)'''.format(channel))
res = http.get(API_LIVE)
match = _live_json_re.search(res.text)
if not match:
return
data = parse_json(match.group(1))
hls_url = data['hls']
if hls_url:
for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
yield s
def _get_vod_stream(self):
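        # The VOD page exposes a <url>.securevideo.json document containing the client id and media zone id
        # needed to query the mediazone assets API for the actual stream manifests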
vod_url = self.url
if vod_url.endswith('/'):
vod_url = vod_url[:-1]
json_url = '{0}.securevideo.json'.format(vod_url)
res = http.get(json_url)
match = _json_re.search(res.text)
if not match:
return
data = parse_json(match.group(1))
res = http.get(API_VOD.format(data['clientid'], data['mzid']))
data = http.json(res, schema=_stream_schema)
for d in data['targetUrls']:
if d['type'] == 'HDS':
hds_url = d['url']
for s in HDSStream.parse_manifest(self.session, hds_url).items():
yield s
if d['type'] == 'HLS':
hls_url = d['url']
for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
yield s
def _get_streams(self):
match = _url_re.match(self.url)
channel = match.group('channel')
if channel:
return self._get_live_stream(channel)
else:
return self._get_vod_stream()
__plugin__ = VRTbe
| 28.482353 | 91 | 0.584469 |
765d36958e18d120f3ab780bb837229d94d39b1a
| 18,871 |
py
|
Python
|
glumpy/app/viewport.py
|
Frekby/glumpy
|
7a71151ac208766d9697737cb7978bb9d12aa243
|
[
"BSD-3-Clause"
] | 1,074 |
2015-01-02T07:52:35.000Z
|
2022-03-28T08:58:55.000Z
|
glumpy/app/viewport.py
|
Frekby/glumpy
|
7a71151ac208766d9697737cb7978bb9d12aa243
|
[
"BSD-3-Clause"
] | 273 |
2015-01-02T19:49:30.000Z
|
2021-12-15T11:02:53.000Z
|
glumpy/app/viewport.py
|
Frekby/glumpy
|
7a71151ac208766d9697737cb7978bb9d12aa243
|
[
"BSD-3-Clause"
] | 206 |
2015-01-01T10:51:53.000Z
|
2022-03-07T13:52:13.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
"""
from . window import event
from glumpy.log import log
from glumpy import gloo, gl, library
class ViewportDispatcher(event.EventDispatcher):
def __init__(self):
pass
ViewportDispatcher.register_event_type('on_enter')
ViewportDispatcher.register_event_type('on_leave')
ViewportDispatcher.register_event_type('on_resize')
ViewportDispatcher.register_event_type('on_mouse_motion')
ViewportDispatcher.register_event_type('on_mouse_drag')
ViewportDispatcher.register_event_type('on_mouse_press')
ViewportDispatcher.register_event_type('on_mouse_release')
ViewportDispatcher.register_event_type('on_mouse_scroll')
ViewportDispatcher.register_event_type('on_character')
ViewportDispatcher.register_event_type('on_key_press')
ViewportDispatcher.register_event_type('on_key_release')
ViewportDispatcher.register_event_type('on_draw')
class Viewport(event.EventDispatcher):
"""
A Viewport represents a rectangular area on a window.
:param size: Requested size as (width, height)
:param position: Requested position as (x,y)
:param anchor: Anchor point as (x,y)
:param float aspect: Aspect (= width/height).
The size and the position are always relative to the parent viewport. They
may be given in pixels (int) or as a percentage (float) of parent viewport
size. Positive or negative values are accepted.
.. important::
The viewport class works in conjunction with the Viewport transform that
       ensures actual positioning and sizing within a shader program.
Let's consider a root viewport of size 400x400 and a child viewport:
**Absolute size**
.. code:: python
        viewport = Viewport((400,400))
        child = Viewport((100,100))
viewport.add(child)
# Child size is 100x100 (pixels)
.. code:: python
        viewport = Viewport((400,400))
        child = Viewport((-100, -100))
viewport.add(child)
# Child size is (400-100) x (400-100) = 300 x 300 (pixels)
**Relative size**
.. code:: python
        viewport = Viewport((400,400))
        child = Viewport((0.5, 0.5))
viewport.add(child)
# Child size is 400*0.5 x 400*0.5 = 200 x 200 (pixels)
# Child size is 200x200 pixels.
.. code:: python
        viewport = Viewport((400,400))
        child = Viewport((-0.125, -0.125))
viewport.add(child)
        # Child size is (400*(1-0.125)) x (400*(1-0.125)) = 350 x 350 (pixels)
.. note::
It is also possible to define an aspect (width/height) that will be
enforced anytime.
    Positioning the viewport inside the parent viewport is also done using
absolute or relative coordinates.
**Absolute position**
.. code:: python
viewport = Viewport(size=(400,400), position=(0,0))
child = Viewport(size=(100,100), position=(10,10))
viewport.add(child)
# Child position is +10+10 (pixels)
.. code:: python
viewport = Viewport(size=(400,400), position=(0,0))
child = Viewport(size=(100,100), position=(-10,-10))
viewport.add(child)
# Child position is +(400-10)+(400-10) = +390+390 (pixels)
**Relative position**
.. code:: python
viewport = Viewport(size=(400,400), position=(0,0))
child = Viewport(size=(100,100), position=(0.25,0.25))
viewport.add(child)
# Child position is +(400*0.25)+(400*0.25) = +100+100 (pixels)
.. code:: python
viewport = Viewport(size=(400,400), position=(0,0))
child = Viewport(size=(100,100), position=(-0.25,-0.25))
viewport.add(child)
# Child position is +(400*(1-0.25))+(400*(1-0.25)) = +300+300 (pixels)
.. note::
The final position of the viewport relates to the anchor point which
       can also be set in absolute or relative coordinates.
The order of rendering is done according to the order of the viewport
hierarchy, starting from the root viewport.
"""
# Internal id counter to keep track of created objects
_idcount = 0
def __init__(self, size=(800,600), position=(0,0), anchor=(0,0), aspect=None):
"""
Create a new viewport with requested size and position.
        Parameters
        ----------
        size : tuple of two ints or floats
            Requested size as (width, height), in pixels (int) or relative
            to the parent viewport size (float)
        position : tuple of two ints or floats
            Requested position as (x, y), in pixels (int) or relative
            to the parent viewport size (float)
        anchor : tuple of two ints or floats
            Anchor point as (x, y)
        aspect : float
            Aspect (= width/height) to be enforced, or None
        """
self._parent = None
self._children = []
self._active_viewports = []
self._dispatcher = ViewportDispatcher()
# Aspect ratio (width/height)
self._aspect = aspect
if aspect:
log.info("Enforcing viewport aspect ratio (%g)" % aspect)
# Anchor point for placement
self._anchor = anchor
# Requested size & position (may be honored or not, depending on parent)
# (relative or absolute coordinates)
self._requested_size = size
self._requested_position = position
# Clipped size & position (used for glScissor)
# (absolute coordinates)
self._scissor_size = size
self._scissor_position = position
# Viewport size & position (used for glViewport)
# (absolute coordinates)
self._viewport_size = size
self._viewport_position = position
        # Whether viewport is active (cursor is inside)
self._active = False
# Viewport id
self._id = Viewport._idcount
Viewport._idcount += 1
def event(self, *args):
return self._dispatcher.event(*args)
def attach(self, *args, **kwargs):
self.dispatcher.push_handlers(*args, **kwargs)
def add(self, child):
""" Add a new child to the viewport """
child._parent = self
self._children.append(child)
def __getitem__(self, index):
"""Get children using index"""
return self._children[index]
@property
def dispatcher(self):
""" Event dispatcher """
return self._dispatcher
@property
def name(self):
""" Viewport name """
return "VP%d" % (self._id)
@property
def active(self):
""" Whether viewport is active """
return self._active
@active.setter
def active(self, value):
""" Whether viewport is active """
self._active = value
for child in self._children:
child.active = value
@property
def root(self):
""" Root viewport """
if not self._parent:
return self
return self._parent.root
@property
def parent(self):
""" Parent viewport """
return self._parent
@property
def extents(self):
""" Actual position and size of the viewport """
x,y = self._viewport_position
w,h = self._viewport_size
return x, y, w, h
@property
def scissor(self):
""" Actual position and size of the scissor """
x,y = self._scissor_position
w,h = self._scissor_size
return x, y, w, h
@property
def size(self):
""" Actual size of the viewport """
return self._viewport_size
@size.setter
def size(self, size):
""" Actual size of the viewport """
self._requested_size = size
self.root._compute_viewport()
@property
def position(self):
""" Actual position of the viewport """
return self._viewport_position
@position.setter
def position(self, position):
""" Actual position of the viewport """
self._requested_position = position
self.root._compute_viewport()
def _compute_viewport(self):
""" Compute actual viewport in absolute coordinates """
# Root requests are always honored, modulo the aspect
if self.parent is None:
w,h = self._requested_size
if self._aspect:
h = w * self._aspect
if h > self._requested_size[1]:
h = self._requested_size[1]
w = h/self._aspect
x = (self._requested_size[0] - w)/2
y = (self._requested_size[1] - h)/2
self._position = x,y
self._size = w,h
self._viewport_position = x,y
self._viewport_size = w,h
self._scissor_position = x,y
self._scissor_size = w,h
for child in self._children:
child._compute_viewport()
return
# Children viewport request depends on parent viewport
pvx, pvy = self.parent._viewport_position
pvw, pvh = self.parent._viewport_size
psx, psy = self.parent._scissor_position
psw, psh = self.parent._scissor_size
# Relative width (to actual parent viewport)
# ------------------------------------------
if self._requested_size[0] <= -1.0:
vw = max(pvw + self._requested_size[0],0)
elif self._requested_size[0] < 0.0:
vw = max(pvw + self._requested_size[0]*pvw,0)
elif self._requested_size[0] <= 1.0:
vw = self._requested_size[0]*pvw
# Absolute width
else:
vw = self._requested_size[0]
vw = int(round(vw))
# Enforce aspect first
if self._aspect:
vh = self._aspect*vw
if vh > pvh and -1 < self._requested_size[0] <= 1:
vh = pvh
vw = vh/self._aspect
# Relative height (to actual parent viewport)
# -------------------------------------------
else:
if self._requested_size[1] <= -1.0:
vh = max(pvh + self._requested_size[1],0)
elif self._requested_size[1] < 0.0:
vh = max(pvh + self._requested_size[1]*pvh,0)
elif self._requested_size[1] <= 1.0:
vh = self._requested_size[1]*pvh
# Absolute height
else:
vh = self._requested_size[1]
vh = int(round(vh))
# X anchor
# ---------------------------------------
if self._anchor[0] <= -1.0:
ax = vw + self._anchor[0]
elif self._anchor[0] < 0.0:
ax = vw + self._anchor[0]*vw
elif self._anchor[0] < 1.0:
ax = self._anchor[0]*vw
else:
ax = self._anchor[0]
ax = int(round(ax))
# X positioning
# ---------------------------------------
if self._requested_position[0] <= -1.0:
vx = pvw + self._requested_position[0]
elif -1.0 < self._requested_position[0] < 0.0:
vx = pvw + self._requested_position[0]*pvw
elif 0.0 <= self._requested_position[0] < 1.0:
vx = self._requested_position[0]*pvw
else:
vx = self._requested_position[0]
vx = int(round(vx)) + pvx - ax
# Y anchor
# ---------------------------------------
if self._anchor[1] <= -1.0:
ay = vh + self._anchor[1]
elif -1.0 < self._anchor[1] < 0.0:
ay = vh + self._anchor[1]*vh
elif 0.0 <= self._anchor[1] < 1.0:
ay = self._anchor[1]*vh
else:
ay = self._anchor[1]
ay = int(round(ay))
# Y positioning
# ---------------------------------------
if self._requested_position[1] <= -1.0:
vy = pvh + self._requested_position[1] #- vh
elif -1.0 < self._requested_position[1] < 0.0:
vy = pvh + self._requested_position[1]*pvh
elif 0.0 <= self._requested_position[1] < 1.0:
vy = self._requested_position[1]*pvh
else:
vy = self._requested_position[1]
vy = int(round(vy)) + pvy - ay
# Compute scissor size & position
sx = max(pvx,vx)
sy = max(pvy,vy)
sw = max(min(psw-(sx-pvx)-1,vw), 0)
sh = max(min(psh-(sy-pvy)-1,vh), 0)
# Update internal information
self._viewport_size = vw, vh
self._viewport_position = vx, vy
self._scissor_size = sw, sh
self._scissor_position = sx, sy
# Update children
for child in self._children:
child._compute_viewport()
def __contains__(self, xy):
x,y = xy
# WARN: mouse pointer is usually upside down
y = self.root.size[1] - y
xmin = self._viewport_position[0]
xmax = xmin + self._viewport_size[0]
ymin = self._viewport_position[1]
ymax = ymin + self._viewport_size[1]
return xmin <= x < xmax and ymin <= y < ymax
# def lock(self):
# vx, vy = self._viewport_position
# vw, vh = self._viewport_size
# sx, sy = self._scissor_position
# sw, sh = self._scissor_size
# gl.glPushAttrib( gl.GL_VIEWPORT_BIT | gl.GL_SCISSOR_BIT )
# gl.glViewport( vx, vy, vw, vh )
# gl.glEnable( gl.GL_SCISSOR_TEST )
# gl.glScissor( sx, sy, sw+1, sh+1 )
# def unlock(self):
# gl.glPopAttrib( )
def on_draw(self, dt):
# Root viewport
if self.parent is None:
# gl.glEnable(gl.GL_SCISSOR_TEST)
# gl.glViewport(*self.viewport)
# gl.glScissor(*self.scissor)
self.dispatcher.dispatch_event("on_draw", dt)
for child in self._children:
# x,y = child._viewport_position
# w,h = child._viewport_size
# gl.glViewport(x,y,w,h)
# x,y = child._scissor_position
# w,h = child._scissor_size
# gl.glScissor(x,y,w+1,h+1)
# WARNING
# Order is important because the direct 'on_draw' event on child
# may result in a viewport/scissor modification.
child.dispatcher.dispatch_event("on_draw", dt)
child.dispatch_event("on_draw", dt)
# if self.parent is None:
# gl.glDisable(gl.GL_SCISSOR_TEST)
# gl.glViewport(*self.viewport)
def on_resize(self, width, height):
        if self.parent is None:
self._requested_size = width, height
self._compute_viewport()
self.dispatcher.dispatch_event("on_resize", self.size[0], self.size[1])
for child in self._children:
child.dispatch_event("on_resize", width, height)
def on_key_press(self, key, modifiers):
""" Default key handler that close window on escape """
pass
# if key == window.key.ESCAPE:
# self.close()
# return True
def on_mouse_press(self, x, y, button):
self.dispatcher.dispatch_event("on_mouse_press",
x-self.position[0], y-self.position[1], button)
        if self.parent is None:
self._active_viewports = []
for child in self._children:
if (x,y) in child:
self.root._active_viewports.append(child)
ox, oy = child.position
child.dispatch_event("on_mouse_press", x, y, button)
def on_mouse_release(self, x, y, button):
self.dispatcher.dispatch_event(
"on_mouse_release", x-self.position[0], y-self.position[1], button)
        if self.parent is None:
for child in self._active_viewports:
ox, oy = child.position
child.dispatch_event("on_mouse_release", x, y, button)
def on_mouse_drag(self, x, y, dx, dy, button):
self.dispatcher.dispatch_event(
"on_mouse_drag", x-self.position[0], y-self.position[1], dx, dy, button)
        if self.parent is None:
if len(self.root._active_viewports):
#child = self.root._active_viewports[-1]
for child in self.root._active_viewports:
ox, oy = child.position
child.dispatch_event("on_mouse_drag", x, y, dx, dy, button)
def on_mouse_scroll(self, x, y, dx, dy):
self.dispatcher.dispatch_event(
"on_mouse_scroll", x-self.position[0], y-self.position[1], dx, dy)
        if self.parent is None:
if self.root._active_viewports:
# child = self.root._active_viewports[-1]
for child in self.root._active_viewports:
ox, oy = child.position
child.dispatch_event("on_mouse_scroll", x, y, dx, dy)
def on_mouse_motion(self, x, y, dx, dy):
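        # Dispatch on this viewport first, then update enter/leave state and forward the event to the child under the cursor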
self.dispatcher.dispatch_event(
"on_mouse_motion", x-self.position[0], y-self.position[1], dx, dy)
for child in self._children:
ox, oy = child.position
if (x,y) in child:
if not child._active:
child.dispatch_event("on_enter")
child.dispatcher.dispatch_event("on_enter")
self.active = False
child._active = True
child.dispatch_event("on_mouse_motion", x, y, dx, dy)
else:
if child._active:
child.dispatch_event("on_leave")
child.active = False
if (x,y) in self:
self._active = True
def __replines__(self):
""" ASCII display of trees by Andrew Cooke """
yield "%s (%dx%d%+d%+d)" % (self.name,
self.size[0], self.size[1],
self.position[0], self.position[1])
last = self._children[-1] if self._children else None
for child in self._children:
prefix = '└── ' if child is last else '├── '
for line in child.__replines__():
yield prefix + line
prefix = ' ' if child is last else '│ '
def __str__(self):
return '\n'.join(self.__replines__()) + '\n'
# Viewport events
Viewport.register_event_type('on_enter')
Viewport.register_event_type('on_leave')
Viewport.register_event_type('on_resize')
Viewport.register_event_type('on_mouse_motion')
Viewport.register_event_type('on_mouse_drag')
Viewport.register_event_type('on_mouse_press')
Viewport.register_event_type('on_mouse_release')
Viewport.register_event_type('on_mouse_scroll')
Viewport.register_event_type('on_character')
Viewport.register_event_type('on_key_press')
Viewport.register_event_type('on_key_release')
Viewport.register_event_type('on_draw')
# Window events
#Viewport.register_event_type('on_init')
#Viewport.register_event_type('on_show')
#Viewport.register_event_type('on_hide')
#Viewport.register_event_type('on_close')
#Viewport.register_event_type('on_idle')
| 30.986864 | 86 | 0.56971 |
a7cde93e6663ea44b5b62ed751b892ba7bd195e2
| 1,716 |
py
|
Python
|
libs/python/config/boost.py
|
ZCube/boost-cmake
|
f1eca5534ab6c9bc89cf7ee4670f056503b7ba86
|
[
"BSL-1.0"
] | 918 |
2016-12-22T02:53:08.000Z
|
2022-03-22T06:21:35.000Z
|
libs/python/config/boost.py
|
ZCube/boost-cmake
|
f1eca5534ab6c9bc89cf7ee4670f056503b7ba86
|
[
"BSL-1.0"
] | 203 |
2016-12-27T12:09:03.000Z
|
2022-03-30T20:46:55.000Z
|
libs/python/config/boost.py
|
ZCube/boost-cmake
|
f1eca5534ab6c9bc89cf7ee4670f056503b7ba86
|
[
"BSL-1.0"
] | 122 |
2016-12-22T17:38:09.000Z
|
2022-02-22T14:25:49.000Z
|
#
# Copyright (c) 2016 Stefan Seefeld
# All rights reserved.
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from . import ui
import os
def add_options(vars):
ui.add_option("--boost-prefix", dest="boost_prefix", type="string", nargs=1, action="store",
metavar="DIR", default=os.environ.get("BOOST_DIR"),
help="prefix for Boost libraries; should have 'include' and 'lib' subdirectories, 'boost' and 'stage\\lib' subdirectories on Windows")
ui.add_option("--boost-include", dest="boost_include", type="string", nargs=1, action="store",
metavar="DIR", help="location of Boost header files")
ui.add_option("--boostbook-prefix", dest="boostbook_prefix", type="string",
nargs=1, action="store",
metavar="DIR", default="/usr/share/boostbook",
help="prefix for BoostBook stylesheets")
def check(context):
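    # SCons configure check: try to compile a snippet that includes <boost/config.hpp>,
    # using the include path derived from --boost-include or --boost-prefix when given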
boost_source_file = r"#include <boost/config.hpp>"
context.Message('Checking for Boost...')
boost_prefix = context.env.GetOption('boost_prefix')
boost_include = context.env.GetOption('boost_include')
boostbook_prefix = context.env.GetOption('boostbook_prefix')
incpath=None
if boost_include:
incpath=boost_include
elif boost_prefix:
incpath=boost_prefix
if incpath:
context.env.AppendUnique(CPPPATH=[incpath])
if not context.TryCompile(boost_source_file, '.cpp'):
context.Result(0)
return False
context.env.AppendUnique(boostbook_prefix=boostbook_prefix)
context.Result(1)
return True
| 37.304348 | 152 | 0.670163 |
e4f0fc47f6662bc1fb55741363ab0fa10b127821
| 5,407 |
py
|
Python
|
src/pycture/dialogs/segments_input.py
|
miguel-martinr/Pycture
|
f174699f620244dd188cb1650e3455f553cb5090
|
[
"MIT"
] | null | null | null |
src/pycture/dialogs/segments_input.py
|
miguel-martinr/Pycture
|
f174699f620244dd188cb1650e3455f553cb5090
|
[
"MIT"
] | 51 |
2021-10-06T02:40:17.000Z
|
2022-01-13T12:45:43.000Z
|
src/pycture/dialogs/segments_input.py
|
miguel-martinr/Pycture
|
f174699f620244dd188cb1650e3455f553cb5090
|
[
"MIT"
] | 1 |
2022-01-17T16:10:25.000Z
|
2022-01-17T16:10:25.000Z
|
from typing import List
from PyQt5.QtWidgets import (QDialog, QLabel, QLineEdit, QMainWindow,
QPushButton, QVBoxLayout, QHBoxLayout,
QLayout, QSizePolicy, QWidget)
from PyQt5.QtCore import Qt, pyqtSignal as Signal  # PyQt5 exposes pyqtSignal rather than Signal
from matplotlib import pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from pycture.editor.image.color import Color
from .notification import Notification
from .widgets import RGBCheckboxes, PointsInput, CustomIntValidator, DropdownList
PLOT_LINE_COLOR = "#0000ff"
PLOT_LINE_WIDTH = 3
class SegmentsInput(QDialog):
applied = Signal(str, list, tuple) # Editor, list of points and color options
# It is guaranteed that there will be at least two points
def __init__(self, parent: QMainWindow, editors: List[str]):
super().__init__(parent, Qt.WindowType.Window)
self.setWindowTitle("Linear transformation by segments")
self.layout = QHBoxLayout()
self.setLayout(self.layout)
self.graph_figure = None
self.setup_options_layout(editors)
graph = self.preview_transformation()
graph.setFixedSize(graph.size())
self.layout.addWidget(graph)
self.layout.setSizeConstraint(QLayout.SetFixedSize)
self.show()
def setup_options_layout(self, editors: List[str]):
options_layout = QVBoxLayout()
self.layout.addLayout(options_layout)
self.dropdown = DropdownList(self, editors)
options_layout.addWidget(self.dropdown)
self.checkboxes = RGBCheckboxes(self)
options_layout.addWidget(self.checkboxes)
self.setup_points_input(options_layout)
separator = QWidget()
separator.setMinimumSize(0, 0)
separator.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
options_layout.addWidget(separator)
apply_button = QPushButton("Apply")
apply_button.clicked.connect(self.emit_applied)
options_layout.addWidget(apply_button)
def setup_points_input(self, layout: QVBoxLayout):
layout.addWidget(QLabel("Number of segments:", self))
self.number_of_points_input = QLineEdit("1", self)
self.number_of_points_input.setValidator(CustomIntValidator(1, 25))
layout.addWidget(self.number_of_points_input)
accept_button = QPushButton("Accept", self)
layout.addWidget(accept_button)
self.points_input = PointsInput(self, CustomIntValidator(0, 255))
self.points_input.points_changed.connect(self.update_graph)
self.points_input.set_number_of_points(2)
layout.addWidget(self.points_input)
accept_button.clicked.connect(self.update_number_of_points)
def update_number_of_points(self):
number_of_points = int(self.number_of_points_input.text()) + 1
self.points_input.set_number_of_points(number_of_points)
def emit_applied(self):
points = self.get_points()
if points is None:
return
editor = self.dropdown.currentText()
if self.parent().get_editor(editor) is None:
Notification(self, "An active image must be chosen")
return
color_options = self.checkboxes.get_checked()
self.applied.emit(editor, points, color_options)
def get_points(self):
points = self.points_input.get_points()
if self.check_points_integrity(points):
return points
Notification(self, "The x coordinates of the points must be monotonically increasing")
def check_points_integrity(self, points):
        points_x = list(map(lambda point: point[0], points))
for i in range(len(points_x) - 1):
if points_x[i] >= points_x[i + 1]:
return False
return True
def set_dropdown_image(self, editor: str):
self.dropdown.set_selected(editor)
def update_graph(self):
new_graph = self.preview_transformation()
new_graph.setFixedSize(new_graph.size())
old_graph = self.layout.itemAt(1).widget()
self.layout.replaceWidget(old_graph, new_graph)
old_graph.deleteLater()
def preview_transformation(self) -> FigureCanvasQTAgg:
plt.style.use('dark_background')
title = "Linear transformation"
if self.graph_figure is not None:
plt.close(self.graph_figure)
self.graph_figure = plt.figure()
points = self.points_input.get_points()
self.plot_changes(points)
self.plot_unchanged_areas(points)
plt.xlabel("Vin")
plt.ylabel("Vout")
plt.title(title)
plt.xlim(0, 255)
plt.ylim(0, 255)
return FigureCanvasQTAgg(self.graph_figure)
def plot_changes(self, points: List):
x = list(map(lambda point: point[0], points))
y = list(map(lambda point: point[1], points))
        plt.plot(x, y, color=PLOT_LINE_COLOR, linewidth=PLOT_LINE_WIDTH)
def plot_unchanged_areas(self, points: List):
if len(points) == 0:
return
if points[0][0] > 1:
x = [0, points[0][0]]
y = x
            plt.plot(x, y, color=PLOT_LINE_COLOR, linewidth=PLOT_LINE_WIDTH)
if points[-1][0] < 254:
x = [points[-1][0], 255]
y = x
            plt.plot(x, y, color=PLOT_LINE_COLOR, linewidth=PLOT_LINE_WIDTH)
| 38.347518 | 94 | 0.6597 |
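A minimal usage sketch for the dialog above, assuming window is the QMainWindow that implements get_editor() and editor_names lists the open editors:

def open_segments_dialog(window, editor_names):
    dialog = SegmentsInput(window, editor_names)
    def on_applied(editor, points, color_options):
        # points always holds at least two (x, y) pairs with strictly increasing x
        print(editor, points, color_options)
    dialog.applied.connect(on_applied)
    dialog.set_dropdown_image(editor_names[0])
    return dialog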
2317286cc5fcbdb7def17604f904887594ccdcb4
| 132 |
py
|
Python
|
041-selfpower.py
|
johnsonZhaoxin/ProjectEuler
|
74f3755d5f0e305897556b46515babe55429a834
|
[
"MIT"
] | null | null | null |
041-selfpower.py
|
johnsonZhaoxin/ProjectEuler
|
74f3755d5f0e305897556b46515babe55429a834
|
[
"MIT"
] | null | null | null |
041-selfpower.py
|
johnsonZhaoxin/ProjectEuler
|
74f3755d5f0e305897556b46515babe55429a834
|
[
"MIT"
] | null | null | null |
import math
def selfpower(x):
return pow(x,x)
total = 0
for i in range(1, 1001):
    total += selfpower(i)  # why is this kind of numeric computation so fast?
print(total)
| 18.857143 | 39 | 0.674242 |
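If only the last ten digits of the series are needed (the usual self-power puzzle), three-argument pow keeps every term small:

MOD = 10 ** 10
last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(last_ten)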
3b0bf00303c04c8aed3d4b0d35a51a06b01905c1
| 3,424 |
py
|
Python
|
tests/system/reliability/local_capacity/test_aggregate_local_capacity_contribution.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
tests/system/reliability/local_capacity/test_aggregate_local_capacity_contribution.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | null | null | null |
tests/system/reliability/local_capacity/test_aggregate_local_capacity_contribution.py
|
souissim/gridpath
|
4eeca2be24b485edc56026e38cfda83f4a6b27ea
|
[
"Apache-2.0"
] | 1 |
2021-12-21T20:44:21.000Z
|
2021-12-21T20:44:21.000Z
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from collections import OrderedDict
from importlib import import_module
import os.path
import sys
import unittest
from tests.common_functions import create_abstract_model, add_components_and_load_data
TEST_DATA_DIRECTORY = os.path.join(
os.path.dirname(__file__), "..", "..", "..", "test_data"
)
# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
"temporal.operations.timepoints",
"temporal.operations.horizons",
"temporal.investment.periods",
"geography.load_zones",
"geography.local_capacity_zones",
"project",
"project.capacity.capacity",
"system.reliability.local_capacity.local_capacity_requirement",
"project.reliability.local_capacity",
"project.reliability.local_capacity.local_capacity_contribution",
]
NAME_OF_MODULE_BEING_TESTED = (
"system.reliability.local_capacity.aggregate_local_capacity_contribution"
)
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
try:
imported_module = import_module("." + str(mdl), package="gridpath")
IMPORTED_PREREQ_MODULES.append(imported_module)
except ImportError:
print("ERROR! Module " + str(mdl) + " not found.")
sys.exit(1)
# Import the module we'll test
try:
MODULE_BEING_TESTED = import_module(
"." + NAME_OF_MODULE_BEING_TESTED, package="gridpath"
)
except ImportError:
print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED + " to test.")
class TestAggPrjSimpleLocalCapacity(unittest.TestCase):
""" """
def test_add_model_components(self):
"""
Test that there are no errors when adding model components
:return:
"""
create_abstract_model(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
def test_load_model_data(self):
"""
Test that data are loaded with no errors
:return:
"""
add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
def test_data_loaded_correctly(self):
"""
Test components initialized with data as expected
:return:
"""
m, data = add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=MODULE_BEING_TESTED,
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage="",
)
instance = m.create_instance(data)
if __name__ == "__main__":
unittest.main()
| 31.412844 | 87 | 0.690421 |
83421737e8131f411a5187bcc0308c61cd0afc90
| 3,746 |
py
|
Python
|
doc/lib/attributes.py
|
jkrayl/cc-utils
|
f006650b6093e6cddc08906905e642d86962cb39
|
[
"Apache-2.0"
] | null | null | null |
doc/lib/attributes.py
|
jkrayl/cc-utils
|
f006650b6093e6cddc08906905e642d86962cb39
|
[
"Apache-2.0"
] | null | null | null |
doc/lib/attributes.py
|
jkrayl/cc-utils
|
f006650b6093e6cddc08906905e642d86962cb39
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed
# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import textwrap
import typing
import enum
import ci.util
import concourse.model.base as base_model
# add repository root to pythonpath
sys.path.append(os.path.abspath('../..'))
class AttributesDocumentation(object):
def __init__(
self,
model_element_type,
prefix: str='',
):
self._model_element_type = model_element_type
self._child_elements = []
self._prefix = ci.util.check_type(prefix, str)
def add_child(
self,
model_element_type,
element_name: str,
):
if self._prefix:
child_prefix = '.'.join((self._prefix, element_name))
else:
child_prefix = element_name
child_documentation = AttributesDocumentation(
model_element_type,
prefix=child_prefix,
)
self._child_elements.append(child_documentation)
return child_documentation
def children(self):
return self._child_elements
def fill_table(self, table_builder):
if issubclass(self._model_element_type, enum.Enum):
table_builder.add_table_header(['value', 'explanation'])
else:
table_builder.add_table_header(['name', 'required?', 'default', 'type', 'explanation'])
def attr_to_table_row(attr_spec, prefix=None):
name = attr_spec.name()
required = 'yes' if attr_spec.is_required() else 'no'
default_value = attr_spec.default_value()
if callable(default_value):
default_value = default_value.__name__
else:
default_value = str(default_value)
doc = textwrap.dedent(attr_spec.doc())
type_ = attr_spec.type()
if isinstance(type_, typing._GenericAlias):
if type_.__origin__ is dict:
# assumption: type is typing.Dict[T1, T2]
key_type, val_type = type_.__args__
self.add_child(
model_element_type=val_type,
element_name=f'{name}.<user-chosen>'
)
type_str = type_._name + f'[{str(key_type)}, {str(val_type)}]'
elif type_.__origin__ is list:
type_str = type_._name + f'[{str(type_.__args__[0])}]'
elif issubclass(type_, base_model.AttribSpecMixin):
# recurse to child element
self.add_child(model_element_type=type_, element_name=name)
type_str = type_.__name__
else:
type_str = type_.__name__
if issubclass(self._model_element_type, enum.Enum):
table_builder.add_table_row((name, doc))
else:
table_builder.add_table_row((name, required, default_value, type_str, doc))
for attr_spec in self._model_element_type._attribute_specs():
attr_to_table_row(attr_spec)
return table_builder
| 34.685185 | 99 | 0.626535 |
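A minimal sketch of a table builder compatible with fill_table above; the real project supplies its own (reStructuredText) builder, so this is illustrative only:

class SimpleTableBuilder:
    def __init__(self):
        self.rows = []
    def add_table_header(self, columns):
        self.rows.append(tuple(columns))
    def add_table_row(self, row):
        self.rows.append(tuple(row))

# docs = AttributesDocumentation(model_element_type=SomeAttribSpecClass)
# table = docs.fill_table(SimpleTableBuilder())
# for row in table.rows:
#     print(row)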
9d3da3159e9d12baef58966aaff0ff66b60bf0c7
| 10,392 |
py
|
Python
|
scripts/check_code_format.py
|
mark-lunarg/Vulkan-ValidationLayers
|
fa0402e0556537045864e8784d86b3c14576081f
|
[
"Apache-2.0"
] | null | null | null |
scripts/check_code_format.py
|
mark-lunarg/Vulkan-ValidationLayers
|
fa0402e0556537045864e8784d86b3c14576081f
|
[
"Apache-2.0"
] | null | null | null |
scripts/check_code_format.py
|
mark-lunarg/Vulkan-ValidationLayers
|
fa0402e0556537045864e8784d86b3c14576081f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2020 Valve Corporation
# Copyright (c) 2020 LunarG, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mark Lobodzinski <[email protected]>
# Author: Mike Schuchardt <[email protected]>
# Author: Nathaniel Cesario <[email protected]>
# Author: Karl Schultz <[email protected]>
# Script to determine if source code in Pull Request is properly formatted.
#
# This script checks for:
# -- clang-format errors in the PR source code
# -- out-of-date copyrights in PR source files
# -- improperly formatted commit messages (using the function above)
#
# Notes:
# Exits with non 0 exit code if formatting is needed.
# Requires python3 to run correctly
# In standalone mode (outside of CI), changes must be rebased on master
# to get meaningful and complete results
import os
import argparse
import difflib
import re
import subprocess
import sys
from subprocess import check_output
from datetime import date
from argparse import RawDescriptionHelpFormatter
os.system("")
#
#
# Color print routine, takes a string matching a txtcolor above and the output string, resets color upon exit
def CPrint(msg_type, msg_string):
color = '\033[0m'
txtcolors = {'HELP_MSG': '\033[0;36m',
'SUCCESS_MSG': '\033[1;32m',
'CONTENT': '\033[1;39m',
'ERR_MSG': '\033[1;31m',
'NO_COLOR': '\033[0m'}
print(txtcolors.get(msg_type, txtcolors['NO_COLOR']) + msg_string + txtcolors['NO_COLOR'])
#
#
# Get list of files involved in this branch
target_files_data = subprocess.check_output(['git', 'diff', '--name-only', 'origin/master'])
target_files = target_files_data.decode('utf-8')
target_files = target_files.split("\n")
#
#
# Check clang-formatting of source code diff
def VerifyClangFormatSource():
CPrint('', "\nChecking PR source code for clang-format errors:")
retval = 0
    good_file_pattern = re.compile(r'.*\.(cpp|cc|c\+\+|cxx|c|h|hpp)$')
diff_files_list = [item for item in target_files if good_file_pattern.search(item)]
diff_files = ' '.join([str(elem) for elem in diff_files_list])
retval = 0
if diff_files != '':
git_diff = subprocess.Popen(('git', 'diff', '-U0', 'origin/master', '--', diff_files), stdout=subprocess.PIPE)
diff_files_data = subprocess.check_output(('python3', './scripts/clang-format-diff.py', '-p1', '-style=file'), stdin=git_diff.stdout)
diff_files_data = diff_files_data.decode('utf-8')
if diff_files_data != '':
CPrint('ERR_MSG', "\nFound formatting errors!")
CPrint('CONTENT', "\n" + diff_files_data)
retval = 1
else:
CPrint('SUCCESS_MSG', "\nThe modified source code in PR has been properly clang-formatted.\n\n")
return retval
#
#
# Check copyright dates for modified files
def VerifyCopyrights():
CPrint('', "\nChecking PR source files for correct copyright information:")
retval = 0
current_year = str(date.today().year)
for file in target_files:
if file is None or not os.path.isfile(file):
continue
copyright_match = re.search('Copyright (.)*LunarG', open(file, encoding="utf-8", errors='ignore').read(1024))
if copyright_match and current_year not in copyright_match.group(0):
CPrint('ERR_MSG', '\n' + file + " has an out-of-date copyright notice.")
            retval = 1
if retval == 0:
CPrint('SUCCESS_MSG', "\nThe modified source files have correct copyright dates.\n\n")
return retval
#
#
# Check commit message formats for commits in this PR/Branch
def VerifyCommitMessageFormat():
CPrint('', "\nChecking PR commit messages for consistency issues:")
retval = 0
# Construct correct commit list
pr_commit_range_parms = ['git', 'log', '--no-merges', '--left-only', 'HEAD...origin/master', '--pretty=format:"XXXNEWLINEXXX"%n%B']
commit_data = check_output(pr_commit_range_parms)
commit_text = commit_data.decode('utf-8')
if commit_text is None:
CPrint('SUCCESS_MSG', "\nNo commit messages were found for format checks.\n")
return retval
msg_cur_line = 0
msg_prev_line = ''
for msg_line_text in commit_text.splitlines():
msg_cur_line += 1
if 'XXXNEWLINEXXX' in msg_line_text:
msg_cur_line = 0
line_length = len(msg_line_text)
if msg_cur_line == 1:
# Enforce subject line must be 64 chars or less
if line_length > 64:
CPrint('ERR_MSG', "The following subject line exceeds 64 characters in length.")
CPrint('CONTENT', " '" + msg_line_text + "'\n")
retval = 1
            # Output error if the subject line ends with a period or comma
if msg_line_text[-1] in '.,':
CPrint('ERR_MSG', "For the following commit, the last character of the subject line must not be a period or comma.")
CPrint('CONTENT', " '" + msg_line_text + "'\n")
retval = 1
# Output error if subject line doesn't start with 'module: '
if 'Revert' not in msg_line_text:
module_name = msg_line_text.split(' ')[0]
if module_name[-1] != ':':
CPrint('ERR_MSG', "The following subject line must start with a single word specifying the functional area of the change, followed by a colon and space.")
CPrint('ERR_MSG', "e.g., 'layers: Subject line here' or 'corechecks: Fix off-by-one error in ValidateFences'.")
CPrint('ERR_MSG', "Other common module names include layers, build, cmake, tests, docs, scripts, stateless, gpu, syncval, practices, etc.")
CPrint('CONTENT', " '" + msg_line_text + "'\n")
retval = 1
else:
# Check if first character after the colon is lower-case
subject_body = msg_line_text.split(': ')[1]
if not subject_body[0].isupper():
CPrint('ERR_MSG', "The first word of the subject line after the ':' character must be capitalized.")
CPrint('CONTENT', " '" + msg_line_text + "'\n")
retval = 1
# Check that first character of subject line is not capitalized
if msg_line_text[0].isupper():
CPrint('ERR_MSG', "The first word of the subject line must be lower case.")
CPrint('CONTENT', " '" + msg_line_text + "'\n")
retval = 1
elif msg_cur_line == 2:
# Commit message must have a blank line between subject and body
if line_length != 0:
CPrint('ERR_MSG', "The following subject line must be followed by a blank line.")
CPrint('CONTENT', " '" + msg_prev_line + "'\n")
retval = 1
else:
# Lines in a commit message body must be less than 72 characters in length (but give some slack)
if line_length > 76:
CPrint('ERR_MSG', "The following commit message body line exceeds the 72 character limit.")
CPrint('CONTENT', " '" + msg_line_text + "'\n")
retval = 1
msg_prev_line = msg_line_text
if retval == 0:
CPrint('SUCCESS_MSG', "\nThe commit messages are properly formatted.\n\n")
else:
CPrint('HELP_MSG', "Commit Message Format Requirements:")
CPrint('HELP_MSG', "-----------------------------------")
CPrint('HELP_MSG', "o Subject lines must be <= 64 characters in length")
CPrint('HELP_MSG', "o Subject lines must start with a module keyword which is lower-case and followed by a colon and a space")
CPrint('HELP_MSG', "o The first word following the colon must be capitalized and the subject line must not end in a '.'")
CPrint('HELP_MSG', "o The subject line must be followed by a blank line")
CPrint('HELP_MSG', "o The commit description must be <= 72 characters in width\n")
CPrint('HELP_MSG', "Examples:")
CPrint('HELP_MSG', "---------")
CPrint('HELP_MSG', " build: Fix Vulkan header/registry detection for SDK")
CPrint('HELP_MSG', " tests: Fix QueryPerformanceIncompletePasses stride usage")
CPrint('HELP_MSG', " corechecks: Fix validation of VU 03227")
CPrint('HELP_MSG', " state_tracker: Remove 'using std::*' statements")
CPrint('HELP_MSG', " stateless: Account for DynStateWithCount for multiViewport\n")
CPrint('HELP_MSG', "Refer to this document for additional detail:")
CPrint('HELP_MSG', "https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/CONTRIBUTING.md#coding-conventions-and-formatting")
return retval
#
#
# Entrypoint
def main():
parser = argparse.ArgumentParser(description='''Usage: python3 ./scripts/check_code_format.py
    - Requires python3 and clang-format 7.0+
- Run script in repo root
- May produce inaccurate clang-format results if local branch is not rebased on origin/master
''', formatter_class=RawDescriptionHelpFormatter)
args = parser.parse_args()
if sys.version_info[0] != 3:
print("This script requires Python 3. Run script with [-h] option for more details.")
exit()
if os.path.isfile('check_code_format.py'):
os.chdir('..')
clang_format_failure = VerifyClangFormatSource()
copyright_failure = VerifyCopyrights()
commit_msg_failure = VerifyCommitMessageFormat()
if clang_format_failure or copyright_failure or commit_msg_failure:
CPrint('ERR_MSG', "\nOne or more format checks failed.\n\n")
exit(1)
else:
CPrint('SUCCESS_MSG', "\nAll format checks passed.\n\n")
exit(0)
if __name__ == '__main__':
main()
| 47.022624 | 174 | 0.637895 |
1757b0a33948f3751ba222e0173f6b0281c93b99
| 6,974 |
py
|
Python
|
tfx/orchestration/portable/mlmd/execution_lib_test.py
|
Jeffwan/tfx
|
efb3c78ac32f4c7c5979136aa0a11bffc287f236
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/portable/mlmd/execution_lib_test.py
|
Jeffwan/tfx
|
efb3c78ac32f4c7c5979136aa0a11bffc287f236
|
[
"Apache-2.0"
] | null | null | null |
tfx/orchestration/portable/mlmd/execution_lib_test.py
|
Jeffwan/tfx
|
efb3c78ac32f4c7c5979136aa0a11bffc287f236
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.portable.mlmd.execution_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.orchestration.portable import test_utils
from tfx.orchestration.portable.mlmd import common_utils
from tfx.orchestration.portable.mlmd import context_lib
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import standard_artifacts
from google.protobuf import text_format
from ml_metadata.proto import metadata_store_pb2
class ExecutionLibTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._connection_config = metadata_store_pb2.ConnectionConfig()
self._connection_config.sqlite.SetInParent()
def _generate_contexts(self, metadata_handler):
context_spec = pipeline_pb2.NodeContexts()
text_format.Parse(
"""
contexts {
type {name: 'pipeline_context'}
name {
field_value {string_value: 'my_pipeline'}
}
}
contexts {
type {name: 'component_context'}
name {
field_value {string_value: 'my_component'}
}
}""", context_spec)
return context_lib.register_contexts_if_not_exists(metadata_handler,
context_spec)
def testPrepareExecution(self):
with metadata.Metadata(connection_config=self._connection_config) as m:
execution_type = metadata_store_pb2.ExecutionType()
text_format.Parse(
"""
name: 'my_execution'
properties {
key: 'p2'
value: STRING
}
""", execution_type)
result = execution_lib.prepare_execution(
m,
execution_type,
exec_properties={
'p1': 1,
'p2': '2'
},
state=metadata_store_pb2.Execution.COMPLETE)
self.assertProtoEquals(
"""
type_id: 1
last_known_state: COMPLETE
properties {
key: 'p2'
value {
string_value: '2'
}
}
custom_properties {
key: 'p1'
value {
int_value: 1
}
}
""", result)
def testArtifactAndEventPairs(self):
model = standard_artifacts.Model()
model.uri = 'model'
example = standard_artifacts.Examples()
example.uri = 'example'
example.id = 1
expected_artifact_one = metadata_store_pb2.Artifact()
expected_artifact_two = metadata_store_pb2.Artifact()
text_format.Parse("""
type_id: 1
uri: 'model'""", expected_artifact_one)
text_format.Parse(
"""
id: 1
type_id: 2
uri: 'example'""", expected_artifact_two)
expected_event_one = metadata_store_pb2.Event()
expected_event_two = metadata_store_pb2.Event()
text_format.Parse(
"""
path {
steps {
key: 'model'
}
steps {
index: 0
}
}
type: INPUT""", expected_event_one)
text_format.Parse(
"""
path {
steps {
key: 'example'
}
steps {
index: 0
}
}
type: INPUT""", expected_event_two)
with metadata.Metadata(connection_config=self._connection_config) as m:
result = execution_lib._create_artifact_and_event_pairs(
m, {
'model': [model],
'example': [example],
}, metadata_store_pb2.Event.INPUT)
self.assertListEqual([(expected_artifact_one, expected_event_one),
(expected_artifact_two, expected_event_two)],
result)
def testPutExecutionGraph(self):
with metadata.Metadata(connection_config=self._connection_config) as m:
# Prepares an input artifact. The artifact should be registered in MLMD
# before the put_execution call.
input_example = standard_artifacts.Examples()
input_example.uri = 'example'
input_example.type_id = common_utils.register_type_if_not_exist(
m, input_example.artifact_type).id
[input_example.id] = m.store.put_artifacts([input_example.mlmd_artifact])
# Prepares an output artifact.
output_model = standard_artifacts.Model()
output_model.uri = 'model'
execution = execution_lib.prepare_execution(
m,
metadata_store_pb2.ExecutionType(name='my_execution_type'),
exec_properties={
'p1': 1,
'p2': '2'
},
state=metadata_store_pb2.Execution.COMPLETE)
contexts = self._generate_contexts(m)
execution = execution_lib.put_execution(
m,
execution,
contexts,
input_artifacts={'example': [input_example]},
output_artifacts={'model': [output_model]})
self.assertProtoPartiallyEquals(
output_model.mlmd_artifact,
m.store.get_artifacts_by_id([output_model.id])[0],
ignored_fields=[
'create_time_since_epoch', 'last_update_time_since_epoch'
])
# Verifies edges between artifacts and execution.
[input_event] = m.store.get_events_by_artifact_ids([input_example.id])
self.assertEqual(input_event.execution_id, execution.id)
self.assertEqual(input_event.type, metadata_store_pb2.Event.INPUT)
[output_event] = m.store.get_events_by_artifact_ids([output_model.id])
self.assertEqual(output_event.execution_id, execution.id)
self.assertEqual(output_event.type, metadata_store_pb2.Event.OUTPUT)
# Verifies edges connecting contexts and {artifacts, execution}.
context_ids = [context.id for context in contexts]
self.assertCountEqual(
[c.id for c in m.store.get_contexts_by_artifact(input_example.id)],
context_ids)
self.assertCountEqual(
[c.id for c in m.store.get_contexts_by_artifact(output_model.id)],
context_ids)
self.assertCountEqual(
[c.id for c in m.store.get_contexts_by_execution(execution.id)],
context_ids)
if __name__ == '__main__':
tf.test.main()
| 33.690821 | 79 | 0.638084 |
bb110e03416be0f3d4df56493139d785b80b30b1
| 2,388 |
py
|
Python
|
telebot.py
|
pun1sh3r/tele_bot
|
7f4ac8651c5f3396294c467e83ea89497f9e9f05
|
[
"MIT"
] | null | null | null |
telebot.py
|
pun1sh3r/tele_bot
|
7f4ac8651c5f3396294c467e83ea89497f9e9f05
|
[
"MIT"
] | null | null | null |
telebot.py
|
pun1sh3r/tele_bot
|
7f4ac8651c5f3396294c467e83ea89497f9e9f05
|
[
"MIT"
] | null | null | null |
import telepot as tp
import requests as rq
from telepot.loop import MessageLoop
import time
import logging
import sys
from config import suprnova_api, telegram_api, suprnova_url, suprnova_id,coinmkcap_url
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt='%(asctime)s: %(levelname)s: %(message)s')
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(formatter)
log.addHandler(handler)
class Telebot:
def __init__(self,bot):
self.s_api = suprnova_api
self.s_id = suprnova_id
self.t_api = telegram_api
self.s_url = suprnova_url
self.c_url = coinmkcap_url
self.bot = bot
def tel_handler(self,msg):
content_type, chat_type, chat_id = tp.glance(msg)
log.info("Message received: {0}".format(msg['text']))
if content_type == 'text':
if msg['text'] == '/balance':
final_price = self.q_supr('zencash')
self.bot.sendMessage(chat_id,"Hi there. Current balance: ${}".format(final_price))
log.info("sent message to telegrambot...")
def q_supr(self,coin):
params = {
'page': 'api',
'action' : 'getuserbalance',
'api_key' : self.s_api,
'id' : self.s_id
}
try:
req = rq.get(self.s_url,params=params)
if req.status_code == 200:
req = req.json()
price = req['getuserbalance']['data']['confirmed']
final_price = self.calc_price(price,coin)
return final_price
except Exception as ex:
log.error('exception ocurred: {}'.format(ex))
def calc_price(self,price,coin):
params = {
'fsym' : 'ZEN',
'tsyms' : 'USD'
}
try:
req = rq.get(self.c_url,params=params)
if req.status_code == 200:
req = req.json()
final_price = req['USD']
final_price = float(price) * float(final_price)
return round(final_price,3)
except Exception as ex:
log.error('exception ocurred: {}'.format(ex))
telebot = tp.Bot(telegram_api)
bot = Telebot(telebot)
MessageLoop(telebot,bot.tel_handler).run_as_thread()
log.info("telebot listening...")
while 1:
time.sleep(10)
| 27.767442 | 98 | 0.586683 |
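The bot imports its credentials from a config module; a sketch of what that module is expected to provide (all values and URLs below are placeholders, not the real endpoints):

# config.py (placeholder values)
telegram_api = "YOUR_TELEGRAM_BOT_TOKEN"
suprnova_api = "YOUR_SUPRNOVA_API_KEY"
suprnova_id = "YOUR_SUPRNOVA_USER_ID"
suprnova_url = "https://<pool-host>/index.php"        # MPOS-style API endpoint queried in q_supr
coinmkcap_url = "https://<price-api-host>/data/price"  # price lookup used in calc_price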
2043d31a5eb695685b716b37e4c769b3a2cc202b
| 188 |
py
|
Python
|
blobcity/utils/__init__.py
|
naresh1205/autoai
|
395249b9c40bcd6accb39e4e4b6c94810a2a68b3
|
[
"Apache-2.0"
] | null | null | null |
blobcity/utils/__init__.py
|
naresh1205/autoai
|
395249b9c40bcd6accb39e4e4b6c94810a2a68b3
|
[
"Apache-2.0"
] | null | null | null |
blobcity/utils/__init__.py
|
naresh1205/autoai
|
395249b9c40bcd6accb39e4e4b6c94810a2a68b3
|
[
"Apache-2.0"
] | null | null | null |
from .FileType import getDataFrameType
from .AutoFeatureSelection import *
from .ProblemType import *
from .Cleaner import *
from .YamlGenerator import writeYml
from .progress_bar import *
| 31.333333 | 38 | 0.824468 |
8928488c9257b68e6a1268a08c6443f871e52903
| 1,397 |
py
|
Python
|
cardinal/random.py
|
dataiku-research/cardinal
|
052ea2273e1e6a389e257e3775873620378fe908
|
[
"Apache-2.0"
] | 17 |
2021-02-13T16:29:16.000Z
|
2022-03-08T03:16:12.000Z
|
cardinal/random.py
|
SoftwareImpacts/SIMPAC-2021-174
|
052ea2273e1e6a389e257e3775873620378fe908
|
[
"Apache-2.0"
] | 5 |
2021-02-15T14:09:41.000Z
|
2021-03-23T23:31:25.000Z
|
cardinal/random.py
|
SoftwareImpacts/SIMPAC-2021-174
|
052ea2273e1e6a389e257e3775873620378fe908
|
[
"Apache-2.0"
] | 1 |
2021-12-24T17:41:09.000Z
|
2021-12-24T17:41:09.000Z
|
import numpy as np
from .base import ScoredQuerySampler
from .typeutils import RandomStateType, check_random_state
class RandomSampler(ScoredQuerySampler):
"""Randomly select samples
Args:
batch_size : Number of samples to select.
random_state : The seed of the pseudo random number generator to use
when shuffling the data. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state
            is the random number generator; If None (default), the random
number generator is the RandomState instance used by `np.random`.
Attributes:
random_state : The random state used by the sampler.
"""
def __init__(self, batch_size: int, random_state: RandomStateType = None):
super().__init__(batch_size=batch_size)
self.random_state = random_state
def fit(self, X: np.array = None, y: np.array = None) -> 'RandomSampler':
"""Sets the random state
Args:
X: Labeled samples of shape (n_samples, n_features).
y: Labels of shape (n_samples).
Returns:
The object itself
"""
self.random_state = check_random_state(self.random_state)
return self
def score_samples(self, X: np.array) -> np.array:
return self.random_state.rand(X.shape[0])
| 34.073171 | 78 | 0.654975 |
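A short usage sketch: fit the sampler and score a pool of unlabelled samples (batch selection helpers live in the ScoredQuerySampler base class, so only score_samples is shown):

import numpy as np

sampler = RandomSampler(batch_size=10, random_state=0)
sampler.fit()                      # X and y are not needed for random sampling
X_pool = np.random.rand(100, 5)
scores = sampler.score_samples(X_pool)
selected = np.argsort(scores)[-10:]  # indices of the ten highest-scoring samples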
ef227e2706ac990ac03558617c6b1b3064fad4ca
| 178 |
py
|
Python
|
platzi/Conversor.py
|
diegosish/Introduction-Python
|
b40dde602c0b5ca4ec4f6b13303ad6d3831fab73
|
[
"MIT"
] | null | null | null |
platzi/Conversor.py
|
diegosish/Introduction-Python
|
b40dde602c0b5ca4ec4f6b13303ad6d3831fab73
|
[
"MIT"
] | null | null | null |
platzi/Conversor.py
|
diegosish/Introduction-Python
|
b40dde602c0b5ca4ec4f6b13303ad6d3831fab73
|
[
"MIT"
] | null | null | null |
Pesos = input("¿Cuántos Pesos Colombianos tiene?: ")  # "How many Colombian pesos do you have?"
Pesos = float(Pesos)
v_Dolar = 4033  # exchange rate: pesos per US dollar
Dolares = Pesos / v_Dolar
Dolares = str(Dolares)
print("Tienes $" + Dolares + " Dolares")  # "You have $<amount> dollars"
| 29.666667 | 52 | 0.702247 |
7789f063700bf46fd36a7939a4e0d61ba0112349
| 1,247 |
py
|
Python
|
Question_61_70/answers/answer_62.py
|
nishidayoshikatsu/Gasyori100knock
|
f7fe35bca772eda2961a0790274c4934119b3fc2
|
[
"MIT"
] | 1 |
2019-06-15T00:09:27.000Z
|
2019-06-15T00:09:27.000Z
|
Question_61_70/answers/answer_62.py
|
nishidayoshikatsu/Gasyori100knock
|
f7fe35bca772eda2961a0790274c4934119b3fc2
|
[
"MIT"
] | 1 |
2020-10-13T19:22:07.000Z
|
2020-10-13T19:22:07.000Z
|
Question_61_70/answers/answer_62.py
|
nishidayoshikatsu/Gasyori100knock
|
f7fe35bca772eda2961a0790274c4934119b3fc2
|
[
"MIT"
] | 1 |
2020-01-30T15:38:48.000Z
|
2020-01-30T15:38:48.000Z
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Read image
img = cv2.imread("renketsu.png").astype(np.float32)
H, W, C = img.shape
_tmp = np.zeros((H, W), dtype=int)  # np.int was removed in recent NumPy; use the builtin int
_tmp[img[..., 0]>0] = 1
tmp = 1 - _tmp
out = np.zeros((H, W, 3), dtype=np.uint8)
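# For every foreground pixel the loop below accumulates what is commonly
# called the connectivity (Yokoi) number: with x1..x8 the inverted 8-neighbours,
# c = sum over k in {1, 3, 5, 7} of (x_k - x_k * x_{k+1} * x_{k+2}),
# and the resulting c in 0..4 selects the output colour.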
for y in range(H):
for x in range(W):
if _tmp[y, x] < 1:
continue
c = 0
c += (tmp[y,min(x+1,W-1)] - tmp[y,min(x+1,W-1)] * tmp[max(y-1,0),min(x+1,W-1)] * tmp[max(y-1,0),x])
c += (tmp[max(y-1,0),x] - tmp[max(y-1,0),x] * tmp[max(y-1,0),max(x-1,0)] * tmp[y,max(x-1,0)])
c += (tmp[y,max(x-1,0)] - tmp[y,max(x-1,0)] * tmp[min(y+1,H-1),max(x-1,0)] * tmp[min(y+1,H-1),x])
c += (tmp[min(y+1,H-1),x] - tmp[min(y+1,H-1),x] * tmp[min(y+1,H-1),min(x+1,W-1)] * tmp[y,min(x+1,W-1)])
if c == 0:
out[y,x] = [0, 0, 255]
elif c == 1:
out[y,x] = [0, 255, 0]
elif c == 2:
out[y,x] = [255, 0, 0]
elif c == 3:
out[y,x] = [255, 255, 0]
elif c == 4:
out[y,x] = [255, 0, 255]
out = out.astype(np.uint8)
# Save result
cv2.imwrite("out.png", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 27.711111 | 111 | 0.473937 |
83363cca1ee1a100aa367c5e04416bf4d42aae7d
| 534 |
py
|
Python
|
systemtest/quality/views/history.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | 1 |
2022-03-09T18:07:11.000Z
|
2022-03-09T18:07:11.000Z
|
systemtest/quality/views/history.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | null | null | null |
systemtest/quality/views/history.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | null | null | null |
# Django filters
from systemtest.utils.views import AbstractFilteView
# APPs
from systemtest.quality import forms
class QualityHistory(AbstractFilteView):
"""
Django ListView for history of systems
References:
https://docs.djangoproject.com/en/3.1/ref/class-based-views/generic-display/#listview
https://docs.djangoproject.com/en/3.1/topics/auth/default/#the-permissionrequiredmixin-mixin
"""
filterset_class = forms.SystemHistoryFilterSet
template_name = "quality/history.html"
| 31.411765 | 104 | 0.737828 |
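A hypothetical urls.py entry wiring the view above into a project (the URL prefix and route name are made up):

from django.urls import path
from systemtest.quality.views.history import QualityHistory

urlpatterns = [
    path("quality/history/", QualityHistory.as_view(), name="quality-history"),
]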
80a10dac68f27ab0b809206598a779b337eaf1ab
| 1,969 |
py
|
Python
|
examples/nbuild_spaceship_project.py
|
Jeffrey-P-McAteer/ntest
|
04eada57e8590a5e5b1fd6c50cadd72cdc18b640
|
[
"MIT"
] | null | null | null |
examples/nbuild_spaceship_project.py
|
Jeffrey-P-McAteer/ntest
|
04eada57e8590a5e5b1fd6c50cadd72cdc18b640
|
[
"MIT"
] | 1 |
2020-11-18T14:15:06.000Z
|
2020-11-18T14:15:06.000Z
|
examples/nbuild_spaceship_project.py
|
Jeffrey-P-McAteer/nbuild
|
04eada57e8590a5e5b1fd6c50cadd72cdc18b640
|
[
"MIT"
] | null | null | null |
import os
# python -m pip install --user nbuild
import nbuild as nb
this_dir = os.path.dirname(os.path.abspath(__file__))
p = nb.Project(
name='Spaceship Project',
poc='Jeffrey McAteer <[email protected]>',
description='''
The spaceship project aims to build a real-life replica of the starship from Star Trek.
Specifically the NCC-1701 will be used as a target model (see https://en.wikipedia.org/wiki/USS_Enterprise_(NCC-1701) )
''',
type_=nb.SW_Application,
deliverable=nb.Phys_Item(
item_name='Spaceship',
),
risks=[
nb.Risk(
name='faster-than-light travel',
if_='faster-than-light travel is determined to be impossible',
then='''
the delivered spaceship will not be to-spec with the NCC-1701 ship we are trying to replicate.
Cost will be unchanged, schedule will be unchanged. This is a qualitative risk.
''',
probability=3,
impact=5,
mitigation=nb.Mitigation.Accept(),
),
nb.Risk(
name='Rocket launch price increases',
if_='the price of commercial rocket launches increases by more than $1mil/per launch',
then='''
(cost) the project will be over-budget by $1mil/per remaining launch, with a maximum of $30mil (30 launches req. total).
The schedule may be delayed by the amount of time it takes to secure funds if we cannot get more funding to cover new costs.
''',
probability=2,
impact=4,
mitigation=nb.Mitigation.Control('''
We will invest $5mil in a private space corporation and use our voting shares to vote against price increases.
At the end of launches the $5mil investment will be liquidated to provide funds to finish the spaceship.
'''),
),
],
tests=[
]
)
# This actually begins the evaluation
p.evaluate()
p.write_reports_to(os.path.join(this_dir, 'reports'))
# This calls the default OS handler for reports (usually a web browser)
p.open_reports()
| 31.253968 | 132 | 0.684611 |
7be022d3b80cf4d2a7bd661c529f1f258a137277
| 2,220 |
py
|
Python
|
composer/models/vit_small_patch16/hparams.py
|
ravi-mosaicml/ravi-composer
|
d100053198524672f628c3959a8c4e51a9302e2d
|
[
"Apache-2.0"
] | null | null | null |
composer/models/vit_small_patch16/hparams.py
|
ravi-mosaicml/ravi-composer
|
d100053198524672f628c3959a8c4e51a9302e2d
|
[
"Apache-2.0"
] | null | null | null |
composer/models/vit_small_patch16/hparams.py
|
ravi-mosaicml/ravi-composer
|
d100053198524672f628c3959a8c4e51a9302e2d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
"""`YAHP <https://docs.mosaicml.com/projects/yahp/en/stable/README.html>`_ interface for :class:`.ViTSmallPatch16`."""
from dataclasses import dataclass
import yahp as hp
from composer.models.model_hparams import ModelHparams
from composer.utils.import_helpers import MissingConditionalImportError
__all__ = ["ViTSmallPatch16Hparams"]
@dataclass
class ViTSmallPatch16Hparams(ModelHparams):
"""`YAHP <https://docs.mosaicml.com/projects/yahp/en/stable/README.html>`_ interface for :class:`.ViTSmallPatch16`.
Args:
num_classes (int, optional): number of classes for the model. Default: ``1000``.
image_size (int, optional): input image size. If you have rectangular images, make sure your image
size is the maximum of the width and height. Default: ``224``.
channels (int, optional): number of image channels. Default: ``3``.
dropout (float, optional): 0.0 - 1.0 dropout rate. Default: ``0``.
embedding_dropout (float, optional): 0.0 - 1.0 embedding dropout rate. Default: ``0``.
"""
num_classes: int = hp.optional("number of classes. Needed for classification tasks", default=1000)
image_size: int = hp.optional(
"input image size. If you have rectangular images, make sure your image size is the maximum of the width and height",
default=224)
channels: int = hp.optional("number of image channels", default=3)
dropout: float = hp.optional("dropout rate", default=0.0)
embedding_dropout: float = hp.optional("embedding dropout rate", default=0.0)
def validate(self):
try:
import vit_pytorch # type: ignore
except ImportError as e:
raise MissingConditionalImportError(extra_deps_group="vit", conda_package="vit_pytorch>=0.27") from e
def initialize_object(self):
from composer.models import ViTSmallPatch16
return ViTSmallPatch16(num_classes=self.num_classes,
image_size=self.image_size,
channels=self.channels,
dropout=self.dropout,
embedding_dropout=self.embedding_dropout)
| 46.25 | 125 | 0.677027 |
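A brief usage sketch, assuming the optional vit_pytorch dependency is installed:

hparams = ViTSmallPatch16Hparams(num_classes=10, image_size=32)
hparams.validate()                   # raises if vit_pytorch is missing
model = hparams.initialize_object()  # returns a composer ViTSmallPatch16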
27c03129f346f8b963466b938a80dba5a070863c
| 1,318 |
py
|
Python
|
app/core/tests/test_admin.py
|
dkrooshof/recipe-app-api
|
7cb4e13d21fbb99691074da077d481c428c602bb
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
dkrooshof/recipe-app-api
|
7cb4e13d21fbb99691074da077d481c428c602bb
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
dkrooshof/recipe-app-api
|
7cb4e13d21fbb99691074da077d481c428c602bb
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='Password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='Password456',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.380952 | 68 | 0.639605 |
b4dca55daf24e10f1fd034222c4898b51e3d6d09
| 9,609 |
py
|
Python
|
src/logo_update_png.py
|
shounen51/poe_AutoFlaskByAttack
|
ce6628583d0f0c6076d2f522a340689b722732f2
|
[
"MIT"
] | 3 |
2021-01-08T05:42:50.000Z
|
2021-07-09T14:15:57.000Z
|
src/logo_update_png.py
|
shounen51/poe_AutoFlaskByAttack
|
ce6628583d0f0c6076d2f522a340689b722732f2
|
[
"MIT"
] | null | null | null |
src/logo_update_png.py
|
shounen51/poe_AutoFlaskByAttack
|
ce6628583d0f0c6076d2f522a340689b722732f2
|
[
"MIT"
] | null | null | null |
logo_update_png = b'iVBORw0KGgoAAAANSUhEUgAAADwAAAA8CAYAAAA6/NlyAAAb3klEQVRoQ61bCXRb5ZX+tO+ydsmLvG+Js9lOQpwdEghhTegCtFOmy3RaSttpB9qmKynQ6bRlzrQM7cy0M5SlnLaUCRRoQhMCoZDEIQshi2MT77ZkS5Zsy7b2bc69T5Il21nKzDvHB3Isv/ffd+/97ne/eyWqra1N4//rEgHI3E0klkCsUEKi0UGsUEFucUCi1kCqN0JmskIEMUQSCRRGCyQSSeYEaaRSQDKZQHTcg8SEHxF3P2JjI4iNe5GKRv7PJxUJBueddKFbXuHX+X8vEovZQDJUotJAbrZDrNJAZrRAqjdBJBFDWmSGWqmARibB9Q3lWF1RglKjHtF4HGddXjz9Tge8M2FEfKOIT4wh4urnn0TAjzS9kXnXFQ+Y+4uMwX/Ni5tz87x/ipUqSMmjKi0UFkfGUCt7VSyVQmGyQK9UQitO43ObWrCo2Aq9Rj3v4cP+SXzzpb/AE5hB1O9BuL8Lof732dPpVCIXRVc0vOCowj8+gMELPIbCV6WGtMjEhkq0OsgMdkiLDBBJpFCbbbBoVVhkM+C2ZXVoKLZCmgvjhV92+8V+7N7bjsi4BxH3AMKD3Yh63UgGp4D0VWbhAo7PM3ih387m5ILHEokglisgUesg0xsgM9uhsJVCajBDLJVBZbLBrFFgbVUJbmiqgtNogFwmvapwSiSTeHTvYRzo7EVszM0hHR7uRdw3ilQselX34A+xWbO2fWAPk+coT6XaIvas3GyD3GSDRFsEJXlUp8bm6hJsrK9AudkApVwmHJKcQ8/PXKlUCvFEAgq5fJ4RU6EI/v7ZffB4RhEdHUZ44CKio0NITI0jnU4VhvZVpvFfbTCBkkiuhExXBAkbaofMYIHMYOYXoNHpsaWuDDtaFsOu10ClmG8InXQ6GMZZlwdHekfgng7hrpYGrK5xzjP6zc4+PLL3bcT8XkRcfZnQdgmIfZVG5t90jsGXvgOhq0gqz6CvlssMIa/MZINMZ+CS01LuwOc2tcKh10DNhua5MvPUbs8Y9p+5iHfc45iKxBGKRZFMpmDSqPHgzWuxuMSWd740wtE4vv6H/Tg/4ELUM4zw4EVE3INIBMjLyQUALPvMvDzPL5dXU4e5nqoJfdXsUalOD5nRClmRBVKtHuU2C/6mpR5rGqqgkkkhEs039OzQKJ470YGOsQBC0TjC415Q0RXKTJrv2+gw4zvb18JepMuLeeDkgAu7XnwDMZ8HYVcfIoM9iHqHr1CX5zrvKlGajKXcJECSanWQFlnYWIlSzS/g9qW1+Ju2ZShSKyEWiwu8Q2Bx0TOGp4514LTLi2gsjviEj42kPGQvpZJcn+meKqsDn17ThDtaFxeg+Ewkiu/sOYgzfUOIjAwyGaHwTkz6/+pcvmwO5xurLK3kQ4lEYijMNphVcty/9RosryieV2IIYX1TM3j80EmcHPYiGpxGKhTiw5HBiakJJEMziPk9XFcVlmIoS6vYy1qLHT/ZsQmNFNqZQKH7vdXVhx8eOIGobwThgfcR7n+fy1Q6mbh6xKYkmw3pPOgmtqRUQ1ZkYs9mjVUQQEklWOm04ctbVsOs1RQ8jBB3MhTBE6+34y/DPgSnZ5AMTSMRmGBqSKGbmJkSDI1F+f85nE12KBxlUJVVQ24rxbbGCnzpxg0FgOcen8RXnzsAr8eD8FAPQgPvM3KnoqHLEJEFOMPcHGYOrNbwIeQWO+TWYvYsGWvSqHBHUwVuW7l0tsxk7hkIhXF+aAQ/PfQuxkMRpoHJ6QBivlEkw0H+bzoRRzIaRjI4jXQ8JuSvSASJWguFvQyq8jooSyqgsdrxn3ffgDKzMXficDSGxw6+gz+f6USUwproJtXlibFCujmn7s41uTCkxRKmhmyo2QGFwwmpzgCp0YIlJVZ8du1SLCq1F+Qqhdvg+BSeOfoe2gc8iE5NIhkJ8UGoZiaDMxDJFUwtKfxSsRiSM4Gc4VReqKbTi1VX1kNV2cCh/dElFfj0tW2QSARcSKXTONYziN17jyIyNorwQNcHCuucwUwktHooyKMmG5TF5ZDoDNBZ7FhbVYy/39AMs64whMmr7T3DePbEBXimgoj4PAxG8XEvh6tUrYNEV8TNhFgmQzqZRCoSQiI0hcS0ENpxCu94jBsKSh36UVhLYLA58LvP7oRCmu2kgAHfBHa9eAijLhdz61B/F5cq+vurvdhgCmM6GNFCYkwUVoScJosVd7c2YPvSukxdnb2tJzCNX731Lk4OjyEYiTLJJ9QkFKW8pTrNL43KmVoLsUSCVDLJYEWhTeCVRdv45Di/EKoGKvJyRT2U1mI89Ymb4DDqcw+dCkfx0J/ewqnOboRdvQj3E/MazGDBVZgsyoAWe9ZWwgYTWlJ3U1rswBfWL0NzRWlBvhIwDY4H8OP9xzAwPoXg2CiXFg7hkUHEpyagdDj5PsTAyksc2FhhQ7XdwlHwwqkL8EXi7F3KQWoMyEupcJCJjLKsBqqKOj7LuroKPHTHlpwl8WQSe0514peHjnO6ML8e6ub6jFQyz+JLESgRRHUNjWnyhqq8lj1LANVYU4kvbWxGjdVUQPapX32roxtPnrqIsVwITyBOzfrMFIenooTCsgpykxWLa6ux6/rVsOo0jO6U7/6pIHbtOQjXxBQio8OIDF7kQ5PHqTIo7MJLJ6MtxaV4/nN35BgbNUnHegbwT6+2Y2rMw+Up1NeFqNfFUXM1l6hh2Yo05a2yvA7qilrYy8px/7WtaK4shpSJhFAM6bB/aH8Pvz/bh1AkgqjfyyFMb5q8RQYTvaSQVFc2wFpK92nBmtry2XNkGgf/dBB3PfESYl43lxduCrxuZmgUaUJYN0BtL8G+L360wI7RySk8uPcI3u/uYV5NvXJkdOgyrKvQ26Kmtg1pZVm14BVbCbauWIyvXLdqXs7+4sARvNI5hND4WCYHhRCO+kaRmPAhRR2PvZSNJbR1VlbhZx++FsYFGnyy4IbHfo/omLuguRdJpZAWGaGuaGCjqVT9aOsKtDQtyhlNePGjvW/j7c4ersdssKufS9/VXKIlG7ekCSTooBqbA5+8pgkfWbWk4G+fOXwaT71znmsrhV4WhSmEYxNjSEUiEMlkfEDB4DqUVVTh0R0bYTfMgk7+TdlgnxvhPkHNoFJFJTBLYRW2MsgdTmxxmvGtu27N/SlF2pNHz+K3h09lQE+gmXFyREEeL2y+qGnjlrSmqpGR0VTqxD9sbsWmuvKCRufWf/sdZsbHEB0bYXbDZCIksKissCaSyhhoVFWCh6ms3Lu6EdtWLit8cias5xpM8hDVfuIAhCNUt5UmG76+ph7XrmouuMeTh47jmVNdHFnhwS6E+66GZmaah1kPN0Jts+OelYtw55qlEOW1dvc88UcMDg0hMtQjMJzRQaSovOQJalTHKaTpxdGPzGTBjkUV+OKNGwobisx980OaXh710nJrCaQ6EhTMcBiLsNRmwDduWl9gbCyRwK8Pv4fn3+1i7GBeTTTT476C3iXcRtS0/tp0PsO
5cXEV7tvUCmW2cU8Dzxx5F08eeQ9x3wiHH0stmVzOnoZqOcmvKmc1FMWVUBQ78aG2Vty3ibxTKHMcPdeJ7x04yZFCoCU3WgVZSKFEkdmKRrsRX9jcinKTIXN74e8DwTDODY/il0fPYWBggEW9gs5pQUWzMMBEi1a1pQWGUwGF3YlFNVXYfcsG2PTa3CcDwSA+8+x++EeGufkWmvABDuv8izixsqQS6upGKJ012LGmBV/e3DIvme7+2dPwhKKcHoTMVPepA3OaDfjYilqsqi6DTq3iv0un0wiEIjh4oRdn3H6c6HchPBVAYnqCXzzV4Pni3iXqMBGP+qYlaQKK/Dy+/7pWrMuWk4xznjp8Gk8fOY2Y1yXILJlylC+Okw6tKK2EuqoBKmctrl/eiK9tW5vXPgo3+8h//RFjQwPMh1Xl9VCYrFhu1eGrt1zLspA4IyAkEgn8uaMXr14YQLd3HNHpADcblD6p8AyXQiYudJYxN5LhUCZuLy0+CsQjj7gTj95c58RXtq6BVjmrR02Ho/j003+CzzvKzGghcZxCUlFcDnUlgWAtGiuc+Nc7t83rrG75+R8w4yElsg/K8hpUVVXhh7du4LzNXj3uUfz62AWmrpFJHxupMxixstwOg0KKo0M+jAwNIJ6ZThBrI92rEKnne1pUV1+fzifuhJQGRwl+uHMTFtmtObSmbuXg+R786MAxLktU/8LDfcyusjWQJFtKC/KwsrwWVZWVeGT7GpTaLDlDHn/1L3ipy82NPIEgAZy5rBwPXNuMttoK/hyF8Nf2vM7UNUUNvlgCm0qOL21uQZOzBHKpBL9+6yRe6uhHyDvKTYQAXK4rCgLcPGSVDaJz1J6RlzdUl+D+besKvBwIR/CV3+/HgGuE8ybq7mdvZ+c+QmkqYVpITEnrKMG9G5pxy7I6weA08JmnX0FvBnDikz5+Hon311SX4eFbBURPJFP4yhPPo2doCI9+4ZOQIcU6V5FKmXtxpJE9/Go7vK5BAUhZAXHNdk6XSmOhWxLmPdn2TGa0QWmx47GPbkG9g2SdWVHuzJAHu158E0GvG/EJkk772ctkdDoeZ6RWllVCWVLFdbltSSO+u30th/V7F7qw+82zmBglhvU+4oFxoR20laGupgoP37wuR1So/KSTaSgUGT17DvR1Drmxe/9xeIYFg3Oc+gqtYq4fznqZJBaimVRTl9dU4qHbNkGnUuQeNxOJ4RdvHMdrXQPcEs4ddpFmLXDzWqicNRwt39jWhg215fiX14/jza4+REl9pDAc7OaQJnamtRfj4ds2otnpyJWiaCyBeCrF4t3Dfz6Gf7h+DW5squHfj04E8MALh+AaGsr1xjGPC6l49LJ6dYGmRWyHmm+qpdwmGi34/MZW7FyZryKm0Tc2ge+98jZGJme4U+LDE73zeZCKhgubeVspCwl6cQozKTGi42MFkZHtf7WOMuzaugobGipzL/fjj/4KY3IdROk0FBIJPrysCvdsXMW/nwyGcf+eN9DT24vIYLcgBowMCQZf5iqQeDi09SYoHKXcl5LEQ6PO//rU7ai2mAro5usdPfjZoVMIeEYQp66J8tnrRmzcw8MuauYJsbOamFgmRyoeR2Laj8jwAJc3unINv60EdzfX45PrZ2nkP7/8BhISGVRSKZxK4KbWpdBqBNVlYmoG979wCH0DwqCNQ5oYYMHcaQGUnifiyeSsKVFvTCSCOqjFVeV46JYNMGlnR5uBYAj/dvAdHB3yYmbUzY0FlZns8JqZl8GcaQb0LPOkYhEkQ0GmhMSyJGo1lKXV/BxSLD+yvhWfX7/ish7K/nJiOoh/3PM6+voHBA+zwUNCWUpnBf7sp7OGixYelxJjkrNWXJFp5m1YVeHAd26/Dpq8WZF7YgoPvvIWBvwB7o+zIJYdXotkcpZ4JCo1j2nSiSiLeEQYKPTpJahKK7nhIKKyrqkOD2xtQ5FGQGPqjOLxJKajUQz6A0hAhAqjDsVGPUYoh198E+7BAYT6L3JIs+qRTgsCIXH9WHTeAF1UW1ebzq4pZGmKSCzQPQIwKjPUL6usxbhjeR3uaVtWQCSIAX3/T4dBGld2eM2e9gn1mdKE6qhIImLUFcYrghzDRMXuhKpckHWKbA5G9KVlDvR5/djX0QdPMIJgOAJ6uUVKBT7SXIvtzU04PeDGP+1/J1eWeFciU/vohSYmx5EksZCGAJFwLmpmc3juYF8ihcxgYg9TyBHaam3F2LGsFh9f3ZQRyQWqeKJnED84cByTPi9iYx5E3X2IeF1MUPIfNjdW58qz9IwSUxGaDEqsrKlEKJ2GWgTYjUVQyKRQSUQs/tNE8rX3LuDf2y8wv08nU1w6iXez2D81ydFG+jcJFPnnuPyoRakSJFvyNBltNLPRO2medM0SKDIzX+K8Lx45if9+rw/hcR/ncdQzxCBGXRWF70JT+4Xqv8riwLe3tqKtsbpghEPhTbOpFESQScQgQe/rzx9Ez8S0AJJSKRKpJDs6EZzi6GIVNTOHys60LmHwrLupIeDJPtVWkm8NZmgsDnzimiX4UEsDZJnVhWg0hj0nzuLJE+8jRqjNYsEQgxPVatKj5y6kzM6ubELtt5ZAb3Pg8TtvgNOk5xym0H76yBkMhWKIxeOss927bina6irx8qkLuDg6hg+tXgalTIpkKoWZaAz/cbAd58amhBTjfrmb5STSr688EKdRiFLNIEZqBIe30Qyl0YovXNuKm5pqIRHPCn3/c7IDTxw9x0YT+xKM9iDmH2UpNp0Swm6+d4VxDi277FzRAJKD7/j575GUyNjw2IQPBILVpcXYfdM6lFlNePC5ffjS9g2w6LQZgEtAoZBjLDCN7+89ggs9vZmS1cnn4CnHvPnwQhw0ZzRJMA7WmnhOrDfivk0tuGlJbSa800im0nj+nXN46vgFFvxoJEqoTQZzo5FRSubydzK20qjD7ls3wFakwy9fP4aXLgzSq8mMRNNwFGnxj5ubsaLKySj9yL4jeOT2zdxOfuzXL0GvkGHX9WtQaTXgR/uPor2jO2cwhTbhyZU9nCtlgqcppym85fZSQZLRG/GZNUuws3VxDr0jsTj2HD+D357uwYx/jDlz1mgK8VQoyHshWe5OL9FcXIYHb6INACvevNCDX7V3QCoRo1inxsRMGN5QFA9sXYmN9QIT++mBdijiYXxyyzoBxM50Yk2NE2qVChc9Puz+0xGMuoh2XhRkYM8Qk5KrN5ieQls7tFVntLBCSUZT3kn0Rvzd2mXY2bKI0ZQuKiV72k9jT5cLkz7i3OPMxsjTlN+0DJOTliwObK0rw/3b1uLiiA+/OHQcH29txOqGah7cUcn77FMv486Vi3F7cyMvtf1kfzvODnvw84/dWNBF0Tjmn/e9jfbOntw4h5gY73glE3kG54fypVorsURQHIiNGS1CnXaUCUZrdPi7DS0FRk8HQ/jD28fxcu8YJsnTExkK6hvh8MoaTFFTazfjE6sX41eHz6BEmsTXbr8eWpUCkViCScc3X3wDErEY37xhFVbXVuLV05145mQnHr9rG4waQQ6iiyYan//NXkz7vdxUsH
aet991dR7O5DDvTcqpcxKB2AqReqKexJeVjnJINHp8ev0KfGjVYigkgqenZoJ47cwFPHfBDc+IS1AouNEY4aYiO2ynnJaqtUiEZqAsMmJjVTFqi2040tkNfxwYdo9AqTfg9kVOfG5LG0KxOPr9k6gyG6DKrkQBHA0PPP86hoaHCihntqm4Imjx7iRRTaM1tzcJWl1KpxlMqLiT1wWjnbwsetfKxfyjlJPRIq6Z7b0uPHboFHyeEdaTacpIDTvPn7V67rAktLqoK4LSZEWFXoXVdVXwTQZw3jOBsZkIz7m+u70NrRXF8/g2KTIEXhPBML79x0O40NvPuRvqm0VozsorbfGQVxUWOxT2csgsxZDqaZ1QTBNqNpiKO4ES1VnqrkgA0FpLsHO5wMiyC2m0fHasz4VHDx7HlN/P9ZnzmdcWIhmVpB4aWzE+trIRW+orUGw2MBb4wxG80+vG/5zuwqeuWYLrFlczq5qJxuGeCOBI9yCsRXrsaG4EaW/fe+EA3ut385Au1NvJQl+W/FzeYLGE58XUyPPsiWa+FIYyMWLJFGKJJBLTAUY/4qxkgKzIzF2Szl6MHczIlkLBngZv3L3b78L3Xz2GyMw0gxcbPTLI00v6KbWa8S93bc8pH6FIlD0bT6bw6rlu/PZkFxxaBSASIxRPIBJPIJlI4IbF1fjU+maeVf/768ew72x3bvZEcq4QiVfwMGlUlKOaShpuNcBQXIbvbm1FdbGNVxDO9Lvw7IkOuKcjiIZpd2NK2OMITkNqMDEj+8zaZbhteV2OJhKh6BgYxv0vHUYiGmF5lbzNnZOzBoudDuzesQUWvZbJx91PvITNDZW4d8MK0FLbj/cdwYebG9BSU84ITvhKiKKQSjmXo/EEXjxNM+QTiLr6EezrZLEwMT3JL/2yHiY0ZgUkMy+ylZXjmXtuznFoMpqIhi8QwG+OncfhvhEEo3EkZia5DeSVYYMZ961fjptbFuX0ZgpH2va573f74R2fYBZFxIBQ21Rchsc+uhVlNIQTAV996gWsqa/CnW0rcHZwBAc7evDlbety95qbzMTKDnb04if7jwpbuH2drK4mpvxCk3G5HGYVMjMCJe3JUlaOf925EU5SPy5xvXDiPF4514PRYATxRJLzW22y4BvXX4ONDSTDZgTBNDARCuNbew6ia3iEZ8W0iUAM7qubW3DDUiEqJoMhGDRqkKj3/LGz6JqYwQNbVnN69Hp9uDgWQJ9vEt7JKXz71k28h/JWZy8eee0E114S+0N9mV3rZPIKBtOALDegrufDVBq0eHjHZlh0mkvuPNNb7nJ5uJ89PeJHMgX8bWs9rmuqgVxWqEJ2e/344m//jIh/jLk3D+QMJnxnexvWVDshFon5HXkD0/jey2+hxx+AViZBPAXeoE8l40gnEtDqDfjBzW1YWlGKk/0jwqqi3ztv2+fyHiaNy0DybaYnph0MiwOlBh0+v345FpXYoFHI5qwcFrqetgWSiSR0c5bYsp/yTgSw67l96PcFGGSyXxtQFBnx5CcE8CKq+rvj5/GbI6d5h4RXoFJppCJBpKJRyB1luGFpPXbdvJEj4cC5bvz00EnB4Pxt+gKmNasJFAxmiBBwe0gIXVLJL0Ci1EBvNmO5w4iPr1kKp7HoEmvCl4z83C9GxwN44LlX4RrzM9En1CfZh1jc4jIH7t3civMuL5499T4CNJ5xk1AXYeWE9sG4cjhrcGPzIny4ZRECoSgef/Nd9Pb3cZ2fOxKa/yWPBWglybfcNNB6P9VlopJaPS+hOGw2XFdXxnoxqRXZQVhGbVlogzhnLKHwid4hfOvlt4XxDa8TDkJZLCzG0DN0BoMAhLSfOTXJE0PezUonIZYroS6vY3GCFlNpUzAcTyDgF/Y5efN2qIdFw9zgvgC0Flg1zp4u+wUO2nin+RMPsM02ZklygwUVJloMd+L6phqY5iywXcrP49NBPLT3MM5092UGdH1MO6kL4w1ei4OZF6FrPOAHCe1Ut7njikZ4F1TouKqYqYmlEg71xExAGAGROup1CyUp8z2JS2paCx2SaaZKk1v9596YDqU38WKZxmRBlcWAHU2VWFNbAU3eLGju6r+fmvSX3+T96fzhXIw4tkqT+VqBg+kmiX68fUtd16QvIySkwGsSmZ1QcoBIImOJNhkJg+7Dm7u0wJq3+yGqra1L/1XrqPQmaHNPreU3nP0CFnmdeDHtZ2lUCtSYDdwnN5Ta5tXMDpcHPz54AvR1HTJWGNeQdwdY36aL7s8bfFI5G5FlcwX6GLerSuGrQ3IlQA1LKoVUIsYrGVmxId95V+TSl4SdTG9MD6MuKX/zlkR4+qKHUibDEnsRz5VkYjHckwG81jWEyXgKU+O+AqFtwW+fkbxL8hGrQulLbumwFCySAPxZ+lxqzmberBUf3ODMPXjdmEAts1tNOU0rh4TmPH2gb6RJ6OBi3rWkGk3dEuUZhd1CUuqVsf2Df+J/AZSqqSm310OJAAAAAElFTkSuQmCC'
| 9,609 | 9,609 | 0.970757 |
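The bytes above are base64-encoded PNG data; a small sketch of turning them back into an image file:

import base64

with open("logo_update.png", "wb") as fh:
    fh.write(base64.b64decode(logo_update_png))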
a32370273ba6035361417f8642b36faa650b7148
| 18,974 |
py
|
Python
|
google/ads/googleads/v5/services/services/dynamic_search_ads_search_term_view_service/client.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v5/services/services/dynamic_search_ads_search_term_view_service/client.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v5/services/services/dynamic_search_ads_search_term_view_service/client.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v5.resources.types import (
dynamic_search_ads_search_term_view,
)
from google.ads.googleads.v5.services.types import (
dynamic_search_ads_search_term_view_service,
)
from .transports.base import (
DynamicSearchAdsSearchTermViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import DynamicSearchAdsSearchTermViewServiceGrpcTransport
class DynamicSearchAdsSearchTermViewServiceClientMeta(type):
"""Metaclass for the DynamicSearchAdsSearchTermViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DynamicSearchAdsSearchTermViewServiceTransport]]
_transport_registry[
"grpc"
] = DynamicSearchAdsSearchTermViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DynamicSearchAdsSearchTermViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DynamicSearchAdsSearchTermViewServiceClient(
metaclass=DynamicSearchAdsSearchTermViewServiceClientMeta
):
"""Service to fetch dynamic search ads views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DynamicSearchAdsSearchTermViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DynamicSearchAdsSearchTermViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DynamicSearchAdsSearchTermViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
DynamicSearchAdsSearchTermViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def dynamic_search_ads_search_term_view_path(
customer: str, dynamic_search_ads_search_term_view: str,
) -> str:
"""Return a fully-qualified dynamic_search_ads_search_term_view string."""
return "customers/{customer}/dynamicSearchAdsSearchTermViews/{dynamic_search_ads_search_term_view}".format(
customer=customer,
dynamic_search_ads_search_term_view=dynamic_search_ads_search_term_view,
)
@staticmethod
def parse_dynamic_search_ads_search_term_view_path(
path: str,
) -> Dict[str, str]:
"""Parse a dynamic_search_ads_search_term_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer>.+?)/dynamicSearchAdsSearchTermViews/(?P<dynamic_search_ads_search_term_view>.+?)$",
path,
)
return m.groupdict() if m else {}
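    # Illustrative round trip (not part of the generated client); the IDs are
    # made up for this example:
    #   dynamic_search_ads_search_term_view_path("123", "456~789")
    #       -> "customers/123/dynamicSearchAdsSearchTermViews/456~789"
    #   parse_dynamic_search_ads_search_term_view_path(
    #       "customers/123/dynamicSearchAdsSearchTermViews/456~789")
    #       -> {"customer": "123",
    #           "dynamic_search_ads_search_term_view": "456~789"}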
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[
str, DynamicSearchAdsSearchTermViewServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the dynamic search ads search term view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DynamicSearchAdsSearchTermViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(
transport, DynamicSearchAdsSearchTermViewServiceTransport
):
# transport is a DynamicSearchAdsSearchTermViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = DynamicSearchAdsSearchTermViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_dynamic_search_ads_search_term_view(
self,
request: dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dynamic_search_ads_search_term_view.DynamicSearchAdsSearchTermView:
r"""Returns the requested dynamic search ads search term
view in full detail.
Args:
request (:class:`google.ads.googleads.v5.services.types.GetDynamicSearchAdsSearchTermViewRequest`):
The request object. Request message for
[DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView][google.ads.googleads.v5.services.DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView].
resource_name (:class:`str`):
Required. The resource name of the
dynamic search ads search term view to
fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v5.resources.types.DynamicSearchAdsSearchTermView:
A dynamic search ads search term
view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest,
):
request = dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_dynamic_search_ads_search_term_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("DynamicSearchAdsSearchTermViewServiceClient",)
| 40.980562 | 196 | 0.649099 |
670ed4e62914081b1b3937d35a3b7ced55f36866
| 3,193 |
py
|
Python
|
ezyrb/gpr.py
|
peaceiris/EZyRB
|
4036d56c58e21232d309f4589efc82aba7146de6
|
[
"MIT"
] | null | null | null |
ezyrb/gpr.py
|
peaceiris/EZyRB
|
4036d56c58e21232d309f4589efc82aba7146de6
|
[
"MIT"
] | 1 |
2020-06-05T14:00:51.000Z
|
2020-06-05T14:00:51.000Z
|
ezyrb/gpr.py
|
peaceiris/EZyRB
|
4036d56c58e21232d309f4589efc82aba7146de6
|
[
"MIT"
] | null | null | null |
"""
Module wrapper exploiting `GPy` for Gaussian Process Regression
"""
import GPy
import numpy as np
from scipy.optimize import minimize
from .approximation import Approximation
class GPR(Approximation):
"""
Multidimensional regression using Gaussian process.
:cvar numpy.ndarray X_sample: the array containing the input points,
arranged by row.
:cvar numpy.ndarray Y_sample: the array containing the output values,
arranged by row.
:cvar GPy.models.GPRegression model: the regression model.
"""
def __init__(self):
self.X_sample = None
self.Y_sample = None
self.model = None
def fit(self, points, values, kern=None, optimization_restart=20):
"""
Construct the regression given `points` and `values`.
:param array_like points: the coordinates of the points.
        :param array_like values: the values in the points.
        :param kern: the kernel of the Gaussian process; if `None`, an RBF
            kernel over all input dimensions is used.
        :param int optimization_restart: number of restarts for the kernel
            hyperparameter optimization. Default is 20.
        """
self.X_sample = np.array(points)
self.Y_sample = np.array(values)
if self.X_sample.ndim == 1:
self.X_sample = self.X_sample.reshape(-1,1)
if self.Y_sample.ndim == 1:
self.Y_sample = self.Y_sample.reshape(-1,1)
if kern is None:
kern = GPy.kern.RBF(
input_dim=self.X_sample.shape[1],
ARD=False)
self.model = GPy.models.GPRegression(
self.X_sample,
self.Y_sample,
kern,
normalizer=True)
self.model.optimize_restarts(optimization_restart, verbose=False)
def predict(self, new_points):
"""
Predict the mean and the variance of Gaussian distribution at given
`new_points`.
:param array_like new_points: the coordinates of the given points.
:return: the mean and the variance
:rtype: (numpy.ndarray, numpy.ndarray)
"""
return self.model.predict(new_points)
def optimal_mu(self, bounds, optimization_restart=10):
"""
        Propose the next sampling point by looking for the point where the
        predictive variance of the Gaussian process is maximized. A gradient
        method (with multiple starting points) is adopted for the
        optimization.
        :param numpy.ndarray bounds: the boundaries used in the gradient
            optimization. The shape must be (*input_dim*, 2), where
            *input_dim* is the dimension of the input points.
        :param int optimization_restart: the number of restarts in the
            gradient optimization. Default is 10.
        :return: the proposed sampling point, with shape (1, *input_dim*).
        :rtype: numpy.ndarray
"""
dim = self.X_sample.shape[1]
min_val = 1
min_x = None
def min_obj(X):
return -np.linalg.norm(self.predict(X.reshape(1, -1))[1])
initial_starts = np.random.uniform(
bounds[:, 0],
bounds[:, 1],
size=(optimization_restart, dim))
# Find the best optimum by starting from n_restart different random
# points.
for x0 in initial_starts:
res = minimize(min_obj, x0, bounds=bounds, method='L-BFGS-B')
if res.fun < min_val:
min_val = res.fun
min_x = res.x
return min_x.reshape(1, -1)
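# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of fitting the GPR approximation on toy data and querying
# the posterior at a new point; the data, the kernel defaults and the number
# of optimization restarts are arbitrary choices made only for this sketch.
if __name__ == '__main__':
    x = np.random.uniform(-1, 1, size=(20, 2))      # 20 samples, 2 parameters
    y = np.sin(x[:, 0]) + np.cos(x[:, 1])           # toy scalar output
    gpr = GPR()
    gpr.fit(x, y, optimization_restart=5)
    mean, variance = gpr.predict(np.array([[0.0, 0.0]]))
    next_point = gpr.optimal_mu(np.array([[-1.0, 1.0], [-1.0, 1.0]]),
                                optimization_restart=3)
    print(mean, variance, next_point)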
| 32.581633 | 79 | 0.616035 |
40061211ece24cf7911daf49b86a641fad6a6007
| 15,593 |
py
|
Python
|
academic/apps/projects/migrations/0001_initial.py
|
phretor/django-academic
|
864452238056e07056990479396e8446a1bad086
|
[
"BSD-3-Clause"
] | 2 |
2015-10-16T17:07:03.000Z
|
2016-06-23T09:54:51.000Z
|
academic/apps/projects/migrations/0001_initial.py
|
phretor/django-academic
|
864452238056e07056990479396e8446a1bad086
|
[
"BSD-3-Clause"
] | null | null | null |
academic/apps/projects/migrations/0001_initial.py
|
phretor/django-academic
|
864452238056e07056990479396e8446a1bad086
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Topic'
db.create_table('projects_topic', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('highlight', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('highlight_order', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=2048, db_index=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=128, db_index=True)),
('excerpt', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('projects', ['Topic'])
# Adding model 'Project'
db.create_table('projects_project', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('highlight', self.gf('django.db.models.fields.BooleanField')(default=False)),
('redirect_to', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('short_title', self.gf('django.db.models.fields.CharField')(max_length=1024, db_index=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=128, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=2048, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('excerpt', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('footer', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('topic', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projects', to=orm['projects.Topic'])),
))
db.send_create_signal('projects', ['Project'])
# Adding M2M table for field downloads on 'Project'
db.create_table('projects_project_downloads', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['projects.project'], null=False)),
('download', models.ForeignKey(orm['content.download'], null=False))
))
db.create_unique('projects_project_downloads', ['project_id', 'download_id'])
# Adding M2M table for field people on 'Project'
db.create_table('projects_project_people', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['projects.project'], null=False)),
('person', models.ForeignKey(orm['people.person'], null=False))
))
db.create_unique('projects_project_people', ['project_id', 'person_id'])
# Adding M2M table for field organizations on 'Project'
db.create_table('projects_project_organizations', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['projects.project'], null=False)),
('organization', models.ForeignKey(orm['organizations.organization'], null=False))
))
db.create_unique('projects_project_organizations', ['project_id', 'organization_id'])
# Adding M2M table for field publications on 'Project'
db.create_table('projects_project_publications', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['projects.project'], null=False)),
('publication', models.ForeignKey(orm['publishing.publication'], null=False))
))
db.create_unique('projects_project_publications', ['project_id', 'publication_id'])
# Adding M2M table for field sponsors on 'Project'
db.create_table('projects_project_sponsors', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['projects.project'], null=False)),
('sponsor', models.ForeignKey(orm['organizations.sponsor'], null=False))
))
db.create_unique('projects_project_sponsors', ['project_id', 'sponsor_id'])
# Adding M2M table for field related_topics on 'Project'
db.create_table('projects_project_related_topics', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('project', models.ForeignKey(orm['projects.project'], null=False)),
('topic', models.ForeignKey(orm['projects.topic'], null=False))
))
db.create_unique('projects_project_related_topics', ['project_id', 'topic_id'])
def backwards(self, orm):
# Deleting model 'Topic'
db.delete_table('projects_topic')
# Deleting model 'Project'
db.delete_table('projects_project')
# Removing M2M table for field downloads on 'Project'
db.delete_table('projects_project_downloads')
# Removing M2M table for field people on 'Project'
db.delete_table('projects_project_people')
# Removing M2M table for field organizations on 'Project'
db.delete_table('projects_project_organizations')
# Removing M2M table for field publications on 'Project'
db.delete_table('projects_project_publications')
# Removing M2M table for field sponsors on 'Project'
db.delete_table('projects_project_sponsors')
# Removing M2M table for field related_topics on 'Project'
db.delete_table('projects_project_related_topics')
models = {
'content.download': {
'Meta': {'ordering': "['title']", 'object_name': 'Download'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('filebrowser.fields.FileBrowseField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'organizations.organization': {
'Meta': {'object_name': 'Organization'},
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'web_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'organizations.sponsor': {
'Meta': {'ordering': "['order', 'name']", 'object_name': 'Sponsor', '_ormbases': ['organizations.Organization']},
'logo': ('filebrowser.fields.FileBrowseField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'organization_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['organizations.Organization']", 'unique': 'True', 'primary_key': 'True'})
},
'people.person': {
'Meta': {'ordering': "['rank', 'last_name', 'first_name']", 'object_name': 'Person'},
'affiliation': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'people'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['organizations.Organization']"}),
'current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'e_mail': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'mid_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'picture': ('filebrowser.fields.FileBrowseField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rank': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'people'", 'null': 'True', 'to': "orm['people.Rank']"}),
'web_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'people.rank': {
'Meta': {'ordering': "['order']", 'object_name': 'Rank'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'plural_name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'projects.project': {
'Meta': {'ordering': "['topic', 'modified', 'created']", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'downloads': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['content.Download']", 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'footer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'projects'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['organizations.Organization']"}),
'people': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['people.Person']"}),
'publications': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publishing.Publication']", 'null': 'True', 'blank': 'True'}),
'redirect_to': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'related_topics': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'secondary_projects'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['projects.Topic']"}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'sponsors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['organizations.Sponsor']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'db_index': 'True'}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['projects.Topic']"})
},
'projects.topic': {
'Meta': {'ordering': "['highlight_order', 'title']", 'object_name': 'Topic'},
'description': ('django.db.models.fields.TextField', [], {}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'highlight_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'db_index': 'True'})
},
'publishing.authorship': {
'Meta': {'ordering': "('order',)", 'object_name': 'Authorship'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']"}),
'publication': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['publishing.Publication']"})
},
'publishing.publication': {
'Meta': {'ordering': "['-year']", 'unique_together': "(('title', 'year'),)", 'object_name': 'Publication'},
'abstract': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'attachment': ('filebrowser.fields.FileBrowseField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'publications'", 'to': "orm['people.Person']", 'through': "orm['publishing.Authorship']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'fulltext': ('filebrowser.fields.FileBrowseField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '512', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'db_index': 'True'})
}
}
complete_apps = ['projects']
| 72.189815 | 245 | 0.596935 |
4f076b98c4c26c36f5b504235743709c31f5cdf3
| 495 |
py
|
Python
|
MDM_Server/log_util2.py
|
AstonZ/iOS_MDM_Guide
|
b3da4fb959485e964802ff18ab5f44abf3d4a4da
|
[
"MIT"
] | 9 |
2019-01-30T08:54:17.000Z
|
2022-02-02T05:38:28.000Z
|
MDM_Server/log_util2.py
|
AstonZ/iOS_MDM_Guide
|
b3da4fb959485e964802ff18ab5f44abf3d4a4da
|
[
"MIT"
] | null | null | null |
MDM_Server/log_util2.py
|
AstonZ/iOS_MDM_Guide
|
b3da4fb959485e964802ff18ab5f44abf3d4a4da
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import logging
import os
from datetime import datetime
from pprint import pprint
def dlog(msg):
logging.debug(msg)
pprint(msg)
def start_logging(title):
today = datetime.now().strftime('%Y-%m-%d')
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
LOG_FILE_NAME = 'logs/' + title + '_' + today + '.log'
logging.basicConfig(filename=LOG_FILE_NAME, level=logging.DEBUG, format=LOG_FORMAT, datefmt=DATE_FORMAT)
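# Usage sketch (illustrative): writes to logs/server_<YYYY-MM-DD>.log and
# mirrors the message to stdout via dlog(); the 'logs' directory and the
# 'server' log title are assumptions made only for this example.
if __name__ == '__main__':
    start_logging('server')
    dlog('MDM server logging initialised')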
| 30.9375 | 108 | 0.678788 |
5006a7f2c77e671e09f94d757455bdecc566265d
| 666 |
py
|
Python
|
manage.py
|
Coding-Dojo-ACC/django-upload
|
54b9e0f8cde00c058c9f5f2dd2bf23dafab59a8b
|
[
"MIT"
] | null | null | null |
manage.py
|
Coding-Dojo-ACC/django-upload
|
54b9e0f8cde00c058c9f5f2dd2bf23dafab59a8b
|
[
"MIT"
] | null | null | null |
manage.py
|
Coding-Dojo-ACC/django-upload
|
54b9e0f8cde00c058c9f5f2dd2bf23dafab59a8b
|
[
"MIT"
] | 1 |
2021-06-11T03:26:48.000Z
|
2021-06-11T03:26:48.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uploadProj.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.956522 | 74 | 0.68018 |
f1f7e98f323901ecaba441dc8b7bcc02a3d5349d
| 2,397 |
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/types/update_read_channel_outbox.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/update_read_channel_outbox.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/types/update_read_channel_outbox.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class UpdateReadChannelOutbox(TLObject): # type: ignore
"""This object is a constructor of the base type :obj:`~pyrogram.raw.base.Update`.
Details:
- Layer: ``117``
- ID: ``0x25d6c9c7``
Parameters:
channel_id: ``int`` ``32-bit``
max_id: ``int`` ``32-bit``
"""
__slots__: List[str] = ["channel_id", "max_id"]
ID = 0x25d6c9c7
QUALNAME = "types.UpdateReadChannelOutbox"
def __init__(self, *, channel_id: int, max_id: int) -> None:
self.channel_id = channel_id # int
self.max_id = max_id # int
@staticmethod
def read(data: BytesIO, *args: Any) -> "UpdateReadChannelOutbox":
# No flags
channel_id = Int.read(data)
max_id = Int.read(data)
return UpdateReadChannelOutbox(channel_id=channel_id, max_id=max_id)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(Int(self.channel_id))
data.write(Int(self.max_id))
return data.getvalue()
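# Serialization round trip (illustrative, not part of the generated schema):
# write() prepends the 4-byte constructor ID, while read() assumes the ID has
# already been consumed, so the example skips the first four bytes. The
# channel_id/max_id values are arbitrary.
if __name__ == "__main__":
    update = UpdateReadChannelOutbox(channel_id=1234567, max_id=42)
    raw_bytes = update.write()
    parsed = UpdateReadChannelOutbox.read(BytesIO(raw_bytes[4:]))
    assert parsed.channel_id == 1234567 and parsed.max_id == 42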
| 31.96 | 103 | 0.620776 |
bc279eb4b59c9a83f3d8d105d000ddd002bc6c47
| 276 |
py
|
Python
|
src/bo4e/enum/energierichtung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/energierichtung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/energierichtung.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
# pylint:disable=missing-module-docstring
from bo4e.enum.strenum import StrEnum
class Energierichtung(StrEnum):
"""
    Specifies the energy direction of a market location and/or metering location (Markt-/Messlokation)
"""
AUSSP = "AUSSP" #: Ausspeisung
EINSP = "EINSP" #: Einspeisung
| 21.230769 | 71 | 0.706522 |
84e72178372d76f6ac7518a8e064bcb778be1ca3
| 1,701 |
py
|
Python
|
app/core/migrations/0001_initial.py
|
somi38/recipe-app-api
|
ccd736dbf6286aa463201c10336001080c013554
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
somi38/recipe-app-api
|
ccd736dbf6286aa463201c10336001080c013554
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
somi38/recipe-app-api
|
ccd736dbf6286aa463201c10336001080c013554
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-10-11 10:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.029412 | 266 | 0.637272 |
c84646eddb5cbbec371c20bfd7e6df90b86156a2
| 2,009 |
py
|
Python
|
SeleniumProject_BrandTest/business/brand_business.py
|
SuperVivian/SeleniumProject_BrandTest
|
8c36854bcbb9bebd0ac4a0a68fc8884f17c108ac
|
[
"MIT"
] | null | null | null |
SeleniumProject_BrandTest/business/brand_business.py
|
SuperVivian/SeleniumProject_BrandTest
|
8c36854bcbb9bebd0ac4a0a68fc8884f17c108ac
|
[
"MIT"
] | null | null | null |
SeleniumProject_BrandTest/business/brand_business.py
|
SuperVivian/SeleniumProject_BrandTest
|
8c36854bcbb9bebd0ac4a0a68fc8884f17c108ac
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from selenium import webdriver
from util.find_element import my_get_element
class BrandBusiness:
def __init__(self,driver):
self.driver = driver
def click_one(self,key):
        element = my_get_element(self.driver, key) # click the brand sale entry
element.click()
return element
def get_brand_expect_name(self,key):
        # get the URL of the brand sale page
pop_brand_a_element = my_get_element(self.driver, key)
a_url = pop_brand_a_element.get_attribute('href')
        # open the URL and read the sale page name
self.driver.get(a_url)
brand_h1_element = my_get_element(self.driver, 'brand_h1')
expect_name = brand_h1_element.get_attribute('textContent')
return expect_name
def get_brand_target_name(self,key):
        # get the URL of the brand sale page
pop_brand_a_element = my_get_element(self.driver, key)
a_url = pop_brand_a_element.get_attribute('href')
        # open the URL and read the sale page name
self.driver.get(a_url)
brand_h1_element = my_get_element(self.driver, 'brand_h1')
expect_name = brand_h1_element.get_attribute('textContent')
return expect_name
def get_text_from_content(self,key):
element = my_get_element(self.driver, key)
text = element.get_attribute('textContent').replace(' ', '').strip()
return text
def get_text_from_img(self,key):
element = my_get_element(self.driver, key)
text = element.get_attribute('alt')[:-2]
return text
def open_url_get_element(self,url_key,target_key):
        # get the URL of the brand sale page
url_element = my_get_element(self.driver, url_key)
url = url_element.get_attribute('href')
        # open the URL and fetch the target element
self.driver.get(url)
element = my_get_element(self.driver, target_key)
return element
def get_element_location(self,key):
element = my_get_element(self.driver, key)
return element.location
def get_strip_text(self,element):
return element.get_attribute('textContent').replace(' ', '').strip()
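# Usage sketch (illustrative): drives the page-object helpers above with a
# real WebDriver. The 'pop_brand' locator key, the target URL and the Chrome
# driver are assumptions made only for this example; my_get_element resolves
# keys from the project's locator configuration.
if __name__ == '__main__':
    driver = webdriver.Chrome()
    driver.get('https://example.com')
    business = BrandBusiness(driver)
    business.click_one('pop_brand')
    print(business.get_brand_expect_name('pop_brand'))
    driver.quit()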
| 34.050847 | 76 | 0.671478 |
edf9cfe351d579e3066e3ef563f02b467dae7f66
| 12,302 |
py
|
Python
|
eval/common.py
|
kikefdezl/RadEfficientDet
|
b72a090ee4302a618cf7580a860530690ab53a8c
|
[
"Apache-2.0"
] | null | null | null |
eval/common.py
|
kikefdezl/RadEfficientDet
|
b72a090ee4302a618cf7580a860530690ab53a8c
|
[
"Apache-2.0"
] | null | null | null |
eval/common.py
|
kikefdezl/RadEfficientDet
|
b72a090ee4302a618cf7580a860530690ab53a8c
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from utils.compute_overlap import compute_overlap
from utils.visualization import draw_detections, draw_annotations
from utils.colors import colors
import numpy as np
import cv2
import progressbar
import os
assert (callable(progressbar.progressbar)), "Using wrong progressbar module, install 'progressbar2' instead."
def _compute_ap(recall, precision):
"""
Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
Args:
recall: The recall curve (list).
precision: The precision curve (list).
Returns:
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
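# Worked example (illustrative): for recall = [0.5, 1.0] and
# precision = [1.0, 0.5], the precision envelope is 1.0 up to recall 0.5 and
# 0.5 afterwards, so _compute_ap returns 0.5 * 1.0 + 0.5 * 0.5 = 0.75.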
def _get_detections(generator, model, score_threshold=0.05, max_detections=100, visualize=False, save_path=None):
"""
Get the detections from the model using the generator.
The result is a list of lists such that the size is:
all_detections[num_images][num_classes] = detections[num_class_detections, 5]
Args:
generator: The generator used to run images through the model.
model: The model to run on the images.
score_threshold: The score confidence threshold to use.
max_detections: The maximum number of detections to use per image.
save_path: The path to save the images with visualized detections to.
Returns:
A list of lists containing the detections for each image in the generator.
"""
all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in
range(generator.size())]
for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '):
image = generator.load_image(i)
radar_images = generator.load_radar_images(i)
src_image = image.copy()
src_radar_images = [ri.copy() for ri in radar_images]
h, w = image.shape[:2]
anchors = generator.anchors
image, radar_images, scale = generator.preprocess_image(image, radar_images)
# run network
boxes, scores, *_, labels = model.predict_on_batch([np.expand_dims(image, axis=0),
np.expand_dims(radar_images[0], axis=0),
np.expand_dims(radar_images[1], axis=0),
np.expand_dims(radar_images[2], axis=0),
np.expand_dims(radar_images[3], axis=0),
np.expand_dims(radar_images[4], axis=0)])
boxes /= scale
boxes[:, :, 0] = np.clip(boxes[:, :, 0], 0, w - 1)
boxes[:, :, 1] = np.clip(boxes[:, :, 1], 0, h - 1)
boxes[:, :, 2] = np.clip(boxes[:, :, 2], 0, w - 1)
boxes[:, :, 3] = np.clip(boxes[:, :, 3], 0, h - 1)
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > score_threshold)[0]
# select those scores
scores = scores[0][indices]
# find the order with which to sort the scores
scores_sort = np.argsort(-scores)[:max_detections]
# select detections
# (n, 4)
image_boxes = boxes[0, indices[scores_sort], :]
# (n, )
image_scores = scores[scores_sort]
# (n, )
image_labels = labels[0, indices[scores_sort]]
# (n, 6)
detections = np.concatenate(
[image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
if save_path:
draw_annotations(src_image, generator.load_annotations(i), label_to_name=generator.label_to_name)
draw_detections(src_image, detections[:5, :4], detections[:5, 4], detections[:5, 5].astype(np.int32),
colors, label_to_name=generator.label_to_name, score_threshold=score_threshold)
fused_radar_img = np.expand_dims(src_radar_images[0], axis=2)
fused_radar_img = cv2.cvtColor(fused_radar_img, cv2.COLOR_GRAY2BGR)
for ri in src_radar_images[1:]:
ri = np.expand_dims(ri, axis=2)
ri = cv2.cvtColor(ri, cv2.COLOR_GRAY2BGR)
fused_radar_img = cv2.add(fused_radar_img, ri)
merged_radar_image = cv2.add(fused_radar_img, src_image)
src_image = cv2.addWeighted(merged_radar_image, 0.5, src_image, 0.5, 0)
cv2.imwrite(os.path.join(save_path, f'{i}.jpg'), src_image)
# copy detections to all_detections
for class_id in range(generator.num_classes()):
all_detections[i][class_id] = detections[detections[:, -1] == class_id, :-1]
return all_detections
def _get_annotations(generator):
"""
Get the ground truth annotations from the generator.
The result is a list of lists such that the size is:
all_annotations[num_images][num_classes] = annotations[num_class_annotations, 5]
Args:
generator: The generator used to retrieve ground truth annotations.
Returns:
A list of lists containing the annotations for each image in the generator.
"""
all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
for i in progressbar.progressbar(range(generator.size()), prefix='Parsing annotations: '):
# load the annotations
annotations = generator.load_annotations(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
if not generator.has_label(label):
continue
all_annotations[i][label] = annotations['bboxes'][annotations['labels'] == label, :].copy()
return all_annotations
def evaluate(
generator,
model,
iou_threshold=0.5,
score_threshold=0.01,
max_detections=100,
visualize=False,
epoch=0,
save_path=None
):
"""
Evaluate a given dataset using a given model.
Args:
generator: The generator that represents the dataset to evaluate.
model: The model to evaluate.
iou_threshold: The threshold used to consider when a detection is positive or negative.
score_threshold: The score confidence threshold to use for detections.
max_detections: The maximum number of detections to use per image.
visualize: Show the visualized detections or not.
Returns:
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections,
visualize=visualize, save_path=save_path)
all_annotations = _get_annotations(generator)
average_precisions = {}
num_tp = 0
num_fp = 0
# process detections and annotations
for label in range(generator.num_classes()):
if not generator.has_label(label):
continue
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0, 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
if false_positives.shape[0] == 0:
num_fp += 0
else:
num_fp += false_positives[-1]
if true_positives.shape[0] == 0:
num_tp += 0
else:
num_tp += true_positives[-1]
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = _compute_ap(recall, precision)
average_precisions[label] = average_precision, num_annotations
print('num_fp={}, num_tp={}'.format(num_fp, num_tp))
return average_precisions
if __name__ == '__main__':
from generators.pascal import PascalVocGenerator
from model import efficientdet
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 1
weighted_bifpn = False
common_args = {
'batch_size': 1,
'phi': phi,
}
test_generator = PascalVocGenerator(
'datasets/VOC2007',
'test',
shuffle_groups=False,
skip_truncated=False,
skip_difficult=True,
**common_args
)
model_path = 'checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5'
input_shape = (test_generator.image_size, test_generator.image_size)
anchors = test_generator.anchors
num_classes = test_generator.num_classes()
model, prediction_model = efficientdet(phi=phi, num_classes=num_classes, weighted_bifpn=weighted_bifpn)
prediction_model.load_weights(model_path, by_name=True)
average_precisions = evaluate(test_generator, prediction_model, visualize=False)
# compute per class average precision
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
print('{:.0f} instances of class'.format(num_annotations), test_generator.label_to_name(label),
'with average precision: {:.4f}'.format(average_precision))
total_instances.append(num_annotations)
precisions.append(average_precision)
mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
print('mAP: {:.4f}'.format(mean_ap))
| 38.564263 | 118 | 0.636075 |
92bedbbb0fa29480abb1f954e50d5ff57f1e67d7
| 29,201 |
py
|
Python
|
VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/core/testgroup.py
|
egraba/vbox_openbsd
|
6cb82f2eed1fa697d088cecc91722b55b19713c2
|
[
"MIT"
] | 1 |
2015-04-30T14:18:45.000Z
|
2015-04-30T14:18:45.000Z
|
VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/core/testgroup.py
|
egraba/vbox_openbsd
|
6cb82f2eed1fa697d088cecc91722b55b19713c2
|
[
"MIT"
] | null | null | null |
VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/core/testgroup.py
|
egraba/vbox_openbsd
|
6cb82f2eed1fa697d088cecc91722b55b19713c2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# $Id: testgroup.py $
"""
Test Manager - Test groups management.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Standard python imports.
import unittest;
# Validation Kit imports.
from testmanager.core.base import ModelDataBase, ModelDataBaseTestCase, ModelLogicBase, TMExceptionBase
from testmanager.core.testcase import TestCaseData, TestCaseDataEx;
class TestGroupMemberData(ModelDataBase):
"""Representation of a test group member database row."""
ksParam_idTestGroup = 'TestGroupMember_idTestGroup';
ksParam_idTestCase = 'TestGroupMember_idTestCase';
ksParam_tsEffective = 'TestGroupMember_tsEffective';
ksParam_tsExpire = 'TestGroupMember_tsExpire';
ksParam_uidAuthor = 'TestGroupMember_uidAuthor';
ksParam_iSchedPriority = 'TestGroupMember_iSchedPriority';
ksParam_aidTestCaseArgs = 'TestGroupMember_aidTestCaseArgs';
kasAllowNullAttributes = ['idTestGroup', 'idTestCase', 'tsEffective', 'tsExpire', 'uidAuthor', 'aidTestCaseArgs' ];
kiMin_iSchedPriority = 0;
kiMax_iSchedPriority = 31;
def __init__(self):
ModelDataBase.__init__(self)
#
# Initialize with defaults.
# See the database for explanations of each of these fields.
#
self.idTestGroup = None;
self.idTestCase = None;
self.tsEffective = None;
self.tsExpire = None;
self.uidAuthor = None;
self.iSchedPriority = 16;
self.aidTestCaseArgs = None;
def initFromDbRow(self, aoRow):
"""
        Reinitialize from a SELECT * FROM TestGroupMembers row.
Return self. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test group member not found.')
self.idTestGroup = aoRow[0];
self.idTestCase = aoRow[1];
self.tsEffective = aoRow[2];
self.tsExpire = aoRow[3];
self.uidAuthor = aoRow[4];
self.iSchedPriority = aoRow[5];
self.aidTestCaseArgs = aoRow[6];
return self
def getAttributeParamNullValues(self, sAttr):
# Arrays default to [] as NULL currently. That doesn't work for us.
if sAttr == 'aidTestCaseArgs':
aoNilValues = [None, '-1'];
else:
aoNilValues = ModelDataBase.getAttributeParamNullValues(self, sAttr);
return aoNilValues;
def _validateAndConvertAttribute(self, sAttr, sParam, oValue, aoNilValues, fAllowNull, oDb):
if sAttr != 'aidTestCaseArgs':
return ModelDataBase._validateAndConvertAttribute(self, sAttr, sParam, oValue, aoNilValues, fAllowNull, oDb);
# -1 is a special value, which when present make the whole thing NULL (None).
(aidVariations, sError) = self.validateListOfInts(oValue, aoNilValues = aoNilValues, fAllowNull = fAllowNull,
iMin = -1, iMax = 0x7ffffffe);
if sError is None:
if aidVariations is None:
pass;
elif -1 in aidVariations:
aidVariations = None;
elif 0 in aidVariations:
                sError = 'Invalid test case variation ID #0.';
else:
aidVariations = sorted(aidVariations);
return (aidVariations, sError);
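    # Behaviour of the special -1 marker (comment added for clarity, not part
    # of the original source): a variation ID list containing -1 collapses the
    # whole attribute to None (meaning "all variations"), a list containing 0
    # is rejected as an invalid variation ID, and any other list is returned
    # sorted.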
class TestGroupMemberDataEx(TestGroupMemberData):
"""Extended representation of a test group member."""
def __init__(self):
"""Extend parent class"""
TestGroupMemberData.__init__(self)
self.oTestCase = None; # TestCaseDataEx.
def initFromDbRowEx(self, aoRow, oDb, tsNow = None):
"""
Reinitialize from a SELECT * FROM TestGroupMembers, TestCases row.
Will query the necessary additional data from oDb using tsNow.
Returns self. Raises exception if no row or database error.
"""
TestGroupMemberData.initFromDbRow(self, aoRow);
self.oTestCase = TestCaseDataEx();
self.oTestCase.initFromDbRowEx(aoRow[7:], oDb, tsNow);
return self;
def initFromParams(self, oDisp, fStrict = True):
self.oTestCase = None;
return TestGroupMemberData.initFromParams(self, oDisp, fStrict);
def getDataAttributes(self):
asAttributes = TestGroupMemberData.getDataAttributes(self);
asAttributes.remove('oTestCase');
return asAttributes;
def _validateAndConvertWorker(self, asAllowNullAttributes, oDb):
dErrors = TestGroupMemberData._validateAndConvertWorker(self, asAllowNullAttributes, oDb);
if self.ksParam_idTestCase not in dErrors:
self.oTestCase = TestCaseDataEx()
try:
self.oTestCase.initFromDbWithId(oDb, self.idTestCase);
except Exception as oXcpt:
self.oTestCase = TestCaseDataEx()
dErrors[self.ksParam_idTestCase] = str(oXcpt);
return dErrors;
class TestGroupMemberData2(TestCaseData):
"""Special representation of a Test Group Member item"""
def __init__(self):
"""Extend parent class"""
TestCaseData.__init__(self)
self.idTestGroup = None
self.aidTestCaseArgs = []
def initFromDbRowEx(self, aoRow):
"""
Reinitialize from a :WRONG QUERY:
SELECT TestCases.idTestCase,
TestGroupMembers.tsEffective,
TestGroupMembers.tsExpire,
TestGroupMembers.uidAuthor,
TestCases.idGenTestCase,
TestCases.sName,
TestCases.sDescription,
TestCases.fEnabled,
TestCases.cSecTimeout,
TestCases.sBaseCmd,
TestCases.sValidationKitZips,
TestGroupMembers.idTestGroup,
TestGroupMembers.aidTestCaseArgs
FROM TestCases, TestGroupMembers
WHERE TestCases.idTestCase = TestGroupMembers.idTestCase
..row. Represents complete test group member (test case) info.
Returns object of type TestGroupMemberData2. Raises exception if no row.
"""
TestCaseData.initFromDbRow(self, aoRow);
self.idTestGroup = aoRow[-2]
self.aidTestCaseArgs = aoRow[-1]
return self;
class TestGroupData(ModelDataBase):
"""
Test group data.
"""
ksIdAttr = 'idTestGroup';
ksParam_idTestGroup = 'TestGroup_idTestGroup'
ksParam_tsEffective = 'TestGroup_tsEffective'
ksParam_tsExpire = 'TestGroup_tsExpire'
ksParam_uidAuthor = 'TestGroup_uidAuthor'
ksParam_sName = 'TestGroup_sName'
ksParam_sDescription = 'TestGroup_sDescription'
kasAllowNullAttributes = ['idTestGroup', 'tsEffective', 'tsExpire', 'uidAuthor', 'sDescription' ];
def __init__(self):
ModelDataBase.__init__(self);
#
# Initialize with defaults.
# See the database for explanations of each of these fields.
#
self.idTestGroup = None
self.tsEffective = None
self.tsExpire = None
self.uidAuthor = None
self.sName = None
self.sDescription = None
def initFromDbRow(self, aoRow):
"""
Reinitialize from a SELECT * FROM TestGroups row.
Returns object of type TestGroupData. Raises exception if no row.
"""
if aoRow is None:
raise TMExceptionBase('Test group not found.')
self.idTestGroup = aoRow[0]
self.tsEffective = aoRow[1]
self.tsExpire = aoRow[2]
self.uidAuthor = aoRow[3]
self.sName = aoRow[4]
self.sDescription = aoRow[5]
return self
def initFromDbWithId(self, oDb, idTestGroup, tsNow = None, sPeriodBack = None):
"""
Initialize the object from the database.
"""
oDb.execute(self.formatSimpleNowAndPeriodQuery(oDb,
'SELECT *\n'
'FROM TestGroups\n'
'WHERE idTestGroup = %s\n'
, ( idTestGroup,), tsNow, sPeriodBack));
aoRow = oDb.fetchOne()
if aoRow is None:
raise TMExceptionBase('idTestGroup=%s not found (tsNow=%s sPeriodBack=%s)' % (idTestGroup, tsNow, sPeriodBack,));
return self.initFromDbRow(aoRow);
class TestGroupDataEx(TestGroupData):
"""
Extended test group data.
"""
ksParam_aoMembers = 'TestGroupDataEx_aoMembers';
kasAltArrayNull = [ 'aoMembers', ];
## Helper parameter containing the comma separated list with the IDs of
# potential members found in the parameters.
ksParam_aidTestCases = 'TestGroupDataEx_aidTestCases';
def __init__(self):
TestGroupData.__init__(self);
self.aoMembers = []; # TestGroupMemberDataEx.
def _initExtraMembersFromDb(self, oDb, tsNow = None, sPeriodBack = None):
"""
Worker shared by the initFromDb* methods.
Returns self. Raises exception if no row or database error.
"""
self.aoMembers = [];
_ = sPeriodBack; ## @todo sPeriodBack
if tsNow is None:
oDb.execute('SELECT TestGroupMembers.*, TestCases.*\n'
'FROM TestGroupMembers\n'
'LEFT OUTER JOIN TestCases ON (\n'
' TestGroupMembers.idTestCase = TestCases.idTestCase\n'
' AND TestCases.tsExpire = \'infinity\'::TIMESTAMP)\n'
'WHERE TestGroupMembers.idTestGroup = %s\n'
' AND TestGroupMembers.tsExpire = \'infinity\'::TIMESTAMP\n'
'ORDER BY TestCases.sName, TestCases.idTestCase\n'
, (self.idTestGroup,));
else:
oDb.execute('SELECT TestGroupMembers.*, TestCases.*\n'
'FROM TestGroupMembers\n'
'LEFT OUTER JOIN TestCases ON (\n'
' TestGroupMembers.idTestCase = TestCases.idTestCase\n'
' AND TestCases.tsExpire > %s\n'
' AND TestCases.tsEffective <= %s)\n'
'WHERE TestGroupMembers.idTestGroup = %s\n'
' AND TestGroupMembers.tsExpire > %s\n'
' AND TestGroupMembers.tsEffective <= %s\n'
'ORDER BY TestCases.sName, TestCases.idTestCase\n'
, (tsNow, tsNow, self.idTestGroup, tsNow, tsNow));
for aoRow in oDb.fetchAll():
self.aoMembers.append(TestGroupMemberDataEx().initFromDbRowEx(aoRow, oDb, tsNow));
return self;
def initFromDbRowEx(self, aoRow, oDb, tsNow = None, sPeriodBack = None):
"""
Reinitialize from a SELECT * FROM TestGroups row. Will query the
necessary additional data from oDb using tsNow.
Returns self. Raises exception if no row or database error.
"""
TestGroupData.initFromDbRow(self, aoRow);
return self._initExtraMembersFromDb(oDb, tsNow, sPeriodBack);
def initFromDbWithId(self, oDb, idTestGroup, tsNow = None, sPeriodBack = None):
"""
Initialize the object from the database.
"""
TestGroupData.initFromDbWithId(self, oDb, idTestGroup, tsNow, sPeriodBack);
return self._initExtraMembersFromDb(oDb, tsNow, sPeriodBack);
def getAttributeParamNullValues(self, sAttr):
if sAttr != 'aoMembers':
return TestGroupData.getAttributeParamNullValues(self, sAttr);
return ['', [], None];
def convertParamToAttribute(self, sAttr, sParam, oValue, oDisp, fStrict):
if sAttr != 'aoMembers':
return TestGroupData.convertParamToAttribute(self, sAttr, sParam, oValue, oDisp, fStrict);
aoNewValue = [];
aidSelected = oDisp.getListOfIntParams(sParam, iMin = 1, iMax = 0x7ffffffe, aiDefaults = [])
sIds = oDisp.getStringParam(self.ksParam_aidTestCases, sDefault = '');
for idTestCase in sIds.split(','):
try: idTestCase = int(idTestCase);
except: pass;
oDispWrapper = self.DispWrapper(oDisp, '%s[%s][%%s]' % (TestGroupDataEx.ksParam_aoMembers, idTestCase,))
oMember = TestGroupMemberDataEx().initFromParams(oDispWrapper, fStrict = False);
if idTestCase in aidSelected:
aoNewValue.append(oMember);
return aoNewValue;
def _validateAndConvertAttribute(self, sAttr, sParam, oValue, aoNilValues, fAllowNull, oDb):
if sAttr != 'aoMembers':
return TestGroupData._validateAndConvertAttribute(self, sAttr, sParam, oValue, aoNilValues, fAllowNull, oDb);
asErrors = [];
aoNewMembers = [];
for oOldMember in oValue:
oNewMember = TestGroupMemberDataEx().initFromOther(oOldMember);
aoNewMembers.append(oNewMember);
dErrors = oNewMember.validateAndConvert(oDb);
if len(dErrors) > 0:
asErrors.append(str(dErrors));
if len(asErrors) == 0:
for i in range(len(aoNewMembers)):
                idTestCase = aoNewMembers[i].idTestCase;
for j in range(i + 1, len(aoNewMembers)):
if aoNewMembers[j].idTestCase == idTestCase:
asErrors.append('Duplicate testcase #%d!' % (idTestCase, ));
break;
return (aoNewMembers, None if len(asErrors) == 0 else '<br>\n'.join(asErrors));
class TestGroupLogic(ModelLogicBase):
"""
Test case management logic.
"""
#
# Standard methods.
#
def fetchForListing(self, iStart, cMaxRows, tsNow):
"""
Fetches test groups.
Returns an array (list) of TestGroupDataEx items, empty list if none.
Raises exception on error.
"""
if tsNow is None:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire = \'infinity\'::TIMESTAMP\n'
'ORDER BY sName ASC\n'
'LIMIT %s OFFSET %s\n'
, (cMaxRows, iStart,));
else:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire > %s\n'
' AND tsEffective <= %s\n'
'ORDER BY sName ASC\n'
'LIMIT %s OFFSET %s\n'
, (tsNow, tsNow, cMaxRows, iStart,));
aoRet = [];
for aoRow in self._oDb.fetchAll():
aoRet.append(TestGroupDataEx().initFromDbRowEx(aoRow, self._oDb, tsNow));
return aoRet;
def addEntry(self, oData, uidAuthor, fCommit = False):
"""
Adds a testgroup to the database.
"""
#
# Validate inputs.
#
assert isinstance(oData, TestGroupDataEx);
dErrors = oData.validateAndConvert(self._oDb);
if len(dErrors) > 0:
raise TMExceptionBase('addEntry invalid input: %s' % (dErrors,));
self._assertUniq(oData, None);
#
# Do the job.
#
self._oDb.execute('INSERT INTO TestGroups (uidAuthor, sName, sDescription)\n'
'VALUES (%s, %s, %s)\n'
'RETURNING idTestGroup\n'
, ( uidAuthor,
oData.sName,
oData.sDescription,));
idTestGroup = self._oDb.fetchOne()[0];
oData.idTestGroup = idTestGroup;
for oMember in oData.aoMembers:
oMember.idTestGroup = idTestGroup;
self._insertTestGroupMember(uidAuthor, oMember)
self._oDb.maybeCommit(fCommit);
return True;
def editEntry(self, oData, uidAuthor, fCommit = False):
"""
Modifies a test group.
"""
#
# Validate inputs and read in the old(/current) data.
#
assert isinstance(oData, TestGroupDataEx);
dErrors = oData.validateAndConvert(self._oDb);
if len(dErrors) > 0:
raise TMExceptionBase('editEntry invalid input: %s' % (dErrors,));
self._assertUniq(oData, oData.idTestGroup);
oOldData = TestGroupDataEx().initFromDbWithId(self._oDb, oData.idTestGroup);
#
# Update the data that needs updating.
#
if not oData.isEqualEx(oOldData, [ 'aoMembers', 'tsEffective', 'tsExpire', 'uidAuthor', ]):
self._historizeTestGroup(oData.idTestGroup);
self._oDb.execute('INSERT INTO TestGroups\n'
' (uidAuthor, idTestGroup, sName, sDescription)\n'
'VALUES (%s, %s, %s, %s)\n'
, ( uidAuthor,
oData.idTestGroup,
oData.sName,
oData.sDescription, ));
# Create a lookup dictionary for old entries.
dOld = {};
for oOld in oOldData.aoMembers:
dOld[oOld.idTestCase] = oOld;
assert len(dOld) == len(oOldData.aoMembers);
# Add new members, updated existing ones.
dNew = {};
for oNewMember in oData.aoMembers:
oNewMember.idTestGroup = oData.idTestGroup;
if oNewMember.idTestCase in dNew:
raise TMExceptionBase('Duplicate test group member: idTestCase=%d (%s / %s)'
% (oNewMember.idTestCase, oNewMember, dNew[oNewMember.idTestCase],));
dNew[oNewMember.idTestCase] = oNewMember;
oOldMember = dOld.get(oNewMember.idTestCase, None);
if oOldMember is not None:
if oNewMember.isEqualEx(oOldMember, [ 'uidAuthor', 'tsEffective', 'tsExpire' ]):
continue; # Skip, nothing changed.
self._historizeTestGroupMember(oData.idTestGroup, oNewMember.idTestCase);
self._insertTestGroupMember(uidAuthor, oNewMember);
# Expire members that have been removed.
sQuery = self._oDb.formatBindArgs('UPDATE TestGroupMembers\n'
'SET tsExpire = CURRENT_TIMESTAMP\n'
'WHERE idTestGroup = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
, ( oData.idTestGroup, ));
if len(dNew) > 0:
sQuery += ' AND idTestCase NOT IN (%s)' % (', '.join([str(iKey) for iKey in dNew.keys()]),);
self._oDb.execute(sQuery);
self._oDb.maybeCommit(fCommit);
return True;
def removeEntry(self, uidAuthor, idTestGroup, fCascade = False, fCommit = False):
"""
Deletes a test group.
"""
_ = uidAuthor; ## @todo record uidAuthor.
#
# Cascade.
#
if fCascade is not True:
self._oDb.execute('SELECT SchedGroups.idSchedGroup, SchedGroups.sName\n'
'FROM SchedGroupMembers, SchedGroups\n'
'WHERE SchedGroupMembers.idTestGroup = %s\n'
' AND SchedGroupMembers.tsExpire = \'infinity\'::TIMESTAMP\n'
' AND SchedGroups.idSchedGroup = SchedGroupMembers.idSchedGroup\n'
' AND SchedGroups.tsExpire = \'infinity\'::TIMESTAMP\n'
, ( idTestGroup, ));
aoGroups = self._oDb.fetchAll();
if len(aoGroups) > 0:
asGroups = ['%s (#%d)' % (sName, idSchedGroup) for idSchedGroup, sName in aoGroups];
                raise TMExceptionBase('Test group #%d is a member of one or more scheduling groups: %s'
% (idTestGroup, ', '.join(asGroups),));
else:
self._oDb.execute('UPDATE SchedGroupMembers\n'
'SET tsExpire = CURRENT_TIMESTAMP\n'
'WHERE idTestGroup = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
, ( idTestGroup, ));
#
# Remove the group.
#
self._oDb.execute('UPDATE TestGroupMembers\n'
'SET tsExpire = CURRENT_TIMESTAMP\n'
'WHERE idTestGroup = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
, (idTestGroup,))
self._oDb.execute('UPDATE TestGroups\n'
'SET tsExpire = CURRENT_TIMESTAMP\n'
'WHERE idTestGroup = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
, (idTestGroup,))
self._oDb.maybeCommit(fCommit)
return True;
#
# Other methods.
#
def fetchOrderedByName(self, tsNow = None):
"""
Return list of objects of type TestGroupData ordered by name.
May raise exception on database error.
"""
if tsNow is None:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire = \'infinity\'::TIMESTAMP\n'
'ORDER BY sName ASC\n');
else:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire > %s\n'
' AND tsEffective <= %s\n'
'ORDER BY sName ASC\n'
, (tsNow, tsNow,));
aoRet = []
for _ in range(self._oDb.getRowCount()):
aoRet.append(TestGroupData().initFromDbRow(self._oDb.fetchOne()));
return aoRet;
def getMembers(self, idTestGroup):
"""
        Fetches all test case records from the DB which
        belong to the current Test Group.
Returns list of objects of type TestGroupMemberData2 (!).
"""
self._oDb.execute('SELECT TestCases.*,\n'
' TestGroupMembers.idTestGroup,\n'
' TestGroupMembers.aidTestCaseArgs\n'
'FROM TestCases, TestGroupMembers\n'
'WHERE TestCases.tsExpire = \'infinity\'::TIMESTAMP\n'
' AND TestGroupMembers.tsExpire = \'infinity\'::TIMESTAMP\n'
' AND TestGroupMembers.idTestCase = TestCases.idTestCase\n'
' AND TestGroupMembers.idTestGroup = %s\n'
'ORDER BY TestCases.idTestCase ASC;',
(idTestGroup,))
aaoRows = self._oDb.fetchAll()
aoRet = []
for aoRow in aaoRows:
aoRet.append(TestGroupMemberData2().initFromDbRowEx(aoRow))
return aoRet
def getAll(self, tsNow=None):
"""Return list of objects of type TestGroupData"""
if tsNow is None:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire = \'infinity\'::TIMESTAMP\n'
'ORDER BY idTestGroup ASC;')
else:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire > %s\n'
' AND tsEffective <= %s\n'
'ORDER BY idTestGroup ASC;',
(tsNow, tsNow))
aaoRows = self._oDb.fetchAll()
aoRet = []
for aoRow in aaoRows:
aoRet.append(TestGroupData().initFromDbRow(aoRow))
return aoRet
def getById(self, idTestGroup, tsNow=None):
"""Get Test Group data by its ID"""
if tsNow is None:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire = \'infinity\'::timestamp\n'
' AND idTestGroup = %s\n'
'ORDER BY idTestGroup ASC;', (idTestGroup,))
else:
self._oDb.execute('SELECT *\n'
'FROM TestGroups\n'
'WHERE tsExpire > %s\n'
' AND tsEffective <= %s\n'
' AND idTestGroup = %s\n'
'ORDER BY idTestGroup ASC;',
(tsNow, tsNow, idTestGroup))
aRows = self._oDb.fetchAll()
if len(aRows) not in (0, 1):
            raise TMExceptionBase('Found more than one test group with the same ID. Database structure is corrupted.')
try:
return TestGroupData().initFromDbRow(aRows[0])
except IndexError:
return None
#
# Helpers.
#
def _assertUniq(self, oData, idTestGroupIgnore):
""" Checks that the test group name is unique, raises exception if it isn't. """
self._oDb.execute('SELECT idTestGroup\n'
'FROM TestGroups\n'
'WHERE sName = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
+ ('' if idTestGroupIgnore is None else ' AND idTestGroup <> %d\n' % (idTestGroupIgnore,))
, ( oData.sName, ))
if self._oDb.getRowCount() > 0:
            raise TMExceptionBase('A test group with the name "%s" already exists.' % (oData.sName,));
return True;
def _historizeTestGroup(self, idTestGroup):
""" Historize Test Group record. """
self._oDb.execute('UPDATE TestGroups\n'
'SET tsExpire = CURRENT_TIMESTAMP\n'
'WHERE idTestGroup = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
, ( idTestGroup, ));
return True;
def _historizeTestGroupMember(self, idTestGroup, idTestCase):
""" Historize Test Group Member record. """
self._oDb.execute('UPDATE TestGroupMembers\n'
'SET tsExpire = CURRENT_TIMESTAMP\n'
'WHERE idTestGroup = %s\n'
' AND idTestCase = %s\n'
' AND tsExpire = \'infinity\'::timestamp\n'
, (idTestGroup, idTestCase,));
return True;
def _insertTestGroupMember(self, uidAuthor, oMember):
""" Inserts a test group member. """
self._oDb.execute('INSERT INTO TestGroupMembers\n'
' (uidAuthor, idTestGroup, idTestCase, iSchedPriority, aidTestCaseArgs)\n'
'VALUES (%s, %s, %s, %s, %s)\n'
, ( uidAuthor,
oMember.idTestGroup,
oMember.idTestCase,
oMember.iSchedPriority,
oMember.aidTestCaseArgs, ));
return True;
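# Illustrative usage sketch (not part of the original module): it assumes an open
# database connection object oDb as used by the logic classes, that ModelLogicBase
# takes that connection in its constructor, and an already validated
# TestGroupDataEx instance oData.
#
#   oLogic   = TestGroupLogic(oDb);
#   oLogic.addEntry(oData, uidAuthor = 1, fCommit = True);
#   aoGroups = oLogic.fetchOrderedByName();
#   aoMembers = oLogic.getMembers(oData.idTestGroup);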
#
# Unit testing.
#
# pylint: disable=C0111
class TestGroupMemberDataTestCase(ModelDataBaseTestCase):
def setUp(self):
self.aoSamples = [TestGroupMemberData(),];
class TestGroupDataTestCase(ModelDataBaseTestCase):
def setUp(self):
self.aoSamples = [TestGroupData(),];
if __name__ == '__main__':
unittest.main();
# not reached.
| 40.613352 | 128 | 0.5457 |
f5021ea500e0de7ae12ef39785e771c2b3b97139
| 569 |
py
|
Python
|
desktop/core/ext-py/Babel-2.5.1/tests/test_lists.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 5,079 |
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/Babel-2.5.1/tests/test_lists.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623 |
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/Babel-2.5.1/tests/test_lists.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033 |
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# coding=utf-8
from babel import lists
def test_format_list():
for list, locale, expected in [
([], 'en', ''),
([u'string'], 'en', u'string'),
(['string1', 'string2'], 'en', u'string1 and string2'),
(['string1', 'string2', 'string3'], 'en', u'string1, string2, and string3'),
(['string1', 'string2', 'string3'], 'zh', u'string1、string2和string3'),
(['string1', 'string2', 'string3', 'string4'], 'ne', u'string1 र string2, string3 र string4'),
]:
assert lists.format_list(list, locale=locale) == expected
| 37.933333 | 102 | 0.571178 |
b56a3ef0abb67564b748f07b2887b28f6df357f9
| 7,977 |
py
|
Python
|
tests/unit/drivers/test_recursive_traversal_tree.py
|
jancijen/jina
|
def98c9656e1fabdcd0622e79fe9e7cb660e2116
|
[
"Apache-2.0"
] | 1 |
2020-10-23T03:01:56.000Z
|
2020-10-23T03:01:56.000Z
|
tests/unit/drivers/test_recursive_traversal_tree.py
|
normalcereal/jina
|
d95e4e72e7c8acb0278ea049aa56905ba97271d3
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/drivers/test_recursive_traversal_tree.py
|
normalcereal/jina
|
d95e4e72e7c8acb0278ea049aa56905ba97271d3
|
[
"Apache-2.0"
] | null | null | null |
import os
from jina.proto import jina_pb2
from jina.drivers import BaseRecursiveDriver
cur_dir = os.path.dirname(os.path.abspath(__file__))
DOCUMENTS_PER_LEVEL = 1
class AppendOneChunkTwoMatchesCrafter(BaseRecursiveDriver):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.is_apply = False
self._use_tree_traversal = True
def _apply_all(self, docs, *args, **kwargs) -> None:
for doc in docs:
add_chunk(doc)
add_match(doc)
add_match(doc)
def add_chunk(doc):
chunk = doc.chunks.add()
chunk.granularity = doc.granularity + 1
chunk.adjacency = doc.adjacency
return chunk
def add_match(doc):
match = doc.matches.add()
match.granularity = doc.granularity
match.adjacency = doc.adjacency + 1
return match
def build_docs():
""" Builds up a complete chunk-match structure, with a depth of 2 in both directions recursively. """
max_granularity = 2
max_adjacency = 2
def iterate_build(document, current_granularity, current_adjacency):
if current_granularity < max_granularity:
for i in range(DOCUMENTS_PER_LEVEL):
chunk = add_chunk(document)
iterate_build(chunk, chunk.granularity, chunk.adjacency)
if current_adjacency < max_adjacency:
for i in range(DOCUMENTS_PER_LEVEL):
match = add_match(document)
iterate_build(match, match.granularity, match.adjacency)
docs = []
for base_id in range(DOCUMENTS_PER_LEVEL):
document = jina_pb2.Document()
document.granularity = 0
document.adjacency = 0
docs.append(document)
iterate_build(document, 0, 0)
return docs
def apply_traversal_path(traversal_paths):
docs = build_docs()
driver = AppendOneChunkTwoMatchesCrafter(traversal_paths=traversal_paths)
driver._traverse_apply(docs)
return docs
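# Quick sketch of the traversal-path notation exercised below (a reading aid, not
# part of the original test module): each letter selects one level of the tree,
#   'r'  -> the crafter is applied to the root documents themselves,
#   'c'  -> to their chunks, 'm' -> to their matches,
#   'cm' -> to the matches of the chunks, and so on.
# e.g. apply_traversal_path(['r', 'c']) applies it at the root and at its chunks.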
def test_only_root():
docs = apply_traversal_path(['r'])
assert len(docs) == 1
assert len(docs[0].chunks) == 2
assert len(docs[0].chunks[0].chunks) == 1
assert len(docs[0].chunks[0].chunks[0].matches) == 1
assert len(docs[0].chunks[0].matches) == 1
assert len(docs[0].matches) == 3
assert len(docs[0].matches[0].chunks) == 1
def test_only_matches():
docs = apply_traversal_path(['m'])
assert len(docs) == 1
assert len(docs[0].chunks) == 1
assert len(docs[0].chunks[0].matches) == 1
assert len(docs[0].matches) == 1
assert len(docs[0].matches[0].chunks) == 2
assert len(docs[0].matches[0].matches) == 3
assert len(docs[0].matches[0].matches[0].chunks) == 1
def test_only_chunks():
docs = apply_traversal_path(['c'])
assert len(docs) == 1
assert len(docs[0].chunks) == 1
assert len(docs[0].chunks[0].chunks) == 2
assert len(docs[0].chunks[0].matches) == 3
assert len(docs[0].matches) == 1
assert len(docs[0].matches[0].chunks) == 1
assert len(docs[0].matches[0].matches) == 1
assert len(docs[0].matches[0].matches[0].chunks) == 1
def test_match_chunk():
docs = apply_traversal_path(['mc'])
assert len(docs) == 1
assert len(docs[0].chunks) == 1
assert len(docs[0].chunks[0].matches) == 1
assert len(docs[0].matches) == 1
assert len(docs[0].matches[0].chunks) == 1
assert len(docs[0].matches[0].chunks[0].chunks) == 2
assert len(docs[0].matches[0].matches) == 1
assert len(docs[0].matches[0].matches[0].chunks) == 1
def test_chunk_match():
docs = apply_traversal_path(['cm'])
assert len(docs) == 1
assert len(docs[0].chunks) == 1
assert len(docs[0].chunks[0].matches) == 1
assert len(docs[0].chunks[0].matches[0].chunks) == 2
assert len(docs[0].matches) == 1
assert len(docs[0].matches[0].chunks) == 1
assert len(docs[0].matches[0].matches) == 1
assert len(docs[0].matches[0].matches[0].chunks) == 1
def test_multi_paths():
docs = apply_traversal_path(['cc', 'mm'])
assert len(docs) == 1
assert len(docs[0].chunks) == 1
assert len(docs[0].chunks[0].matches) == 1
assert len(docs[0].chunks[0].chunks) == 1
assert len(docs[0].chunks[0].chunks[0].chunks) == 1
assert len(docs[0].matches) == 1
assert len(docs[0].matches[0].chunks) == 1
assert len(docs[0].matches[0].matches) == 1
assert len(docs[0].matches[0].matches[0].chunks) == 2
def test_both_from_0():
docs = apply_traversal_path(['r', 'c', 'm', 'cc', 'mm'])
assert len(docs) == 1
assert len(docs[0].chunks) == 2
assert len(docs[0].chunks[0].chunks) == 2
assert len(docs[0].chunks[0].chunks[0].matches) == 3
assert len(docs[0].chunks[0].chunks[0].chunks) == 1 # 0 before traversal
assert len(docs[0].chunks[0].matches) == 3
assert len(docs[0].matches) == 3
assert len(docs[0].matches[0].chunks) == 2
assert len(docs[0].matches[0].matches) == 3
assert len(docs[0].matches[0].matches[0].chunks) == 2
def test_adjacency0_granularity1():
docs = apply_traversal_path(['c', 'cc', 'cm', 'cmm'])
assert len(docs) == 1
assert len(docs[0].chunks) == 1
assert len(docs[0].chunks[0].chunks) == 2
assert len(docs[0].chunks[0].chunks[0].matches) == 3
assert len(docs[0].chunks[0].matches) == 3
assert len(docs[0].chunks[0].matches[0].chunks) == 2
assert len(docs[0].chunks[0].matches[0].matches) == 3
assert len(docs[0].chunks[0].matches[0].matches[0].chunks) == 2
assert len(docs[0].matches) == 1
assert len(docs[0].matches[0].chunks) == 1
assert len(docs[0].matches[0].matches) == 1
assert len(docs[0].matches[0].matches[0].chunks) == 1
def test_adjacency1_granularity1():
docs = apply_traversal_path(['cm', 'cmm', 'mcc'])
assert len(docs) == 1
assert len(docs[0].chunks) == 1
assert len(docs[0].chunks[0].chunks) == 1
assert len(docs[0].chunks[0].chunks[0].matches) == 1
assert len(docs[0].chunks[0].matches) == 1
assert len(docs[0].chunks[0].matches[0].chunks) == 2
assert len(docs[0].chunks[0].matches[0].matches) == 3
assert len(docs[0].chunks[0].matches[0].matches[0].chunks) == 2
assert len(docs[0].matches) == 1
assert len(docs[0].matches[0].chunks) == 1
assert len(docs[0].matches[0].chunks[0].chunks) == 1
assert len(docs[0].matches[0].chunks[0].chunks[0].matches) == 3
assert len(docs[0].matches[0].chunks[0].matches) == 1
assert len(docs[0].matches[0].matches) == 1
assert len(docs[0].matches[0].matches[0].chunks) == 1
def test_selection():
docs = apply_traversal_path(['cmm', 'mcm'])
assert docs[0].chunks[0].matches[0].matches[0].granularity == 1
assert docs[0].chunks[0].matches[0].matches[0].adjacency == 2
assert len(docs[0].chunks[0].matches[0].matches) == 1
assert docs[0].matches[0].chunks[0].matches[0].granularity == 1
assert docs[0].matches[0].chunks[0].matches[0].adjacency == 2
assert len(docs[0].matches[0].chunks[0].matches) == 1
def test_root_chunk():
docs = apply_traversal_path(['r', 'c'])
assert len(docs) == 1
assert len(docs[0].chunks) == 2
assert len(docs[0].chunks[0].chunks) == 2
assert len(docs[0].chunks[1].chunks) == 1
def test_chunk_root():
docs = apply_traversal_path(['c', 'r'])
assert len(docs) == 1
assert len(docs[0].chunks) == 2
assert len(docs[0].chunks[0].chunks) == 2
assert len(docs[0].chunks[1].chunks) == 0
def test_traverse_apply():
docs = build_docs()
doc = docs[0]
doc.ClearField('chunks')
docs = [doc, ]
driver = AppendOneChunkTwoMatchesCrafter(traversal_paths=('mcm',))
assert docs[0].matches[0].chunks[0].matches[0].granularity == 1
assert docs[0].matches[0].chunks[0].matches[0].adjacency == 2
driver._traverse_apply(docs)
assert len(docs[0].matches[0].chunks[0].matches) == 1
assert len(docs[0].matches[0].chunks[0].matches[0].chunks) == 2
assert len(docs[0].matches[0].chunks[0].matches[0].matches) == 2
| 34.682609 | 105 | 0.638586 |
6c8439f3c58f52a15d04d152bdedcd8579553da3
| 6,936 |
py
|
Python
|
symjax/data/imagenet.py
|
RandallBalestriero/TheanoXLA
|
d8778c2eb3254b478cef4f45d934bf921e695619
|
[
"Apache-2.0"
] | 67 |
2020-02-21T21:26:46.000Z
|
2020-06-14T14:25:42.000Z
|
symjax/data/imagenet.py
|
RandallBalestriero/TheanoXLA
|
d8778c2eb3254b478cef4f45d934bf921e695619
|
[
"Apache-2.0"
] | 8 |
2020-02-22T14:45:56.000Z
|
2020-06-07T16:56:47.000Z
|
symjax/data/imagenet.py
|
RandallBalestriero/TheanoXLA
|
d8778c2eb3254b478cef4f45d934bf921e695619
|
[
"Apache-2.0"
] | 4 |
2020-02-21T17:34:46.000Z
|
2020-05-30T08:30:14.000Z
|
# Based on a script by Seiya Tokui. With the following copyright
# Copyright (c) 2014 Seiya Tokui
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# Given the wnid of a synset, the wnid of hyponym synsets can be obtained at
# http://www.image-net.org/api/text/wordnet.structure.hyponym?wnid=[wnid]
#
# To obtain the full hyponym (the synset of the whole subtree starting
# from wnid), you can request
# http://www.image-net.org/api/text/wordnet.structure.hyponym?wnid=[wnid]&full=1
#
# to get the words of a synset
# http://www.image-net.org/api/text/wordnet.synset.getwords?wnid=[wnid]
#
# Given the wnid of a synset, the URLs of its images can be obtained at
# http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=[wnid]
#
# mapping from all synsets to words
# http://image-net.org/archive/words.txt
#
import argparse
import urllib.request, urllib.error, urllib.parse
import time
import os
import math
import threading
import sys
import imghdr
import http.client
from ssl import CertificateError
class DownloadError(Exception):
"""Base class for exceptions in this module."""
def __init__(self, message=""):
self.message = message
def download(n_images, min_size, n_threads, wnids_list, out_dir):
wnid_thread_lists = list()
wnid_list_len = len(wnids_list)
wnid_thread_sizes = int(math.ceil(float(wnid_list_len) / n_threads))
for i in range(n_threads):
wnid_thread_lists.append(
wnids_list[i * wnid_thread_sizes : (i + 1) * wnid_thread_sizes]
)
# Define the threads
def downloader(wnid_list):
for wnid in wnid_list:
dir_name = wnid
print("Downloading " + dir_name)
dir_path = os.path.join(out_dir, dir_name)
if os.path.isdir(dir_path):
print("skipping: already have " + dir_name)
else:
image_url_list = get_image_urls(wnid)
download_images(dir_path, image_url_list, n_images, min_size)
# initialize the threads
print(wnid_thread_lists[0])
download_threads = [
threading.Thread(target=downloader, args=([wnid_thread_lists[i]]))
for i in range(n_threads)
]
for t in download_threads:
t.start()
is_alive = True
while is_alive:
is_alive = False
for t in download_threads:
            is_alive = is_alive or t.is_alive()
time.sleep(0.1)
for t in download_threads:
t.join()
print("finished")
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
def get_url_request_list_function(request_url):
def get_url_request_list(wnid, timeout=5, retry=3):
url = request_url + wnid
f = urllib.request.urlopen(url)
response = f.read().decode()
f.close()
print("response: " + response)
list = str.split(response)
return list
return get_url_request_list
get_image_urls = get_url_request_list_function(
"http://www.image-net.org/api/text/imagenet.synset.geturls?wnid="
)
get_subtree_wnid = get_url_request_list_function(
"http://www.image-net.org/api/text/wordnet.structure.hyponym?wnid="
)
get_full_subtree_wnid = get_url_request_list_function(
"http://www.image-net.org/api/text/wordnet.structure.hyponym?full=1&wnid="
)
def get_words_wnid(wnid):
url = "http://www.image-net.org/api/text/wordnet.synset.getwords?wnid=" + wnid
f = urllib.request.urlopen(url)
content = f.read().decode()
f.close()
return content
def download_images(dir_path, image_url_list, n_images, min_size):
mkdir(dir_path)
image_count = 0
for url in image_url_list:
if image_count == n_images:
break
try:
f = urllib.request.urlopen(url)
image = f.read()
f.close()
extension = imghdr.what("", image) # check if valid image
if extension == "jpeg":
extension = "jpg"
if sys.getsizeof(image) > min_size:
image_name = "image_" + str(image_count) + "." + extension
image_path = os.path.join(dir_path, image_name)
image_file = open(image_path, "wb")
image_file.write(image)
image_file.close()
image_count += 1
except:
print("skipping ", url)
def main(wnid, out_dir, n_threads, n_images, fullsubtree, noroot, nosubtree, min_size):
wnids_list = []
# First get the list of wnids
if not noroot:
wnids_list.append(wnid)
if not nosubtree:
if fullsubtree:
subtree = get_full_subtree_wnid(wnid)
else:
            subtree = get_subtree_wnid(wnid)
for i in range(1, len(subtree)):
subtree[i] = subtree[i][1:] # removes dash
wnids_list.extend(subtree)
# create root directory
mkdir(out_dir)
download(n_images, min_size, n_threads, wnids_list, out_dir)
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("wnid", help="Imagenet wnid, example n03489162")
p.add_argument("outdir", help="Output directory")
p.add_argument(
"--jobs",
"-j",
type=int,
default=1,
help="Number of parallel threads to download",
)
p.add_argument(
"--images",
"-i",
type=int,
default=20,
metavar="N_IMAGES",
help="Number of images per category to download",
)
p.add_argument(
"--fullsubtree", "-F", action="store_true", help="Downloads the full subtree"
)
p.add_argument(
"--noroot", "-R", action="store_true", help="Do not Downloads the root"
)
p.add_argument(
"--nosubtree", "-S", action="store_true", help="Do not Downloads the subtree"
)
p.add_argument(
"--humanreadable",
"-H",
action="store_true",
help="Makes the folders human readable",
)
p.add_argument(
"--minsize",
"-m",
type=float,
default=7000,
help="Min size of the images in bytes",
)
args = p.parse_args()
main(
wnid=args.wnid,
out_dir=args.outdir,
n_threads=args.jobs,
n_images=args.images,
fullsubtree=args.fullsubtree,
noroot=args.noroot,
nosubtree=args.nosubtree,
min_size=args.minsize,
)
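# Example invocation (wnid taken from the help text above; the output directory is
# hypothetical, and downloads can be slow):
#   python imagenet.py n03489162 ./imagenet_out -j 4 -i 50 --minsize 7000
# This fetches up to 50 images for synset n03489162 (and, unless -S is given, its
# hyponym subtree) using 4 threads, skipping files smaller than ~7 kB.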
| 29.76824 | 87 | 0.638408 |
ca46e40fd9b4b2fb8c818ac03d324a8c7124e64c
| 1,425 |
py
|
Python
|
app/auth/views.py
|
Tracymbone/my_personal_blog
|
07d00b8540aa22e7f12b09614874c0fa908ee4b4
|
[
"MIT"
] | null | null | null |
app/auth/views.py
|
Tracymbone/my_personal_blog
|
07d00b8540aa22e7f12b09614874c0fa908ee4b4
|
[
"MIT"
] | null | null | null |
app/auth/views.py
|
Tracymbone/my_personal_blog
|
07d00b8540aa22e7f12b09614874c0fa908ee4b4
|
[
"MIT"
] | null | null | null |
from . import auth
from flask import render_template,redirect,url_for,request,flash
from .forms import RegisterForm, LoginForm
from ..models import User
from .. import db
from flask_login import login_user,logout_user,login_required
@auth.route('/register',methods = ["GET","POST"])
def register():
reg_form = RegisterForm()
if reg_form.validate_on_submit():
        user = User(username=reg_form.username.data, email=reg_form.email.data, password=reg_form.password.data)
        db.session.add(user)
        db.session.commit()
        # NOTE: mail_message is not imported in this module; an email helper providing it
        # is assumed. The call previously sat after the return and referenced an undefined
        # 'client', so it could never run as written.
        mail_message("Hello, Welcome To Tracy's Blog.", "email/welcome", user.email, subscriber=user)
        return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=reg_form)
@auth.route('/login',methods = ["GET","POST"])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user=User.query.filter_by(email=login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or Password')
return render_template('auth/login.html', form=login_form)
@auth.route('/logout')
@login_required
def logout():
"""Logout function"""
logout_user()
return redirect(url_for('main.index'))
| 30.978261 | 108 | 0.707368 |
de28466dbdcaef9746395aabc8b10f83c1d52b32
| 8,541 |
py
|
Python
|
cirtorch/utils/download.py
|
Attila94/cnnimageretrieval-pytorch
|
ea6ac81e26e46fa2f58e6ca376607bf6f12350a4
|
[
"MIT"
] | null | null | null |
cirtorch/utils/download.py
|
Attila94/cnnimageretrieval-pytorch
|
ea6ac81e26e46fa2f58e6ca376607bf6f12350a4
|
[
"MIT"
] | null | null | null |
cirtorch/utils/download.py
|
Attila94/cnnimageretrieval-pytorch
|
ea6ac81e26e46fa2f58e6ca376607bf6f12350a4
|
[
"MIT"
] | null | null | null |
import os
def download_test(data_dir,datasets):
"""
DOWNLOAD_TEST Checks, and, if required, downloads the necessary datasets for the testing.
download_test(DATA_ROOT) checks if the data necessary for running the example script exist.
If not it downloads it in the folder structure:
DATA_ROOT/test/oxford5k/ : folder with Oxford images and ground truth file
DATA_ROOT/test/paris6k/ : folder with Paris images and ground truth file
DATA_ROOT/test/roxford5k/ : folder with Oxford images and revisited ground truth file
DATA_ROOT/test/rparis6k/ : folder with Paris images and revisited ground truth file
"""
# Create data folder if it does not exist
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
# Create datasets folder if it does not exist
datasets_dir = os.path.join(data_dir, 'test')
if not os.path.isdir(datasets_dir):
os.mkdir(datasets_dir)
# Download datasets folders test/DATASETNAME/
#datasets = ['oxford5k', 'paris6k', 'roxford5k', 'rparis6k', '247tokyo1k']
for di in range(len(datasets)):
dataset = datasets[di]
if dataset == 'oxford5k':
src_dir = 'http://www.robots.ox.ac.uk/~vgg/data/oxbuildings'
dl_files = ['oxbuild_images.tgz']
elif dataset == 'paris6k':
src_dir = 'http://www.robots.ox.ac.uk/~vgg/data/parisbuildings'
dl_files = ['paris_1.tgz', 'paris_2.tgz']
elif dataset == 'roxford5k':
src_dir = 'http://www.robots.ox.ac.uk/~vgg/data/oxbuildings'
dl_files = ['oxbuild_images.tgz']
elif dataset == 'rparis6k':
src_dir = 'http://www.robots.ox.ac.uk/~vgg/data/parisbuildings'
dl_files = ['paris_1.tgz', 'paris_2.tgz']
elif dataset == '247tokyo1k':
src_dir = 'http://www.ok.ctrl.titech.ac.jp/~torii/project/247/download'
dl_files = ['247query_v3.zip']
elif dataset in ['gp_dl_nr','gp_dr_nr']:
pass
else:
raise ValueError('Unknown dataset: {}!'.format(dataset))
dst_dir = os.path.join(datasets_dir, dataset, 'jpg')
if not os.path.isdir(dst_dir):
# for oxford and paris download images
if dataset == 'oxford5k' or dataset == 'paris6k' or dataset == "247tokyo1k":
print('>> Dataset {} directory does not exist. Creating: {}'.format(dataset, dst_dir))
os.makedirs(dst_dir)
for dli in range(len(dl_files)):
dl_file = dl_files[dli]
src_file = os.path.join(src_dir, dl_file)
dst_file = os.path.join(dst_dir, dl_file)
print('>> Downloading dataset {} archive {}...'.format(dataset, dl_file))
os.system('wget {} -O {}'.format(src_file, dst_file))
print('>> Extracting dataset {} archive {}...'.format(dataset, dl_file))
# create tmp folder
dst_dir_tmp = os.path.join(dst_dir, 'tmp')
os.system('mkdir {}'.format(dst_dir_tmp))
# extract in tmp folder
if dl_file.endswith(".zip"):
os.system('unzip {} -d {}'.format(dst_file, dst_dir_tmp))
else:
os.system('tar -zxf {} -C {}'.format(dst_file, dst_dir_tmp))
# remove all (possible) subfolders by moving only files in dst_dir
os.system('find {} -type f -exec mv -i {{}} {} \\;'.format(dst_dir_tmp, dst_dir))
# remove tmp folder
os.system('rm -rf {}'.format(dst_dir_tmp))
print('>> Extracted, deleting dataset {} archive {}...'.format(dataset, dl_file))
os.system('rm {}'.format(dst_file))
# for roxford and rparis just make sym links
elif dataset == 'roxford5k' or dataset == 'rparis6k':
print('>> Dataset {} directory does not exist. Creating: {}'.format(dataset, dst_dir))
dataset_old = dataset[1:]
dst_dir_old = os.path.join(datasets_dir, dataset_old, 'jpg')
if not os.path.exists(os.path.join(datasets_dir, dataset)):
os.mkdir(os.path.join(datasets_dir, dataset))
if not os.path.exists(dst_dir):
# Broken link
if os.path.lexists(dst_dir):
os.remove(dst_dir)
os.symlink(dst_dir_old, dst_dir)
print('>> Created symbolic link from {} jpg to {} jpg'.format(dataset_old, dataset))
if dataset == "247tokyo1k":
gnd_src_dir = "http://cmp.felk.cvut.cz/daynightretrieval/download/data"
else:
gnd_src_dir = os.path.join('http://cmp.felk.cvut.cz/cnnimageretrieval/data', 'test', dataset)
gnd_dst_dir = os.path.join(datasets_dir, dataset)
gnd_dl_file = 'gnd_{}.pkl'.format(dataset)
gnd_src_file = os.path.join(gnd_src_dir, gnd_dl_file)
gnd_dst_file = os.path.join(gnd_dst_dir, gnd_dl_file)
if not os.path.exists(gnd_dst_file):
print('>> Downloading dataset {} ground truth file...'.format(dataset))
os.system('wget {} -O {}'.format(gnd_src_file, gnd_dst_file))
def download_train(data_dir):
"""
DOWNLOAD_TRAIN Checks, and, if required, downloads the necessary datasets for the training.
download_train(DATA_ROOT) checks if the data necessary for running the example script exist.
If not it downloads it in the folder structure:
DATA_ROOT/train/retrieval-SfM-120k/ : folder with rsfm120k images and db files
DATA_ROOT/train/retrieval-SfM-30k/ : folder with rsfm30k images and db files
"""
# Create data folder if it does not exist
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
# Create datasets folder if it does not exist
datasets_dir = os.path.join(data_dir, 'train')
if not os.path.isdir(datasets_dir):
os.mkdir(datasets_dir)
# Download folder train/retrieval-SfM-120k/
src_dir = os.path.join('http://cmp.felk.cvut.cz/cnnimageretrieval/data', 'train', 'ims')
dst_dir = os.path.join(datasets_dir, 'retrieval-SfM-120k', 'ims')
dl_file = 'ims.tar.gz'
if not os.path.isdir(dst_dir):
src_file = os.path.join(src_dir, dl_file)
dst_file = os.path.join(dst_dir, dl_file)
print('>> Image directory does not exist. Creating: {}'.format(dst_dir))
os.makedirs(dst_dir)
print('>> Downloading ims.tar.gz...')
os.system('wget {} -O {}'.format(src_file, dst_file))
print('>> Extracting {}...'.format(dst_file))
os.system('tar -zxf {} -C {}'.format(dst_file, dst_dir))
print('>> Extracted, deleting {}...'.format(dst_file))
os.system('rm {}'.format(dst_file))
# Create symlink for train/retrieval-SfM-30k/
dst_dir_old = os.path.join(datasets_dir, 'retrieval-SfM-120k', 'ims')
dst_dir = os.path.join(datasets_dir, 'retrieval-SfM-30k', 'ims')
if not os.path.isdir(dst_dir):
if not os.path.isdir(datasets_dir):
os.makedirs(os.path.join(datasets_dir, 'retrieval-SfM-30k'))
# Broken link
if os.path.lexists(dst_dir):
os.remove(dst_dir)
os.symlink(dst_dir_old, dst_dir)
print('>> Created symbolic link from retrieval-SfM-120k/ims to retrieval-SfM-30k/ims')
# Download db files
src_dir = os.path.join('http://cmp.felk.cvut.cz/cnnimageretrieval/data', 'train', 'dbs')
datasets = ['retrieval-SfM-120k', 'retrieval-SfM-30k']
for dataset in datasets:
dst_dir = os.path.join(datasets_dir, dataset)
if dataset == 'retrieval-SfM-120k':
dl_files = ['{}.pkl'.format(dataset), '{}-whiten.pkl'.format(dataset)]
elif dataset == 'retrieval-SfM-30k':
dl_files = ['{}-whiten.pkl'.format(dataset)]
if not os.path.isdir(dst_dir):
print('>> Dataset directory does not exist. Creating: {}'.format(dst_dir))
os.mkdir(dst_dir)
for i in range(len(dl_files)):
src_file = os.path.join(src_dir, dl_files[i])
dst_file = os.path.join(dst_dir, dl_files[i])
if not os.path.isfile(dst_file):
print('>> DB file {} does not exist. Downloading...'.format(dl_files[i]))
os.system('wget {} -O {}'.format(src_file, dst_file))
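# Example usage (hypothetical data root; the image archives are several GB):
#   download_test('/path/to/data', ['roxford5k', 'rparis6k'])
#   download_train('/path/to/data')
# This populates /path/to/data/test/... and /path/to/data/train/... following the
# folder layout described in the docstrings above.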
| 50.241176 | 105 | 0.596534 |
bbfcd43faf17d36c1bacc353ce10290e2a31950b
| 165 |
py
|
Python
|
skyscape/skyscape_apidefinition.py
|
skyscape-cloud-services/skyscape_python
|
4f5608dd6272c4370adf04a0317842b8ea5083e2
|
[
"Apache-2.0"
] | 1 |
2015-12-04T14:14:25.000Z
|
2015-12-04T14:14:25.000Z
|
skyscape/skyscape_apidefinition.py
|
UKCloud/skyscape_python
|
4f5608dd6272c4370adf04a0317842b8ea5083e2
|
[
"Apache-2.0"
] | 1 |
2016-01-06T15:46:22.000Z
|
2016-01-06T15:46:22.000Z
|
skyscape/skyscape_apidefinition.py
|
UKCloud/skyscape_python
|
4f5608dd6272c4370adf04a0317842b8ea5083e2
|
[
"Apache-2.0"
] | 1 |
2015-12-02T17:22:20.000Z
|
2015-12-02T17:22:20.000Z
|
__author__ = 'prossi'
class APIDEFINITION:
def __init__(self, obj, connection):
self.__dict__ = dict(obj.attrib)
self.connection = connection
| 18.333333 | 40 | 0.672727 |
e07c8ab265ead95f7480592c12ace50290eba068
| 4,630 |
py
|
Python
|
src/knn_classifier.py
|
mtambos/character_identification
|
c72c417351afccb830ab731eb9622f966bd5a2e8
|
[
"MIT"
] | null | null | null |
src/knn_classifier.py
|
mtambos/character_identification
|
c72c417351afccb830ab731eb9622f966bd5a2e8
|
[
"MIT"
] | null | null | null |
src/knn_classifier.py
|
mtambos/character_identification
|
c72c417351afccb830ab731eb9622f966bd5a2e8
|
[
"MIT"
] | null | null | null |
__author__ = 'mtambos'
from __future__ import division, print_function
import os
import pandas as pd
from PIL import Image
import numpy as np
def to_gray_scale(img):
img_array = np.asarray(img)
luminosity = lambda x: 0.21*x[0] + 0.72*x[1] + 0.07*x[2]
return np.apply_along_axis(func1d=luminosity, axis=2, arr=img_array)
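# Rough sanity check (hypothetical 2x2 RGB array): the luminosity weights sum to 1.0,
# so a uniform white image stays at 255.
#   img = np.ones((2, 2, 3), dtype=np.float32) * 255
#   to_gray_scale(img)  # -> 2x2 array of 255.0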
def load_files(path):
# get files list
files_list = os.walk(path).next()[2]
# load bmp files. traditional for instead of comprehension due to the need of closing file pointers
img_files = pd.DataFrame(columns=range(400), index=np.array([], dtype=np.int), dtype=np.float)
for f in files_list:
name, ext = os.path.splitext(f)
# use only bmp files
if ext.lower() == '.bmp':
with file(os.path.join(path, f), 'rb') as img:
bitmap = Image.open(img)
bands = bitmap.getbands()
# check whether the image is color or b/w
if len(bands) == 3:
# convert to gray scale and append
bitmap = to_gray_scale(bitmap).flatten()
elif len(bands) == 1:
bitmap = np.asarray(bitmap).flatten()
# add image as a row with the file name as key
img_files.loc[int(name)] = bitmap
# sort the indexes so they coincide with the label's indexes
img_files.sort_index(inplace=True)
return img_files
def distance(v, r):
# subtract r (a row) from each row in v (a matrix),
# square the individual elements,
# sum all elements per row and finally
# take the square root of each row
return np.sum((v - r)**2, axis=1)**0.5
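# Worked example (values chosen so the result is easy to verify by hand):
#   v = pd.DataFrame([[0.0, 0.0], [3.0, 4.0]])
#   r = pd.Series([0.0, 0.0])
#   distance(v, r)  # -> [0.0, 5.0], the Euclidean distance of each row of v from r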
def get_nearest_neighbors(train_set, labels, point, k):
# calculate the distance from point to all points in train_set
distances = distance(train_set, point)
# choose the k smallest distances' indexes
indexes = np.argpartition(distances.values, kth=k)[:k]
# return the k smallest labels and distances
return labels.iloc[indexes], distances.iloc[indexes]
def classify(train_set, test_set, labels, k):
# create data frame for the results and set its index's name
classes = pd.DataFrame(columns=['Class'])
classes.index.name = 'ID'
for i, r in enumerate(test_set.iterrows()):
# get the k points in the train set
# nearest to the point r in the test set
knn, distances = get_nearest_neighbors(train_set, labels, r[1], k)
value_counts = knn.Class.value_counts()
# value_counts[0] = 1 means that all
# k training points have different labels
# TODO: check case where 2 or more labels
# TODO: have the same amount of counts
# TODO: (and it's higher than 1)
if value_counts[0] > 1:
winner = value_counts.index[0]
else:
index = np.argmin(distances.values)
winner = knn.iloc[index, 0]
classes.loc[test_set.index[i]] = winner
return classes
def optimize_k(base_path):
# load labels
labels_path = os.path.join(base_path, 'trainLabels.csv')
labels = pd.read_csv(labels_path, index_col=0, dtype={'ID': np.int, 'Class': np.str})
# load train set
train_path = os.path.join(base_path, 'train')
train_set = load_files(train_path)
train_set_index = train_set.index
# select random subset of train set as test set
test_set_size = len(train_set) // 3
test_indexes = set(np.random.choice(train_set_index, test_set_size))
test_set = train_set.loc[list(test_indexes)]
# select the labels corresponding to the test set
test_labels = labels.loc[list(test_indexes)]
# remove test elements from train set
train_set = train_set.loc[list(set(train_set_index) - test_indexes)]
accuracies = {}
for k in range(1, 10):
print('k=%s' % k)
# classify the test set with knn
classes = classify(train_set, test_set, labels, k)
# check what portion of the test were correctly classified
# TODO: select new random subset of train as test
acc = (classes == test_labels).sum()/len(test_labels)
accuracies[k] = acc
return accuracies
def run(base_path, k):
labels_path = os.path.join(base_path, 'trainLabels.csv')
labels = pd.read_csv(labels_path, index_col=0, dtype={'ID': np.int, 'Class': np.str})
train_path = os.path.join(base_path, 'train')
train_set = load_files(train_path)
test_path = os.path.join(base_path, 'test')
test_set = load_files(test_path)
classes = classify(train_set, test_set, labels, k)
classes.to_csv('submission%sk.csv' % k)
| 37.95082 | 103 | 0.647732 |
025ff6e1c9dee565f2b190501e90711bce864d54
| 1,970 |
py
|
Python
|
src/app/voltdb/voltdb_src/lib/python/voltcli/voltdb.d/mask.py
|
OpenMPDK/SMDK
|
8f19d32d999731242cb1ab116a4cb445d9993b15
|
[
"BSD-3-Clause"
] | 44 |
2022-03-16T08:32:31.000Z
|
2022-03-31T16:02:35.000Z
|
src/app/voltdb/voltdb_src/lib/python/voltcli/voltdb.d/mask.py
|
H2O0Lee/SMDK
|
eff49bc17a55a83ea968112feb2e2f2ea18c4ff5
|
[
"BSD-3-Clause"
] | 1 |
2022-03-29T02:30:28.000Z
|
2022-03-30T03:40:46.000Z
|
src/app/voltdb/voltdb_src/lib/python/voltcli/voltdb.d/mask.py
|
H2O0Lee/SMDK
|
eff49bc17a55a83ea968112feb2e2f2ea18c4ff5
|
[
"BSD-3-Clause"
] | 18 |
2022-03-19T04:41:04.000Z
|
2022-03-31T03:32:12.000Z
|
# This file is part of VoltDB.
# Copyright (C) 2008-2020 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import os
# Main Java Class
CatalogPasswordScrambler = 'org.voltdb.utils.CatalogPasswordScrambler'
@VOLT.Command(
# Descriptions for help screen.
description = 'Mask user passwords in VoltDB deployment file.',
description2 = 'At least one deployment file is required.',
# Command line arguments.
arguments = (
VOLT.PathArgument(
'deploymentfile',
'Source and optionally a destination masked deployment file(s)',
min_count=1, max_count=2
)
)
)
# Command implementation
def mask(runner):
# Check that there's something to compile.
if not runner.opts.deploymentfile:
runner.abort_with_help('At least one deployment file must be specified.')
# Verbose argument display.
if runner.is_verbose():
params = ['Deployment file: %s' % runner.opts.deploymentfile[0]]
if len(runner.opts.deploymentfile) == 2:
params.append('Masked deployment file: %s' % runner.opts.deploymentfile[1])
runner.verbose_info('Mask parameters:', params)
# Build the positional and keyword argument lists and invoke the scrambler
args = runner.opts.deploymentfile
runner.java_execute(CatalogPasswordScrambler, None, *args)
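# Example invocation (hypothetical file names) through the voltdb CLI this file plugs into:
#   voltdb mask deployment.xml deployment-masked.xml
# Per the argument definition above, the masked destination file is optional.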
| 35.818182 | 87 | 0.711675 |
ea175bbff7356e410ffafad610d1f930b58872be
| 960 |
py
|
Python
|
tests/test_cli_validation.py
|
radon-h2020/radon-defect-prediction-cli
|
4b2c9c49491b1f1f5d5e748609f8f77a878791e1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli_validation.py
|
radon-h2020/radon-defect-prediction-cli
|
4b2c9c49491b1f1f5d5e748609f8f77a878791e1
|
[
"Apache-2.0"
] | 5 |
2020-10-23T11:46:21.000Z
|
2020-12-14T11:53:51.000Z
|
tests/test_cli_validation.py
|
radon-h2020/radon-defect-prediction-cli
|
4b2c9c49491b1f1f5d5e748609f8f77a878791e1
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from argparse import ArgumentTypeError
from radondp.cli import valid_dir, valid_file, valid_balancers, valid_normalizers, valid_classifiers
class CLIValidationTestCase(unittest.TestCase):
def test_valid_dir(self):
with self.assertRaises(ArgumentTypeError):
valid_dir('this/is/an/invalid/dir')
def test_valid_file(self):
with self.assertRaises(ArgumentTypeError):
valid_file('this/is/an/invalid/file.yml')
def test_valid_balancers(self):
with self.assertRaises(ArgumentTypeError):
valid_balancers('none ros rus invalid')
def test_valid_normalizers(self):
with self.assertRaises(ArgumentTypeError):
valid_normalizers('none std minmax invalid')
def test_valid_classifiers(self):
with self.assertRaises(ArgumentTypeError):
valid_classifiers('dt logit nb rf svm invalid')
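# For contrast, the corresponding calls without the trailing 'invalid' token should
# pass validation (assuming the validators accept the values the tests imply), e.g.:
#   valid_balancers('none ros rus')
#   valid_classifiers('dt logit nb rf svm')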
if __name__ == '__main__':
unittest.main()
| 30 | 100 | 0.716667 |
bb5d6dc8a9a26843bfa5e503f7a4e7d4ba1b0879
| 838 |
py
|
Python
|
ooobuild/dyn/form/control/time_field.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/form/control/time_field.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/form/control/time_field.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.form.control
from ....lo.form.control.time_field import TimeField as TimeField
__all__ = ['TimeField']
| 32.230769 | 74 | 0.752983 |
7c946f09e691f3ffdd20ee6aaa16a4fb99bc128c
| 3,344 |
py
|
Python
|
ai_dataset/types/keras.py
|
aron-kvvon/ai-dataset-python
|
d10feb0f2e301456995a99227e82a4f294e0ecb7
|
[
"MIT"
] | null | null | null |
ai_dataset/types/keras.py
|
aron-kvvon/ai-dataset-python
|
d10feb0f2e301456995a99227e82a4f294e0ecb7
|
[
"MIT"
] | null | null | null |
ai_dataset/types/keras.py
|
aron-kvvon/ai-dataset-python
|
d10feb0f2e301456995a99227e82a4f294e0ecb7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from ai_dataset.types.abstract_data import AbstractData
keras_dataset_list = {
'mnist': tf.keras.datasets.mnist,
'cifar10': tf.keras.datasets.cifar10
}
class KerasData(AbstractData):
def __init__(self, type, is_train=True, dataset_in=None):
"""
        :param type: dataset key from keras_dataset_list (e.g. 'mnist', 'cifar10')
        :param is_train: if True use the training split, otherwise the test split
        :param dataset_in: optional pre-built tf.data.Dataset to wrap instead of downloading
"""
super().__init__(type, is_train)
if dataset_in is not None:
self._dataset = dataset_in
else:
self._dataset = self._download()
def __len__(self):
return len(list(self._dataset))
def _download(self):
dataset = None
if self.type in keras_dataset_list:
module = keras_dataset_list[self.type]
(train_images, train_labels), (test_images, test_labels) = module.load_data()
if self.is_train:
train_images, train_labels = self._data_prepare(train_images, train_labels)
dataset = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
else:
test_images, test_labels = self._data_prepare(test_images, test_labels)
dataset = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
else:
print(f'Keras dataset type:{self.type} is NOT available')
return dataset
def _data_prepare(self, images, labels):
if self.type == 'mnist':
images = np.expand_dims(images, axis=-1)
images = images.astype(np.float32) / 255.
# label was just an integer. integer was changed to list
# categorical or just integer
num_classes = labels.max() + 1
labels = to_categorical(labels, num_classes)
return images, labels
def concatenate(self, add_data: 'KerasData'):
self._dataset = self._dataset.concatenate(add_data.get_dataset())
def extend_label(self, ext_label):
length = len(list(self._dataset))
# data_[0]: x (image vectors)
# data_[1]: y (label)
data_ = next(self._dataset.batch(length).as_numpy_iterator())
if isinstance(ext_label, int):
# same value for all data
list_ext_labels = [ext_label] * len(data_[1])
elif isinstance(ext_label, list) or isinstance(ext_label, tuple):
if len(data_[1]) != len(ext_label):
                print('Warning! The dataset length differs from the length of ext_label')
            list_ext_labels = ext_label
        else:
            print(f'Extended label type:{type(ext_label)} must be int, list, or tuple')
            return
self._dataset = tf.data.Dataset.from_tensor_slices((data_[0], data_[1], list_ext_labels))
def split(self, length):
if length > len(list(self._dataset)):
            print(f'Split length {length} is bigger than the dataset length {len(list(self._dataset))}')
return self, None
remain = len(list(self._dataset)) - length
part1 = KerasData(self.type, self.is_train, self._dataset.take(length))
part2 = KerasData(self.type, self.is_train, self._dataset.skip(length).take(remain))
return part1, part2
def subset(self, indices):
pass
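# Minimal usage sketch (triggers a Keras download of MNIST on first use):
#   train = KerasData('mnist', is_train=True)
#   head, rest = train.split(1000)   # first 1000 samples vs. the remainder
#   train.extend_label(0)            # attach an extra label 0 to every sample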
| 35.574468 | 112 | 0.63128 |
c7a25cd3d292442bb18f764b15e47f734b058bc7
| 5,657 |
py
|
Python
|
myven/lib/python3.8/site-packages/ansible/plugins/action/eos.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | 1 |
2021-04-02T08:08:39.000Z
|
2021-04-02T08:08:39.000Z
|
myven/lib/python3.8/site-packages/ansible/plugins/action/eos.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | null | null | null |
myven/lib/python3.8/site-packages/ansible/plugins/action/eos.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | 1 |
2020-05-03T01:13:16.000Z
|
2020-05-03T01:13:16.000Z
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.eos.eos import eos_provider_spec
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.common.utils import load_provider
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
if self._task.args.get('transport'):
display.warning('transport is unnecessary when using network_cli and will be ignored')
del self._task.args['transport']
elif self._play_context.connection == 'local':
provider = load_provider(eos_provider_spec, self._task.args)
transport = provider['transport'] or 'cli'
display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
if transport == 'cli':
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'eos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
self._task.args['provider'] = ActionModule.eapi_implementation(provider, self._play_context)
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
if (self._play_context.connection == 'local' and transport == 'cli') or self._play_context.connection == 'network_cli':
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while '(config' in to_text(out, errors='surrogate_then_replace').strip():
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('abort')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
@staticmethod
def eapi_implementation(provider, play_context):
provider['transport'] = 'eapi'
if provider.get('host') is None:
provider['host'] = play_context.remote_addr
if provider.get('port') is None:
default_port = 443 if provider['use_ssl'] else 80
provider['port'] = int(play_context.port or default_port)
if provider.get('timeout') is None:
provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT
if provider.get('username') is None:
provider['username'] = play_context.connection_user
if provider.get('password') is None:
provider['password'] = play_context.password
if provider.get('authorize') is None:
provider['authorize'] = False
return provider
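# Illustrative sketch (not part of the plugin): with provider = {'use_ssl': False}
# and a play context whose remote_addr is '10.0.0.1' and whose port is unset,
# eapi_implementation() fills in transport='eapi', host='10.0.0.1', port=80 and
# copies the play-context credentials/timeout for any field left as None.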
| 42.856061 | 127 | 0.64734 |
7e7b3412fc4da24e9b18aa994f71ba4b36ace5ab
| 9,416 |
py
|
Python
|
tools/gentools/MMAnsibleDeployAll.py
|
cedardeployer/cedar
|
dbf26d198c44422fffb6fba59f8fa6576c0300fb
|
[
"MIT"
] | null | null | null |
tools/gentools/MMAnsibleDeployAll.py
|
cedardeployer/cedar
|
dbf26d198c44422fffb6fba59f8fa6576c0300fb
|
[
"MIT"
] | null | null | null |
tools/gentools/MMAnsibleDeployAll.py
|
cedardeployer/cedar
|
dbf26d198c44422fffb6fba59f8fa6576c0300fb
|
[
"MIT"
] | null | null | null |
# This code is used to create Ansible files for deploying Lambdas.
# All that is needed is a target Lambda and its tests, and it will do the rest:
# finds associated roles and policies
# creates Ansible modules based on those policies and roles
# defines the Lambdas and creates them with tests
# finds api-gateways or other events
# if an api is found, defines the security needed and creates modules for deployment with templates
from time import sleep
import os
import time
import random
import shutil
from datetime import datetime, date
import boto3
from botocore.exceptions import ClientError
import json
import sys
from shutil import copyfile
import fileinput
import logging
import urllib
import distutils
from distutils import dir_util
from tools.gentools.microMolder import LambdaMolder
from . import awsconnect
from .awsconnect import awsConnect
#from context import FormatContext
#import pyaml
# pip install pyyaml
import yaml
import decimal
from tools.gentools.microUtils import writeYaml, writeJSON, account_replace, loadServicesMap, loadConfig, ansibleSetup
import subprocess
from subprocess import check_output
from subprocess import Popen, PIPE
logger = logging.getLogger(__name__)
local_dev = True
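# The 'bucket' environment variable signals a packaged (remote) deployment where the
# Ansible Python API is importable; if it is missing, fall back to local CLI mode and
# shell out to ansible-playbook instead.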
try:
os.environ['bucket']
import ansible.inventory
import ansible.playbook
# import ansible.runner
import ansible.constants
from ansible import utils
# from ansible import callbacks
local_dev = False
except Exception:
logger.info('RUNNING AS LOCAL DEPLOYMENT')
# sudo ansible-playbook -i windows-servers CR-Admin-Users.yml -vvvv
# dir_path = os.path.dirname(__file__)
dir_path = os.path.dirname(os.path.realpath(__file__))
# directory='/Users/bgarner/CR/Ansible_Deployer/ansible'
directory = os.path.join('../../ansible')
def ansibleResetDefinition(role, target, static_path=None):
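    """Point the role's defaults/main.yaml at the requested target environment by
    replacing it with the environment-specific main_<target>.yaml copy. Returns the
    ansible base path that was used."""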
final_path = directory
if static_path:
final_path = f"{static_path}/ansible" if 'ansible' not in static_path else static_path
rolePath = "%s/roles/%s/defaults" % (final_path, role)
main = "%s/main.yaml" % rolePath
logger.debug(f'Main file path: {main}')
os.remove(main) # only removing old destination
copyfile("%s/main_%s.yaml" % (rolePath, target), main)
return final_path
def ansibleDeleteCache(role, baseDir):
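    """Remove a previously generated ansible/<role> directory under baseDir, if present."""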
rolePath = "%s/ansible/%s" % (baseDir, role)
if os.path.exists(rolePath):
print("[W] removing directory %s" % (rolePath))
shutil.rmtree(rolePath)
def ansibleInvoke(account, config, role, static_path=None):
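    """Run the role's playbook for one account: through ansible_runner when deployed
    remotely (static_path given), otherwise by shelling out to ansible-playbook in the
    local ansible directory. Returns (account, target, output_or_error_message)."""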
msg=''
roleFile = '%s.yaml' % (role)
# roleFile = '%s_%s.yaml' % (account, role)
target = config['all']
local_dev = True
if not static_path:
newPath = ansibleResetDefinition(role, target, static_path)
else:
local_dev = False
prevPath = dir_path
logger.info(f'Definition role file: {roleFile}')
print(f"\n [DEPLOY] {account}::{target}")
if not local_dev:
import ansible_runner
import ansible
if 'bucket' in os.environ:
ansible.constants.DEFAULT_REMOTE_TMP = '/tmp/ansible'
# TODO: Fix playbook path
print('Available path: ', dir_path)
r = ansible_runner.run(inventory='/tmp/ansible/windows-servers',
private_data_dir='/tmp/ansible',
playbook='/tmp/ansible/test_123.yaml')
# print("{}: {}".format(r.status, r.rc))
# successful: 0
# for each_host_event in r.events:
# print(each_host_event['event'])
# print("Final status:")
print(r.stats)
else:
os.chdir(newPath)
quotedRole = '"%s"' % (roleFile)
args = ['ansible-playbook', '-i', 'windows-servers', quotedRole, '-vvvv']
msg = ""
commandIn = " ".join(args)
try:
print(' ', commandIn)
rawOut = check_output(commandIn, stderr=PIPE, shell=True).decode()
# rawOut = check_output(args, stderr=PIPE).decode()
# rawOut = check_output(args, stderr=PIPE, shell=True).decode()
if isinstance(rawOut, str):
output = rawOut
else:
output = rawOut.decode("utf-8")
msg = output
except Exception as e:
msg = "[E] error occured target:%s file:%s error:%s" % (target, roleFile, e)
logger.error(msg)
# process = Popen(args, stdout=PIPE, stderr=PIPE)#, timeout=timeout)
# stdout, stderr = process.communicate() #will wait without deadlocking
#print (stdout)
os.chdir(prevPath)
# print (stderr)
print(f" [COMPLETE] {account}::{target}")
return account, target, msg
def deployStart(target_name, accounts, targets, role, static_path=None, HardStop=False):
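    """For each requested target environment, find the matching account and deploy the
    given role there. Before invoking Ansible, any Sentry layers and environment
    variables found on the existing Lambda (target_name) are merged back into the
    role's main_<target>.yaml. Returns a dict of per-account results; returns early
    on an '[E]' error when HardStop is True."""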
outputs = {}
for target in targets:
for k, v in accounts.items():
if target in v['all']:
# SENTRY: Put Sentry back if it was in target (Taken out at lambda describe)
try:
sts_client = awsconnect.stsClient
aconnect2 = awsConnect(k, v['eID'], v['role'], sts_client, 'us-east-1')
aconnect2.connect()
client = aconnect2.__get_client__('lambda')
lmda = client.get_function(FunctionName=target_name)
with open(f"../../ansible/roles/{role}/defaults/main_{v['all']}.yaml", "r") as stream:
try:
ydata = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
if ydata[f'A{role}'.replace('-', '_')]['lambdas'][0]['handler'] != lmda['Configuration']['Handler']:
ydata[f'A{role}'.replace('-', '_')]['lambdas'][0]['handler'] = lmda['Configuration']['Handler']
if 'Environment' in lmda['Configuration']:
if 'Variables' in lmda['Configuration']['Environment']:
if 'SENTRY_ENVIRONMENT' in lmda['Configuration']['Environment']['Variables']:
if 'SENTRY_ENVIRONMENT' in ydata[f'A{role}'.replace('-', '_')]['lambdas'][0]['environment_variables']:
del ydata[f'A{role}'.replace('-', '_')]['lambdas'][0]['environment_variables']['SENTRY_ENVIRONMENT']
for nvar, nvarv in lmda['Configuration']['Environment']['Variables'].items():
if 'SENTRY' in nvar:
ydata[f'A{role}'.replace('-', '_')]['lambdas'][0]['environment_variables'][nvar] = nvarv
# if lmda['Configuration']['Environment']['Variables']:
# ydata[f'A{role}'.replace('-', '_')]['lambdas'][0]['environment_variables'].update(lmda['Configuration']['Environment']['Variables'])
if 'Layers' in lmda['Configuration']:
if lmda['Configuration']['Layers']:
for lay in lmda['Configuration']['Layers']:
                                # get_function returns each layer as a dict; assume the Sentry
                                # layer is identified by its ARN string
                                if 'Sentry' in lay.get('Arn', ''):
# if 'sentry_sdk.integrations.init_serverless_sdk.sentry_lambda_handler' in ydata[f'A{role}'.replace('-', '_')]['lambdas'][0]['handler']:
ydata[f'A{role}'.replace('-', '_')]['lambda_updates'][0]['layers'].extend(lmda['Configuration']['Layers'])
# Add layer to main
with open(f"../../ansible/roles/{role}/defaults/main_{v['all']}.yaml", 'w', encoding='utf8') as outfile:
outfile.write('---\n')
yaml.dump(ydata, outfile, default_flow_style=False, allow_unicode=True)
except client.exceptions.ResourceNotFoundException:
print(" Does not yet exist in target env...")
# pass
# SENTRY: END
account, target, result = ansibleInvoke(k, v, role, static_path)
outputs.update({account: {"name": target, "value": result}})
if HardStop:
if '[E]' in result:
return outputs
break
return outputs
# cp -R /usr/local/src/venvs/vdocx3/lib/python3.6/site-packages/slacker /path/to/Lambda
# ansible-playbook -i windows-servers xx_tablename.yaml -vvvv
# python MMAnsibleDeployAll.py "xx-stage,xx-test" xx_tablename ENVR.yaml
#
# python MMAnsibleDeployAll.py "stage,prod" API_Name ENVR.yaml
# OR call it manually in /ansible folder
# ansible-playbook -i windows-servers xx-LambdaName -vvvv
if __name__ == "__main__":
found = None
length = 0
target_environments = str(sys.argv[1]).strip().split(",")
role = str(sys.argv[2]).strip()
config = str(sys.argv[3]).strip()
start_time = time.time()
fullpath = "%s/%s" % (dir_path, config)
origin, global_accts = loadConfig(fullpath, "dev")
    # deployStart expects the Lambda/function name as its first argument; it is
    # assumed here to match the role name passed on the command line.
    results = deployStart(role, global_accts, target_environments, role)
for k, v in results.items():
msg = "%s Account: %s, %s" % (v['name'], k, v['value'])
print(msg)
# print(global_accts)
    # print(target_environments)
    # logger.info("Finished")
print("--- %s seconds ---" % (time.time() - start_time))
| 41.298246 | 173 | 0.5976 |