ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M) |
---|---|---|
py | 1a41b316715757ff29c41497cbc9373076e381d3 | import sys
import os
import crawler
import parser
import json
import datetime
if('COURT' in os.environ):
court = os.environ['COURT']
else:
sys.stderr.write("Invalid arguments, missing parameter: 'COURT'.\n")
os._exit(1)
if('YEAR' in os.environ):
year = os.environ['YEAR']
else:
sys.stderr.write("Invalid arguments, missing parameter: 'YEAR'.\n")
os._exit(1)
if('MONTH' in os.environ):
month = os.environ['MONTH']
month = month.zfill(2)
else:
sys.stderr.write("Invalid arguments, missing parameter: 'MONTH'.\n")
os._exit(1)
if('DRIVER_PATH' in os.environ):
driver_path = os.environ['DRIVER_PATH']
else:
sys.stderr.write("Invalid arguments, missing parameter: 'DRIVER_PATH'.\n")
os._exit(1)
if('OUTPUT_FOLDER' in os.environ):
output_path = os.environ['OUTPUT_FOLDER']
else:
output_path = "/output"
if('GIT_COMMIT' in os.environ):
crawler_version = os.environ['GIT_COMMIT']
else:
sys.stderr.write("crawler_version cannot be empty")
os._exit(1)
now = datetime.datetime.now()
current_year = now.year
current_month = now.month
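# Usage sketch (not part of the original file; all values below are placeholders):
# the script is configured entirely through environment variables and writes its JSON
# result to stdout, so an invocation could look like
#   COURT=tjsp YEAR=2020 MONTH=3 DRIVER_PATH=/usr/bin/chromedriver \
#   GIT_COMMIT=abc1234 OUTPUT_FOLDER=/output python crawl.py > result.json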
# Main execution
def main():
file_names = crawler.crawl(court, year, month, driver_path, output_path)
employees = parser.parse(file_names)
cr = {
'aid': court.lower(),
'month': month,
'year': year,
'files': file_names,
'crawler': {
'id': court.lower(),
'version': crawler_version,
},
'employees': employees,
# https://hackernoon.com/today-i-learned-dealing-with-json-datetime-when-unmarshal-in-golang-4b281444fb67
'timestamp': now.astimezone().replace(microsecond=0).isoformat(),
}
print(json.dumps({'cr': cr}, ensure_ascii=False))
if __name__ == '__main__':
main()
|
py | 1a41b3c7c5f2939ca47132175570ac5a6ce116c5 | from django.urls import path
from api.services import DeputyService
from api.services import VoteService
from api.services import LegislatureService
from api.services import PartyService
from api.services import PropositionService
app_name = 'api'
urlpatterns = [
# path('error/<string:message>', views.error, name='error'),
path('deputies/', DeputyService.deputies, name='deputies'),
path('deputies/<int:deputy_id>', DeputyService.deputy, name='deputy'),
path('votes/', VoteService.votes, name='votes'),
path('legislatures/', LegislatureService.legislatures, name='legislatures'),
path('parties/', PartyService.parties, name='parties'),
path('parties/<int:party_id>', PartyService.party, name='party'),
path('propositions/', PropositionService.propositions, name='propositions'),
path('propositions/<int:proposition_id>',
PropositionService.proposition, name='proposition')
]
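# Usage note (illustrative, not from the original file): because app_name = 'api', these
# routes can be reversed through the namespace, e.g.
#   from django.urls import reverse
#   reverse('api:deputy', args=[42])  # -> '.../deputies/42' under wherever this urlconf is included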
|
py | 1a41b440a97286403744706ded16f732cdcfb64c | #!/usr/bin/env python
"""
Wrap a bash command in a jip script
Please note that this command is intended to work on single-file input/output.
You can specify more than one input file and the command will run independently
on all inputs. The 'output' option is used for pipes explicitly. If you do not
want to pipe your output, but handle output yourself, use the 'outfile'
(-f/--outfile) option. Here is a quick example::
jip bash -n 'LC ${input}' --input A.txt B.txt \
-f '${input|ext}.count' -c 'wc -l ${input} > ${outfile}'
This will run the following two jobs:
wc -l A.txt > A.count
and
wc -l B.txt > B.count
Note that you can also use the job options in the job's name, which might
be useful if you run the job on a compute cluster.
Usage:
jip-bash [-P <profile>] [-t <time>] [-q <queue>] [-p <prio>]
[-A <account>] [-C <threads>] [-m <mem>] [-n <name>] [--hold]
[-N <nodes>] [-T <tasks>] [--tasks-per-node <n>] [-E <pe>]
[-O <out>] [-e <err>] [--dry] [--show]
[-i <input>...] [-o <output>...] [-f <outfile>...]
[-s] [--keep] [--force] [--with-profiler]
[-c <cmd>...]
jip-bash [--help|-h]
Options:
-P, --profile <profile> Select a job profile for resubmission
-t, --time <time> Max wallclock time for the job
-q, --queue <queue> Job queue
-p, --priority <prio> Job priority
-A, --account <account> The account to use for submission
-C, --threads <cpus> Number of CPUs assigned to the job
[default: 1]
-T, --tasks <tasks> Number of requested tasks. In case you submit MPI
jobs, this is the number of MPI CPUs the job
requests
-N, --nodes <nodes> Number of nodes assigned to the job
--tasks-per-node <n> If supported by your grid engine, you can use this
to specify how many tasks should be scheduled on
each requested node
-E, --environment <pe> Specify an environment if your grid engine
supports it. For SGE, this is translated to
the parallel environment
-m, --mem <mem> Max memory assigned to the job
-n, --name <name> Job name
-R, --reload Reload and rerender the job command
-e, --log <err> Jobs stderr log file
-O, --out <out> Jobs stdout log file
-i, --input <input> The scripts input
[default: stdin]
-o, --output <output> The scripts output
[default: stdout]
-f, --outfile <outfile> Optional output file name
-s, --submit Submit as job to the cluster
--hold Put job on hold after submission
--keep Keep output also in case of failure
--dry Show a dry run
--show Show the command that will be executed
--force Force execution/submission
--with-profiler execute the run with a profiler
-c, --cmd <cmd> The bash command line that will be wrapped
-h --help Show this help message
"""
import jip
import jip.cluster
import jip.cli
import jip.profiles
from jip.logger import getLogger
from . import parse_args, colorize, YELLOW, RED
import sys
log = getLogger("jip.cli.jip_bash")
def main():
args = parse_args(__doc__, options_first=False)
pipeline = jip.Pipeline()
bash = pipeline.job(
args['--name'] if args['--name'] else 'bash'
).run('bash')
if not args['--cmd']:
args['--cmd'] = "\n".join(sys.stdin.readlines())
bash.input = [sys.stdin if a == 'stdin' else a
for a in args['--input']]
bash.output = [sys.stdout if a == 'stdout' else a
for a in args['--output']]
bash.outfile = [a for a in args['--outfile']]
bash.cmd = args['--cmd']
if not args['--cmd']:
print >>sys.stderr, "No Command specified!"
sys.exit(1)
if args['--dry'] or args['--show']:
jip.cli.dry(pipeline, [],
dry=args['--dry'],
show=args['--show'])
return
profile = jip.profiles.get(name='default'
if not args['--profile']
else args['--profile'])
profile.load_args(args)
jobs = jip.jobs.create_jobs(pipeline, [], keep=args['--keep'],
profile=profile,
profiler=args['--with-profiler'])
force = args['--force']
if not args["--submit"]:
# assign job ids
for i, j in enumerate(jobs):
j.id = i + 1
for exe in jip.jobs.create_executions(jobs):
if exe.completed and not force:
print >>sys.stderr, colorize("Skipping", YELLOW), exe.name
else:
success = jip.jobs.run_job(exe.job)
if not success:
print >>sys.stderr, colorize(exe.job.state, RED)
sys.exit(1)
else:
try:
#####################################################
# Iterate the executions and submit
#####################################################
for exe in jip.jobs.create_executions(jobs, save=True,
check_outputs=not force,
check_queued=not force):
if exe.completed and not force:
print colorize("Skipping %s" % exe.name, YELLOW)
else:
if jip.jobs.submit_job(exe.job, force=force):
print "Submitted %s with remote id %s" % (
exe.job.id, exe.job.job_id
)
except Exception as err:
log.debug("Submission error: %s", err, exc_info=True)
print >>sys.stderr, colorize("Error while submitting job:", RED), \
colorize(str(err), RED)
##################################################
# delete all submitted jobs
##################################################
jip.jobs.delete(jobs, clean_logs=True)
if __name__ == "__main__":
main()
|
py | 1a41b6457465606b31aff628275ce77192091eb5 | from .sales_order import SalesOrder
|
py | 1a41b6c88e6506448b3d411f39b41aed77cc0ba8 | import numpy as np
from numpy.testing import assert_array_equal, assert_raises
from numpy.testing.decorators import skipif
from skimage.morphology import convex_hull_image, convex_hull_object
from skimage.morphology._convex_hull import possible_hull
try:
import scipy.spatial
scipy_spatial = True
except ImportError:
scipy_spatial = False
@skipif(not scipy_spatial)
def test_basic():
image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
expected = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
assert_array_equal(convex_hull_image(image), expected)
# Test that an error is raised on passing a 3D image:
image3d = np.empty((5, 5, 5))
assert_raises(ValueError, convex_hull_image, image3d)
@skipif(not scipy_spatial)
def test_qhull_offset_example():
nonzeros = (([1367, 1368, 1368, 1368, 1369, 1369, 1369, 1369, 1369, 1370,
1370, 1370, 1370, 1370, 1370, 1370, 1371, 1371, 1371, 1371,
1371, 1371, 1371, 1371, 1371, 1372, 1372, 1372, 1372, 1372,
1372, 1372, 1372, 1372, 1373, 1373, 1373, 1373, 1373, 1373,
1373, 1373, 1373, 1374, 1374, 1374, 1374, 1374, 1374, 1374,
1375, 1375, 1375, 1375, 1375, 1376, 1376, 1376, 1377]),
([151, 150, 151, 152, 149, 150, 151, 152, 153, 148, 149, 150,
151, 152, 153, 154, 147, 148, 149, 150, 151, 152, 153, 154,
155, 146, 147, 148, 149, 150, 151, 152, 153, 154, 146, 147,
148, 149, 150, 151, 152, 153, 154, 147, 148, 149, 150, 151,
152, 153, 148, 149, 150, 151, 152, 149, 150, 151, 150]))
image = np.zeros((1392, 1040), dtype=bool)
image[nonzeros] = True
expected = image.copy()
assert_array_equal(convex_hull_image(image), expected)
@skipif(not scipy_spatial)
def test_pathological_qhull_example():
image = np.array(
[[0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0]], dtype=bool)
expected = np.array(
[[0, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 0, 0, 0]], dtype=bool)
assert_array_equal(convex_hull_image(image), expected)
@skipif(not scipy_spatial)
def test_possible_hull():
image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8)
expected = np.array([[1, 4],
[2, 3],
[3, 2],
[4, 1],
[4, 1],
[3, 2],
[2, 3],
[1, 4],
[2, 5],
[3, 6],
[4, 7],
[2, 5],
[3, 6],
[4, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6]])
ph = possible_hull(image)
assert_array_equal(ph, expected)
@skipif(not scipy_spatial)
def test_object():
image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
expected4 = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
assert_array_equal(convex_hull_object(image, 4), expected4)
expected8 = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
assert_array_equal(convex_hull_object(image, 8), expected8)
assert_raises(ValueError, convex_hull_object, image, 7)
# Test that an error is raised on passing a 3D image:
image3d = np.empty((5, 5, 5))
assert_raises(ValueError, convex_hull_object, image3d)
if __name__ == "__main__":
np.testing.run_module_suite()
|
py | 1a41b7564a7571f9d1032df6dd50e99cff10c615 | # read_numbers.py
#
# Sample program to read numbers from a file, count them and sum them.
# Assumes each line in the file contains a valid number.
# CSC 110
# Winter 2012
# open the file 'numbers.txt' for reading
infile = open('numbers.txt', 'r')
total = 0 # initialization
count = 0 # initialization
line = infile.readline() # read in first line (initialization)
# as long as 'line' isn't an empty string,
# we haven't reached the end of the file
while line != '':
value = float(line) # convert from string to number
print(value)
total += value
count += 1
line = infile.readline() # this is the update -- read another line
infile.close() # close the connection to the file
print('There were ' + str(count) + ' numbers, totaling ' + str(total))
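# Illustrative run (assuming numbers.txt contains the three lines 1.5, 2.5 and 3.0):
#   1.5
#   2.5
#   3.0
#   There were 3 numbers, totaling 7.0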
|
py | 1a41b8b651589aac116664c0c428ae47af39b6d5 | """."""
import networkx as nx
from regraph import Rule
from regraph import NXHierarchy, NXGraph
# from regraph import print_graph
# from regraph import (HierarchyError)
import regraph.primitives as prim
class TestRelations(object):
def __init__(self):
hierarchy = NXHierarchy()
base = NXGraph()
prim.add_nodes_from(base, [
("circle", {"a": {1, 2, 3}}),
("square", {"b": {1, 2, 3}})
])
prim.add_edges_from(base, [
("circle", "circle"),
("square", "square"),
("circle", "square", {"c": {5, 6, 7}}),
("square", "circle")
])
hierarchy.add_graph("base", base)
a1 = NXGraph()
prim.add_nodes_from(a1, [
("black_circle", {"a": {1}}),
("white_circle", {"a": {2}}),
("black_square", {"b": {1}}),
("white_square", {"b": {1}})
])
prim.add_edges_from(a1, [
("white_circle", "white_circle"),
("white_circle", "white_square", {"c": {5}}),
("black_circle", "black_square"),
("black_square", "white_square"),
("black_circle", "white_square", {"c": {6}})
])
hierarchy.add_graph("a1", a1)
hierarchy.add_typing(
"a1", "base",
{
"black_circle": "circle",
"white_circle": "circle",
"white_square": "square",
"black_square": "square"
}
)
a2 = NXGraph()
prim.add_nodes_from(a2, [
("right_circle", {"a": {1, 2}}),
("middle_square", {"b": {1}}),
("left_circle", {"a": 1})
])
prim.add_edges_from(a2, [
("right_circle", "middle_square", {"c": {5, 6, 7}}),
("left_circle", "middle_square", {"c": {6, 7}})
])
hierarchy.add_graph("a2", a2)
hierarchy.add_typing(
"a2", "base",
{
"right_circle": "circle",
"middle_square": "square",
"left_circle": "circle"
}
)
self.hierarchy = hierarchy
def test_add_relation(self):
self.hierarchy.add_relation(
"a2", "a1",
{
"right_circle": {"white_circle", "black_circle"},
"middle_square": "white_square",
"left_circle": "black_circle"
},
{"name": "Some relation"})
g, l, r = self.hierarchy.relation_to_span(
"a1", "a2", edges=True, attrs=True)
# print_graph(g)
# print(l)
# print(r)
# print(self.hierarchy)
# self.hierarchy.remove_graph("a1")
# print(self.hierarchy.relation)
lhs = NXGraph()
lhs.add_nodes_from(["s", "c"])
rule = Rule.from_transform(lhs)
rule.inject_clone_node("s")
# instances = self.hierarchy.find_matching(
# "base",
# rule.lhs
# )
self.hierarchy.rewrite(
"base", rule, {"s": "square", "c": "circle"})
# g, l, r = new_hierarchy.relation_to_span("a1", "a2")
# print_graph(g)
# print(l)
# print(r)
|
py | 1a41b92e0d1a05be4bfd2323bcf2b25f728c47af | # Run Validation test. Use functions to test run and get output
import util
import time
def create_service(nspc, image):
port = "-p 80/http"
fullName = util.rioRun(nspc, port, image)
return fullName
def stage_service(image, fullName, version):
util.rioStage(image, fullName, version)
return
def get_app_info(fullName, field):
time.sleep(10)
inspect = util.rioInspect(fullName, field)
return inspect
def get_version_endpoint(fullName, version):
fullNameVersion = (f"{fullName}:{version}")
time.sleep(10)
endpoint = "status.endpoints[0]"
print(f"{fullNameVersion}")
inspect = util.rioInspect(fullNameVersion, endpoint)
return inspect
def test_rio_app_endpoint(nspc):
image = "ibuildthecloud/demo:v1"
image2 = "ibuildthecloud/demo:v3"
fullName = create_service(nspc, image)
stage_service(image2, fullName, "v3")
appEndpoint = get_app_info(fullName, "status.endpoints[0]")
results = util.run(f"curl {appEndpoint}")
print(f"{results}")
assert results == 'Hello World'
def test_rio_svc_endpoint1(nspc):
image = "ibuildthecloud/demo:v1"
image2 = "ibuildthecloud/demo:v3"
fullName = create_service(nspc, image)
stage_service(image2, fullName, "v3")
svcEndpoint = get_version_endpoint(fullName, "v0")
svcEndpoint2 = get_version_endpoint(fullName, "v3")
results1 = util.run(f"curl {svcEndpoint}")
results2 = util.run(f'curl {svcEndpoint2}')
print(f"{results1}")
assert results1 == 'Hello World'
assert results2 == 'Hello World v3'
|
py | 1a41b9ac4f67847e15d2c5a26ee31b70cce8aec0 | # Faça um algoritmo que leia o preço de um produto e mostre seu novo preço, com 5% de desconto.
preco = float(input('Digite o preço do produto em reais: R$ '))
print('O valor do produto com 5% de desconto será {:.2f} reais'.format(preco - (preco * 0.05)))
|
py | 1a41ba456bea5c2ad4019a7c6bc3cdee15ad1893 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This is a sample project for using MediaPipe in a game.
# It is for teaching, so this is only a demo, not a real game.
import cv2
import time
import mediapipe_pose
import mediapipe_hand
import game_123
import game_hand
# Simply open the camera and display the captured frames
def open_camera(camera_num):
cap = cv2.VideoCapture(camera_num, cv2.CAP_DSHOW)
# Loop continuously to play back frames
while True:
success, image = cap.read()
# If a frame was captured, show it in the window
if success:
cv2.imshow('MediaPipe Pose', image)
# waitKey is needed for the window to actually show the frame; also check here whether ESC was pressed, and if so stop capturing and playback
if cv2.waitKey(5) & 0xFF == 27:
break
# Release the camera and close the windows
cap.release()
cv2.destroyAllWindows()
# Open the camera, display captured frames, and add FPS calculation and display
def open_camera_and_show_fps(camera_num):
cap = cv2.VideoCapture(camera_num, cv2.CAP_DSHOW)
# Records the end time of the previous frame
prev_frame_time = 0
# Records the end time of the current frame
new_frame_time = 0
# Font used for drawing text
font = cv2.FONT_HERSHEY_SIMPLEX
# Loop continuously to play back frames
while True:
success, image = cap.read()
# If a frame was captured, show it in the window
if success:
# Get the current time from time.time()
new_frame_time = time.time()
# Compute the fps, based on how much time each frame takes
fps = 1 / (new_frame_time - prev_frame_time)
prev_frame_time = new_frame_time
# Keep two decimal places and convert to a string
fps = '目前 fps:' + str(round(fps, 2))
# putting the FPS count on the frame
# cv2.putText(image, fps, (10, 30), font, 1, (100, 255, 0), 2, cv2.LINE_AA)
image = game_123.add_chinese_font_to_image(image, fps, 10, 10)
# Use OpenCV to display the image
cv2.imshow('MediaPipe Pose', image)
# waitKey is needed for the window to actually show the frame; also check here whether ESC was pressed, and if so stop capturing and playback
if cv2.waitKey(5) & 0xFF == 27:
break
# Release the camera and close the windows
cap.release()
cv2.destroyAllWindows()
# Open the camera, display captured frames, and add FPS calculation and display
def open_camera_show_fps_pose_estimation(camera_num):
cap = cv2.VideoCapture(camera_num, cv2.CAP_DSHOW)
# Records the end time of the previous frame
prev_frame_time = 0
# Records the end time of the current frame
new_frame_time = 0
# Font used for drawing text
font = cv2.FONT_HERSHEY_SIMPLEX
# Loop continuously to play back frames
while True:
success, image = cap.read()
# If a frame was captured, show it in the window
if success:
# Pass the frame to the MediaPipe module for pose estimation
image = mediapipe_pose.pose_estimation(image)
# image, right_box, left_box = mediapipe_hand.hands_estimation(image)
# Get the current time from time.time()
new_frame_time = time.time()
# Compute the fps, based on how much time each frame takes
fps = 1 / (new_frame_time - prev_frame_time)
prev_frame_time = new_frame_time
# Keep two decimal places and convert to a string
fps = 'fps:' + str(round(fps, 2))
# putting the FPS count on the frame
cv2.putText(image, fps, (10, 30), font, 1, (100, 255, 0), 2, cv2.LINE_AA)
# Use OpenCV to display the image
cv2.imshow('MediaPipe Pose', image)
# waitKey is needed for the window to actually show the frame; also check here whether ESC was pressed, and if so stop capturing and playback
if cv2.waitKey(5) & 0xFF == 27:
break
# Release the camera and close the windows
cap.release()
cv2.destroyAllWindows()
# main entry point
if __name__ == '__main__':
# open_camera(0)
# open_camera_and_show_fps(0)
# open_camera_show_fps_pose_estimation(0)
# game_123.play_game()
game_hand.play_game()
|
py | 1a41ba887e09f06733639b2581530efcede1ff71 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'AdDestinationTypeEnum',
},
)
class AdDestinationTypeEnum(proto.Message):
r"""Container for enumeration of Google Ads destination types.
"""
class AdDestinationType(proto.Enum):
r"""Enumerates Google Ads destination types"""
UNSPECIFIED = 0
UNKNOWN = 1
NOT_APPLICABLE = 2
WEBSITE = 3
APP_DEEP_LINK = 4
APP_STORE = 5
PHONE_CALL = 6
MAP_DIRECTIONS = 7
LOCATION_LISTING = 8
MESSAGE = 9
LEAD_FORM = 10
YOUTUBE = 11
UNMODELED_FOR_CONVERSIONS = 12
__all__ = tuple(sorted(__protobuf__.manifest))
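# Usage sketch (not part of the generated file; assumes standard proto-plus enum behaviour,
# where enum members act like IntEnum values):
#   AdDestinationTypeEnum.AdDestinationType.WEBSITE == 3
#   AdDestinationTypeEnum.AdDestinationType(3).name == 'WEBSITE'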
|
py | 1a41ba8cd05bef536a77a37f788267cb565eb906 | import numpy
import theano
import theano.tensor as tensor
from nmt import RNNsearch
from binmt import BiRNNsearch
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import tools
from layer import LayerFactory
from config import *
from optimizer import adadelta, SGD, adam, adam_slowstart
from data import DataCollection, getbatch
from mrt_utils import getMRTBatch
import cPickle
import json
import argparse
import signal
import time
import datetime
import logging
import types
parser = argparse.ArgumentParser("the script for training the NMT model")
parser.add_argument('-c', '--config', help = 'path to configuration file', required = True)
parser.add_argument('--debug', action = 'store_true', help = 'set verbose level for debugging')
parser.add_argument('--map', help = 'path to the mapping file')
parser.add_argument('--save-all', action = 'store_true', help = 'save all intermediate models')
args = parser.parse_args()
if args.debug:
logging.basicConfig(level = logging.DEBUG,
format = '[%(asctime)s %(levelname)s] %(message)s',
datefmt = '%d %b %H:%M:%S')
logging.debug('training with debug info')
else:
logging.basicConfig(level = logging.INFO,
format = '[%(asctime)s %(levelname)s] %(message)s',
datefmt = '%d %b %H:%M:%S')
if __name__ == '__main__':
# initialize config
config = config()
if args.config:
config = update_config(config, load_config(open(args.config, 'r').read()))
print_config(config)
if config['MRT']:
config['batchsize'] = 1 # the mini-batch size must be 1 for MRT
mapping = None
if args.map:
mapping = cPickle.load(open(args.map, 'r'))
logging.info('STEP 2: Training')
# prepare data
logging.info('STEP 2.1: Loading training data')
data = DataCollection(config)
logging.info('Done!\n')
# build model
logging.info('STEP 2.2: Building model')
model = eval(config['model'])(config)
model.build()
logging.info('Done!\n')
logging.info('STEP 2.3: Building optimizer')
trainer = eval(config['optimizer'])(config, model.creater.params)
update_grads, update_params = trainer.build(model.cost, model.inputs)
logging.info('Done!\n')
# load checkpoint
logging.info('STEP 2.4: Loading checkpoint')
data.load_status(config['checkpoint_status'])
model.load(config['checkpoint_model'])
logging.info('Done!\n')
# train
logging.info('STEP 2.5: Online training')
while data.num_iter < config['max_iter']:
try:
st = time.time()
data.num_iter += 1
trainx, trainy = data.next()
x, xmask, y, ymask = getbatch(trainx, trainy, config)
if 'MRT' in config and config['MRT'] is True:
x, xmask, y, ymask, MRTLoss = getMRTBatch(x, xmask, y, ymask, config, model, data)
if config['semi_learning']:
xm, ym = data.next_mono()
xm, xmask, ym, ymask = getbatch(xm, ym, config)
x, xmask, y, ymask, valid = model.get_inputs_batch(x, y, xm, ym)
# saving checkpoint
if data.num_iter % config['checkpoint_freq'] == 0:
model.save(config['checkpoint_model'], data = data, mapping = mapping)
data.save_status(config['checkpoint_status'])
# saving and validating intermediate models
if config['save']:
if data.num_iter % config['save_freq'] == 0:
if args.save_all:
logging.info('Saving an intermediate model')
model.save(config['save_path'] + '/model_iter' + str(data.num_iter) + '.npz', data = data, mapping = mapping)
logging.info('Validating the model at iteration ' + str(data.num_iter))
output_path = config['valid_dir'] + '/iter_' + str(data.num_iter) + '.trans'
valid_input = open(config['valid_src'], 'r')
valid_output = open(output_path, 'w')
line = valid_input.readline()
valid_num = 0
# translating
while line != '':
line = line.strip()
result = model.translate(data.toindex_source(line.split(' ')))
print >> valid_output, data.print_target(numpy.asarray(result))
valid_num += 1
if valid_num % 100 == 0:
logging.info('%d sentences translated' % valid_num)
line = valid_input.readline()
valid_output.close()
valid_refs = tools.get_ref_files(config['valid_ref'])
# logging
data.valid_result[data.num_iter] = 100 * tools.bleu_file(output_path, valid_refs)
data.valid_time[data.num_iter] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
f = open('log', 'w')
f.write(data.print_log())
f.close()
data.print_valid()
logging.info('Done!\n')
# update the best model
if data.last_improved(last = True) == 0:
model.save(config['save_path'] + '/model_best.npz', data = data, mapping = mapping)
if data.last_improved() >= config['try_iter']:
logging.info('No improvement for %d iterations. Stop training.\n' % data.last_improved())
break
# updating gradients
upst = time.time()
if 'MRT' in config and config['MRT'] is True:
cost, grad_norm = update_grads(x, xmask, y, ymask, MRTLoss)
elif config['semi_learning']:
cost, grad_norm = update_grads(x, xmask, y, ymask, y, ymask, x, xmask, valid)
else:
cost, grad_norm = update_grads(x, xmask, y, ymask)
# NaN processing
if numpy.isinf(cost.mean()) or numpy.isnan(cost.mean()):
logging.warning('There is an NaN!')
update_params()
ed = time.time()
data.time += ed - st
data.updatetime += ed - upst
data.train_cost.append(cost.mean())
logging.debug('iteration %d: cost = %.4f, grad_norm = %.3e,' % (data.num_iter, cost.mean(), grad_norm)+
' iter_time = %.3f, total_time: %s' % (ed - st, tools.print_time(data.time)))
except KeyboardInterrupt:
logging.info('\nStop training by keyboard interruption.')
break
# save checkpoint
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
logging.info('Saving model and status\n')
model.save(config['checkpoint_model'], data = data, mapping = mapping)
data.save_status(config['checkpoint_status'])
logging.info('The training is completed.\n')
signal.signal(signal.SIGINT, s)
|
py | 1a41bb13b301b5213fa9d1cab1f3789862f29044 | # Copyright (c) 2020 Portworx
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import base64
import os
from kubernetes import client, config
from openstorage import api_pb2
from openstorage import api_pb2_grpc
# Environment Variables
ENDPOINT_ENV_KEY = 'OPENSTORAGE_SDK_ENDPOINT'
SECURE_ENV_KEY = 'OPENSTORAGE_SDK_SECURE'
TOKEN_ENV_KEY = 'OPENSTORAGE_SDK_TOKEN'
CAFILE_ENV_KEY = 'OPENSTORAGE_SDK_CAFILE'
SECRET_NAME_ENV_KEY = 'OPENSTORAGE_SDK_SECRET_NAME'
SECRET_NAMESPACE_ENV_KEY = 'OPENSTORAGE_SDK_SECRET_NAMESPACE'
class Connector(object):
"""
Connects to OpenStorage SDK server.
Manages connection and setup of the gRPC when using tokens and TLS.
The token may be passed in or fetched from a Kubernetes secret.
"""
def __init__(self, endpoint='', secure=False, token='', cafile='',
token_secret_namespace='', token_secret_name=''):
"""
:param endpoint: gRPC endpoint to OpenStorage SDK server
:type endpoint: str
:param secure: use TLS for the connection
:type secure: bool
:param token: OpenStorage Auth token
:type token: str
:param cafile: Path to CA file if required for TLS. If not provided
and 'secure' is enabled, then the CA must be part of the host.
:type cafile: str
:param token_secret_name: Name of the Kubernetes secret containing
the OpenStorage Auth token
:type token_secret_name: str
:param token_secret_namespace: Name of the namespace in Kubernetes
containing the secret object with the OpenStorage Auth token
:type token_secret_namespace: str
"""
self.endpoint = endpoint
self.secure = secure
self.token = token
self.cafile = cafile
self.token_secret_name = token_secret_name
self.token_secret_namespace = token_secret_namespace
# Override settings using the environment
self._from_environment()
if self.endpoint == '':
raise Exception('Endpoint not provided')
# Check if secret must be fetched from Kubernetes
if self._use_k8s_secret():
self.token = self._get_kubernetes_secret()
def connect(self, opts=None):
"""
Connect to server
:param opts:gRPC channel options if any
:return: A gRPC channel
"""
if self._is_secure():
return self._get_secure_channel(opts)
elif self._is_authenticated():
return self._get_auth_insecure_channel(opts)
return grpc.insecure_channel(self.endpoint, opts)
def _from_environment(self):
e = os.getenv(ENDPOINT_ENV_KEY)
if e:
self.endpoint = e
e = os.getenv(SECURE_ENV_KEY)
if e:
self.secure = e.lower() in ['true', '1', 't', 'y', 'yes']
e = os.getenv(TOKEN_ENV_KEY)
if e:
self.token = e
e = os.getenv(CAFILE_ENV_KEY)
if e:
self.cafile = e
e = os.getenv(SECRET_NAME_ENV_KEY)
if e:
self.token_secret_name = e
e = os.getenv(SECRET_NAMESPACE_ENV_KEY)
if e:
self.token_secret_namespace = e
def _use_k8s_secret(self):
return self.token_secret_name != '' and self.token_secret_namespace != ''
def _is_secure(self):
return self.secure or self.cafile != ''
def _is_authenticated(self):
return self.token != ''
def _get_kubernetes_secret(self):
config.load_kube_config()
v1 = client.CoreV1Api()
ret = v1.read_namespaced_secret(self.token_secret_name, self.token_secret_namespace)
return str(base64.b64decode(ret.data['auth-token']), "utf-8")
def _get_secure_channel(self, opts=None):
# Setup CA if any
with open(self.cafile, 'rb') as f:
capem = f.read()
creds = grpc.ssl_channel_credentials(root_certificates=capem)
# Setup authentication if any
if self._is_authenticated():
auth = grpc.access_token_call_credentials(self.token)
return grpc.secure_channel(self.endpoint, grpc.composite_channel_credentials(creds, auth), opts)
return grpc.secure_channel(self.endpoint, creds, opts)
def _get_auth_insecure_channel(self, opts=None):
channel = grpc.insecure_channel(self.endpoint, opts)
return grpc.intercept_channel(channel, TokenAuthentication(self.token))
class TokenAuthentication(grpc.UnaryUnaryClientInterceptor):
"""
gRPC interceptor which allows authentication to a non-TLS server
"""
def __init__(self, token):
self.token = token
def intercept_unary_unary(self, continuation, client_call_details, request):
try:
client_call_details.metadata.append(("authorization", "bearer "+self.token))
except AttributeError:
md = []
md.append(("authorization", "bearer "+self.token))
client_call_details = client_call_details._replace(metadata=md)
return continuation(client_call_details, request)
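# Usage sketch (not part of the original file; the endpoint is a placeholder):
#   channel = Connector(endpoint='localhost:9100').connect()
#   # ...then pass 'channel' to the generated api_pb2_grpc service stubs.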
|
py | 1a41bbd94898b47bf242e854ea1ee7c9f19577a0 | """ nftfw - Geoip2 support
Requires python3-geoip2 and geoipupdate packages
and a license from MaxMind
https://dev.maxmind.com/geoip/geoip2/geolite2/
"""
import os.path
class GeoIPCountry:
"""Lookup ip addresses in geoip2 """
# Set up reader
countryreader = None
# Country database
country = '/var/lib/GeoIP/GeoLite2-Country.mmdb'
# Errors
# pylint: disable=invalid-name
AddressNotFoundError = None
InvalidDatabaseError = None
def __init__(self):
"""Check geoip2 availability
See if the country database file can be found
"""
# geoip2 may not be installed
# but pylint will complain on bullseye with import-outside-toplevel
# if the disable code is installed, pylint will complain on buster
# about the disable code below (now deactivated)
# pylint argument disable=import-outside-toplevel
# All this is to allow the system to run when geoip2 is not installed
# so we don't insist on it
try:
from geoip2.database import Reader
from geoip2.errors import AddressNotFoundError
from maxminddb import InvalidDatabaseError
self.AddressNotFoundError = AddressNotFoundError
self.InvalidDatabaseError = InvalidDatabaseError
if os.path.exists(self.country):
self.countryreader = Reader(self.country)
except ImportError:
return
def isinstalled(self):
"""Return True if we have a reader """
return self.countryreader is not None
def lookup(self, ip):
"""Lookup an ip in the geoip2 database
Parameters
----------
ip : str
Ip to lookup
Returns
-------
tuple (name, iso)
name : str
Country name
None if no reader
or no result
iso : str
Two character ISO code for the country
"""
# pylint: disable=no-member
if self.countryreader is None:
return(None, None)
# remove any mask from ip
if ip[-3] == '/':
ip = ip[0:-3]
elif ip[-2] == '/':
ip = ip[0:-2]
try:
cn = self.countryreader.country(ip)
iso = None
cname = None
if cn.country.iso_code:
iso = cn.country.iso_code
if cn.country.name:
cname = cn.country.name
return(cname, iso)
except (ValueError, AttributeError, self.AddressNotFoundError, self.InvalidDatabaseError):
return(None, None)
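# Usage sketch (not part of the original file):
#   geo = GeoIPCountry()
#   if geo.isinstalled():
#       name, iso = geo.lookup('192.0.2.1')  # returns (None, None) when the address is unknown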
|
py | 1a41bcd14d0f8656fd794fda68ac4084a83f6b90 | """
coast - Plot land and water.
"""
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
args_in_kwargs,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@use_alias(
R="region",
J="projection",
A="area_thresh",
C="lakes",
B="frame",
D="resolution",
E="dcw",
I="rivers",
L="map_scale",
N="borders",
W="shorelines",
G="land",
S="water",
U="timestamp",
V="verbose",
X="xshift",
Y="yshift",
c="panel",
p="perspective",
t="transparency",
)
@kwargs_to_strings(R="sequence", c="sequence_comma", p="sequence")
def coast(self, **kwargs):
r"""
Plot continents, shorelines, rivers, and borders on maps
Plots grayshaded, colored, or textured land-masses [or water-masses] on
maps and [optionally] draws coastlines, rivers, and political
boundaries. Alternatively, it can (1) issue clip paths that will
contain all land or all water areas, or (2) dump the data to an ASCII
table. The data files come in 5 different resolutions: (**f**)ull,
(**h**)igh, (**i**)ntermediate, (**l**)ow, and (**c**)rude. The full
resolution files amount to more than 55 Mb of data and provide great
detail; for maps of larger geographical extent it is more economical to
use one of the other resolutions. If the user selects to paint the
land-areas and does not specify fill of water-areas then the latter
will be transparent (i.e., earlier graphics drawn in those areas will
not be overwritten). Likewise, if the water-areas are painted and no
land fill is set then the land-areas will be transparent.
A map projection must be supplied.
Full option list at :gmt-docs:`coast.html`
{aliases}
Parameters
----------
{J}
{R}
*Required if this is the first plot command.*
{A}
{B}
lakes : str or list
*fill*\ [**+l**\|\ **+r**].
Set the shade, color, or pattern for lakes and river-lakes. The
default is the fill chosen for wet areas set by the ``water``
parameter. Optionally, specify separate fills by appending
**+l** for lakes or **+r** for river-lakes, and passing multiple
strings in a list.
resolution : str
**f**\|\ **h**\|\ **i**\|\ **l**\|\ **c**.
Selects the resolution of the data set to: (**f**\ )ull,
(**h**\ )igh, (**i**\ )ntermediate, (**l**\ )ow,
and (**c**\ )rude.
land : str
Select filling or clipping of "dry" areas.
rivers : int or str or list
*river*\ [/*pen*].
Draw rivers. Specify the type of rivers and [optionally] append
pen attributes [Default pen is width = default, color = black,
style = solid].
Choose from the list of river types below; pass a list to
``rivers`` to use multiple arguments.
0 = Double-lined rivers (river-lakes)
1 = Permanent major rivers
2 = Additional major rivers
3 = Additional rivers
4 = Minor rivers
5 = Intermittent rivers - major
6 = Intermittent rivers - additional
7 = Intermittent rivers - minor
8 = Major canals
9 = Minor canals
10 = Irrigation canals
You can also choose from several preconfigured river groups:
a = All rivers and canals (0-10)
A = All rivers and canals except river-lakes (1-10)
r = All permanent rivers (0-4)
R = All permanent rivers except river-lakes (1-4)
i = All intermittent rivers (5-7)
c = All canals (8-10)
map_scale : str
[**g**\|\ **j**\|\ **J**\|\ **n**\|\ **x**]\ *refpoint*\
**+w**\ *length*.
Draws a simple map scale centered on the reference point specified.
borders : int or str or list
*border*\ [/*pen*].
Draw political boundaries. Specify the type of boundary and
[optionally] append pen attributes [Default pen is width = default,
color = black, style = solid].
Choose from the list of boundaries below. Pass a list to
``borders`` to use multiple arguments.
1 = National boundaries
2 = State boundaries within the Americas
3 = Marine boundaries
a = All boundaries (1-3)
water : str
Select filling or clipping of "wet" areas.
{U}
shorelines : int or str or list
[*level*\ /]\ *pen*.
Draw shorelines [Default is no shorelines]. Append pen attributes
[Default is width = default, color = black, style = solid] which
apply to all four levels. To set the pen for a single level,
pass a string with *level*\ /*pen*\ , where level is
1-4 and represent coastline, lakeshore, island-in-lake shore, and
lake-in-island-in-lake shore. Pass a list of *level*\ /*pen*
strings to ``shorelines`` to set multiple levels. When specific
level pens are set, those not listed will not be drawn.
dcw : str or list
*code1,code2,…*\ [**+l**\|\ **L**\ ][**+g**\ *fill*\ ]
[**+p**\ *pen*\ ][**+z**].
Select painting or dumping country polygons from the
`Digital Chart of the World
<https://en.wikipedia.org/wiki/Digital_Chart_of_the_World>`__.
Append one or more comma-separated countries using the 2-character
`ISO 3166-1 alpha-2 convention
<https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`__.
To select a state of a country (if available), append
.\ *state*, (e.g, US.TX for Texas). To specify a whole continent,
prepend **=** to any of the continent codes (e.g. =EU for Europe).
Append **+p**\ *pen* to draw polygon outlines
(default is no outline) and **+g**\ *fill* to fill them
(default is no fill). Append **+l**\|\ **+L** to =\ *continent* to
only list countries in that continent; repeat if more than one
continent is requested.
{XY}
{c}
{p}
{t}
{V}
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
if not args_in_kwargs(args=["C", "G", "S", "I", "N", "E", "Q", "W"], kwargs=kwargs):
raise GMTInvalidInput(
"""At least one of the following parameters must be specified:
lakes, land, water, rivers, borders, dcw, Q, or shorelines"""
)
with Session() as lib:
lib.call_module(module="coast", args=build_arg_string(kwargs))
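# Usage sketch (not part of the original file): a minimal map using a few of the aliases above.
#   import pygmt
#   fig = pygmt.Figure()
#   fig.coast(region=[-10, 10, 35, 55], projection="M15c",
#             land="gray", water="skyblue", shorelines="1/0.5p")
#   fig.show()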
|
py | 1a41bcecb952e2ee37f5af00d823cffb077d5d38 | #!/usr/bin/env python
"""Tests vertex list drawing.
"""
from __future__ import absolute_import
import unittest
import pyglet
from tests.annotations import Platform, skip_platform
from .graphics_common import GraphicsGenericTestCase, get_feedback, GL_TRIANGLES
@skip_platform(Platform.OSX) # TODO: Check whether OpenGL < 3.0 or compatibility profile is enabled
class RetainedDrawingTestCase(GraphicsGenericTestCase, unittest.TestCase):
def get_feedback(self, data):
vertex_list = pyglet.graphics.vertex_list(self.n_vertices, *data)
return get_feedback(lambda: vertex_list.draw(GL_TRIANGLES))
|
py | 1a41bd0070fb193933e44f6bb00984a54132a2ae | from PIL import Image, ImageOps
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import time
from timeit import default_timer as timer
#########from IPython.display import display
# Create the sorting functions
def triParComptage(Tab):
# Initialize the variables
bSup=max(Tab)
TabComptage=[]
x = 0
# Initialize the counting array to 0
for i in range (max(Tab)+1):
TabComptage.append(0)
# Build the counting array
for i in range (len(Tab)):
TabComptage[Tab[i]]+=1
# Build the sorted array
for i in range (bSup+1):
for j in range (TabComptage[i]):
Tab[x] = i
x+=1
return Tab
def tri_fusion(m):
if len(m) <= 1:
return m
milieu = len(m) // 2
# take the middle index
gauche = m[:milieu]
droite = m[milieu:]
# recurse on both halves again and again
gauche = tri_fusion(gauche)
droite = tri_fusion(droite)
# build a list with the merge (fusion) function
return list(fusion(gauche,droite))
def fusion(gauche,droite):
resultat = []
# define the search positions
index_gauche=0
index_droite=0
# loop while the search positions are within the bounds of both lists
while index_gauche < len(gauche) and index_droite < len(droite):
# if list1[i] is less than or equal to list2[i]
if gauche[index_gauche] <= droite[index_droite]:
# then the result list takes the element from the left list
resultat.append(gauche[index_gauche])
#i1=i1+1
index_gauche += 1
else:
# then the result list takes the element from the right list
resultat.append(droite[index_droite])
#i2=i2+1
index_droite += 1
# if a list still has elements, the result list appends them (e.g. gauche=[1,2,3,4,5,6]
# gives gauche[2:]=[3,4,5,6]; with extend each element is added individually, so we
# get liste_final=[3,4,5,6] and not liste_final=[[3,4,5,6]])
if gauche:
resultat.extend(gauche[index_gauche:])
if droite:
resultat.extend(droite[index_droite:])
return resultat
def tri_selection(tab):
for i in range(len(tab)):
# Find the minimum
min = i
for j in range(i+1, len(tab)):
if tab[min] > tab[j]:
min = j
tmp = tab[i]
tab[i] = tab[min]
tab[min] = tmp
return tab
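# Illustrative check (not in the original file): the three sorts agree on a small list.
#   print(triParComptage([3, 1, 2]))  # -> [1, 2, 3] (sorts in place and returns the list)
#   print(tri_fusion([3, 1, 2]))      # -> [1, 2, 3] (returns a new sorted list)
#   print(tri_selection([3, 1, 2]))   # -> [1, 2, 3] (in-place selection sort)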
# Function to compute the histogram stretching (dilation)
def dilatation_histo(Pixel,h):
# Sort the list
#triParComptage(Pixel)
imin=h[0]
imax=h[-1]
l, g = Pixel.size
for y in range(g):
for x in range(l):
c = Pixel.getpixel((x, y))
dilat = int((c)*(c-imin)/(imax-imin))
if dilat>255:
dilat=255
Pixel.putpixel((x, y), dilat)
return Pixel
# Function to change the image brightness
def lumino(img,lum):
a=input('voulez vous up ou down la lumino \nTape 1 ou Tape 2\nVotre réponse: ')
h,l = img.size
if a == '1': # the if must come before the two loops that walk through the image
# otherwise the program would wait for the user's value of a after every pass through the loop
for y in range(h-1):
for x in range(l-1):
c = img.getpixel((y, x))
c += lum
if c > 255:
img.putpixel((y,x), 255)
else:
img.putpixel((y, x), c)
elif a == '2':
for y in range(h-1):
for x in range(l-1):
c = img.getpixel((y, x))
c -= lum
if c < 0:
img.putpixel((y,x), 0)
else:
img.putpixel((y, x), c)
return img
# List containing all the pixels of the image
def création(Img):
Pixel=[]
(l,h)=Img.size
for i in range(l):
for j in range(h):
p = Img.getpixel((i, j))
Pixel.append(p)
return Pixel
# Load the image
Img = Image.open("D:\Programe\Python\APP1\APP\APP3\lena_ng.png")
#Menu
print("\n\nBienvenue,\nvous allez lancer le programme de modification d'image.")
n=int(input("veuillez choisir un programme à lancer: \n1 Tri des couleurs de l'image \n2 modifier la luminosité \n3 Augmenter le contraste \n\n Entrer ici le numéro: "))
if n==1:
u=int(input("Quel algorithme de tri voulez vous utiliser ?\n\n1 Tri par comptage \n2 Tri par fusion \n3 Tri par séléction \n Entrer ici le numéro: "))
Pixel = création(Img)
if u==1:
start_time = time.time()
h=triParComptage(Pixel)
print(h)
print("Temps d execution : %s secondes ---" % (time.time() - start_time))
if u==2:
start_time = time.time()
h=tri_fusion(Pixel)
print(h)
print("Temps d execution : %s secondes ---" % (time.time() - start_time))
if u==3:
print(" Cela va prendre un peu de temps...")
start_time = time.time()
h=tri_selection(Pixel)
print(h)
print("Temps d execution : %s secondes ---" % (time.time() - start_time))
plt.hist(h, range = (0, 255), bins = 255, color = 'yellow', edgecolor = 'red')
plt.xlabel('Niveaux de gris')
plt.ylabel('Effectif')
plt.title('Histogramme des différentes valeurs des pixels dans l\'image')
plt.show
if n==2:
Img.show()
# Call the function
value = int(input("entrer un contraste entre 0 et 255: "))
img1 = lumino(Img,value)
# Display the images
img1.show()
if n==3:
print("\nImage originale :")
Img.show()
Pixel = création(Img)
h=triParComptage(Pixel)
plt.hist(h, range = (0, 255), bins = 255, color = 'green', edgecolor = 'blue')
plt.xlabel('Niveaux de gris')
plt.ylabel('Effectif')
plt.title('Histogramme des différentes valeurs des pixels dans l\'image')
plt.show()
dilatation_histo(Img,h)
m=création(Img)
Img.show()
plt.hist(m, range = (0, 255), bins = 255, color = 'yellow', edgecolor = 'red')
plt.xlabel('Niveaux de gris')
plt.ylabel('Effectif')
plt.title('Histogramme des différentes valeurs des pixels dans l\'image')
plt.show()
print("\nImage contrastée :")
print("\nHistogramme de dilatation : en bleu avant la dilatation, en rouge après la dilatation\n")
# End of menu |
py | 1a41bd92b2c168afbf457297062127c6125c8eaf | import os
import glob
import argparse
from keras.callbacks import (
LearningRateScheduler,
ModelCheckpoint
)
from keras.datasets import cifar10
from keras.layers import (
Activation,
Input,
Dense,
Flatten
)
from keras.models import Model
from keras.optimizers import SGD, RMSprop, Adam, Nadam
from keras.utils.visualize_util import plot
from keras.utils import np_utils
from keras import backend as K
from fractalnet import fractal_net
NB_CLASSES = 10
NB_EPOCHS = 400
LEARN_START = 0.02
BATCH_SIZE = 100
MOMENTUM = 0.9
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print X_train.shape
# Drop by 10 when we halve the number of remaining epochs (200, 300, 350, 375)
def learning_rate(epoch):
if epoch < 200:
return 0.02
if epoch < 300:
return 0.002
if epoch < 350:
return 0.0002
if epoch < 375:
return 0.00002
return 0.000002
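# For example (illustrative): learning_rate(0) == 0.02, learning_rate(250) == 0.002,
# learning_rate(320) == 0.0002, learning_rate(360) == 0.00002 and learning_rate(390) == 0.000002.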
def build_network(deepest=False):
dropout = [0., 0.1, 0.2, 0.3, 0.4]
conv = [(64, 3, 3), (128, 3, 3), (256, 3, 3), (512, 3, 3), (512, 2, 2)]
input= Input(shape=(3, 32, 32) if K._BACKEND == 'theano' else (32, 32,3))
output = fractal_net(
c=3, b=5, conv=conv,
drop_path=0.15, dropout=dropout,
deepest=deepest)(input)
output = Flatten()(output)
output = Dense(NB_CLASSES, init='he_normal')(output)
output = Activation('softmax')(output)
model = Model(input=input, output=output)
#optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM)
#optimizer = SGD(lr=LEARN_START, momentum=MOMENTUM, nesterov=True)
optimizer = Adam()
#optimizer = Nadam()
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
plot(model, to_file='model.png', show_shapes=True)
return model
def train_network(net):
print("Training network")
snapshot = ModelCheckpoint(
filepath="snapshots/weights.{epoch:04d}-{val_loss:.4f}.h5",
monitor="val_loss",
save_best_only=False)
learn = LearningRateScheduler(learning_rate)
net.fit(
x=X_train, y=Y_train, batch_size=BATCH_SIZE,
nb_epoch=NB_EPOCHS, validation_data=(X_test, Y_test),
#callbacks=[learn, snapshot]
callbacks=[snapshot]
)
def test_network(net, weights):
print("Loading weights from '{}' and testing".format(weights))
net.load_weights(weights)
ret = net.evaluate(x=X_test, y=Y_test, batch_size=BATCH_SIZE)
print('Test:', ret)
def main():
parser = argparse.ArgumentParser(description='FractalNet on CIFAR-10')
parser.add_argument('--load', nargs=1,
help='Test network with weights file')
parser.add_argument('--deepest', help='Build with only deepest column activated',
action='store_true')
parser.add_argument('--test-all', nargs=1,
help='Test all the weights from a folder')
parser.add_argument('--summary',
help='Print a summary of the network and exit',
action='store_true')
args = parser.parse_args()
net = build_network(deepest=args.deepest)
if args.load:
weights = args.load[0]
test_network(net, weights)
elif args.test_all:
folder = args.test_all[0]
for weights in glob.glob(os.path.join(folder, 'weigh*')):
test_network(net, weights)
elif args.summary:
net.summary()
else:
train_network(net)
main()
|
py | 1a41be06dfed85808f2d191b22bb6c8dd01e1ee5 | """
Note: When using this API, many of the commands come with an option to skip the initialization of the comms, e.g. ...
def read_word(self, address, initialize_comms=True):
Setting initialize_comms=False will skip the comms initialization step and save ~0.2 seconds. However, one initialization
needs to be done to get things running. Therefore the fastest way to perform 5 register reads is...
pystlink.read_word(0x08000000, initialize_comms=True)
pystlink.read_word(0x08000000, initialize_comms=False)
pystlink.read_word(0x08000000, initialize_comms=False)
pystlink.read_word(0x08000000, initialize_comms=False)
pystlink.read_word(0x08000000, initialize_comms=False)
"""
import time
from textwrap import wrap
from pystlink import lib
from pystlink.lib import stlinkv2
from pystlink.lib import stlinkusb
from pystlink.lib import stm32
from pystlink.lib import stm32fp
from pystlink.lib import stm32fs
from pystlink.lib import stm32l0
from pystlink.lib import stm32l4
from pystlink.lib import stm32h7
from pystlink.lib import stm32devices
from pystlink.lib import stlinkex
from pystlink.lib import dbg
from pystlink.lib.srec import Srec
class PyStlink():
CPUID_REG = 0xe000ed00
def __init__(self, verbosity=0):
self.stlink = None
self.driver = None
self._dbg = dbg.Dbg(verbosity)
self._serial = None
self._index = 0
self._hard = False
self._connector = stlinkusb.StlinkUsbConnector(dbg=self._dbg, serial=self._serial, index=self._index)
self.comms_initialized = False
try:
self.initialize_comms()
except stlinkex.StlinkException:
pass
def initialize_comms(self):
self.initialize_stlink_comms()
if self.stlink.coreid == 0:
raise stlinkex.StlinkException('STLink could not connect to microcontroller')
self._core = stm32.Stm32(self.stlink, dbg=self._dbg)
self.find_mcus_by_core()
self._dbg.info("CORE: %s" % self._mcus_by_core['core'])
self.find_mcus_by_devid()
self.find_mcus_by_flash_size()
self._dbg.info("MCU: %s" % '/'.join([mcu['type'] for mcu in self._mcus]))
self._dbg.info("FLASH: %dKB" % self._flash_size)
self.load_driver()
self.comms_initialized = True
def initialize_stlink_comms(self):
self.stlink = stlinkv2.Stlink(self._connector, dbg=self._dbg)
self._dbg.info("DEVICE: ST-Link/%s" % self.stlink.ver_str)
self._dbg.info("SUPPLY: %.2fV" % self.stlink.target_voltage)
self._dbg.verbose("COREID: %08x" % self.stlink.coreid)
def get_target_voltage(self):
self.initialize_stlink_comms()
return self.stlink.target_voltage
def find_mcus_by_core(self):
if (self._hard):
self._core.core_hard_reset_halt()
else:
self._core.core_halt()
cpuid = self.stlink.get_debugreg32(PyStlink.CPUID_REG)
if cpuid == 0:
raise stlinkex.StlinkException('Not connected to CPU')
self._dbg.verbose("CPUID: %08x" % cpuid)
partno = 0xfff & (cpuid >> 4)
for mcu_core in stm32devices.DEVICES:
if mcu_core['part_no'] == partno:
self._mcus_by_core = mcu_core
return
raise stlinkex.StlinkException('PART_NO: 0x%03x is not supported' % partno)
def find_mcus_by_devid(self):
# STM32H7 hack: this MCU has ID-CODE on different address than STM32F7
devid = 0x000
idcode_regs = self._mcus_by_core['idcode_reg']
if isinstance(self._mcus_by_core['idcode_reg'], int):
idcode_regs = [idcode_regs]
for idcode_reg in idcode_regs:
idcode = self.stlink.get_debugreg32(idcode_reg)
self._dbg.verbose("IDCODE: %08x" % idcode)
devid = 0xfff & idcode
for mcu_devid in self._mcus_by_core['devices']:
if mcu_devid['dev_id'] == devid:
self._mcus_by_devid = mcu_devid
return
raise stlinkex.StlinkException('DEV_ID: 0x%03x is not supported' % devid)
def find_mcus_by_flash_size(self):
self._flash_size = self.stlink.get_debugreg16(self._mcus_by_devid['flash_size_reg'])
self._mcus = []
for mcu in self._mcus_by_devid['devices']:
if mcu['flash_size'] == self._flash_size:
self._mcus.append(mcu)
if not self._mcus:
raise stlinkex.StlinkException('Connected CPU with DEV_ID: 0x%03x and FLASH size: %dKB is not supported. Check Protection' % (
self._mcus_by_devid['dev_id'], self._flash_size
))
def fix_cpu_type(self, cpu_type):
cpu_type = cpu_type.upper()
# now support only STM32
if cpu_type.startswith('STM32'):
# change character on 10 position to 'x' where is package size code
if len(cpu_type) > 9:
cpu_type = list(cpu_type)
cpu_type[9] = 'x'
cpu_type = ''.join(cpu_type)
return cpu_type
raise stlinkex.StlinkException('"%s" is not STM32 family' % cpu_type)
def filter_detected_cpu(self, expected_cpus):
cpus = []
for detected_cpu in self._mcus:
for expected_cpu in expected_cpus:
expected_cpu = self.fix_cpu_type(expected_cpu)
if detected_cpu['type'].startswith(expected_cpu):
cpus.append(detected_cpu)
break
if not cpus:
raise stlinkex.StlinkException('Connected CPU is not %s but detected is %s %s' % (
','.join(expected_cpus),
'one of' if len(self._mcus) > 1 else '',
','.join([cpu['type'] for cpu in self._mcus]),
))
self._mcus = cpus
def load_driver(self):
flash_driver = self._mcus_by_devid['flash_driver']
if flash_driver == 'STM32FP':
self.driver = stm32fp.Stm32FP(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32FPXL':
self.driver = stm32fp.Stm32FPXL(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32FS':
self.driver = stm32fs.Stm32FS(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32L0':
self.driver = stm32l0.Stm32L0(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32L4':
self.driver = stm32l4.Stm32L4(self.stlink, dbg=self._dbg)
elif flash_driver == 'STM32H7':
self.driver = stm32h7.Stm32H7(self.stlink, dbg=self._dbg)
else:
self.driver = self._core
def read_word(self, address, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
data = self.driver.get_mem(address, 4)
return f"{data[3]:02x}{data[2]:02x}{data[1]:02x}{data[0]:02x}"
def read_words(self, address, num_words, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
num_bytes = num_words*4
data = self.driver.get_mem(address, num_bytes)
if len(data) != num_bytes:
raise Exception("Error with data length when reading words")
words = [""] * num_words
for i in range(num_words):
words[i] = f"{data[3+(i*4)]:02x}{data[2+(i*4)]:02x}{data[1+(i*4)]:02x}{data[0+(i*4)]:02x}"
return words
def write_word(self, address, value, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
print("Warning: write_word() isn't as simple to use as the -w32 function from ST-LINK_CLI.exe")
print(" The memory location being written to may need to be unlocked\n")
if len(value) != 8:
raise Exception("Error with write_word(): value is invalid")
self.write_words(address, value)
def write_words(self, address, values, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
if type(values) != str:
raise Exception("Error with write_words(): values must be a string")
if len(values) % 8 != 0:
raise Exception("Error with write_words(): values is invalid")
data = []
words = wrap(values, 8)
for word in words:
hex_bytes = wrap(word, 2)
hex_bytes.reverse()
hex_bytes = list(map(lambda x: int(x, 16), hex_bytes))
data.extend(hex_bytes)
self.driver.set_mem(address, data)
def program_otp(self, address, hex_data, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
hex_data = hex_data.lower()
if len(hex_data) == 0:
raise Exception("OTP data can't be zero in length")
if len(hex_data) % 16 != 0:
raise Exception("OTP data is an invalid length")
num_words = int(len(hex_data) / 8)
# Read OTP before attempting to write
words = self.read_words(address, num_words, initialize_comms=False)
hex_data_read = "".join(words)
blank_value = "ffffffff" * num_words
if hex_data_read != hex_data:
if hex_data_read == blank_value:
# Unlock Flash
self.driver.flash.enable_flash_programming()
# Write to OTP
self.write_words(address, hex_data, initialize_comms=False)
# Lock Flash
self.driver.flash.disable_flash_programming()
# Check what was written to the OTP
words = self.read_words(address, num_words, initialize_comms=False)
hex_data_read = "".join(words)
if hex_data_read != hex_data:
if hex_data_read == blank_value:
print("Unable to write to OTP")
return 1
else:
print("Data not written correctly to OTP")
return 1
else:
print("Unable to write to OTP as OTP isn't blank")
return 1
return 0
def write_word_to_flash(self, address, value, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
data_bytes = wrap(value, 2)
data_bytes.reverse()
data_bytes = list(map(lambda x: int(x, 16), data_bytes))
self.driver.flash_write(address, data_bytes, erase=True, erase_sizes=self._mcus_by_devid['erase_sizes'])
def program_flash(self, firmware, erase=True, verify=True, initialize_comms=True):
if initialize_comms:
self.initialize_comms()
mem = self._read_file(str(firmware))
start_addr = stm32.Stm32.FLASH_START
for addr, data in mem:
if addr is None:
addr = start_addr
a = self._mcus_by_devid['erase_sizes']
self.driver.flash_write(addr, data, erase=erase, erase_sizes=self._mcus_by_devid['erase_sizes'])
self.driver.core_reset_halt()
time.sleep(0.1)
if verify:
self.driver.core_halt()
self.driver.flash_verify(addr, data)
self.driver.core_run()
def flash_erase_all(self):
flash_size = self.stlink.get_debugreg16(self._mcus_by_devid['flash_size_reg'])
self.driver.flash_erase_all(flash_size)
def _read_file(self, filename):
if filename.endswith('.srec'):
srec = Srec()
srec.encode_file(filename)
size = sum([len(i[1]) for i in srec.buffers])
self._dbg.info("Loaded %d Bytes from %s file" % (size, filename))
return srec.buffers
with open(filename, 'rb') as f:
data = list(f.read())
self._dbg.info("Loaded %d Bytes from %s file" % (len(data), filename))
return [(None, data)]
if __name__ == "__main__":
pystlink = PyStlink(verbosity=2)
input("press enter to continue")
print(pystlink.read_word(0x08000000))
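    # Hedged usage sketch (not part of the original script): further calls this
    # class supports, left commented out. The addresses and the firmware path
    # below are illustrative assumptions, not values from this project.
    # print(pystlink.read_words(0x08000000, 4))               # four consecutive 32-bit words
    # pystlink.write_word_to_flash(0x0800F000, "deadbeef")    # erase + program a single word
    # pystlink.program_flash("firmware.srec", erase=True, verify=True)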
|
py | 1a41be256dc24802954b8f5702f071e2edf8b310 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2021 Geode-solutions
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, platform
if sys.version_info >= (3,8,0) and platform.system() == "Windows":
for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:
os.add_dll_directory(path)
import opengeode
import opengeode_io_py_mesh as mesh_io
if __name__ == '__main__':
mesh_io.initialize_mesh_io()
test_dir = os.path.dirname(__file__)
data_dir = os.path.abspath(os.path.join(test_dir, "../../../../tests/data"))
surface = opengeode.load_polygonal_surface3D(os.path.join(data_dir, "TopHat.obj"))
if surface.nb_vertices() != 363:
raise ValueError("[Test] Number of vertices in the loaded Surface is not correct" )
if surface.nb_polygons() != 380:
raise ValueError("[Test] Number of polygons in the loaded Surface is not correct" )
opengeode.save_polygonal_surface3D(surface, "TopHat_save.obj")
|
py | 1a41be4d8d4052bd5305b16ec9cdb41dd7c29630 | from . fields import ReactField
from . inputs import ReactInput |
py | 1a41beb01d02f7609de6a15ae81c0dd2dd7e964b | """
@author syt123450 / https://github.com/syt123450
"""
import os
import shutil
from tf.pb2json.pb2json_conversion import convert
import subprocess
input_format_config = '--input_format=tf_saved_model'
def preprocess_saved_model(input_path, output_path, output_node_names):
print("Preprocessing tensorflow saved model...")
os.makedirs(output_path + '/tmp', exist_ok=True)
print("Converting saved model to web friendly format...")
subprocess.check_call([
"tensorflowjs_converter",
input_format_config,
"--output_node_names=" + output_node_names,
"--saved_model_tags=serve",
input_path,
output_path + '/tmp'
])
path_now = os.getcwd()
os.chdir(output_path)
absolute_output_path = os.getcwd()
absolute_output_path_temp = absolute_output_path + '/tmp/'
os.chdir(path_now)
print("Converting pb to json...")
convert(
absolute_output_path_temp,
absolute_output_path
)
print("Removing temp pb model...")
shutil.rmtree(absolute_output_path_temp)
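# A hedged usage sketch (not part of the original module): the paths and the
# output node name below are illustrative assumptions, not values from this project.
def _demo_preprocess():
    preprocess_saved_model(
        './saved_model',   # directory holding the TensorFlow SavedModel
        './web_model',     # destination folder for the web-friendly JSON model
        'output_node'      # comma-separated output node name(s)
    )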
|
py | 1a41bed60834a14baaf2f8c1a3f7f79f685c71cc | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import yaml
from yaml import Loader
import os
from pathlib import Path
from pdb import set_trace as st
## Vocab code
class Vocab:
def __init__(self):
## set architecture vocab data structures
self.architecture_vocab, self.architecture2idx = self._init_architecture_vocab()
## set hyper param vocab data structures
self.hparms_vocab, self.hptype2idx = self._init_layers_hp_vocab()
## Optimizer
self.optimizer_vocab, self.opt2idx = self._init_optimizer_vocab()
## Optimizer hyperparams
self.hparams_opt_vocab, self.hpopt2idx = self._init_layers_hp_optimizer_vocab()
def _init_architecture_vocab(self):
'''
Initializes the architecture vocabulary
'''
architecture_vocab = ['PAD_TOKEN', 'SOS', 'EOS','Conv2d', 'Linear', 'MaxPool2d', 'BatchNorm2d', 'Dropout2d', 'ReLU', 'SELU', 'LeakyReLU', 'Flatten']
architecture2idx = { architecture_vocab[i]:i for i in range(len(architecture_vocab)) } # faster than using python's list.index(element)
return architecture_vocab, architecture2idx
def _init_layers_hp_vocab(self):
'''
Initializes the hyper param layers vocab
'''
hparms_vocab = ['PAD_TOKEN','SOS', 'EOS','in_features', 'out_features', 'kernel_size', 'stride', 'padding', 'dilation', 'ceil_mode', 'eps', 'momentum', 'affine', 'track_running_stats', 'p', 'bias']
hptype2idx = { hparms_vocab[i]:i for i in range(len(hparms_vocab))} # faster than using python's list.index(element)
return hparms_vocab, hptype2idx
def _init_optimizer_vocab(self):
'''
        Initializes the optimizer vocab
'''
optimizer_vocab = ['PAD_TOKEN', 'SOS', 'EOS','SGD', 'Adam', 'Adadelta', 'Adagrad']
opt2idx = { optimizer_vocab[i]:i for i in range(len(optimizer_vocab))} # faster than using python's list.index(element)
return optimizer_vocab, opt2idx
def _init_layers_hp_optimizer_vocab(self):
'''
        Initializes the optimizer hyper param vocab
'''
hparams_opt_vocab = ['PAD_TOKEN', 'SOS', 'EOS', 'dampening', 'lr', 'momentum', 'nesterov', 'weight_decay', 'rho']
hpopt2idx = { hparams_opt_vocab[i]:i for i in range(len(hparams_opt_vocab))} # faster than using python's list.index(element)
return hparams_opt_vocab, hpopt2idx
def get_type(vocab, layer_str):
'''
    Gets the string type of the layer.
    :param list vocab: a list of all the token types (probably as strings)
    :param str layer_str: a string of a split layer e.g. ' Linear(in_features=4, out_features=3, bias=True)\n (1)'
:return str arch_token: string representation of layer type.
'''
for arch_token in vocab:
if arch_token in layer_str:
return arch_token
raise ValueError(f'The string you have {layer_str} doesn\'t match any of the architecture tokens in {vocab}')
def indices2onehot(indices, vocab_size):
'''
Returns the onehot matrix
'''
shape = (len(indices), vocab_size)
matrix = np.zeros(shape)
# for every symbol index i, place a 1 i the one hot vector in the vocab position symbol_idx
for i, symbol_idx in enumerate(indices):
matrix[i,symbol_idx] = 1
return matrix
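# A hedged sanity check (not part of the original module) showing what
# indices2onehot produces; the indices and vocab size are illustrative.
def _demo_indices2onehot():
    m = indices2onehot([1, 3], 5)
    # one row per index, with a single 1 in the column given by that index
    assert m.shape == (2, 5) and m[0, 1] == 1 and m[1, 3] == 1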
## DataProcessing code
class DataProcessor:
'''
Class for converting models into vector forms to be used by neural nets.
'''
def __init__(self, vocab):
self.vocab = vocab
def arch_parse_string(self, layer_str):
"""
Parses the architecture layer string and gets all the parameters for each layer in list.
:param str layer_str: the string representation of a layer of a model
:return list param_vector: python list of parameters for the layer of the model
"""
params = self.vocab.hptype2idx # dictionary from hyper param type to its index
curr_word = ''
param_vector = [0]*len(params)
#go through the entire string and try to find keywords
for i in range(len(layer_str)):
#start a new sublayer_str if there is a space
if layer_str[i] == ' ':
curr_word = ''
else:
#add the next character to the substring
curr_word += layer_str[i]
#separate 'padding' from 'p'
if layer_str[i] == 'p':
#continues if the substring is padding
if layer_str[i+1] == 'a':
continue
#Separates function call from keywords
if layer_str[i] == '(' and layer_str[i-1] != '=':
curr_word = ''
#loop through the keys of the dictionary
for param in params.keys():
#check if our substring is a possible parameter
if curr_word in params.keys():
#if there is a match then add to the index corresponding to the parameter
if curr_word == param:
# print(curr_word, params[curr_word])
#if there is a ( then add the next character
if layer_str[i+2] == '(' and layer_str[i+1] == '=':
index = int(params[curr_word])
param_vector[index] = int(layer_str[i+3])
else:
#add a 0 if the word is 'False'
if layer_str[i+2] == 'F':
param_vector[int(params[curr_word])] = 0
#add a 1 if the word is 'True'
elif layer_str[i+2] == 'T':
param_vector[int(params[curr_word])] = 1
else:
val = ''
i += 2
#loop through the string until the entire value is found
while layer_str[i] != ',' and layer_str[i] != ')':
val += layer_str[i]
i += 1
param_vector[int(params[curr_word])] = eval(val)
return param_vector
def mdl_str2feature_vec(self, mdl_str):
"""
Makes a one hot matrix from each layer of the architecture data (note doesn't include meta data)
Note: the names of the layers have to be separated by colons for it to work
:param str mdl_str: model string e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
        :return np.array feature_matrix: feature matrix with one row per layer (arch one-hot + hyperparams), e.g. [conv, filters, ...]
"""
## arch 2 one-hot
one_hot_arch_matrix = self.mdl_str2onehot(mdl_str)
## hparams 2 matrix
hp_params_matrix = self.mdl_str2hp_matrix(mdl_str)
##append arch + hparam vecs
feature_matrix = np.concatenate((one_hot_arch_matrix, hp_params_matrix),axis=1)
return feature_matrix
def parse_optimizer_string(self, opt_str):
"""
Parses the optimizer string and gets all its parameters
:param str opt_str: optimizer string for the model
:return list param_vector: python list of optimizer parameters
"""
params = self.vocab.hpopt2idx
curr_word = ''
param_vector = np.zeros(len(params))
for i in range(len(opt_str)):
#start a new substring if there is a space
if opt_str[i] == ' ':
curr_word = ''
else:
#add the next character to the substring
curr_word += opt_str[i]
for param in params.keys():
#check if our substring is a possible parameter
if curr_word in params.keys():
#if there is a match then add to the index corresponding to the parameter
if curr_word == param:
val = ''
i += 3
#loop through the string until the entire value is found
while opt_str[i] != ' ':
val += opt_str[i]
i += 1
if val == 'False':
param_vector[int(params[curr_word])] = int(0)
elif val == 'True':
param_vector[int(params[curr_word])] = int(1)
#if not true or false put the actual value
else:
try:
param_vector[int(params[curr_word])] = int(val)
except:
param_vector[int(params[curr_word])] = float(val)
return param_vector
def optimizer_feature_vec(self, opt_str, epochs):
"""
Makes a feature_vec for the optimizer used in the model.
        :param str opt_str: optimizer string for the model
        :param int epochs: number of epochs the model was trained for
        :return np.array feature_vector: vector of one-hot, hp_param and epoch data
        """
        indices = self.optimizer_str2indices(opt_str)
        # flatten so the single-row one-hot matrix concatenates with the 1-D hp vector
        opt_onehot = indices2onehot(indices, len(self.vocab.optimizer_vocab)).flatten()
        # parses optimizer info for its parameters
        params_vector = self.parse_optimizer_string(opt_str)
        # add the optimizer hyperparameters and the epoch count to the one-hot vector
        feature_vector = np.concatenate((opt_onehot, params_vector, [epochs]))
return feature_vector
def calculate_weight_stats(self, weights):
"""
Calculates the Statistics for the weights.
param list weights: python list of weights (initial or final)
return list weight_stats: python list of the statistics of the weights
TODO: change these to torch_uu ops so that they are done on GPU
"""
length = len(weights)
new_weights = []
for i in range(length):
#flatten each tensor
flat_weights = weights[i].flatten()
#convert each tensor to a numpy array and concatenates it to a a list
new_weights.extend(flat_weights.cpu().detach().numpy())
#calculates the stats for the weights
sum_weights = np.sum(new_weights)
max_weight = np.max(new_weights)
min_weight = np.min(new_weights)
average_weight = np.mean(new_weights)
std_dev_weight = np.std(new_weights)
weight_stats = [sum_weights,max_weight,min_weight,average_weight,std_dev_weight]
return weight_stats
def mdl_str2onehot(self, mdl_str):
'''
Makes a one-hot matrix for the arch from the (whole) model string
        :param str mdl_str: string of the model e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
:return np.array one_hot_arch_matrix: one-hot matrix representation of model (nb_layers, dim)
'''
indices = self.mdl_str2indices(mdl_str)
one_hot_arch_matrix = self.indices2arch_onehot(indices)
return one_hot_arch_matrix
def mdl_str2hp_matrix(self, mdl_str):
'''
Makes a matrix for the hps from the (whole) model string
        :param str mdl_str: string of the model e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
        :return np.array hp_params_matrix: hyperparameter matrix representation of the model (nb_layers, hp_vocab_size)
'''
data = mdl_str.split(':')[1:]
nb_layers = len(data)
hp_vocab_size = len(self.vocab.hparms_vocab)
hp_params_matrix = np.zeros((nb_layers,hp_vocab_size))
for i in range(nb_layers):
hparam_vector = self.arch_parse_string(data[i])
hp_params_matrix[i,:] = hparam_vector
return hp_params_matrix
def mdl_str2indices(self, mdl_str):
'''
        Returns a list of indices corresponding to the model arch of the given model string.
        :param str mdl_str: string of the model e.g. nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
        :return list arch_indices: list of corresponding indices in vocab of each arch layer type
'''
data = mdl_str.split(':')[1:]
nb_layers = len(data)
arch_vocab_size = len(self.vocab.architecture_vocab)
arch_indices = []
for i in range(nb_layers):
layer_type = get_type(self.vocab.architecture_vocab, data[i] )
idx_layer_type = self.vocab.architecture2idx[layer_type]
arch_indices.append(idx_layer_type)
return arch_indices
def optimizer_str2indices(self, opt_str):
"""
        Returns a list of indices corresponding to the optimizer of the given optimizer string
        :param str opt_str: optimizer string for the model
        :return list opt_indices: list of corresponding indices in vocab of each optimizer type
"""
#vocab
opt_indices = []
for opt_token in self.vocab.optimizer_vocab:
            #if the vocab token appears in the optimizer info then record its index
if opt_token in opt_str:
opt_indx = self.vocab.opt2idx[opt_token]
opt_indices.append(opt_indx)
break
return opt_indices
def tokens2arch_indices(self, tokens):
'''
        :param list tokens: list of (string) tokens
        :return list indices: list of (int) indices
TODO:
- add logic to receive things like torch_uu.nn.Conv2d etc
'''
## check if user passed a single string
if isinstance(str(tokens), str):
token_str = tokens
return self.vocab.architecture2idx[token_str]
indicies = [ self.vocab.architecture2idx[token_str] for token_str in tokens ]
return indicies
def indices2arch_onehot(self, indices):
if isinstance(indices, int):
return indices2onehot([indices], len(self.vocab.architecture_vocab))[0]
one_hot_arch_matrix = indices2onehot(indices, len(self.vocab.architecture_vocab))
return one_hot_arch_matrix
def indices2hp_matrix(self, indices):
'''
TODO implement but we need to also change mdl_str2hp_matrix
'''
if isinstance(indices, int):
return indices2onehot([indices], len(self.vocab.hparms_vocab))[0]
one_hot_arch_hp_matrix = indices2onehot(indices, len(self.vocab.hparms_vocab))
return one_hot_arch_hp_matrix
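# A hedged usage sketch (not part of the original module): the colon-separated
# model string below is an illustrative assumption that follows the layer
# format expected by mdl_str2feature_vec().
def _demo_data_processor():
    vocab = Vocab()
    processor = DataProcessor(vocab)
    mdl_str = ('Sequential:'
               ' Linear(in_features=4, out_features=3, bias=True):'
               ' ReLU():'
               ' Linear(in_features=3, out_features=2, bias=True)')
    feature_matrix = processor.mdl_str2feature_vec(mdl_str)
    # one row per layer; columns are the arch one-hot block followed by the hyperparameter block
    print(feature_matrix.shape)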
####
class MetaLearningDataset(Dataset):
'''
Data set for meta learning. It contains the architecture, hyperparams,
optimizer, Weights init and final, and train & test error.
note:
__len__ so that len(dataset) returns the size of the dataset.
__getitem__ to support the indexing such that dataset[i] can be used to get ith sample
'''
def __init__(self, data_path, vocab):
'''
'''
self.path = Path(data_path).expanduser()
print(str(data_path))
self.model_folders = [ f for f in self.path.iterdir() if f.is_dir() ]
self.data_processor = DataProcessor(vocab)
def __len__(self):
'''
Returns the number of data points (size of data set).
'''
return len(self.model_folders)
def __getitem__(self, idx):
'''
Gets you data point at the given index idx.
'''
## look for the model indexed with idx
mdl_name = ''
for f in self.model_folders:
            # TODO fix: substring matching on f'_{idx}' can collide (e.g. '_1' also matches '_10')
mdl_names = str(f)
if f'_{idx}' in mdl_names: # is this model the model # idx?
mdl_name = mdl_names
break
## generate strings to paths
data_path = str(self.path)
data_filepath = os.path.join(data_path, mdl_name)
#
metadata_filepath = os.path.join(data_filepath, f'meta_data.yml')
otherdata_filepath = os.path.join(data_filepath, f'other_data.yml')
param_stats_filepath = os.path.join(data_filepath, f'param_stats.yml')
#tensor_filepath = os.path.join(data_filepath, f'tensors.npz')
##
data_item = {}
with open(metadata_filepath, 'r') as f:
# loader of data
yamldata = yaml.load(f, Loader=Loader)
# get raw data
data_item['mdl_str'] = yamldata['arch_and_hp']
mdl_str = data_item['mdl_str']
data_item['opt_str'] = yamldata['optimizer']
opt_str = data_item['opt_str']
data_item['epochs'] = yamldata['epochs']
epochs = data_item['epochs']
data_item['batch_size_train'] = yamldata['batch_size_train']
data_item['batch_size_test'] = yamldata['batch_size_test']
data_item['batch_size_val'] = yamldata['batch_size_val']
try:
criterion = yamldata['criteron']
except:
criterion = yamldata['criterion']
opt_hp = self.data_processor.parse_optimizer_string(opt_str)
opt_hp = np.concatenate(([epochs],opt_hp) )
#
data_item['train_error'] = yamldata['train_error']
data_item['test_error'] = yamldata['test_error']
data_item['train_loss'] = yamldata['train_loss']
data_item['test_loss'] = yamldata['test_loss']
## get arch indices and hyperparams
arch_indices = self.data_processor.mdl_str2indices(mdl_str)
data_item['arch_indices'] = arch_indices
arch_hp = self.data_processor.mdl_str2hp_matrix(mdl_str)
data_item['arch_hp'] = arch_hp
        ## get optimizer indices and hyperparams
opt_indices = self.data_processor.optimizer_str2indices(opt_str)
data_item['opt_indices'] = opt_indices
opt_hp = self.data_processor.parse_optimizer_string(opt_str)
data_item['opt_hp'] = opt_hp
with open(otherdata_filepath, 'r') as f:
yamldata = yaml.load(f, Loader=Loader)
#
data_item['test_accs'] = yamldata['test_accs']
data_item['test_errors'] = yamldata['test_errors']
data_item['test_losses'] = yamldata['test_losses']
#
data_item['train_accs'] = yamldata['train_accs']
data_item['train_errors'] = yamldata['train_errors']
data_item['train_losses'] = yamldata['train_losses']
#
data_item['val_accs'] = yamldata['val_accs']
data_item['val_errors'] = yamldata['val_errors']
data_item['val_losses'] = yamldata['val_losses']
with open(param_stats_filepath, 'r') as f:
yamldata = yaml.load(f, Loader=Loader)
#
data_item['init_params_mu'] = yamldata['init_params_mu']
data_item['final_params_mu'] = yamldata['final_params_mu']
#
data_item['init_params_std'] = yamldata['init_params_std']
data_item['final_params_std'] = yamldata['final_params_std']
#
data_item['init_params_l2'] = yamldata['init_params_l2']
data_item['final_params_l2'] = yamldata['final_params_l2']
##
return data_item
class Collate_fn_onehot_general_features(object):
'''
Custom collate function that gets onehot representation for Arch blocks
and gets general features for the rest. General features are such that they
    are useful for any optimizer and initialization, e.g.
    the optimizer might be anything (even an RNN itself), so having a symbolic representation for it,
    even in onehot form, isn't general (especially if a new, unknown optimizer is used that the model has never seen).
    Thus it's better to use the training/validation statistics during training (say the first 10).
Similarly for initialization (or final weights). If we use the actual weights
then we don't need a symbolic representation for the initialization algorithm.
For (space) efficiency reasons we only use statistics of the initial (and final)
    weights: mean, std and L2 norm of the weights.
    The collate function returns everything per batch as follows:
    - OneHot representation for symbols
    - Arch representation: concatenation of OneHot for Arch and Arch hyperparams [A;A_hp]
- Opt representation train history
- Net stats representation
'''
def __init__(self, device, batch_first, vocab, padding_value=-1):
'''
        NOTE: padding_value is -1 so as not to be confused with 0, which stands for special characters (TODO: check this is implemented correctly)
'''
self.device = device
self.batch_first = batch_first
self.data_processor = DataProcessor(vocab)
self.padding_value = padding_value
def arch2OneHot(self, indicies):
'''
Maps indices in the batch to tensor OneHot representation
'''
vocab_size = len(self.data_processor.vocab.architecture_vocab)
return torch.Tensor(indices2onehot(indicies, vocab_size)).to(self.device)
def opt2OneHot(self, indicies):
'''
        Maps optimizer indices in the batch to tensor OneHot representation
'''
vocab_size = len(self.data_processor.vocab.optimizer_vocab)
return torch.Tensor(indices2onehot(indicies, vocab_size)).to(self.device)
def Tensor(self, t):
'''
Maps to torch_uu tensor + proper device (cpu or gpu)
'''
return torch.Tensor(t).to(self.device)
def __call__(self, batch):
'''
        Gets the batch in dictionary form ready to be processed by a NN (i.e. as proper tensors)
:param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
:return torch_uu.Tensor batch_arch_rep: OneHot for each layer type (batch_size, max_len, vocab_size)
        :return torch_uu.Tensor arch_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
:return torch_uu.Tensor arch_mask: mask with 0 zeros on padding 1 elsewhere (batch_size, max_len, vocab_size)
:return torch_uu.Tensor batch_arch_hp_rep: vector form for arch hp (batch_size, max_len, vocab_size)
        :return torch_uu.Tensor arch_hp_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
:return torch_uu.Tensor arch_hp_mask: mask with 0 zeros on padding 1 elsewhere (batch_size, max_len, vocab_size)
:return torch_uu.Tensor batch_opt: OneHot for which optimizer was used (batch_size, vocab_size)
:returned torch_uu.Tensor batch_opt_hp: vector form for opt hp (batch_size, vocab_size)
:return torch_uu.Tensor batch_W_init_rep: tensor with mean and std for each weight in the sequence. (batch_size, max_len, 2)
        :return torch_uu.Tensor W_init_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
:return torch_uu.Tensor W_init_mask: mask with 0 zeros on padding 1 elsewhere (batch_size, max_len, vocab_size)
:return torch_uu.Tensor batch_W_final_rep: tensor with mean and std for each weight in the sequence. (batch_size, max_len, 2)
        :return torch_uu.Tensor W_final_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
:return torch_uu.Tensor W_final_mask: mask with 0 zeros on padding 1 elsewhere (batch_size, max_len, vocab_size)
:return torch_uu.Tensor batch_train_errorr: tensor with train errors for each sample in the batch (batch_size)
'''
all_batch_info = {}
##
batch_mdl_str = [ sample['mdl_str'] for sample in batch ]
batch_mdl_str = {'mdl_str':batch_mdl_str}
## get arch representation, A
batch_arch_rep, arch_lengths, arch_mask = self.get_arch_rep(batch)
arch = {'batch_arch_rep':batch_arch_rep, 'arch_lengths':arch_lengths, 'arch_mask':arch_mask}
## get arch hyper param representation, Ahp
batch_arch_hp_rep, arch_hp_lengths, arch_hp_mask = self.get_arch_hp_rep(batch)
arch_hp ={'batch_arch_hp_rep':batch_arch_hp_rep, 'arch_hp_lengths':arch_hp_lengths, 'arch_hp_mask':arch_hp_mask}
## get opt representation, O
# batch_opt = self.get_opt_rep(batch)
# opt = {'batch_opt':batch_opt}
## get opt hp, Ohp
# batch_opt_hp = self.get_opt_hp_rep(batch)
# opt_hp = {'batch_opt_hp':batch_opt_hp}
train_history, val_history = self.get_training_validation_history(batch)
opt, opt_hp = {'train_history':train_history}, {'val_history':val_history}
## get W representation
weight_stats = self.get_all_weight_stats(batch)
## get train errors for models
batch_train_errorr = self.Tensor([ float(sample['train_error']) for sample in batch ])
train_error = {'batch_train_error':batch_train_errorr}
##
batch_test_errorr = self.Tensor([ float(sample['test_error']) for sample in batch ])
#test_error = {'batch_test_error':batch_test_errorr}
test_error = batch_test_errorr
## collect return batch
new_batch = ({**batch_mdl_str, **arch, **arch_hp, **opt, **opt_hp, **weight_stats, **train_error}, test_error)
#print(new_batch['train_history'])
return new_batch
#return batch_arch_rep, batch_arch_hp_rep, batch_opt, batch_W_init, batch_W_final, batch_train_errorr
def get_arch_rep(self, batch):
'''
        Converts architecture indices to OneHot.
:param list batch: list of samples in a batch (in dictionary form)
:return torch_uu.Tensor batch_arch_rep: OneHot for each layer type (batch_size, max_len, vocab_size)
        :return torch_uu.Tensor arch_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
:return torch_uu.Tensor arch_mask: mask with 0 zeros on padding 1 elsewhere (batch_size, max_len, vocab_size)
'''
## get lengths of sequences for each sample in the batch
arch_lengths = self.Tensor([ len(sample['arch_indices']) for sample in batch ]).long()
## make array of one hot tensors for each example in batch
batch = [ self.arch2OneHot(sample['arch_indices']) for sample in batch ]
        ## pad (and concatenate) the tensors in the whole batch
batch_arch_rep = torch.nn.utils.rnn.pad_sequence(batch, batch_first=self.batch_first, padding_value=self.padding_value)
## compute mask
arch_mask = (batch_arch_rep != self.padding_value)
##
return batch_arch_rep.to(self.device), arch_lengths.to(self.device), arch_mask.to(self.device)
def get_arch_hp_rep(self, batch):
'''
Converts architecture hyperparams to tensor form (not OneHot, just stacks values)
:param list batch: list of samples in a batch (in dictionary form)
:return torch_uu.Tensor batch_arch_hp_rep: vector form for arch hp (batch_size, max_len, vocab_size)
        :return torch_uu.Tensor arch_hp_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
:return torch_uu.Tensor arch_hp_mask: mask with 0 zeros on padding 1 elsewhere (batch_size, max_len, vocab_size)
'''
## get lengths of sequences for each sample in the batch
arch_hp_lengths = self.Tensor([ len(sample['arch_hp']) for sample in batch ]).long()
        ## pad
batch = [ self.Tensor(sample['arch_hp']) for sample in batch ]
batch_arch_hp_rep = torch.nn.utils.rnn.pad_sequence(batch, batch_first=self.batch_first, padding_value=self.padding_value)
## compute mask
arch_hp_mask = (batch_arch_hp_rep != self.padding_value)
##
return batch_arch_hp_rep.to(self.device), arch_hp_lengths.to(self.device), arch_hp_mask.to(self.device)
def get_opt_rep(self, batch):
'''
Get OneHot for optimizer.
:param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
:return torch_uu.Tensor batch_opt: OneHot for which optimizer was used (batch_size, vocab_size)
'''
batch = [ self.opt2OneHot(sample['opt_indices']) for sample in batch ]
batch_opt = torch.cat(batch,dim=0)
return batch_opt.to(self.device)
def get_opt_hp_rep(self, batch):
'''
Converts optimizer hyperparams to tensor form (not OneHot, just stacks values)
:param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
:returned torch_uu.Tensor batch_opt_hp: vector form for opt hp (batch_size, vocab_size)
'''
batch = [ self.Tensor(sample['opt_hp']) for sample in batch ]
batch_opt_hp = torch.cat(batch, dim=0)
return batch_opt_hp.to(self.device)
def get_training_validation_history(self,batch):
Tensor = torch.Tensor
train_history_batch = []
val_history_batch = []
for sample in batch:
##
train_errors, train_losses = Tensor(sample['train_errors']), Tensor(sample['train_losses'])
train = torch.stack((train_errors,train_losses)) # (2,seq_len)
#train = train.unsqueeze(2) # so that convolution layers can take it (2,seq_len,1)
train_history_batch.append(train)
##
val_errors, val_losses = Tensor(sample['val_errors']), Tensor(sample['val_losses'])
val = torch.stack((val_errors,val_losses)) # (2,seq_len)
#val = val.unsqueeze(2) # so that convolution layers can take it (2,seq_len,1)
val_history_batch.append(val)
##
train_history_batch = torch.nn.utils.rnn.pad_sequence(train_history_batch, batch_first=self.batch_first, padding_value=self.padding_value)
val_history_batch = torch.nn.utils.rnn.pad_sequence(val_history_batch, batch_first=self.batch_first, padding_value=self.padding_value)
print(f'val_history_batch = {val_history_batch.size()}')
return train_history_batch.to(self.device), val_history_batch.to(self.device)
def get_all_weight_stats(self, batch):
'''
:param list batch: list of samples in a batch. Samples produced by Dataset, which is a dictionary with all the raw data of a data point model.
:return torch_uu.Tensor batch_W_rep: tensor with mean and std for each weight in the sequence. (batch_size, max_len, 2)
        :return torch_uu.Tensor W_lengths: lengths of each sequence in batch (i.e. # layers for each sample in the batch) (batch_size)
:return torch_uu.Tensor W_mask: mask with 0 zeros on padding 1 elsewhere (batch_size, max_len, vocab_size)
'''
weight_stats = {}
with torch.no_grad():
##
batch_init_params_mu_rep, init_params_mu_lengths, init_params_mu_mask = self.get_weight_stat(batch,'init_params_mu')
batch_final_params_mu_rep, final_params_mu_lengths, final_params_mu_mask = self.get_weight_stat(batch,'final_params_mu')
new_weights_stats_init = {'batch_init_params_mu_rep':batch_init_params_mu_rep,'init_params_mu_lengths':init_params_mu_lengths, 'init_params_mu_mask':init_params_mu_mask}
new_weights_stats_final = {'batch_final_params_mu_rep':batch_final_params_mu_rep,'final_params_mu_lengths':final_params_mu_lengths,'final_params_mu_mask':final_params_mu_mask}
weight_stats = dict(weight_stats, **new_weights_stats_init)
weight_stats = dict(weight_stats, **new_weights_stats_final)
##
batch_init_params_std_rep, init_params_std_lengths, init_params_std_mask = self.get_weight_stat(batch,'init_params_std')
batch_final_params_std_rep, final_params_std_lengths, final_params_std_mask = self.get_weight_stat(batch,'final_params_std')
new_weights_stats_init = {'batch_init_params_std_rep':batch_init_params_std_rep,'init_params_std_lengths':init_params_std_lengths, 'init_params_std_mask':init_params_std_mask}
new_weights_stats_final = {'batch_final_params_std_rep':batch_final_params_std_rep,'final_params_std_lengths':final_params_std_lengths,'final_params_std_mask':final_params_std_mask}
weight_stats = dict(weight_stats, **new_weights_stats_init)
weight_stats = dict(weight_stats, **new_weights_stats_final)
##
batch_init_params_l2_rep, init_params_l2_lengths, init_params_l2_mask = self.get_weight_stat(batch,'init_params_l2')
batch_final_params_l2_rep, final_params_l2_lengths, final_params_l2_mask = self.get_weight_stat(batch,'final_params_l2')
new_weights_stats_init = {'batch_init_params_l2_rep':batch_init_params_l2_rep,'init_params_l2_lengths':init_params_l2_lengths, 'init_params_l2_mask':init_params_l2_mask}
new_weights_stats_final = {'batch_final_params_l2_rep':batch_final_params_l2_rep,'final_params_l2_lengths':final_params_l2_lengths,'final_params_l2_mask':final_params_l2_mask}
weight_stats = dict(weight_stats, **new_weights_stats_init)
weight_stats = dict(weight_stats, **new_weights_stats_final)
##
return weight_stats
def get_weight_stat(self, batch, W_type):
## get lengths of sequences for each sample in the batch
weight_lengths = self.Tensor([ len(sample[W_type]) for sample in batch ]).long()
        ## pad
#st()
new_batch = []
for i,sample in enumerate(batch):
try:
print(f'i = {i}')
print(f'sample = {sample}')
tensor_sample = self.Tensor(sample[W_type])
print(f'tensor_sample = {tensor_sample}')
new_batch.append(tensor_sample)
except:
print(f'\n ---- ERROR: i = {i}')
print(f'sample = {sample}')
st()
        ## pad batch sequences
batch_weight_rep = torch.nn.utils.rnn.pad_sequence(new_batch, batch_first=self.batch_first, padding_value=self.padding_value)
## compute mask
weight_mask = (batch_weight_rep != self.padding_value)
##
return batch_weight_rep.to(self.device), weight_lengths.to(self.device), weight_mask.to(self.device)
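# A hedged sketch (not part of the original module) of how the dataset, vocab and
# collate function above are meant to be wired together; the data path and batch
# size are illustrative assumptions.
def _demo_dataloader(data_path='~/metalearning_data', batch_size=4):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    vocab = Vocab()
    dataset = MetaLearningDataset(data_path, vocab)
    collate = Collate_fn_onehot_general_features(device, batch_first=True, vocab=vocab)
    loader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate)
    inputs, test_error = next(iter(loader))
    # inputs is a dict of padded tensors; test_error is the regression target
    print(inputs['batch_arch_rep'].size(), test_error.size())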
def testing():
pass
if __name__ == '__main__':
testing()
|
py | 1a41bed7f44bc54a692ab2fc8978a965e3b464c8 | """
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import glob
import random
import math
import datetime
import itertools
import json
import re
import logging
from collections import OrderedDict
import numpy as np
import scipy.misc
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.initializers as KI
import keras.engine as KE
import keras.models as KM
import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
    prints its shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} min: {:10.5f} max: {:10.5f}".format(
str(array.shape),
array.min() if array.size else "",
array.max() if array.size else ""))
print(text)
class BatchNorm(KL.BatchNormalization):
"""Batch Normalization class. Subclasses the Keras BN class and
hardcodes training=False so the BN layer doesn't update
during training.
Batch normalization has a negative effect on training if batches are small
so we disable it here.
"""
def call(self, inputs, training=None):
return super(self.__class__, self).call(inputs, training=False)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer at main path uses strides=(2,2),
    and the shortcut should have strides=(2,2) as well
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(axis=3, name=bn_name_base + '2a')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2b')(x)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(axis=3, name=bn_name_base + '2c')(x)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(axis=3, name=bn_name_base + '1')(shortcut)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False):
assert architecture in ["resnet50", "resnet101"]
print("using architecture:{}".format(architecture))
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(axis=3, name='bn_conv1')(x)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
else:
C5 = None
return [C1, C2, C3, C4, C5]
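# A hedged usage sketch (not part of the original file): building the backbone
# feature maps C1..C5 on a dummy Keras input. The 1024x1024x3 input shape is an
# illustrative assumption, not a value taken from any config in this file.
def _demo_resnet_backbone():
    input_image = KL.Input(shape=[1024, 1024, 3], name="demo_input_image")
    C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True)
    return KM.Model(inputs=input_image, outputs=[C1, C2, C3, C4, C5])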
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, 4] where each row is y1, x1, y2, x2
deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
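# A hedged NumPy mirror of the graph op above (not part of the original file),
# included to make the (dy, dx, log(dh), log(dw)) delta convention concrete.
def _apply_box_deltas_np(boxes, deltas):
    boxes = boxes.astype(np.float32)
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height
    center_x = boxes[:, 1] + 0.5 * width
    center_y += deltas[:, 0] * height
    center_x += deltas[:, 1] * width
    height *= np.exp(deltas[:, 2])
    width *= np.exp(deltas[:, 3])
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    return np.stack([y1, x1, y1 + height, x1 + width], axis=1)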
def clip_boxes_graph(boxes, window):
"""
boxes: [N, 4] each row is y1, x1, y2, x2
window: [4] in the form y1, x1, y2, x2
"""
# Split corners
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, anchors,
config=None, **kwargs):
"""
anchors: [N, (y1, x1, y2, x2)] anchors defined in image coordinates
"""
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
self.anchors = anchors.astype(np.float32)
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Base anchors
anchors = self.anchors
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = min(6000, self.anchors.shape[0])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
anchors = utils.batch_slice(ix, lambda x: tf.gather(anchors, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. [batch, N, (y1, x1, y2, x2)]
height, width = self.config.IMAGE_SHAPE[:2]
window = np.array([0, 0, height, width]).astype(np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Normalize dimensions to range of 0 to 1.
normalized_boxes = boxes / np.array([[height, width, height, width]])
# Non-max suppression
def nms(normalized_boxes, scores):
indices = tf.image.non_max_suppression(
normalized_boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(normalized_boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([normalized_boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementatin of Log2. TF doesn't have a native implemenation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
- image_shape: [height, width, channels]. Shape of input image in pixels
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, image_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
self.image_shape = tuple(image_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[1:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(
self.image_shape[0] * self.image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
pooled = tf.expand_dims(pooled, 0)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[1][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
    # every boxes1 against every boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
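# A hedged sanity check (not part of the original file): two 1x1 boxes that
# overlap on half their area have IoU = 0.5 / 1.5 = 1/3. Values are illustrative.
def _demo_iou():
    def iou_np(b1, b2):
        y1, x1 = np.maximum(b1[:2], b2[:2])
        y2, x2 = np.minimum(b1[2:], b2[2:])
        inter = max(y2 - y1, 0) * max(x2 - x1, 0)
        a1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
        a2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
        return inter / (a1 + a2 - inter)
    assert np.isclose(iou_np(np.array([0., 0., 1., 1.]),
                             np.array([0., 0.5, 1., 1.5])), 1.0 / 3.0)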
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Class-specific bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
(dy, dx, log(dh), log(dw), class_id)]
Class-specific bbox refinements.
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, 1), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
"""
window: (y1, x1, y2, x2). The window in the image we want to clip to.
boxes: [N, (y1, x1, y2, x2)]
"""
boxes[:, 0] = np.maximum(np.minimum(boxes[:, 0], window[2]), window[0])
boxes[:, 1] = np.maximum(np.minimum(boxes[:, 1], window[3]), window[1])
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], window[2]), window[0])
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], window[3]), window[1])
return boxes
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in image coordinates. The part of the image
that contains the image excluding the padding.
Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
coordinates are in image domain.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
    # Convert coordinates to image domain
# TODO: better to keep them normalized until later
height, width = config.IMAGE_SHAPE[:2]
refined_rois *= tf.constant([height, width, height, width], dtype=tf.float32)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
    # Round and cast to int since we're dealing with pixels now
refined_rois = tf.to_int32(tf.rint(refined_rois))
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.to_float(tf.gather(pre_nms_rois, ixs)),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are in image domain.
detections = tf.concat([
tf.to_float(tf.gather(refined_rois, keep)),
tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are in image domain
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Run detection refinement graph on each item in the batch
_, _, window, _ = parse_image_meta_graph(image_meta)
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in pixels
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
# Region Proposal Network (RPN)
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the featuremap
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location, depth]
    # where depth is [dy, dx, log(dh), log(dw)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
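# Illustrative sketch: a hedged usage example of build_rpn_model(). The values
# below (stride 1, 3 anchor ratios, 256-channel feature maps) mirror common
# FPN defaults but are assumptions here, not values read from any config.
def _build_rpn_model_example():
    rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
    # The returned Keras model maps a [batch, H, W, 256] feature map to
    # [rpn_class_logits, rpn_probs, rpn_bbox].
    return rpn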
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns:
logits: [N, NUM_CLASSES] classifier logits (before softmax)
probs: [N, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_classifier")([rois] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3), name='mrcnn_class_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_class_bn2')(x)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps,
image_shape, pool_size, num_classes):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_shape: [height, width, depth]
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
Returns: Masks [batch, roi_count, height, width, num_classes]
"""
# ROI Pooling
# Shape: [batch, boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size], image_shape,
name="roi_align_mask")([rois] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn1')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn2')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn3')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(axis=3),
name='mrcnn_mask_bn4')(x)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
    y_true and y_pred are typically [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
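# Illustrative sketch: the same piecewise smooth-L1 formula as above, restated
# with plain NumPy as a quick numeric sanity check (quadratic for |diff| < 1,
# linear otherwise). The sample values are arbitrary.
def _smooth_l1_numpy_example():
    import numpy as np
    diff = np.abs(np.array([0.5, 2.0]))
    loss = np.where(diff < 1.0, 0.5 * diff ** 2, diff - 0.5)
    # loss == [0.125, 1.5]
    return loss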
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Crossentropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
# TODO: use smooth_l1_loss() rather than reimplementing here
# to reduce code duplication
diff = K.abs(target_bbox - rpn_bbox)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
    # Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
loss = K.reshape(loss, [1, 1])
return loss
############################################################
# Data Generator
############################################################
from keras.preprocessing.image import ImageDataGenerator
def argument_img_mask(image, mask, class_ids):
common_seed = 7
#print("origin image shape:{}, origina mask shape:{}".format(image.shape, mask.shape))
data_gen_args = dict(horizontal_flip=True,
vertical_flip=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1
)
xtr = np.expand_dims(image, 0)
image_datagen = ImageDataGenerator(**data_gen_args)
image_datagen.fit(xtr, seed=common_seed)
image_generator = image_datagen.flow(xtr, batch_size=1, seed=common_seed)
arg_image = image_generator.next()[0].astype(image.dtype)
arg_mask = np.zeros(mask.shape, dtype=mask.dtype)
for i in range(mask.shape[-1]):
mask_datagen = ImageDataGenerator(**data_gen_args)
masktr = np.expand_dims(mask[:,:,i], 0)
masktr = np.expand_dims(masktr, -1)
mask_datagen.fit(masktr, seed=common_seed)
mask_generator = mask_datagen.flow(masktr, batch_size=1, seed=common_seed)
arg_mask_ = np.squeeze(mask_generator.next()[0], axis=-1)
# print("arg_mask_ shape:{}".format(arg_mask_.shape))
arg_mask[:,:,i] = arg_mask_.astype(mask[:,:,i].dtype)
    # Remove mask instances that no longer contain any pixels after augmentation
non_zero_mask = arg_mask[:,:, ~np.all(arg_mask == 0, axis=(0, 1))]
class_ids = class_ids[:non_zero_mask.shape[-1]]
#print("arg_mask shape:{}, non_zero_mask shape:{}, class_ids shape:{}".format(arg_mask.shape, non_zero_mask.shape, class_ids.shape))
# print("arg_mask shape:{}".format(arg_mask.shape))
return (arg_image,non_zero_mask, class_ids)
import cv2
def data_augmentation0(input_images,
h_flip=True,
v_flip=True,
rotation=360,
zoom=1.5,
brightness=0.5,
crop=False):
    # The first element is the input image; all other elements are outputs.
# Data augmentation
output_images = input_images.copy()
# random crop
# if crop and random.randint(0, 1):
# h, w, c = output_images[0].shape
# upper_h, new_h, upper_w, new_w = locs_for_random_crop(h, w)
# output_images = [input_image[upper_h:upper_h + new_h, upper_w:upper_w + new_w, :] for input_image in output_images]
# random flip
if h_flip and random.randint(0, 1):
output_images = [cv2.flip(input_image, 1) for input_image in output_images]
if v_flip and random.randint(0, 1):
output_images = [cv2.flip(input_image, 0) for input_image in output_images]
factor = 1.0 + abs(random.gauss(mu=0.0, sigma=brightness))
if random.randint(0, 1):
factor = 1.0 / factor
table = np.array([((i / 255.0) ** factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
output_images[0] = cv2.LUT(output_images[0], table)
if rotation:
angle = random.randint(0, rotation)
else:
angle = 0.0
if zoom:
scale = random.randint(50, zoom * 100) / 100
else:
scale = 1.0
# print(angle, scale)
if rotation or zoom:
for i, input_image in enumerate(output_images):
M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), angle, scale)
# M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), 45, 1)
output_images[i] = cv2.warpAffine(input_image, M, (input_image.shape[1], input_image.shape[0]))
# print('len of output %s' % len(output_images))
return [input_image.astype(np.uint8) for input_image in output_images]
def data_augmentation(input_image, masks,
h_flip=True,
v_flip=True,
rotation=360,
zoom=1.5,
brightness=0.5,
crop=False):
    # Augment the image and its masks together so they stay aligned.
# Data augmentation
output_image = input_image.copy()
output_masks = masks.copy()
# random crop
# if crop and random.randint(0, 1):
# h, w, c = output_images[0].shape
# upper_h, new_h, upper_w, new_w = locs_for_random_crop(h, w)
# output_images = [input_image[upper_h:upper_h + new_h, upper_w:upper_w + new_w, :] for input_image in output_images]
# random flip
if h_flip and random.randint(0, 1):
output_image = np.fliplr(output_image)
output_masks = np.fliplr(output_masks)
if v_flip and random.randint(0, 1):
output_image = np.flipud(output_image)
output_masks = np.flipud(output_masks)
factor = 1.0 + abs(random.gauss(mu=0.0, sigma=brightness))
if random.randint(0, 1):
factor = 1.0 / factor
table = np.array([((i / 255.0) ** factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
output_image = cv2.LUT(output_image, table)
if rotation:
        rotate_times = random.randint(0, rotation // 90)
else:
        rotate_times = 0
for r in range(0, rotate_times):
output_image = np.rot90(output_image)
output_masks = np.rot90(output_masks)
# if zoom:
# scale = random.randint(50, zoom * 100) / 100
# else:
# scale = 1.0
# # print(angle, scale)
# if rotation or zoom:
# for i, input_image in enumerate(output_images):
# M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), angle, scale)
# # M = cv2.getRotationMatrix2D((input_image.shape[1] // 2, input_image.shape[0] // 2), 45, 1)
# output_images[i] = cv2.warpAffine(input_image, M, (input_image.shape[1], input_image.shape[0]))
# # print('len of output %s' % len(output_images))
return output_image, output_masks
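# Illustrative sketch: a hedged example call of data_augmentation() on random
# data. Sizes are arbitrary; the image is uint8 because the brightness step
# builds a cv2.LUT table. Flips are disabled in this sketch because
# np.fliplr/np.flipud return non-contiguous views, which some OpenCV builds
# reject in cv2.LUT.
def _data_augmentation_example():
    import numpy as np
    image = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    masks = np.random.randint(0, 2, (64, 64, 5), dtype=np.uint8)
    aug_image, aug_masks = data_augmentation(image, masks,
                                             h_flip=False, v_flip=False)
    return aug_image.shape, aug_masks.shape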
from skimage.transform import rescale, resize
def random_crop(img, mask, class_ids, width = 500, height = 500):
assert img.shape[0] == mask.shape[0]
assert img.shape[1] == mask.shape[1]
h, w, _ = img.shape
if h< height or w < width:
img = resize(img, (2*h, 2*w))
resized_mask = np.zeros((h*2, w*2, mask.shape[-1]), dtype=mask.dtype)
for i in range(0, mask.shape[-1]):
# resized_mask[:,:,i] = cv2.resize(mask[:, :, i], (2*w, 2*h))
resized_mask[:,:,i] = resize(mask[:, :, i], (2*h, 2*w))
mask = resized_mask
h, w, _ = img.shape
assert(h>=height)
assert(w>=width)
x = random.randint(0, img.shape[1] - width)
y = random.randint(0, img.shape[0] - height)
img = img[y:y+height, x:x+width]
mask = mask[y:y+height, x:x+width]
mask = mask[:, :, ~np.all(mask==0, axis=(0,1))]
class_ids = class_ids[:mask.shape[-1]]
return img, mask, class_ids
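# Illustrative sketch: a hedged example call of random_crop() on random data.
# With a 600x600 input, no upscaling is triggered and a 500x500 patch is cut;
# instances whose masks end up empty inside the crop are dropped and class_ids
# is trimmed to match.
def _random_crop_example():
    import numpy as np
    image = np.random.randint(0, 256, (600, 600, 3), dtype=np.uint8)
    masks = np.random.randint(0, 2, (600, 600, 4)).astype(bool)
    class_ids = np.array([1, 1, 1, 1], dtype=np.int32)
    crop_img, crop_masks, crop_ids = random_crop(image, masks, class_ids)
    return crop_img.shape, crop_masks.shape, crop_ids.shape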
def load_image_gt(dataset, config, image_id, augment=False,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
    augment: If true, apply random image augmentation. Currently random
        cropping, horizontal/vertical flips, and 90-degree rotations are applied.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
224x224 and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
# Random cropping.
if augment:
image, mask, class_ids = random_crop(image, mask, class_ids)
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
if random.randint(0, 1):
image = np.flipud(image)
mask = np.flipud(mask)
if random.randint(0, 1):
image = np.rot90(image)
mask = np.rot90(mask)
# #brightness
# brightness=0.5
# factor = 1.0 + abs(random.gauss(mu=0.0, sigma=brightness))
# if random.randint(0, 1):
# factor = 1.0 / factor
# table = np.array([((i / 255.0) ** factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
# image = cv2.LUT(image, table)
shape = image.shape
image, window, scale, padding = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
max_dim=config.IMAGE_MAX_DIM,
padding=config.IMAGE_PADDING)
mask = utils.resize_mask(mask, scale, padding)
# Random horizontal flips.
# if augment:
# if random.randint(0, 1):
# image = np.fliplr(image)
# mask = np.fliplr(mask)
    # Remove mask instances that no longer contain any pixels after augmentation
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, shape, window, active_class_ids)
return image, image_meta, class_ids, bbox, mask
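# Illustrative sketch: a hedged usage example of load_image_gt(). `dataset`
# stands for any prepared Dataset subclass and `config` for a Config instance;
# both are assumptions of this sketch, mirroring what data_generator() does
# per image.
def _load_image_gt_usage_sketch(dataset, config, image_id=0):
    image, image_meta, class_ids, bbox, mask = load_image_gt(
        dataset, config, image_id, augment=True,
        use_mini_mask=config.USE_MINI_MASK)
    return image.shape, mask.shape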
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks.
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(scipy.misc.imresize(class_mask.astype(float), (gt_h, gt_w),
interp='nearest') / 255.0).astype(bool)
# Place the mini batch in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = scipy.misc.imresize(
m.astype(float), config.MASK_SHAPE, interp='nearest') / 255.0
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
# All anchors don't intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
        # Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
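# Illustrative sketch: a hedged numeric example of the anchor-refinement
# encoding computed above: (dy, dx) are center shifts in units of the anchor
# height/width and (log(dh), log(dw)) are log scale ratios. All numbers are
# made up for illustration.
def _rpn_delta_example():
    import numpy as np
    anchor = np.array([0., 0., 10., 10.])   # (y1, x1, y2, x2)
    gt = np.array([2., 2., 14., 10.])
    a_h, a_w = anchor[2] - anchor[0], anchor[3] - anchor[1]
    gt_h, gt_w = gt[2] - gt[0], gt[3] - gt[1]
    a_cy, a_cx = anchor[0] + 0.5 * a_h, anchor[1] + 0.5 * a_w
    gt_cy, gt_cx = gt[0] + 0.5 * gt_h, gt[1] + 0.5 * gt_w
    delta = np.array([(gt_cy - a_cy) / a_h,
                      (gt_cx - a_cx) / a_w,
                      np.log(gt_h / a_h),
                      np.log(gt_w / a_w)])
    # delta == [0.3, 0.1, log(1.2), log(0.8)]
    return delta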
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
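# Illustrative sketch: a hedged example call of generate_random_rois(). The
# image shape and the single ground-truth box are arbitrary values chosen
# only for illustration.
def _generate_random_rois_example():
    import numpy as np
    image_shape = (128, 128, 3)
    gt_class_ids = np.array([1], dtype=np.int32)
    gt_boxes = np.array([[10, 10, 50, 50]], dtype=np.int32)
    rois = generate_random_rois(image_shape, 20, gt_class_ids, gt_boxes)
    # rois has shape [20, 4] with (y1, x1, y2, x2) in pixel coordinates.
    return rois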
def data_generator(dataset, config, shuffle=True, augment=True, random_rois=0,
batch_size=1, detection_targets=False):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
    augment: If True, applies image augmentation to images (random crops,
        flips, and 90-degree rotations; see load_image_gt)
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, size of image meta]
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
    # Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
if config.USE_MINI_MASK:
batch_gt_masks = np.zeros((batch_size, config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1],
config.MAX_GT_INSTANCES))
else:
batch_gt_masks = np.zeros(
(batch_size, image.shape[0], image.shape[1], config.MAX_GT_INSTANCES))
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(
dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
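# Illustrative sketch: a hedged usage example of data_generator(). The
# `train_dataset` and `config` arguments stand for any prepared Dataset
# subclass and Config instance; both names are assumptions of this sketch.
def _data_generator_usage_sketch(train_dataset, config):
    gen = data_generator(train_dataset, config, shuffle=True, augment=True,
                         batch_size=config.BATCH_SIZE)
    # Pull one batch to inspect its structure.
    inputs, outputs = next(gen)
    images, image_meta, rpn_match, rpn_bbox, gt_class_ids, gt_boxes, gt_masks = inputs
    return images.shape, gt_masks.shape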
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=config.IMAGE_SHAPE.tolist(), name="input_image")
input_image_meta = KL.Input(shape=[None], name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
h, w = K.shape(input_image)[1], K.shape(input_image)[2]
image_scale = K.cast(K.stack([h, w, h, w], axis=0), tf.float32)
gt_boxes = KL.Lambda(lambda x: x / image_scale)(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
        # Stage 5 (C5) is created here as well (stage5=True); only C1 is discarded.
_, C2, C3, C4, C5 = resnet_graph(input_image, config.RESNET, stage5=True)
# Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Generate Anchors
self.anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
config.BACKBONE_SHAPES,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), 256)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
anchors=self.anchors,
config=config)([rpn_class, rpn_bbox])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
_, _, _, active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x),
mask=[None, None, None, None])(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates to 0-1 range.
target_rois = KL.Lambda(lambda x: K.cast(
x, tf.float32) / image_scale[:4])(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, config.IMAGE_SHAPE,
config.POOL_SIZE, config.NUM_CLASSES)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Convert boxes to normalized coordinates
# TODO: let DetectionLayer return normalized coordinates to avoid
# unnecessary conversions
h, w = config.IMAGE_SHAPE[:2]
detection_boxes = KL.Lambda(
lambda x: x[..., :4] / np.array([h, w, h, w]))(detections)
# Create masks for detections
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
config.IMAGE_SHAPE,
config.MASK_POOL_SIZE,
config.NUM_CLASSES)
model = KM.Model([input_image, input_image_meta],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the correspoding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
        exclude: list of layer names to exclude
"""
import h5py
from keras.engine import topology
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,decay = 0.1,
clipnorm=5.0)
adam_opt = keras.optimizers.Adam(lr=learning_rate)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = ["rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
self.keras_model.add_loss(
tf.reduce_mean(layer.output, keep_dims=True))
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(optimizer=optimizer, loss=[
None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
self.keras_model.metrics_tensors.append(tf.reduce_mean(
layer.output, keep_dims=True))
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
self.epoch = int(m.group(6)) + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done already, so this actually determines
the epochs to train in total rather than in this particular
call.
layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE,
augment=False)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name == 'nt':
workers = 0
else:
workers = max(self.config.BATCH_SIZE // 2, 2)
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=False,
)
self.epoch = max(self.epoch, epochs)
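# Typical training schedule (a sketch, not prescribed by this file): train the
# randomly initialized heads first, then fine-tune the whole network, e.g.
#
#   model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE,
#               epochs=40, layers="heads")
#   model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE / 10,
#               epochs=120, layers="all")
#
# The epoch counts and learning rates here are placeholders.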
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image to fit the model expected size
# TODO: move resizing to mold_image()
molded_image, window, scale, padding = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
max_dim=self.config.IMAGE_MAX_DIM,
padding=self.config.IMAGE_PADDING)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, window,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)]
mrcnn_mask: [N, height, width, num_classes]
image_shape: [height, width, depth] Original size of the image before resizing
window: [y1, x1, y2, x2] Box in the image where the real image is
excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Compute scale and shift to translate coordinates to image domain.
h_scale = image_shape[0] / (window[2] - window[0])
w_scale = image_shape[1] / (window[3] - window[1])
scale = min(h_scale, w_scale)
shift = window[:2] # y, x
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
# Translate bounding boxes to image domain
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty((0,) + masks.shape[1:3])
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
rois, rpn_class, rpn_bbox =\
self.keras_model.predict([molded_images, image_metas], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
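# Usage sketch (an assumption): with config.BATCH_SIZE == 1, detection on a single
# RGB image looks like
#
#   results = model.detect([image], verbose=1)
#   r = results[0]
#   # r["rois"], r["class_ids"], r["scores"], r["masks"]
#
# The variable names are illustrative only.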
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs):
"""Runs a sub-set of the computation graph that computes the given
outputs.
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Run inference
molded_images, image_metas, windows = self.mold_inputs(images)
# TODO: support training mode?
# if TEST_MODE == "training":
# model_in = [molded_images, image_metas,
# target_rpn_match, target_rpn_bbox,
# gt_boxes, gt_masks]
# if not config.USE_RPN_ROIS:
# model_in.append(target_rois)
# if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
# model_in.append(1.)
# outputs_np = kf(model_in)
# else:
model_in = [molded_images, image_metas]
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
# Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
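# Example call (illustrative only): inspect intermediate tensors by name, e.g.
#
#   outputs = model.run_graph([image], [
#       ("proposals", model.keras_model.get_layer("ROI").output),
#       ("detections", model.keras_model.get_layer("mrcnn_detection").output),
#   ])
#
# The layer names above are assumptions; use the names given when the graph was built.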
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, image_shape, window, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
image_shape: [height, width, channels]
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(image_shape) + # size=3
list(window) + # size=4 (y1, x1, y2, x2) in image coordinates
list(active_class_ids) # size=num_classes
)
return meta
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
"""
image_id = meta[:, 0]
image_shape = meta[:, 1:4]
window = meta[:, 4:8] # (y1, x1, y2, x2) window of image in pixels
active_class_ids = meta[:, 8:]
return [image_id, image_shape, window, active_class_ids]
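# Layout of the meta vector handled by the two functions above (derived from
# compose_image_meta): index 0 = image_id, 1:4 = image shape, 4:8 = window,
# 8: = active class ids. For example, with NUM_CLASSES == 81 the vector has
# length 1 + 3 + 4 + 81 = 89.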
def mold_image(images, config):
"""Takes RGB images with 0-255 values and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
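# A minimal round-trip sketch (not part of the original module): mold_image and
# unmold_image are inverses up to the float/uint8 conversion. The helper below is
# illustrative only and is never called by the library code.
def _example_mold_unmold_roundtrip(image, config):
    """Returns True if unmold_image(mold_image(image)) recovers the input image."""
    molded = mold_image(image, config)
    restored = unmold_image(molded, config)
    return np.array_equal(restored, image.astype(np.uint8))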
############################################################
# Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
"""Often boxes are represented with matricies of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
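# Illustrative helper (an assumption, not in the original file): building
# batch_pack_graph on a small constant input. With x = [[1, 2, 3], [4, 5, 6]]
# and counts = [1, 3], the resulting tensor evaluates to [1, 4, 5, 6].
def _example_batch_pack_graph():
    x = tf.constant([[1, 2, 3], [4, 5, 6]])
    counts = [1, 3]
    return batch_pack_graph(x, counts, num_rows=2)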
|
py | 1a41c11362bbdb4959192e76f12879084f24c8e5 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.jar_task import JarTask
##
## See `Appendix A` in the 'publish' documentation:
##
## http://pantsbuild.github.io/publish.html
##
## for tips on how to adapt this example task for your own custom publishing needs.
##
class ExtraTestJarExample(JarTask):
"""Example of a pants publish plugin.
For every JavaLibrary target in the build graph, this plugin will create an 'example.txt' file,
which will be placed in an additional jar. During publishing, this additional jar will be published
along with the target.
"""
def __init__(self, context, workdir):
# Constructor for custom task. Set up things that you need at pants initialization time.
super(ExtraTestJarExample, self).__init__(context, workdir)
# This method is called by pants, when the RoundEngine gets to the phase where your task is
# attached.
def execute(self):
# For each node in the graph that was selected below, create a jar, and store a reference to
# the jar in the product map.
def process(target):
self.context.log.info("Processing target %s" % target)
jar_name = "%s.%s-extra_example.jar" % (target.provides.org, target.provides.name)
# This is the path in .pants.d to write our new additional jar to. Note that we won't publish
# directly from this location.
jar_path = os.path.join(self.workdir, jar_name)
# A sample file to stuff into the jar.
example_file_name = os.path.join(self.workdir, "example.txt")
with open(example_file_name, 'wb') as f:
f.write("This is an example test file.\n")
# Create a jar file to be published along with other artifacts for this target.
# In principle, any extra file type could be created here, and published.
# Options in pants.ini allow specifying the file extension.
with self.open_jar(jar_path, overwrite=True, compressed=True) as open_jar:
# Write the sample file to the jar.
open_jar.write(os.path.join(self.workdir, example_file_name), "example.txt")
# For this target, add the path to the newly created jar to the product map, under the
# 'extra_test_jar_example' key.
#
# IMPORTANT: this string *must* match the string that you have set in pants.ini. Otherwise,
# the code in 'jar_publish.py' won't be able to find this addition to the product map.
self.context.products.get('extra_test_jar_example').add(target, self.workdir).append(jar_name)
self.context.log.info("Made a jar: %s" % jar_path)
# Loop over all of the targets in the graph, and select the ones that we wish to operate on.
# This example selects all JavaLibrary targets, but different criteria can be specified below.
for target in self.context.targets(lambda target: isinstance(target, JavaLibrary)):
process(target)
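# Wiring note (an assumption, not shown in this file): the task still has to be
# registered with pants and attached to a goal, and the 'extra_test_jar_example'
# product name used above must match the entry configured in pants.ini. A
# hypothetical register.py sketch for an older pants release might look like:
#
#   from pants.goal.task_registrar import TaskRegistrar as task
#   task(name='extra-test-jar-example', action=ExtraTestJarExample).install('jar')
#
# The exact registration API depends on the pants version in use.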
|
py | 1a41c1f62ed5a5816fb16b0a8db4b964aeee2e96 | # Josh Aaron Miller 2021
# API calls for Inventory
import venntdb, uuid
from constants import *
# VenntHandler methods
def view_items(self, args, username):
character_id = args[KEY_ID]
if self.server.db.permissions(username, character_id) < Permission.PRIVATE_VIEW:
return self.respond({"success":False, "info":MSG_NO_PERMISSION})
items = self.server.db.view_items(username, args[KEY_ID])
return self.respond({"success":True, "value":items})
def remove_item(self, args, username):
character_id = args[KEY_ID]
if self.server.db.permissions(username, character_id) < Permission.EDIT:
return self.respond({"success":False, "info":MSG_NO_PERMISSION})
return self.respond({"success":self.server.db.remove_item(username, args[KEY_ID], args[KEY_ID2])})
def add_item(self, args, username):
character_id = args[KEY_ID]
if self.server.db.permissions(username, character_id) < Permission.ADD:
return self.respond({"success":False, "info":MSG_NO_PERMISSION})
name = args[KEY_NAME]
if len(name) > MAX_NAME_LENGTH:
return self.respond({"success":False, "info":MSG_NAME_LONG})
desc = args[KEY_DESC]
if len(desc) > MAX_DESC_LENGTH:
return self.respond({"success":False, "info":MSG_DESC_LONG})
try:
bulk = int(args[KEY_BULK])
except:
return self.respond({"success":False, "info":MSG_NOT_INT.format(KEY_BULK)})
id = IDType.ITEM + str(uuid.uuid4())
item = {"name":name, "id":id, "desc":desc, "bulk":bulk}
success = self.server.db.add_item(username, args[KEY_ID], item)
if not success:
return self.respond({"success":False, "info":"Max items exceeded"})
ret = {"success":True, "id":id}
return self.respond(ret)
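# Example payload (illustrative, key constants come from constants.py): a successful
# add_item call receives args such as
#   {KEY_ID: "<character id>", KEY_NAME: "Rope", KEY_DESC: "50 ft of hemp rope", KEY_BULK: "1"}
# and responds with {"success": True, "id": "<ITEM-prefixed uuid>"}; the stored item
# dict mirrors the "name"/"id"/"desc"/"bulk" keys built above.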
def get_weapon(self, args, username):
weapon = self.server.db.get_weapon(username, args[KEY_NAME])
if weapon is None:
return self.respond({"success":False, "info":"No such weapon"})
return self.respond({"success":True, "value":weapon})
def add_weapon(self, args, username):
weapon_name = args[KEY_NAME]
if len(weapon_name) > MAX_NAME_LENGTH:
return self.respond({"success":False, "info":MSG_NAME_LONG})
if self.server.db.get_weapon(username, weapon_name) is not None:
return self.respond({"success":False, "info":"Weapon already exists"})
attr = args[KEY_ATTR]
if attr not in ATTRIBUTES:
return self.respond({"success":False,"info":MSG_NO_ATTR})
dmg = args[KEY_DMG]
# TODO verify dmg is well-ordered
mods = []
if KEY_MODS in args:
    mods = args[KEY_MODS]
# TODO verify mods are well-ordered
weapon = {"name":weapon_name, "attr":attr, "dmg":dmg, "mods":mods}
self.server.db.add_weapon(username, weapon)
return self.respond({"success":True})
def remove_weapon(self, args, username):
return self.respond({"success":self.server.db.remove_weapon(username, args[KEY_NAME])}) |
py | 1a41c33a8bd86acb49b6bb92647285b71ddd42d3 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.datalabeling_v1beta1.services.data_labeling_service import pagers
from google.cloud.datalabeling_v1beta1.types import annotation
from google.cloud.datalabeling_v1beta1.types import annotation_spec_set
from google.cloud.datalabeling_v1beta1.types import (
annotation_spec_set as gcd_annotation_spec_set,
)
from google.cloud.datalabeling_v1beta1.types import data_labeling_service
from google.cloud.datalabeling_v1beta1.types import data_payloads
from google.cloud.datalabeling_v1beta1.types import dataset
from google.cloud.datalabeling_v1beta1.types import dataset as gcd_dataset
from google.cloud.datalabeling_v1beta1.types import evaluation
from google.cloud.datalabeling_v1beta1.types import evaluation_job
from google.cloud.datalabeling_v1beta1.types import evaluation_job as gcd_evaluation_job
from google.cloud.datalabeling_v1beta1.types import human_annotation_config
from google.cloud.datalabeling_v1beta1.types import instruction
from google.cloud.datalabeling_v1beta1.types import instruction as gcd_instruction
from google.cloud.datalabeling_v1beta1.types import operations
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DataLabelingServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DataLabelingServiceGrpcTransport
from .transports.grpc_asyncio import DataLabelingServiceGrpcAsyncIOTransport
class DataLabelingServiceClientMeta(type):
"""Metaclass for the DataLabelingService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DataLabelingServiceTransport]]
_transport_registry["grpc"] = DataLabelingServiceGrpcTransport
_transport_registry["grpc_asyncio"] = DataLabelingServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DataLabelingServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DataLabelingServiceClient(metaclass=DataLabelingServiceClientMeta):
"""Service for the AI Platform Data Labeling API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "datalabeling.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataLabelingServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataLabelingServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DataLabelingServiceTransport:
"""Returns the transport used by the client instance.
Returns:
DataLabelingServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def annotated_dataset_path(
project: str, dataset: str, annotated_dataset: str,
) -> str:
"""Returns a fully-qualified annotated_dataset string."""
return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(
project=project, dataset=dataset, annotated_dataset=annotated_dataset,
)
@staticmethod
def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
"""Parses a annotated_dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
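# Round-trip illustration (resource IDs are placeholders):
#   path = DataLabelingServiceClient.annotated_dataset_path("my-project", "ds1", "ad1")
#   # -> "projects/my-project/datasets/ds1/annotatedDatasets/ad1"
#   DataLabelingServiceClient.parse_annotated_dataset_path(path)
#   # -> {"project": "my-project", "dataset": "ds1", "annotated_dataset": "ad1"}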
@staticmethod
def annotation_spec_set_path(project: str, annotation_spec_set: str,) -> str:
"""Returns a fully-qualified annotation_spec_set string."""
return "projects/{project}/annotationSpecSets/{annotation_spec_set}".format(
project=project, annotation_spec_set=annotation_spec_set,
)
@staticmethod
def parse_annotation_spec_set_path(path: str) -> Dict[str, str]:
"""Parses a annotation_spec_set path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/annotationSpecSets/(?P<annotation_spec_set>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def data_item_path(project: str, dataset: str, data_item: str,) -> str:
"""Returns a fully-qualified data_item string."""
return "projects/{project}/datasets/{dataset}/dataItems/{data_item}".format(
project=project, dataset=dataset, data_item=data_item,
)
@staticmethod
def parse_data_item_path(path: str) -> Dict[str, str]:
"""Parses a data_item path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/dataItems/(?P<data_item>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/datasets/{dataset}".format(
project=project, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def evaluation_path(project: str, dataset: str, evaluation: str,) -> str:
"""Returns a fully-qualified evaluation string."""
return "projects/{project}/datasets/{dataset}/evaluations/{evaluation}".format(
project=project, dataset=dataset, evaluation=evaluation,
)
@staticmethod
def parse_evaluation_path(path: str) -> Dict[str, str]:
"""Parses a evaluation path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/evaluations/(?P<evaluation>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def evaluation_job_path(project: str, evaluation_job: str,) -> str:
"""Returns a fully-qualified evaluation_job string."""
return "projects/{project}/evaluationJobs/{evaluation_job}".format(
project=project, evaluation_job=evaluation_job,
)
@staticmethod
def parse_evaluation_job_path(path: str) -> Dict[str, str]:
"""Parses a evaluation_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/evaluationJobs/(?P<evaluation_job>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def example_path(
project: str, dataset: str, annotated_dataset: str, example: str,
) -> str:
"""Returns a fully-qualified example string."""
return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}/examples/{example}".format(
project=project,
dataset=dataset,
annotated_dataset=annotated_dataset,
example=example,
)
@staticmethod
def parse_example_path(path: str) -> Dict[str, str]:
"""Parses a example path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)/examples/(?P<example>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def instruction_path(project: str, instruction: str,) -> str:
"""Returns a fully-qualified instruction string."""
return "projects/{project}/instructions/{instruction}".format(
project=project, instruction=instruction,
)
@staticmethod
def parse_instruction_path(path: str) -> Dict[str, str]:
"""Parses a instruction path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/instructions/(?P<instruction>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DataLabelingServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the data labeling service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, DataLabelingServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DataLabelingServiceTransport):
# transport is a DataLabelingServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def create_dataset(
self,
request: Union[data_labeling_service.CreateDatasetRequest, dict] = None,
*,
parent: str = None,
dataset: gcd_dataset.Dataset = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_dataset.Dataset:
r"""Creates dataset. If success return a Dataset
resource.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.CreateDatasetRequest, dict]):
The request object. Request message for CreateDataset.
parent (str):
Required. Dataset resource parent, format:
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
dataset (google.cloud.datalabeling_v1beta1.types.Dataset):
Required. The dataset to be created.
This corresponds to the ``dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.Dataset:
Dataset is the resource to hold your
data. You can request multiple labeling
tasks for a dataset while each one will
generate an AnnotatedDataset.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, dataset])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.CreateDatasetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.CreateDatasetRequest):
request = data_labeling_service.CreateDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if dataset is not None:
request.dataset = dataset
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_dataset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
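# Usage sketch (client construction and field values are assumptions):
#   client = DataLabelingServiceClient()
#   created = client.create_dataset(
#       parent="projects/my-project",
#       dataset=gcd_dataset.Dataset(display_name="my-dataset"),
#   )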
def get_dataset(
self,
request: Union[data_labeling_service.GetDatasetRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dataset.Dataset:
r"""Gets dataset by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetDatasetRequest, dict]):
The request object. Request message for GetDataSet.
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.Dataset:
Dataset is the resource to hold your
data. You can request multiple labeling
tasks for a dataset while each one will
generate an AnnotatedDataset.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetDatasetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetDatasetRequest):
request = data_labeling_service.GetDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_dataset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_datasets(
self,
request: Union[data_labeling_service.ListDatasetsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDatasetsPager:
r"""Lists datasets under a project. Pagination is
supported.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ListDatasetsRequest, dict]):
The request object. Request message for ListDataset.
parent (str):
Required. Dataset resource parent, format:
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filter on dataset is not
supported at this moment.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.ListDatasetsPager:
Results of listing datasets within a
project.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ListDatasetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ListDatasetsRequest):
request = data_labeling_service.ListDatasetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_datasets]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDatasetsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
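# Usage sketch (illustrative): the returned pager can be iterated directly and
# fetches additional pages lazily, e.g.
#   for ds in client.list_datasets(parent="projects/my-project"):
#       print(ds.name)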
def delete_dataset(
self,
request: Union[data_labeling_service.DeleteDatasetRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a dataset by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.DeleteDatasetRequest, dict]):
The request object. Request message for DeleteDataset.
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.DeleteDatasetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.DeleteDatasetRequest):
request = data_labeling_service.DeleteDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_dataset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def import_data(
self,
request: Union[data_labeling_service.ImportDataRequest, dict] = None,
*,
name: str = None,
input_config: dataset.InputConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Imports data into dataset based on source locations
defined in request. It can be called multiple times for
the same dataset. Each dataset can only have one long
running operation running on it. For example, no
labeling task (also long running operation) can be
started while importing is still ongoing. Vice versa.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ImportDataRequest, dict]):
The request object. Request message for ImportData API.
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
input_config (google.cloud.datalabeling_v1beta1.types.InputConfig):
Required. Specify the input source of
the data.
This corresponds to the ``input_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.datalabeling_v1beta1.types.ImportDataOperationResponse`
Response used for ImportData longrunning operation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, input_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ImportDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ImportDataRequest):
request = data_labeling_service.ImportDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if input_config is not None:
request.input_config = input_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.import_data]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
operations.ImportDataOperationResponse,
metadata_type=operations.ImportDataOperationMetadata,
)
# Done; return the response.
return response
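# Usage sketch (resource names are placeholders): import_data returns a
# google.api_core.operation.Operation; block until it completes with .result(), e.g.
#   op = client.import_data(name="projects/my-project/datasets/ds1",
#                           input_config=input_config)
#   import_result = op.result()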
def export_data(
self,
request: Union[data_labeling_service.ExportDataRequest, dict] = None,
*,
name: str = None,
annotated_dataset: str = None,
filter: str = None,
output_config: dataset.OutputConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Exports data and annotations from dataset.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ExportDataRequest, dict]):
The request object. Request message for ExportData API.
name (str):
Required. Dataset resource name, format:
projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
annotated_dataset (str):
Required. Annotated dataset resource name. DataItem in
Dataset and their annotations in specified annotated
dataset will be exported. It's in format of
projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
{annotated_dataset_id}
This corresponds to the ``annotated_dataset`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filter is not supported at
this moment.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
output_config (google.cloud.datalabeling_v1beta1.types.OutputConfig):
Required. Specify the output
destination.
This corresponds to the ``output_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.datalabeling_v1beta1.types.ExportDataOperationResponse`
Response used for ExportDataset longrunning operation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, annotated_dataset, filter, output_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ExportDataRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ExportDataRequest):
request = data_labeling_service.ExportDataRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if annotated_dataset is not None:
request.annotated_dataset = annotated_dataset
if filter is not None:
request.filter = filter
if output_config is not None:
request.output_config = output_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.export_data]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
operations.ExportDataOperationResponse,
metadata_type=operations.ExportDataOperationMetadata,
)
# Done; return the response.
return response
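
    # Usage sketch (illustrative, not part of the generated surface): export a
    # dataset's annotations and block on the long-running operation. Resource
    # names are placeholders, ``client`` is an instance of this client class,
    # and ``my_output_config`` stands in for a ``dataset.OutputConfig``.
    #
    #   op = client.export_data(
    #       name="projects/my-project/datasets/my-dataset",
    #       annotated_dataset=(
    #           "projects/my-project/datasets/my-dataset/"
    #           "annotatedDatasets/my-annotated-dataset"
    #       ),
    #       output_config=my_output_config,
    #   )
    #   export_result = op.result()  # ExportDataOperationResponse
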
def get_data_item(
self,
request: Union[data_labeling_service.GetDataItemRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dataset.DataItem:
r"""Gets a data item in a dataset by resource name. This
API can be called after data are imported into dataset.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetDataItemRequest, dict]):
The request object. Request message for GetDataItem.
name (str):
Required. The name of the data item to get, format:
projects/{project_id}/datasets/{dataset_id}/dataItems/{data_item_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.DataItem:
DataItem is a piece of data, without
annotation. For example, an image.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetDataItemRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetDataItemRequest):
request = data_labeling_service.GetDataItemRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_data_item]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
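
    # Usage sketch (illustrative): fetch a single data item by resource name.
    # The name is a placeholder and ``client`` is an instance of this client
    # class.
    #
    #   item = client.get_data_item(
    #       name="projects/my-project/datasets/my-dataset/dataItems/my-item"
    #   )
    #   print(item.name)
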
def list_data_items(
self,
request: Union[data_labeling_service.ListDataItemsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDataItemsPager:
r"""Lists data items in a dataset. This API can be called
after data are imported into dataset. Pagination is
supported.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ListDataItemsRequest, dict]):
The request object. Request message for ListDataItems.
parent (str):
Required. Name of the dataset to list data items,
format: projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filter is not supported at
this moment.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.ListDataItemsPager:
Results of listing data items in a
dataset.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ListDataItemsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ListDataItemsRequest):
request = data_labeling_service.ListDataItemsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_data_items]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDataItemsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
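
    # Usage sketch (illustrative): the returned pager resolves additional pages
    # transparently while iterating. The parent name is a placeholder and
    # ``client`` is an instance of this client class.
    #
    #   for data_item in client.list_data_items(
    #       parent="projects/my-project/datasets/my-dataset"
    #   ):
    #       print(data_item.name)
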
def get_annotated_dataset(
self,
request: Union[data_labeling_service.GetAnnotatedDatasetRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dataset.AnnotatedDataset:
r"""Gets an annotated dataset by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetAnnotatedDatasetRequest, dict]):
The request object. Request message for
GetAnnotatedDataset.
name (str):
Required. Name of the annotated dataset to get, format:
projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
{annotated_dataset_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.AnnotatedDataset:
AnnotatedDataset is a set holding
annotations for data in a Dataset. Each
labeling task will generate an
AnnotatedDataset under the Dataset that
the task is requested for.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetAnnotatedDatasetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetAnnotatedDatasetRequest):
request = data_labeling_service.GetAnnotatedDatasetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_annotated_dataset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_annotated_datasets(
self,
request: Union[data_labeling_service.ListAnnotatedDatasetsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAnnotatedDatasetsPager:
r"""Lists annotated datasets for a dataset. Pagination is
supported.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ListAnnotatedDatasetsRequest, dict]):
The request object. Request message for
ListAnnotatedDatasets.
parent (str):
Required. Name of the dataset to list annotated
datasets, format:
projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filter is not supported at
this moment.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.ListAnnotatedDatasetsPager:
Results of listing annotated datasets
for a dataset.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ListAnnotatedDatasetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ListAnnotatedDatasetsRequest):
request = data_labeling_service.ListAnnotatedDatasetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_annotated_datasets]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListAnnotatedDatasetsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_annotated_dataset(
self,
request: Union[
data_labeling_service.DeleteAnnotatedDatasetRequest, dict
] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an annotated dataset by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.DeleteAnnotatedDatasetRequest, dict]):
The request object. Request message for
DeleteAnnotatedDataset.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.DeleteAnnotatedDatasetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.DeleteAnnotatedDatasetRequest):
request = data_labeling_service.DeleteAnnotatedDatasetRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_annotated_dataset]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
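
    # Usage sketch (illustrative): this method exposes no flattened fields, so
    # the annotated dataset is addressed through the request object (a plain
    # dict is coerced into a DeleteAnnotatedDatasetRequest). The name is a
    # placeholder and ``client`` is an instance of this client class.
    #
    #   client.delete_annotated_dataset(
    #       request={
    #           "name": (
    #               "projects/my-project/datasets/my-dataset/"
    #               "annotatedDatasets/my-annotated-dataset"
    #           )
    #       }
    #   )
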
def label_image(
self,
request: Union[data_labeling_service.LabelImageRequest, dict] = None,
*,
parent: str = None,
basic_config: human_annotation_config.HumanAnnotationConfig = None,
feature: data_labeling_service.LabelImageRequest.Feature = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Starts a labeling task for image. The type of image
labeling task is configured by feature in the request.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.LabelImageRequest, dict]):
The request object. Request message for starting an
image labeling task.
parent (str):
Required. Name of the dataset to request labeling task,
format: projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
basic_config (google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig):
Required. Basic human annotation
config.
This corresponds to the ``basic_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
feature (google.cloud.datalabeling_v1beta1.types.LabelImageRequest.Feature):
Required. The type of image labeling
task.
This corresponds to the ``feature`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.datalabeling_v1beta1.types.AnnotatedDataset` AnnotatedDataset is a set holding annotations for data in a Dataset. Each
labeling task will generate an AnnotatedDataset under
the Dataset that the task is requested for.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, basic_config, feature])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.LabelImageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.LabelImageRequest):
request = data_labeling_service.LabelImageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if basic_config is not None:
request.basic_config = basic_config
if feature is not None:
request.feature = feature
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.label_image]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
dataset.AnnotatedDataset,
metadata_type=operations.LabelOperationMetadata,
)
# Done; return the response.
return response
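
    # Usage sketch (illustrative): start an image labeling task and wait for
    # the resulting AnnotatedDataset. ``my_basic_config`` stands in for a
    # ``human_annotation_config.HumanAnnotationConfig``, the feature value is
    # shown only as an example, and ``client`` is an instance of this client
    # class. ``label_video`` and ``label_text`` below follow the same shape.
    #
    #   op = client.label_image(
    #       parent="projects/my-project/datasets/my-dataset",
    #       basic_config=my_basic_config,
    #       feature=data_labeling_service.LabelImageRequest.Feature.CLASSIFICATION,
    #   )
    #   annotated_dataset = op.result()
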
def label_video(
self,
request: Union[data_labeling_service.LabelVideoRequest, dict] = None,
*,
parent: str = None,
basic_config: human_annotation_config.HumanAnnotationConfig = None,
feature: data_labeling_service.LabelVideoRequest.Feature = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Starts a labeling task for video. The type of video
labeling task is configured by feature in the request.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.LabelVideoRequest, dict]):
The request object. Request message for LabelVideo.
parent (str):
Required. Name of the dataset to request labeling task,
format: projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
basic_config (google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig):
Required. Basic human annotation
config.
This corresponds to the ``basic_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
feature (google.cloud.datalabeling_v1beta1.types.LabelVideoRequest.Feature):
Required. The type of video labeling
task.
This corresponds to the ``feature`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.datalabeling_v1beta1.types.AnnotatedDataset` AnnotatedDataset is a set holding annotations for data in a Dataset. Each
labeling task will generate an AnnotatedDataset under
the Dataset that the task is requested for.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, basic_config, feature])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.LabelVideoRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.LabelVideoRequest):
request = data_labeling_service.LabelVideoRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if basic_config is not None:
request.basic_config = basic_config
if feature is not None:
request.feature = feature
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.label_video]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
dataset.AnnotatedDataset,
metadata_type=operations.LabelOperationMetadata,
)
# Done; return the response.
return response
def label_text(
self,
request: Union[data_labeling_service.LabelTextRequest, dict] = None,
*,
parent: str = None,
basic_config: human_annotation_config.HumanAnnotationConfig = None,
feature: data_labeling_service.LabelTextRequest.Feature = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Starts a labeling task for text. The type of text
labeling task is configured by feature in the request.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.LabelTextRequest, dict]):
The request object. Request message for LabelText.
parent (str):
                Required. Name of the dataset to request labeling task,
format: projects/{project_id}/datasets/{dataset_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
basic_config (google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig):
Required. Basic human annotation
config.
This corresponds to the ``basic_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
feature (google.cloud.datalabeling_v1beta1.types.LabelTextRequest.Feature):
Required. The type of text labeling
task.
This corresponds to the ``feature`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.datalabeling_v1beta1.types.AnnotatedDataset` AnnotatedDataset is a set holding annotations for data in a Dataset. Each
labeling task will generate an AnnotatedDataset under
the Dataset that the task is requested for.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, basic_config, feature])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.LabelTextRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.LabelTextRequest):
request = data_labeling_service.LabelTextRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if basic_config is not None:
request.basic_config = basic_config
if feature is not None:
request.feature = feature
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.label_text]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
dataset.AnnotatedDataset,
metadata_type=operations.LabelOperationMetadata,
)
# Done; return the response.
return response
def get_example(
self,
request: Union[data_labeling_service.GetExampleRequest, dict] = None,
*,
name: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dataset.Example:
r"""Gets an example by resource name, including both data
and annotation.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetExampleRequest, dict]):
The request object. Request message for GetExample
name (str):
Required. Name of example, format:
projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
{annotated_dataset_id}/examples/{example_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. An expression for filtering Examples. Filter
by annotation_spec.display_name is supported. Format
"annotation_spec.display_name = {display_name}"
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.Example:
An Example is a piece of data and its
annotation. For example, an image with
label "house".
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetExampleRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetExampleRequest):
request = data_labeling_service.GetExampleRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_example]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
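
    # Usage sketch (illustrative): fetch one example, optionally narrowing the
    # returned annotations with the documented filter format. The resource
    # name and display name are placeholders; ``client`` is an instance of
    # this client class.
    #
    #   example = client.get_example(
    #       name=(
    #           "projects/my-project/datasets/my-dataset/"
    #           "annotatedDatasets/my-annotated-dataset/examples/my-example"
    #       ),
    #       filter="annotation_spec.display_name = my_label",
    #   )
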
def list_examples(
self,
request: Union[data_labeling_service.ListExamplesRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListExamplesPager:
r"""Lists examples in an annotated dataset. Pagination is
supported.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ListExamplesRequest, dict]):
The request object. Request message for ListExamples.
parent (str):
Required. Example resource parent.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. An expression for filtering Examples. For
annotated datasets that have annotation spec set, filter
by annotation_spec.display_name is supported. Format
"annotation_spec.display_name = {display_name}"
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.ListExamplesPager:
                Results of listing Examples in an
annotated dataset.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ListExamplesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ListExamplesRequest):
request = data_labeling_service.ListExamplesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_examples]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListExamplesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
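
    # Usage sketch (illustrative): iterate over all examples under an
    # annotated dataset; the pager handles pagination. The parent name is a
    # placeholder and ``client`` is an instance of this client class.
    #
    #   parent = (
    #       "projects/my-project/datasets/my-dataset/"
    #       "annotatedDatasets/my-annotated-dataset"
    #   )
    #   for example in client.list_examples(parent=parent):
    #       print(example.name)
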
def create_annotation_spec_set(
self,
request: Union[
data_labeling_service.CreateAnnotationSpecSetRequest, dict
] = None,
*,
parent: str = None,
annotation_spec_set: gcd_annotation_spec_set.AnnotationSpecSet = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_annotation_spec_set.AnnotationSpecSet:
r"""Creates an annotation spec set by providing a set of
labels.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.CreateAnnotationSpecSetRequest, dict]):
The request object. Request message for
CreateAnnotationSpecSet.
parent (str):
Required. AnnotationSpecSet resource parent, format:
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
annotation_spec_set (google.cloud.datalabeling_v1beta1.types.AnnotationSpecSet):
Required. Annotation spec set to create. Annotation
specs must be included. Only one annotation spec will be
                accepted for annotation specs with the same display_name.
This corresponds to the ``annotation_spec_set`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.AnnotationSpecSet:
An AnnotationSpecSet is a collection
of label definitions. For example, in
image classification tasks, you define a
set of possible labels for images as an
AnnotationSpecSet. An AnnotationSpecSet
is immutable upon creation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, annotation_spec_set])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.CreateAnnotationSpecSetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, data_labeling_service.CreateAnnotationSpecSetRequest
):
request = data_labeling_service.CreateAnnotationSpecSetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if annotation_spec_set is not None:
request.annotation_spec_set = annotation_spec_set
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_annotation_spec_set
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
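
    # Usage sketch (illustrative): ``my_spec_set`` stands in for a
    # ``gcd_annotation_spec_set.AnnotationSpecSet`` whose annotation specs are
    # populated; ``client`` is an instance of this client class.
    #
    #   created = client.create_annotation_spec_set(
    #       parent="projects/my-project",
    #       annotation_spec_set=my_spec_set,
    #   )
    #   print(created.name)
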
def get_annotation_spec_set(
self,
request: Union[data_labeling_service.GetAnnotationSpecSetRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> annotation_spec_set.AnnotationSpecSet:
r"""Gets an annotation spec set by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetAnnotationSpecSetRequest, dict]):
The request object. Request message for
GetAnnotationSpecSet.
name (str):
Required. AnnotationSpecSet resource name, format:
projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.AnnotationSpecSet:
An AnnotationSpecSet is a collection
of label definitions. For example, in
image classification tasks, you define a
set of possible labels for images as an
AnnotationSpecSet. An AnnotationSpecSet
is immutable upon creation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetAnnotationSpecSetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetAnnotationSpecSetRequest):
request = data_labeling_service.GetAnnotationSpecSetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec_set]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_annotation_spec_sets(
self,
request: Union[
data_labeling_service.ListAnnotationSpecSetsRequest, dict
] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListAnnotationSpecSetsPager:
r"""Lists annotation spec sets for a project. Pagination
is supported.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ListAnnotationSpecSetsRequest, dict]):
The request object. Request message for
ListAnnotationSpecSets.
parent (str):
Required. Parent of AnnotationSpecSet resource, format:
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filter is not supported at
this moment.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.ListAnnotationSpecSetsPager:
Results of listing annotation spec
                sets under a project.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ListAnnotationSpecSetsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ListAnnotationSpecSetsRequest):
request = data_labeling_service.ListAnnotationSpecSetsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_annotation_spec_sets
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListAnnotationSpecSetsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_annotation_spec_set(
self,
request: Union[
data_labeling_service.DeleteAnnotationSpecSetRequest, dict
] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an annotation spec set by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.DeleteAnnotationSpecSetRequest, dict]):
The request object. Request message for
DeleteAnnotationSpecSet.
name (str):
                Required. AnnotationSpecSet resource name, format:
``projects/{project_id}/annotationSpecSets/{annotation_spec_set_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.DeleteAnnotationSpecSetRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, data_labeling_service.DeleteAnnotationSpecSetRequest
):
request = data_labeling_service.DeleteAnnotationSpecSetRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_annotation_spec_set
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_instruction(
self,
request: Union[data_labeling_service.CreateInstructionRequest, dict] = None,
*,
parent: str = None,
instruction: gcd_instruction.Instruction = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates an instruction for how data should be
labeled.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.CreateInstructionRequest, dict]):
The request object. Request message for
CreateInstruction.
parent (str):
Required. Instruction resource parent, format:
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
instruction (google.cloud.datalabeling_v1beta1.types.Instruction):
Required. Instruction of how to
perform the labeling task.
This corresponds to the ``instruction`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.datalabeling_v1beta1.types.Instruction` Instruction of how to perform the labeling task for human operators.
Currently only PDF instruction is supported.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, instruction])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.CreateInstructionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.CreateInstructionRequest):
request = data_labeling_service.CreateInstructionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if instruction is not None:
request.instruction = instruction
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_instruction]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcd_instruction.Instruction,
metadata_type=operations.CreateInstructionMetadata,
)
# Done; return the response.
return response
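
    # Usage sketch (illustrative): ``my_instruction`` stands in for a
    # ``gcd_instruction.Instruction`` (currently PDF-based); the call returns
    # a long-running operation whose result is the created Instruction.
    # ``client`` is an instance of this client class.
    #
    #   op = client.create_instruction(
    #       parent="projects/my-project",
    #       instruction=my_instruction,
    #   )
    #   created_instruction = op.result()
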
def get_instruction(
self,
request: Union[data_labeling_service.GetInstructionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> instruction.Instruction:
r"""Gets an instruction by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetInstructionRequest, dict]):
The request object. Request message for GetInstruction.
name (str):
Required. Instruction resource name, format:
projects/{project_id}/instructions/{instruction_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.Instruction:
Instruction of how to perform the
labeling task for human operators.
Currently only PDF instruction is
supported.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetInstructionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetInstructionRequest):
request = data_labeling_service.GetInstructionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_instruction]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_instructions(
self,
request: Union[data_labeling_service.ListInstructionsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListInstructionsPager:
r"""Lists instructions for a project. Pagination is
supported.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ListInstructionsRequest, dict]):
The request object. Request message for
ListInstructions.
parent (str):
Required. Instruction resource parent, format:
projects/{project_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filter is not supported at
this moment.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.ListInstructionsPager:
Results of listing instructions under
a project.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ListInstructionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ListInstructionsRequest):
request = data_labeling_service.ListInstructionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_instructions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListInstructionsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_instruction(
self,
request: Union[data_labeling_service.DeleteInstructionRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an instruction object by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.DeleteInstructionRequest, dict]):
The request object. Request message for
DeleteInstruction.
name (str):
Required. Instruction resource name, format:
projects/{project_id}/instructions/{instruction_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.DeleteInstructionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.DeleteInstructionRequest):
request = data_labeling_service.DeleteInstructionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_instruction]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
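
    # Usage sketch (illustrative): the call returns None on success. The
    # resource name is a placeholder and ``client`` is an instance of this
    # client class.
    #
    #   client.delete_instruction(
    #       name="projects/my-project/instructions/my-instruction"
    #   )
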
def get_evaluation(
self,
request: Union[data_labeling_service.GetEvaluationRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> evaluation.Evaluation:
r"""Gets an evaluation by resource name (to search, use
[projects.evaluations.search][google.cloud.datalabeling.v1beta1.DataLabelingService.SearchEvaluations]).
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetEvaluationRequest, dict]):
The request object. Request message for GetEvaluation.
name (str):
Required. Name of the evaluation. Format:
"projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}'
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.Evaluation:
Describes an evaluation between a machine learning model's predictions and
ground truth labels. Created when an
[EvaluationJob][google.cloud.datalabeling.v1beta1.EvaluationJob]
runs successfully.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetEvaluationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetEvaluationRequest):
request = data_labeling_service.GetEvaluationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_evaluation]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
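
    # Usage sketch (illustrative): the resource name is a placeholder and
    # ``client`` is an instance of this client class.
    #
    #   evaluation = client.get_evaluation(
    #       name=(
    #           "projects/my-project/datasets/my-dataset/"
    #           "evaluations/my-evaluation"
    #       )
    #   )
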
def search_evaluations(
self,
request: Union[data_labeling_service.SearchEvaluationsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchEvaluationsPager:
r"""Searches
[evaluations][google.cloud.datalabeling.v1beta1.Evaluation]
within a project.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.SearchEvaluationsRequest, dict]):
The request object. Request message for
SearchEvaluation.
parent (str):
Required. Evaluation search parent (project ID). Format:
"projects/{project_id}"
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
                Optional. To search evaluations, you can filter by the
                following:

                -  evaluation_job.evaluation_job_id (the last part of
                   [EvaluationJob.name][google.cloud.datalabeling.v1beta1.EvaluationJob.name])
                -  evaluation_job.model_id (the {model_name} portion of
                   [EvaluationJob.modelVersion][google.cloud.datalabeling.v1beta1.EvaluationJob.model_version])
                -  evaluation_job.evaluation_job_run_time_start (Minimum
                   threshold for the
                   [evaluationJobRunTime][google.cloud.datalabeling.v1beta1.Evaluation.evaluation_job_run_time]
                   that created the evaluation)
                -  evaluation_job.evaluation_job_run_time_end (Maximum
                   threshold for the
                   [evaluationJobRunTime][google.cloud.datalabeling.v1beta1.Evaluation.evaluation_job_run_time]
                   that created the evaluation)
                -  evaluation_job.job_state
                   ([EvaluationJob.state][google.cloud.datalabeling.v1beta1.EvaluationJob.state])
                -  annotation_spec.display_name (the Evaluation contains a
                   metric for the annotation spec with this
                   [displayName][google.cloud.datalabeling.v1beta1.AnnotationSpec.display_name])

                To filter by multiple criteria, use the ``AND`` operator or
                the ``OR`` operator. For example, the following string
                filters by several criteria:

                "evaluation_job.evaluation_job_id = {evaluation_job_id}
                AND evaluation_job.model_id = {model_name}
                AND evaluation_job.evaluation_job_run_time_start = {timestamp_1}
                AND evaluation_job.evaluation_job_run_time_end = {timestamp_2}
                AND annotation_spec.display_name = {display_name}"
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.SearchEvaluationsPager:
Results of searching evaluations.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.SearchEvaluationsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.SearchEvaluationsRequest):
request = data_labeling_service.SearchEvaluationsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.search_evaluations]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchEvaluationsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
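# A usage sketch, kept in comments because this sits inside the class body:
# "my-project" and "my_model" are placeholder values, and the filter string
# merely illustrates the syntax described in the docstring above. Iterating
# the returned pager yields Evaluation messages and resolves additional
# pages automatically.
#
#     client = DataLabelingServiceClient()
#     pager = client.search_evaluations(
#         parent="projects/my-project",
#         filter="evaluation_job.model_id = my_model",
#     )
#     for evaluation in pager:
#         print(evaluation.name)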
def search_example_comparisons(
self,
request: Union[
data_labeling_service.SearchExampleComparisonsRequest, dict
] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchExampleComparisonsPager:
r"""Searches example comparisons from an evaluation. The
return format is a list of example comparisons that show
ground truth and prediction(s) for a single input.
Search by providing an evaluation ID.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.SearchExampleComparisonsRequest, dict]):
The request object. Request message of
SearchExampleComparisons.
parent (str):
Required. Name of the
[Evaluation][google.cloud.datalabeling.v1beta1.Evaluation]
resource to search for example comparisons from. Format:
"projects/{project_id}/datasets/{dataset_id}/evaluations/{evaluation_id}"
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.SearchExampleComparisonsPager:
Results of searching example
comparisons.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.SearchExampleComparisonsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, data_labeling_service.SearchExampleComparisonsRequest
):
request = data_labeling_service.SearchExampleComparisonsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.search_example_comparisons
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchExampleComparisonsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def create_evaluation_job(
self,
request: Union[data_labeling_service.CreateEvaluationJobRequest, dict] = None,
*,
parent: str = None,
job: evaluation_job.EvaluationJob = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> evaluation_job.EvaluationJob:
r"""Creates an evaluation job.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.CreateEvaluationJobRequest, dict]):
The request object. Request message for
CreateEvaluationJob.
parent (str):
Required. Evaluation job resource parent. Format:
"projects/{project_id}"
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job (google.cloud.datalabeling_v1beta1.types.EvaluationJob):
Required. The evaluation job to
create.
This corresponds to the ``job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.EvaluationJob:
Defines an evaluation job that runs periodically to generate
[Evaluations][google.cloud.datalabeling.v1beta1.Evaluation].
[Creating an evaluation
job](/ml-engine/docs/continuous-evaluation/create-job)
is the starting point for using continuous
evaluation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.CreateEvaluationJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.CreateEvaluationJobRequest):
request = data_labeling_service.CreateEvaluationJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job is not None:
request.job = job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_evaluation_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_evaluation_job(
self,
request: Union[data_labeling_service.UpdateEvaluationJobRequest, dict] = None,
*,
evaluation_job: gcd_evaluation_job.EvaluationJob = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcd_evaluation_job.EvaluationJob:
r"""Updates an evaluation job. You can only update certain fields of
the job's
[EvaluationJobConfig][google.cloud.datalabeling.v1beta1.EvaluationJobConfig]:
``humanAnnotationConfig.instruction``, ``exampleCount``, and
``exampleSamplePercentage``.
If you want to change any other aspect of the evaluation job,
you must delete the job and create a new one.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.UpdateEvaluationJobRequest, dict]):
The request object. Request message for
UpdateEvaluationJob.
evaluation_job (google.cloud.datalabeling_v1beta1.types.EvaluationJob):
Required. Evaluation job that is
going to be updated.
This corresponds to the ``evaluation_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. Mask for which fields to update. You can only
provide the following fields:
- ``evaluationJobConfig.humanAnnotationConfig.instruction``
- ``evaluationJobConfig.exampleCount``
- ``evaluationJobConfig.exampleSamplePercentage``
You can provide more than one of these fields by
separating them with commas.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.EvaluationJob:
Defines an evaluation job that runs periodically to generate
[Evaluations][google.cloud.datalabeling.v1beta1.Evaluation].
[Creating an evaluation
job](/ml-engine/docs/continuous-evaluation/create-job)
is the starting point for using continuous
evaluation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([evaluation_job, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.UpdateEvaluationJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.UpdateEvaluationJobRequest):
request = data_labeling_service.UpdateEvaluationJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if evaluation_job is not None:
request.evaluation_job = evaluation_job
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_evaluation_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("evaluation_job.name", request.evaluation_job.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
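# A sketch in comments only; "client" and "job" are placeholders (a
# constructed DataLabelingServiceClient and an EvaluationJob fetched
# earlier), not module attributes. The mask uses one of the three paths the
# docstring above allows; field_mask_pb2 is already referenced by this
# method's signature.
#
#     mask = field_mask_pb2.FieldMask(
#         paths=["evaluationJobConfig.exampleCount"]
#     )
#     client.update_evaluation_job(evaluation_job=job, update_mask=mask)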
def get_evaluation_job(
self,
request: Union[data_labeling_service.GetEvaluationJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> evaluation_job.EvaluationJob:
r"""Gets an evaluation job by resource name.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.GetEvaluationJobRequest, dict]):
The request object. Request message for
GetEvaluationJob.
name (str):
Required. Name of the evaluation job. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}"
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.types.EvaluationJob:
Defines an evaluation job that runs periodically to generate
[Evaluations][google.cloud.datalabeling.v1beta1.Evaluation].
[Creating an evaluation
job](/ml-engine/docs/continuous-evaluation/create-job)
is the starting point for using continuous
evaluation.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.GetEvaluationJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.GetEvaluationJobRequest):
request = data_labeling_service.GetEvaluationJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_evaluation_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def pause_evaluation_job(
self,
request: Union[data_labeling_service.PauseEvaluationJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Pauses an evaluation job. Pausing an evaluation job that is
already in a ``PAUSED`` state is a no-op.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.PauseEvaluationJobRequest, dict]):
The request object. Request message for
PauseEvaluationJob.
name (str):
Required. Name of the evaluation job that is going to be
paused. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}"
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.PauseEvaluationJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.PauseEvaluationJobRequest):
request = data_labeling_service.PauseEvaluationJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.pause_evaluation_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def resume_evaluation_job(
self,
request: Union[data_labeling_service.ResumeEvaluationJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Resumes a paused evaluation job. A deleted evaluation
job can't be resumed. Resuming a running or scheduled
evaluation job is a no-op.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ResumeEvaluationJobRequest, dict]):
The request object. Request message ResumeEvaluationJob.
name (str):
Required. Name of the evaluation job that is going to be
resumed. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}"
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ResumeEvaluationJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ResumeEvaluationJobRequest):
request = data_labeling_service.ResumeEvaluationJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.resume_evaluation_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def delete_evaluation_job(
self,
request: Union[data_labeling_service.DeleteEvaluationJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Stops and deletes an evaluation job.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.DeleteEvaluationJobRequest, dict]):
The request object. Request message DeleteEvaluationJob.
name (str):
Required. Name of the evaluation job that is going to be
deleted. Format:
"projects/{project_id}/evaluationJobs/{evaluation_job_id}"
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.DeleteEvaluationJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.DeleteEvaluationJobRequest):
request = data_labeling_service.DeleteEvaluationJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_evaluation_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
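# Lifecycle sketch in comments only; the resource name is a placeholder.
# A job can be paused, resumed, and finally deleted by name with the
# methods defined above.
#
#     name = "projects/my-project/evaluationJobs/my-job-id"
#     client.pause_evaluation_job(name=name)
#     client.resume_evaluation_job(name=name)
#     client.delete_evaluation_job(name=name)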
def list_evaluation_jobs(
self,
request: Union[data_labeling_service.ListEvaluationJobsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEvaluationJobsPager:
r"""Lists all evaluation jobs within a project with
possible filters. Pagination is supported.
Args:
request (Union[google.cloud.datalabeling_v1beta1.types.ListEvaluationJobsRequest, dict]):
The request object. Request message for
ListEvaluationJobs.
parent (str):
Required. Evaluation job resource parent. Format:
"projects/{project_id}"
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. You can filter the jobs to list by model_id
(also known as model_name, as described in
[EvaluationJob.modelVersion][google.cloud.datalabeling.v1beta1.EvaluationJob.model_version])
or by evaluation job state (as described in
[EvaluationJob.state][google.cloud.datalabeling.v1beta1.EvaluationJob.state]).
To filter by both criteria, use the ``AND`` operator or
the ``OR`` operator. For example, you can use the
following string for your filter:
"evaluation\ *job.model_id = {model_name} AND
evaluation*\ job.state = {evaluation_job_state}"
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datalabeling_v1beta1.services.data_labeling_service.pagers.ListEvaluationJobsPager:
Results for listing evaluation jobs.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a data_labeling_service.ListEvaluationJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, data_labeling_service.ListEvaluationJobsRequest):
request = data_labeling_service.ListEvaluationJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_evaluation_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListEvaluationJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
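# Context-manager sketch in comments only: per the warning above, use the
# client as a context manager only when its transport is not shared with
# other clients. The project and job IDs below are placeholders.
#
#     with DataLabelingServiceClient() as client:
#         job = client.get_evaluation_job(
#             name="projects/my-project/evaluationJobs/my-job-id"
#         )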
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-datalabeling",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DataLabelingServiceClient",)
|
py | 1a41c3b1476883147ed74c69938a7733d9b429f4 | import brickpi3
import time
BP = brickpi3.BrickPi3()
LEFT_PORT = BP.PORT_A
RIGHT_PORT = BP.PORT_D
CONVEYOR_PORT = BP.PORT_B
powerM = int(input("Enter Motor Power"))
powerC = int(input("Enter Conveyor Power"))
try:
    # Run the left and right drive motors for half a second.
    BP.set_motor_power(LEFT_PORT, powerM)
    BP.set_motor_power(RIGHT_PORT, powerM)
    time.sleep(0.5)
    # Start the conveyor motor and keep everything running for 1.5 seconds.
    BP.set_motor_power(CONVEYOR_PORT, powerC)
    time.sleep(1.5)
    # Stop all three motors.
    BP.set_motor_power(LEFT_PORT, 0)
    BP.set_motor_power(RIGHT_PORT, 0)
    BP.set_motor_power(CONVEYOR_PORT, 0)
except KeyboardInterrupt:
BP.reset_all() |
py | 1a41c605b38ffe52f2b2599a4340d5c032e6204f | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
*What is this pattern about?
It decouples the creation of a complex object and its representation,
so that the same process can be reused to build objects from the same
family.
This is useful when you must separate the specification of an object
from its actual representation (generally for abstraction).
*What does this example do?
This particular example uses a Director to abstract the
construction of a building. The user specifies a Builder (House or
Flat) and the director calls the builder's methods in the order
necessary to create a different building depending on the specified
Builder class.
@author: Diogenes Augusto Fernandes Herminio <[email protected]>
https://gist.github.com/420905#file_builder_python.py
*Where is the pattern used practically?
*References:
https://sourcemaking.com/design_patterns/builder
"""
# Director
class Director(object):
def __init__(self):
self.builder = None
def construct_building(self):
self.builder.new_building()
self.builder.build_floor()
self.builder.build_size()
def get_building(self):
return self.builder.building
# Abstract Builder
class Builder(object):
def __init__(self):
self.building = None
def new_building(self):
self.building = Building()
def build_floor(self):
raise NotImplementedError
def build_size(self):
raise NotImplementedError
# Concrete Builder
class BuilderHouse(Builder):
def build_floor(self):
self.building.floor = 'One'
def build_size(self):
self.building.size = 'Big'
class BuilderFlat(Builder):
def build_floor(self):
self.building.floor = 'More than One'
def build_size(self):
self.building.size = 'Small'
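# Illustrative extension, not part of the original gist: any additional
# concrete builder plugs into the same Director unchanged, which is the
# reusable construction process the module docstring describes. The floor
# and size values here are arbitrary examples.
class BuilderCabin(Builder):
    def build_floor(self):
        self.building.floor = 'One'
    def build_size(self):
        self.building.size = 'Tiny'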
# Product
class Building(object):
def __init__(self):
self.floor = None
self.size = None
def __repr__(self):
return 'Floor: {0.floor} | Size: {0.size}'.format(self)
# Client
if __name__ == "__main__":
director = Director()
director.builder = BuilderHouse()
director.construct_building()
building = director.get_building()
print(building)
director.builder = BuilderFlat()
director.construct_building()
building = director.get_building()
print(building)
### OUTPUT ###
# Floor: One | Size: Big
# Floor: More than One | Size: Small
|
py | 1a41c63e170ecd28186479fbe340c1c96a83c39e | #! coding:utf-8
"""
compiler tests.
These tests are among the very first that were written when SQLAlchemy
began in 2005. As a result the testing style here is very dense;
it's an ongoing job to break these into much smaller tests with correct pep8
styling and coherent test organization.
"""
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import Integer, String, MetaData, Table, Column, select, \
func, not_, cast, text, tuple_, exists, update, bindparam,\
literal, and_, null, type_coerce, alias, or_, literal_column,\
Float, TIMESTAMP, Numeric, Date, Text, union, except_,\
intersect, union_all, Boolean, distinct, join, outerjoin, asc, desc,\
over, subquery, case, true
import decimal
from sqlalchemy.util import u
from sqlalchemy import exc, sql, util, types, schema
from sqlalchemy.sql import table, column, label
from sqlalchemy.sql.expression import ClauseList, _literal_as_text, HasPrefixes
from sqlalchemy.engine import default
from sqlalchemy.dialects import mysql, mssql, postgresql, oracle, \
sqlite, sybase
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import compiler
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
table2 = table(
'myothertable',
column('otherid', Integer),
column('othername', String),
)
table3 = table(
'thirdtable',
column('userid', Integer),
column('otherstuff', String),
)
metadata = MetaData()
# table with a schema
table4 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='remote_owner'
)
# table with a 'multipart' schema
table5 = Table(
'remotetable', metadata,
Column('rem_id', Integer, primary_key=True),
Column('datatype_id', Integer),
Column('value', String(20)),
schema='dbo.remote_owner'
)
users = table('users',
column('user_id'),
column('user_name'),
column('password'),
)
addresses = table('addresses',
column('address_id'),
column('user_id'),
column('street'),
column('city'),
column('state'),
column('zip')
)
keyed = Table('keyed', metadata,
Column('x', Integer, key='colx'),
Column('y', Integer, key='coly'),
Column('z', Integer),
)
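# Note: table()/column() build lightweight TableClause/ColumnClause
# constructs with no MetaData attached, while Table/Column (table4, table5,
# keyed) are full schema objects; the compiler tests below exercise both
# styles of construct.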
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_attribute_sanity(self):
assert hasattr(table1, 'c')
assert hasattr(table1.select(), 'c')
assert not hasattr(table1.c.myid.self_group(), 'columns')
assert hasattr(table1.select().self_group(), 'columns')
assert not hasattr(table1.c.myid, 'columns')
assert not hasattr(table1.c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'c')
assert not hasattr(table1.select().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'columns')
assert not hasattr(table1.alias().c.myid, 'c')
if util.compat.py32:
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns'))
assert_raises_message(
exc.InvalidRequestError,
'Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.',
lambda: hasattr(select([table1.c.myid]).as_scalar(),
'columns'))
else:
assert not hasattr(
select([table1.c.myid]).as_scalar().self_group(),
'columns')
assert not hasattr(select([table1.c.myid]).as_scalar(), 'columns')
def test_prefix_constructor(self):
class Pref(HasPrefixes):
def _generate(self):
return self
assert_raises(exc.ArgumentError,
Pref().prefix_with,
"some prefix", not_a_dialect=True
)
def test_table_select(self):
self.assert_compile(table1.select(),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable")
self.assert_compile(
select(
[
table1,
table2]),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable")
def test_invalid_col_argument(self):
assert_raises(exc.ArgumentError, select, table1)
assert_raises(exc.ArgumentError, select, table1.c.myid)
def test_int_limit_offset_coercion(self):
for given, exp in [
("5", 5),
(5, 5),
(5.2, 5),
(decimal.Decimal("5"), 5),
(None, None),
]:
eq_(select().limit(given)._limit, exp)
eq_(select().offset(given)._offset, exp)
eq_(select(limit=given)._limit, exp)
eq_(select(offset=given)._offset, exp)
assert_raises(ValueError, select().limit, "foo")
assert_raises(ValueError, select().offset, "foo")
assert_raises(ValueError, select, offset="foo")
assert_raises(ValueError, select, limit="foo")
def test_limit_offset(self):
for lim, offset, exp, params in [
(5, 10, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 5, 'param_2': 10}),
(None, 10, "LIMIT -1 OFFSET :param_1", {'param_1': 10}),
(5, None, "LIMIT :param_1", {'param_1': 5}),
(0, 0, "LIMIT :param_1 OFFSET :param_2",
{'param_1': 0, 'param_2': 0}),
]:
self.assert_compile(
select([1]).limit(lim).offset(offset),
"SELECT 1 " + exp,
checkparams=params
)
def test_select_precol_compile_ordering(self):
s1 = select([column('x')]).select_from('a').limit(5).as_scalar()
s2 = select([s1]).limit(10)
class MyCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select):
result = ""
if select._limit:
result += "FIRST %s " % self.process(
literal(
select._limit))
if select._offset:
result += "SKIP %s " % self.process(
literal(
select._offset))
return result
def limit_clause(self, select):
return ""
dialect = default.DefaultDialect()
dialect.statement_compiler = MyCompiler
dialect.paramstyle = 'qmark'
dialect.positional = True
self.assert_compile(
s2,
"SELECT FIRST ? (SELECT FIRST ? x FROM a) AS anon_1",
checkpositional=(10, 5),
dialect=dialect
)
def test_from_subquery(self):
"""tests placing select statements in the column clause of
another select, for the
purposes of selecting from the exported columns of that select."""
s = select([table1], table1.c.name == 'jack')
self.assert_compile(
select(
[s],
s.c.myid == 7),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description AS description "
"FROM mytable "
"WHERE mytable.name = :name_1) WHERE myid = :myid_1")
sq = select([table1])
self.assert_compile(
sq.select(),
"SELECT myid, name, description FROM "
"(SELECT mytable.myid AS myid, "
"mytable.name AS name, mytable.description "
"AS description FROM mytable)"
)
sq = select(
[table1],
).alias('sq')
self.assert_compile(
sq.select(sq.c.myid == 7),
"SELECT sq.myid, sq.name, sq.description FROM "
"(SELECT mytable.myid AS myid, mytable.name AS name, "
"mytable.description AS description FROM mytable) AS sq "
"WHERE sq.myid = :myid_1"
)
sq = select(
[table1, table2],
and_(table1.c.myid == 7, table2.c.otherid == table1.c.myid),
use_labels=True
).alias('sq')
sqstring = "SELECT mytable.myid AS mytable_myid, mytable.name AS "\
"mytable_name, mytable.description AS mytable_description, "\
"myothertable.otherid AS myothertable_otherid, "\
"myothertable.othername AS myothertable_othername FROM "\
"mytable, myothertable WHERE mytable.myid = :myid_1 AND "\
"myothertable.otherid = mytable.myid"
self.assert_compile(
sq.select(),
"SELECT sq.mytable_myid, sq.mytable_name, "
"sq.mytable_description, sq.myothertable_otherid, "
"sq.myothertable_othername FROM (%s) AS sq" % sqstring)
sq2 = select(
[sq],
use_labels=True
).alias('sq2')
self.assert_compile(
sq2.select(),
"SELECT sq2.sq_mytable_myid, sq2.sq_mytable_name, "
"sq2.sq_mytable_description, sq2.sq_myothertable_otherid, "
"sq2.sq_myothertable_othername FROM "
"(SELECT sq.mytable_myid AS "
"sq_mytable_myid, sq.mytable_name AS sq_mytable_name, "
"sq.mytable_description AS sq_mytable_description, "
"sq.myothertable_otherid AS sq_myothertable_otherid, "
"sq.myothertable_othername AS sq_myothertable_othername "
"FROM (%s) AS sq) AS sq2" % sqstring)
def test_select_from_clauselist(self):
self.assert_compile(
select([ClauseList(column('a'), column('b'))]
).select_from('sometable'),
'SELECT a, b FROM sometable'
)
def test_use_labels(self):
self.assert_compile(
select([table1.c.myid == 5], use_labels=True),
"SELECT mytable.myid = :myid_1 AS anon_1 FROM mytable"
)
self.assert_compile(
select([func.foo()], use_labels=True),
"SELECT foo() AS foo_1"
)
# this is native_boolean=False for default dialect
self.assert_compile(
select([not_(True)], use_labels=True),
"SELECT :param_1 = 0"
)
self.assert_compile(
select([cast("data", Integer)], use_labels=True),
"SELECT CAST(:param_1 AS INTEGER) AS anon_1"
)
self.assert_compile(
select([func.sum(
func.lala(table1.c.myid).label('foo')).label('bar')]),
"SELECT sum(lala(mytable.myid)) AS bar FROM mytable"
)
self.assert_compile(
select([keyed]),
"SELECT keyed.x, keyed.y"
", keyed.z FROM keyed"
)
self.assert_compile(
select([keyed]).apply_labels(),
"SELECT keyed.x AS keyed_x, keyed.y AS "
"keyed_y, keyed.z AS keyed_z FROM keyed"
)
def test_paramstyles(self):
stmt = text("select :foo, :bar, :bat from sometable")
self.assert_compile(
stmt,
"select ?, ?, ? from sometable",
dialect=default.DefaultDialect(paramstyle='qmark')
)
self.assert_compile(
stmt,
"select :foo, :bar, :bat from sometable",
dialect=default.DefaultDialect(paramstyle='named')
)
self.assert_compile(
stmt,
"select %s, %s, %s from sometable",
dialect=default.DefaultDialect(paramstyle='format')
)
self.assert_compile(
stmt,
"select :1, :2, :3 from sometable",
dialect=default.DefaultDialect(paramstyle='numeric')
)
self.assert_compile(
stmt,
"select %(foo)s, %(bar)s, %(bat)s from sometable",
dialect=default.DefaultDialect(paramstyle='pyformat')
)
def test_dupe_columns(self):
"""test that deduping is performed against clause
element identity, not rendered result."""
self.assert_compile(
select([column('a'), column('a'), column('a')]),
"SELECT a, a, a", dialect=default.DefaultDialect()
)
c = column('a')
self.assert_compile(
select([c, c, c]),
"SELECT a", dialect=default.DefaultDialect()
)
a, b = column('a'), column('b')
self.assert_compile(
select([a, b, b, b, a, a]),
"SELECT a, b", dialect=default.DefaultDialect()
)
# using alternate keys.
a, b, c = Column('a', Integer, key='b'), \
Column('b', Integer), \
Column('c', Integer, key='a')
self.assert_compile(
select([a, b, c, a, b, c]),
"SELECT a, b, c", dialect=default.DefaultDialect()
)
self.assert_compile(
select([bindparam('a'), bindparam('b'), bindparam('c')]),
"SELECT :a AS anon_1, :b AS anon_2, :c AS anon_3",
dialect=default.DefaultDialect(paramstyle='named')
)
self.assert_compile(
select([bindparam('a'), bindparam('b'), bindparam('c')]),
"SELECT ? AS anon_1, ? AS anon_2, ? AS anon_3",
dialect=default.DefaultDialect(paramstyle='qmark'),
)
self.assert_compile(
select(["a", "a", "a"]),
"SELECT a, a, a"
)
s = select([bindparam('a'), bindparam('b'), bindparam('c')])
s = s.compile(dialect=default.DefaultDialect(paramstyle='qmark'))
eq_(s.positiontup, ['a', 'b', 'c'])
def test_nested_label_targeting(self):
"""test nested anonymous label generation.
"""
s1 = table1.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(s5,
'SELECT anon_1.anon_2_myid AS '
'anon_1_anon_2_myid, anon_1.anon_2_name AS '
'anon_1_anon_2_name, anon_1.anon_2_descript'
'ion AS anon_1_anon_2_description FROM '
'(SELECT anon_2.myid AS anon_2_myid, '
'anon_2.name AS anon_2_name, '
'anon_2.description AS anon_2_description '
'FROM (SELECT mytable.myid AS myid, '
'mytable.name AS name, mytable.description '
'AS description FROM mytable) AS anon_2) '
'AS anon_1')
def test_nested_label_targeting_keyed(self):
s1 = keyed.select()
s2 = s1.alias()
s3 = select([s2], use_labels=True)
self.assert_compile(s3,
"SELECT anon_1.x AS anon_1_x, "
"anon_1.y AS anon_1_y, "
"anon_1.z AS anon_1_z FROM "
"(SELECT keyed.x AS x, keyed.y "
"AS y, keyed.z AS z FROM keyed) AS anon_1")
s4 = s3.alias()
s5 = select([s4], use_labels=True)
self.assert_compile(s5,
"SELECT anon_1.anon_2_x AS anon_1_anon_2_x, "
"anon_1.anon_2_y AS anon_1_anon_2_y, "
"anon_1.anon_2_z AS anon_1_anon_2_z "
"FROM (SELECT anon_2.x AS anon_2_x, "
"anon_2.y AS anon_2_y, "
"anon_2.z AS anon_2_z FROM "
"(SELECT keyed.x AS x, keyed.y AS y, keyed.z "
"AS z FROM keyed) AS anon_2) AS anon_1"
)
def test_exists(self):
s = select([table1.c.myid]).where(table1.c.myid == 5)
self.assert_compile(exists(s),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)"
)
self.assert_compile(exists(s.as_scalar()),
"EXISTS (SELECT mytable.myid FROM mytable "
"WHERE mytable.myid = :myid_1)"
)
self.assert_compile(exists([table1.c.myid], table1.c.myid
== 5).select(),
'SELECT EXISTS (SELECT mytable.myid FROM '
'mytable WHERE mytable.myid = :myid_1)',
params={'mytable_myid': 5})
self.assert_compile(select([table1, exists([1],
from_obj=table2)]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, EXISTS (SELECT 1 '
'FROM myothertable) FROM mytable',
params={})
self.assert_compile(select([table1,
exists([1],
from_obj=table2).label('foo')]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, EXISTS (SELECT 1 '
'FROM myothertable) AS foo FROM mytable',
params={})
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)
).replace_selectable(
table2,
table2.alias()),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT * FROM myothertable AS '
'myothertable_1 WHERE myothertable_1.otheri'
'd = mytable.myid)')
self.assert_compile(
table1.select(
exists().where(
table2.c.otherid == table1.c.myid).correlate(table1)).
select_from(
table1.join(
table2,
table1.c.myid == table2.c.otherid)).
replace_selectable(
table2,
table2.alias()),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable JOIN '
'myothertable AS myothertable_1 ON '
'mytable.myid = myothertable_1.otherid '
'WHERE EXISTS (SELECT * FROM myothertable '
'AS myothertable_1 WHERE '
'myothertable_1.otherid = mytable.myid)')
self.assert_compile(
select([
or_(
exists().where(table2.c.otherid == 'foo'),
exists().where(table2.c.otherid == 'bar')
)
]),
"SELECT (EXISTS (SELECT * FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)) "
"OR (EXISTS (SELECT * FROM myothertable WHERE "
"myothertable.otherid = :otherid_2)) AS anon_1"
)
def test_where_subquery(self):
s = select([addresses.c.street], addresses.c.user_id
== users.c.user_id, correlate=True).alias('s')
# don't correlate in a FROM list
self.assert_compile(select([users, s.c.street], from_obj=s),
"SELECT users.user_id, users.user_name, "
"users.password, s.street FROM users, "
"(SELECT addresses.street AS street FROM "
"addresses, users WHERE addresses.user_id = "
"users.user_id) AS s")
self.assert_compile(table1.select(
table1.c.myid == select(
[table1.c.myid],
table1.c.name == 'jack')),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'mytable.myid = (SELECT mytable.myid FROM '
'mytable WHERE mytable.name = :name_1)')
self.assert_compile(
table1.select(
table1.c.myid == select(
[table2.c.otherid],
table1.c.name == table2.c.othername
)
),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'mytable.myid = (SELECT '
'myothertable.otherid FROM myothertable '
'WHERE mytable.name = myothertable.othernam'
'e)')
self.assert_compile(table1.select(exists([1], table2.c.otherid
== table1.c.myid)),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT 1 FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
talias = table1.alias('ta')
s = subquery('sq2', [talias], exists([1], table2.c.otherid
== talias.c.myid))
self.assert_compile(select([s, table1]),
'SELECT sq2.myid, sq2.name, '
'sq2.description, mytable.myid, '
'mytable.name, mytable.description FROM '
'(SELECT ta.myid AS myid, ta.name AS name, '
'ta.description AS description FROM '
'mytable AS ta WHERE EXISTS (SELECT 1 FROM '
'myothertable WHERE myothertable.otherid = '
'ta.myid)) AS sq2, mytable')
# test constructing the outer query via append_column(), which
# occurs in the ORM's Query object
s = select([], exists([1], table2.c.otherid == table1.c.myid),
from_obj=table1)
s.append_column(table1)
self.assert_compile(s,
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable WHERE '
'EXISTS (SELECT 1 FROM myothertable WHERE '
'myothertable.otherid = mytable.myid)')
def test_orderby_subquery(self):
self.assert_compile(
table1.select(
order_by=[
select(
[
table2.c.otherid],
table1.c.myid == table2.c.otherid)]),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable ORDER BY '
'(SELECT myothertable.otherid FROM '
'myothertable WHERE mytable.myid = '
'myothertable.otherid)')
self.assert_compile(table1.select(order_by=[
desc(select([table2.c.otherid],
table1.c.myid == table2.c.otherid))]),
'SELECT mytable.myid, mytable.name, '
'mytable.description FROM mytable ORDER BY '
'(SELECT myothertable.otherid FROM '
'myothertable WHERE mytable.myid = '
'myothertable.otherid) DESC')
def test_scalar_select(self):
assert_raises_message(
exc.InvalidRequestError,
r"Select objects don't have a type\. Call as_scalar\(\) "
"on this Select object to return a 'scalar' "
"version of this Select\.",
func.coalesce, select([table1.c.myid])
)
s = select([table1.c.myid], correlate=False).as_scalar()
self.assert_compile(select([table1, s]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT mytable.myid '
'FROM mytable) AS anon_1 FROM mytable')
s = select([table1.c.myid]).as_scalar()
self.assert_compile(select([table2, s]),
'SELECT myothertable.otherid, '
'myothertable.othername, (SELECT '
'mytable.myid FROM mytable) AS anon_1 FROM '
'myothertable')
s = select([table1.c.myid]).correlate(None).as_scalar()
self.assert_compile(select([table1, s]),
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT mytable.myid '
'FROM mytable) AS anon_1 FROM mytable')
s = select([table1.c.myid]).as_scalar()
s2 = s.where(table1.c.myid == 5)
self.assert_compile(
s2,
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)"
)
self.assert_compile(
s, "(SELECT mytable.myid FROM mytable)"
)
# test that aliases use as_scalar() when used in an explicitly
# scalar context
s = select([table1.c.myid]).alias()
self.assert_compile(select([table1.c.myid]).where(table1.c.myid
== s),
'SELECT mytable.myid FROM mytable WHERE '
'mytable.myid = (SELECT mytable.myid FROM '
'mytable)')
self.assert_compile(select([table1.c.myid]).where(s
> table1.c.myid),
'SELECT mytable.myid FROM mytable WHERE '
'mytable.myid < (SELECT mytable.myid FROM '
'mytable)')
s = select([table1.c.myid]).as_scalar()
self.assert_compile(select([table2, s]),
'SELECT myothertable.otherid, '
'myothertable.othername, (SELECT '
'mytable.myid FROM mytable) AS anon_1 FROM '
'myothertable')
# test expressions against scalar selects
self.assert_compile(select([s - literal(8)]),
'SELECT (SELECT mytable.myid FROM mytable) '
'- :param_1 AS anon_1')
self.assert_compile(select([select([table1.c.name]).as_scalar()
+ literal('x')]),
'SELECT (SELECT mytable.name FROM mytable) '
'|| :param_1 AS anon_1')
self.assert_compile(select([s > literal(8)]),
'SELECT (SELECT mytable.myid FROM mytable) '
'> :param_1 AS anon_1')
self.assert_compile(select([select([table1.c.name]).label('foo'
)]),
'SELECT (SELECT mytable.name FROM mytable) '
'AS foo')
# scalar selects should not have any attributes on their 'c' or
# 'columns' attribute
s = select([table1.c.myid]).as_scalar()
try:
s.c.foo
except exc.InvalidRequestError as err:
assert str(err) \
== 'Scalar Select expression has no columns; use this '\
'object directly within a column-level expression.'
try:
s.columns.foo
except exc.InvalidRequestError as err:
assert str(err) \
== 'Scalar Select expression has no columns; use this '\
'object directly within a column-level expression.'
zips = table('zips',
column('zipcode'),
column('latitude'),
column('longitude'),
)
places = table('places',
column('id'),
column('nm')
)
zip = '12345'
qlat = select([zips.c.latitude], zips.c.zipcode == zip).\
correlate(None).as_scalar()
qlng = select([zips.c.longitude], zips.c.zipcode == zip).\
correlate(None).as_scalar()
q = select([places.c.id, places.c.nm, zips.c.zipcode,
func.latlondist(qlat, qlng).label('dist')],
zips.c.zipcode == zip,
order_by=['dist', places.c.nm]
)
self.assert_compile(q,
'SELECT places.id, places.nm, '
'zips.zipcode, latlondist((SELECT '
'zips.latitude FROM zips WHERE '
'zips.zipcode = :zipcode_1), (SELECT '
'zips.longitude FROM zips WHERE '
'zips.zipcode = :zipcode_2)) AS dist FROM '
'places, zips WHERE zips.zipcode = '
':zipcode_3 ORDER BY dist, places.nm')
zalias = zips.alias('main_zip')
qlat = select([zips.c.latitude], zips.c.zipcode == zalias.c.zipcode).\
as_scalar()
qlng = select([zips.c.longitude], zips.c.zipcode == zalias.c.zipcode).\
as_scalar()
q = select([places.c.id, places.c.nm, zalias.c.zipcode,
func.latlondist(qlat, qlng).label('dist')],
order_by=['dist', places.c.nm])
self.assert_compile(q,
'SELECT places.id, places.nm, '
'main_zip.zipcode, latlondist((SELECT '
'zips.latitude FROM zips WHERE '
'zips.zipcode = main_zip.zipcode), (SELECT '
'zips.longitude FROM zips WHERE '
'zips.zipcode = main_zip.zipcode)) AS dist '
'FROM places, zips AS main_zip ORDER BY '
'dist, places.nm')
a1 = table2.alias('t2alias')
s1 = select([a1.c.otherid], table1.c.myid == a1.c.otherid).as_scalar()
j1 = table1.join(table2, table1.c.myid == table2.c.otherid)
s2 = select([table1, s1], from_obj=j1)
self.assert_compile(s2,
'SELECT mytable.myid, mytable.name, '
'mytable.description, (SELECT '
't2alias.otherid FROM myothertable AS '
't2alias WHERE mytable.myid = '
't2alias.otherid) AS anon_1 FROM mytable '
'JOIN myothertable ON mytable.myid = '
'myothertable.otherid')
def test_label_comparison_one(self):
x = func.lala(table1.c.myid).label('foo')
self.assert_compile(select([x], x == 5),
'SELECT lala(mytable.myid) AS foo FROM '
'mytable WHERE lala(mytable.myid) = '
':param_1')
def test_label_comparison_two(self):
self.assert_compile(
label('bar', column('foo', type_=String)) + 'foo',
'foo || :param_1')
def test_order_by_labels_enabled(self):
lab1 = (table1.c.myid + 12).label('foo')
lab2 = func.somefunc(table1.c.name).label('bar')
dialect = default.DefaultDialect()
self.assert_compile(select([lab1, lab2]).order_by(lab1, desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY foo, bar DESC",
dialect=dialect
)
# the function embedded label renders as the function
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), bar DESC",
dialect=dialect
)
# binary expressions render as the expression without labels
self.assert_compile(select([lab1, lab2]).order_by(lab1 + "test"),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1 + :param_1",
dialect=dialect
)
# labels within functions in the columns clause render
# with the expression
self.assert_compile(
select([lab1, func.foo(lab1)]).order_by(lab1, func.foo(lab1)),
"SELECT mytable.myid + :myid_1 AS foo, "
"foo(mytable.myid + :myid_1) AS foo_1 FROM mytable "
"ORDER BY foo, foo(mytable.myid + :myid_1)",
dialect=dialect
)
lx = (table1.c.myid + table1.c.myid).label('lx')
ly = (func.lower(table1.c.name) + table1.c.description).label('ly')
self.assert_compile(
select([lx, ly]).order_by(lx, ly.desc()),
"SELECT mytable.myid + mytable.myid AS lx, "
"lower(mytable.name) || mytable.description AS ly "
"FROM mytable ORDER BY lx, ly DESC",
dialect=dialect
)
def test_order_by_labels_disabled(self):
lab1 = (table1.c.myid + 12).label('foo')
lab2 = func.somefunc(table1.c.name).label('bar')
dialect = default.DefaultDialect()
dialect.supports_simple_order_by_label = False
self.assert_compile(
select(
[
lab1,
lab2]).order_by(
lab1,
desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY mytable.myid + :myid_1, somefunc(mytable.name) DESC",
dialect=dialect)
self.assert_compile(
select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
"SELECT mytable.myid + :myid_1 AS foo, "
"somefunc(mytable.name) AS bar FROM mytable "
"ORDER BY hoho(mytable.myid + :myid_1), "
"somefunc(mytable.name) DESC",
dialect=dialect
)
def test_conjunctions(self):
a, b, c = 'a', 'b', 'c'
x = and_(a, b, c)
assert isinstance(x.type, Boolean)
assert str(x) == 'a AND b AND c'
self.assert_compile(
select([x.label('foo')]),
'SELECT a AND b AND c AS foo'
)
self.assert_compile(
and_(table1.c.myid == 12, table1.c.name == 'asdf',
table2.c.othername == 'foo', "sysdate() = today()"),
"mytable.myid = :myid_1 AND mytable.name = :name_1 "
"AND myothertable.othername = "
":othername_1 AND sysdate() = today()"
)
self.assert_compile(
and_(
table1.c.myid == 12,
or_(table2.c.othername == 'asdf',
table2.c.othername == 'foo', table2.c.otherid == 9),
"sysdate() = today()",
),
'mytable.myid = :myid_1 AND (myothertable.othername = '
':othername_1 OR myothertable.othername = :othername_2 OR '
'myothertable.otherid = :otherid_1) AND sysdate() = '
'today()',
checkparams={'othername_1': 'asdf', 'othername_2': 'foo',
'otherid_1': 9, 'myid_1': 12}
)
# test a generator
self.assert_compile(
and_(
conj for conj in [
table1.c.myid == 12,
table1.c.name == 'asdf'
]
),
"mytable.myid = :myid_1 AND mytable.name = :name_1"
)
def test_nested_conjunctions_short_circuit(self):
"""test that empty or_(), and_() conjunctions are collapsed by
an enclosing conjunction."""
t = table('t', column('x'))
self.assert_compile(
select([t]).where(and_(t.c.x == 5,
or_(and_(or_(t.c.x == 7))))),
"SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2"
)
self.assert_compile(
select([t]).where(and_(or_(t.c.x == 12,
and_(or_(t.c.x == 8))))),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2"
)
self.assert_compile(
select([t]).
where(
and_(
or_(
or_(t.c.x == 12),
and_(
or_(),
or_(and_(t.c.x == 8)),
and_()
)
)
)
),
"SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2"
)
def test_true_short_circuit(self):
t = table('t', column('x'))
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE 1 = 1",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
self.assert_compile(
select([t]).where(true()),
"SELECT t.x FROM t WHERE true",
dialect=default.DefaultDialect(supports_native_boolean=True)
)
self.assert_compile(
select([t]),
"SELECT t.x FROM t",
dialect=default.DefaultDialect(supports_native_boolean=True)
)
def test_distinct(self):
self.assert_compile(
select([table1.c.myid.distinct()]),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([distinct(table1.c.myid)]),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([table1.c.myid]).distinct(),
"SELECT DISTINCT mytable.myid FROM mytable"
)
self.assert_compile(
select([func.count(table1.c.myid.distinct())]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable"
)
self.assert_compile(
select([func.count(distinct(table1.c.myid))]),
"SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable"
)
def test_where_empty(self):
self.assert_compile(
select([table1.c.myid]).where(and_()),
"SELECT mytable.myid FROM mytable"
)
self.assert_compile(
select([table1.c.myid]).where(or_()),
"SELECT mytable.myid FROM mytable"
)
def test_multiple_col_binds(self):
self.assert_compile(
select(["*"], or_(table1.c.myid == 12, table1.c.myid == 'asdf',
table1.c.myid == 'foo')),
"SELECT * FROM mytable WHERE mytable.myid = :myid_1 "
"OR mytable.myid = :myid_2 OR mytable.myid = :myid_3"
)
def test_order_by_nulls(self):
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
table2.c.othername.desc().nullsfirst()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS FIRST"
)
self.assert_compile(
table2.select(order_by=[
table2.c.otherid, table2.c.othername.desc().nullslast()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC NULLS LAST"
)
self.assert_compile(
table2.select(order_by=[
table2.c.otherid.nullslast(),
table2.c.othername.desc().nullsfirst()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS LAST, "
"myothertable.othername DESC NULLS FIRST"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid.nullsfirst(),
table2.c.othername.desc()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid.nullsfirst(),
table2.c.othername.desc().nullslast()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid NULLS FIRST, "
"myothertable.othername DESC NULLS LAST"
)
def test_orderby_groupby(self):
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
asc(table2.c.othername)]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername ASC"
)
self.assert_compile(
table2.select(order_by=[table2.c.otherid,
table2.c.othername.desc()]),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC"
)
# generative order_by
self.assert_compile(
table2.select().order_by(table2.c.otherid).
order_by(table2.c.othername.desc()),
"SELECT myothertable.otherid, myothertable.othername FROM "
"myothertable ORDER BY myothertable.otherid, "
"myothertable.othername DESC"
)
self.assert_compile(
table2.select().order_by(table2.c.otherid).
order_by(table2.c.othername.desc()
).order_by(None),
"SELECT myothertable.otherid, myothertable.othername "
"FROM myothertable"
)
self.assert_compile(
select(
[table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername]),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername"
)
# generative group by
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)]).
group_by(table2.c.othername),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable GROUP BY myothertable.othername"
)
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)]).
group_by(table2.c.othername).group_by(None),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable"
)
self.assert_compile(
select([table2.c.othername, func.count(table2.c.otherid)],
group_by=[table2.c.othername],
order_by=[table2.c.othername]),
"SELECT myothertable.othername, "
"count(myothertable.otherid) AS count_1 "
"FROM myothertable "
"GROUP BY myothertable.othername ORDER BY myothertable.othername"
)
def test_for_update(self):
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
# nowait not supported by the default dialect; falls back to plain FOR UPDATE
self.assert_compile(
table1.select(table1.c.myid == 7).with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_1 FOR UPDATE")
assert_raises_message(
exc.ArgumentError,
"Unknown for_update argument: 'unknown_mode'",
table1.select, table1.c.myid == 7, for_update='unknown_mode'
)
def test_alias(self):
# test an alias of table1. column names stay the same,
# table name "changes" to "foo".
self.assert_compile(
select([table1.alias('foo')]),
"SELECT foo.myid, foo.name, foo.description FROM mytable AS foo")
for dialect in (oracle.dialect(),):
self.assert_compile(
select([table1.alias('foo')]),
"SELECT foo.myid, foo.name, foo.description FROM mytable foo",
dialect=dialect)
self.assert_compile(
select([table1.alias()]),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable AS mytable_1")
# create a select for a join of two tables. use_labels
# means the column names will have labels tablename_columnname,
# which become the column keys accessible off the Selectable object.
# also, only use one column from the second table and all columns
# from the first table, table1.
q = select(
[table1, table2.c.otherid],
table1.c.myid == table2.c.otherid, use_labels=True
)
# make an alias of the "selectable". column names
# stay the same (i.e. the labels), table name "changes" to "t2view".
a = alias(q, 't2view')
# select from that alias, also using labels. two levels of labels
# should produce two underscores.
# also, reference the column "mytable_myid" off of the t2view alias.
self.assert_compile(
a.select(a.c.mytable_myid == 9, use_labels=True),
"SELECT t2view.mytable_myid AS t2view_mytable_myid, "
"t2view.mytable_name "
"AS t2view_mytable_name, "
"t2view.mytable_description AS t2view_mytable_description, "
"t2view.myothertable_otherid AS t2view_myothertable_otherid FROM "
"(SELECT mytable.myid AS mytable_myid, "
"mytable.name AS mytable_name, "
"mytable.description AS mytable_description, "
"myothertable.otherid AS "
"myothertable_otherid FROM mytable, myothertable "
"WHERE mytable.myid = "
"myothertable.otherid) AS t2view "
"WHERE t2view.mytable_myid = :mytable_myid_1"
)
def test_prefix(self):
self.assert_compile(
table1.select().prefix_with("SQL_CALC_FOUND_ROWS").
prefix_with("SQL_SOME_WEIRD_MYSQL_THING"),
"SELECT SQL_CALC_FOUND_ROWS SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable"
)
def test_prefix_dialect_specific(self):
self.assert_compile(
table1.select().prefix_with("SQL_CALC_FOUND_ROWS",
dialect='sqlite').
prefix_with("SQL_SOME_WEIRD_MYSQL_THING",
dialect='mysql'),
"SELECT SQL_SOME_WEIRD_MYSQL_THING "
"mytable.myid, mytable.name, mytable.description FROM mytable",
dialect=mysql.dialect()
)
@testing.emits_warning('.*empty sequence.*')
def test_render_binds_as_literal(self):
"""test a compiler that renders binds inline into
SQL in the columns clause."""
dialect = default.DefaultDialect()
class Compiler(dialect.statement_compiler):
ansi_bind_rules = True
dialect.statement_compiler = Compiler
self.assert_compile(
select([literal("someliteral")]),
"SELECT 'someliteral' AS anon_1",
dialect=dialect
)
self.assert_compile(
select([table1.c.myid + 3]),
"SELECT mytable.myid + 3 AS anon_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([table1.c.myid.in_([4, 5, 6])]),
"SELECT mytable.myid IN (4, 5, 6) AS anon_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([func.mod(table1.c.myid, 5)]),
"SELECT mod(mytable.myid, 5) AS mod_1 FROM mytable",
dialect=dialect
)
self.assert_compile(
select([literal("foo").in_([])]),
"SELECT 'foo' != 'foo' AS anon_1",
dialect=dialect
)
self.assert_compile(
select([literal(util.b("foo"))]),
"SELECT 'foo' AS anon_1",
dialect=dialect
)
# test callable
self.assert_compile(
select([table1.c.myid == bindparam("foo", callable_=lambda: 5)]),
"SELECT mytable.myid = 5 AS anon_1 FROM mytable",
dialect=dialect
)
assert_raises_message(
exc.CompileError,
"Bind parameter 'foo' without a "
"renderable value not allowed here.",
bindparam("foo").in_(
[]).compile,
dialect=dialect)
def test_literal(self):
self.assert_compile(select([literal('foo')]),
"SELECT :param_1 AS anon_1")
self.assert_compile(
select(
[
literal("foo") +
literal("bar")],
from_obj=[table1]),
"SELECT :param_1 || :param_2 AS anon_1 FROM mytable")
def test_calculated_columns(self):
value_tbl = table('values',
column('id', Integer),
column('val1', Float),
column('val2', Float),
)
self.assert_compile(
select([value_tbl.c.id, (value_tbl.c.val2 -
value_tbl.c.val1) / value_tbl.c.val1]),
"SELECT values.id, (values.val2 - values.val1) "
"/ values.val1 AS anon_1 FROM values"
)
self.assert_compile(
select([
value_tbl.c.id],
(value_tbl.c.val2 - value_tbl.c.val1) /
value_tbl.c.val1 > 2.0),
"SELECT values.id FROM values WHERE "
"(values.val2 - values.val1) / values.val1 > :param_1"
)
self.assert_compile(
select([value_tbl.c.id], value_tbl.c.val1 /
(value_tbl.c.val2 - value_tbl.c.val1) /
value_tbl.c.val1 > 2.0),
"SELECT values.id FROM values WHERE "
"(values.val1 / (values.val2 - values.val1)) "
"/ values.val1 > :param_1"
)
def test_percent_chars(self):
t = table("table%name",
column("percent%"),
column("%(oneofthese)s"),
column("spaces % more spaces"),
)
self.assert_compile(
t.select(use_labels=True),
'''SELECT "table%name"."percent%" AS "table%name_percent%", '''
'''"table%name"."%(oneofthese)s" AS '''
'''"table%name_%(oneofthese)s", '''
'''"table%name"."spaces % more spaces" AS '''
'''"table%name_spaces % '''
'''more spaces" FROM "table%name"'''
)
def test_joins(self):
self.assert_compile(
join(table2, table1, table1.c.myid == table2.c.otherid).select(),
"SELECT myothertable.otherid, myothertable.othername, "
"mytable.myid, mytable.name, mytable.description FROM "
"myothertable JOIN mytable ON mytable.myid = myothertable.otherid"
)
self.assert_compile(
select(
[table1],
from_obj=[join(table1, table2, table1.c.myid
== table2.c.otherid)]
),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable JOIN myothertable ON mytable.myid = myothertable.otherid")
self.assert_compile(
select(
[join(join(table1, table2, table1.c.myid == table2.c.otherid),
table3, table1.c.myid == table3.c.userid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid, "
"thirdtable.otherstuff FROM mytable JOIN myothertable "
"ON mytable.myid ="
" myothertable.otherid JOIN thirdtable ON "
"mytable.myid = thirdtable.userid"
)
self.assert_compile(
join(users, addresses, users.c.user_id ==
addresses.c.user_id).select(),
"SELECT users.user_id, users.user_name, users.password, "
"addresses.address_id, addresses.user_id, addresses.street, "
"addresses.city, addresses.state, addresses.zip "
"FROM users JOIN addresses "
"ON users.user_id = addresses.user_id"
)
self.assert_compile(
select([table1, table2, table3],
from_obj=[join(table1, table2,
table1.c.myid == table2.c.otherid).
outerjoin(table3,
table1.c.myid == table3.c.userid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable "
"JOIN myothertable ON mytable.myid "
"= myothertable.otherid LEFT OUTER JOIN thirdtable "
"ON mytable.myid ="
" thirdtable.userid"
)
self.assert_compile(
select([table1, table2, table3],
from_obj=[outerjoin(table1,
join(table2, table3, table2.c.otherid
== table3.c.userid),
table1.c.myid == table2.c.otherid)]
),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername, "
"thirdtable.userid,"
" thirdtable.otherstuff FROM mytable LEFT OUTER JOIN "
"(myothertable "
"JOIN thirdtable ON myothertable.otherid = "
"thirdtable.userid) ON "
"mytable.myid = myothertable.otherid"
)
query = select(
[table1, table2],
or_(
table1.c.name == 'fred',
table1.c.myid == 10,
table2.c.othername != 'jack',
"EXISTS (select yay from foo where boo = lar)"
),
from_obj=[outerjoin(table1, table2,
table1.c.myid == table2.c.otherid)]
)
self.assert_compile(
query, "SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername "
"FROM mytable LEFT OUTER JOIN myothertable ON mytable.myid = "
"myothertable.otherid WHERE mytable.name = :name_1 OR "
"mytable.myid = :myid_1 OR myothertable.othername != :othername_1 "
"OR EXISTS (select yay from foo where boo = lar)", )
def test_compound_selects(self):
assert_raises_message(
exc.ArgumentError,
"All selectables passed to CompoundSelect "
"must have identical numbers of columns; "
"select #1 has 2 columns, select #2 has 3",
union, table3.select(), table1.select()
)
x = union(
select([table1], table1.c.myid == 5),
select([table1], table1.c.myid == 12),
order_by=[table1.c.myid],
)
self.assert_compile(
x, "SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE "
"mytable.myid = :myid_1 UNION "
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = :myid_2 "
"ORDER BY mytable.myid")
x = union(
select([table1]),
select([table1])
)
x = union(x, select([table1]))
self.assert_compile(
x, "(SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable UNION SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable) UNION SELECT mytable.myid,"
" mytable.name, mytable.description FROM mytable")
u1 = union(
select([table1.c.myid, table1.c.name]),
select([table2]),
select([table3])
)
self.assert_compile(
u1, "SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable "
"UNION SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable")
assert u1.corresponding_column(table2.c.otherid) is u1.c.myid
self.assert_compile(
union(
select([table1.c.myid, table1.c.name]),
select([table2]),
order_by=['myid'],
offset=10,
limit=5
),
"SELECT mytable.myid, mytable.name "
"FROM mytable UNION SELECT myothertable.otherid, "
"myothertable.othername "
"FROM myothertable ORDER BY myid LIMIT :param_1 OFFSET :param_2",
{'param_1': 5, 'param_2': 10}
)
self.assert_compile(
union(
select([table1.c.myid, table1.c.name,
func.max(table1.c.description)],
table1.c.name == 'name2',
group_by=[table1.c.myid, table1.c.name]),
table1.select(table1.c.name == 'name1')
),
"SELECT mytable.myid, mytable.name, "
"max(mytable.description) AS max_1 "
"FROM mytable WHERE mytable.name = :name_1 "
"GROUP BY mytable.myid, "
"mytable.name UNION SELECT mytable.myid, mytable.name, "
"mytable.description "
"FROM mytable WHERE mytable.name = :name_2"
)
self.assert_compile(
union(
select([literal(100).label('value')]),
select([literal(200).label('value')])
),
"SELECT :param_1 AS value UNION SELECT :param_2 AS value"
)
self.assert_compile(
union_all(
select([table1.c.myid]),
union(
select([table2.c.otherid]),
select([table3.c.userid]),
)
),
"SELECT mytable.myid FROM mytable UNION ALL "
"(SELECT myothertable.otherid FROM myothertable UNION "
"SELECT thirdtable.userid FROM thirdtable)"
)
s = select([column('foo'), column('bar')])
# ORDER BY clauses, even though not supported by
# all databases, are rendered if requested
self.assert_compile(
union(
s.order_by("foo"),
s.order_by("bar")),
"SELECT foo, bar ORDER BY foo UNION SELECT foo, bar ORDER BY bar")
# self_group() is honored
self.assert_compile(
union(s.order_by("foo").self_group(),
s.order_by("bar").limit(10).self_group()),
"(SELECT foo, bar ORDER BY foo) UNION (SELECT foo, "
"bar ORDER BY bar LIMIT :param_1)",
{'param_1': 10}
)
def test_compound_grouping(self):
s = select([column('foo'), column('bar')]).select_from('bat')
self.assert_compile(
union(union(union(s, s), s), s),
"((SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat) UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(s, s, s, s),
"SELECT foo, bar FROM bat UNION SELECT foo, bar "
"FROM bat UNION SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(s, union(s, union(s, s))),
"SELECT foo, bar FROM bat UNION (SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat))"
)
self.assert_compile(
select([s.alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat) AS anon_1'
)
self.assert_compile(
select([union(s, s).alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat UNION '
'SELECT foo, bar FROM bat) AS anon_1'
)
self.assert_compile(
select([except_(s, s).alias()]),
'SELECT anon_1.foo, anon_1.bar FROM '
'(SELECT foo, bar FROM bat EXCEPT '
'SELECT foo, bar FROM bat) AS anon_1'
)
# sqlite specifically chokes on this query
self.assert_compile(
union(
except_(s, s),
s
),
"(SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat) "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
union(
s,
except_(s, s),
),
"SELECT foo, bar FROM bat "
"UNION (SELECT foo, bar FROM bat EXCEPT SELECT foo, bar FROM bat)"
)
# this solves it
self.assert_compile(
union(
except_(s, s).alias().select(),
s
),
"SELECT anon_1.foo, anon_1.bar FROM "
"(SELECT foo, bar FROM bat EXCEPT "
"SELECT foo, bar FROM bat) AS anon_1 "
"UNION SELECT foo, bar FROM bat"
)
self.assert_compile(
except_(
union(s, s),
union(s, s)
),
"(SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat) "
"EXCEPT (SELECT foo, bar FROM bat UNION SELECT foo, bar FROM bat)"
)
s2 = union(s, s)
s3 = union(s2, s2)
self.assert_compile(s3, "(SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat "
"UNION SELECT foo, bar FROM bat)")
self.assert_compile(
union(
intersect(s, s),
intersect(s, s)
),
"(SELECT foo, bar FROM bat INTERSECT SELECT foo, bar FROM bat) "
"UNION (SELECT foo, bar FROM bat INTERSECT "
"SELECT foo, bar FROM bat)"
)
def test_binds(self):
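# each case supplies: the statement, its expected SQL with named and
# with positional (qmark) parameters, the default bind values as a dict
# and as a positional list, a dict of test params to hand to
# construct_params(), and the expected resulting dict and positional list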
for (
stmt,
expected_named_stmt,
expected_positional_stmt,
expected_default_params_dict,
expected_default_params_list,
test_param_dict,
expected_test_params_dict,
expected_test_params_list
) in [
(
select(
[table1, table2],
and_(
table1.c.myid == table2.c.otherid,
table1.c.name == bindparam('mytablename')
)),
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid "
"AND mytable.name = :mytablename",
"SELECT mytable.myid, mytable.name, mytable.description, "
"myothertable.otherid, myothertable.othername FROM mytable, "
"myothertable WHERE mytable.myid = myothertable.otherid AND "
"mytable.name = ?",
{'mytablename': None}, [None],
{'mytablename': 5}, {'mytablename': 5}, [5]
),
(
select([table1], or_(table1.c.myid == bindparam('myid'),
table2.c.otherid == bindparam('myid'))),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = :myid "
"OR myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{'myid': None}, [None, None],
{'myid': 5}, {'myid': 5}, [5, 5]
),
(
text("SELECT mytable.myid, mytable.name, "
"mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid"),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = :myid OR "
"myothertable.otherid = :myid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? OR "
"myothertable.otherid = ?",
{'myid': None}, [None, None],
{'myid': 5}, {'myid': 5}, [5, 5]
),
(
select([table1], or_(table1.c.myid ==
bindparam('myid', unique=True),
table2.c.otherid ==
bindparam('myid', unique=True))),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = ? "
"OR myothertable.otherid = ?",
{'myid_1': None, 'myid_2': None}, [None, None],
{'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6]
),
(
bindparam('test', type_=String, required=False) + text("'hi'"),
":test || 'hi'",
"? || 'hi'",
{'test': None}, [None],
{}, {'test': None}, [None]
),
(
# testing select.params() here - bindparam() objects
# must get required flag set to False
select(
[table1],
or_(
table1.c.myid == bindparam('myid'),
table2.c.otherid == bindparam('myotherid')
)).params({'myid': 8, 'myotherid': 7}),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid OR myothertable.otherid = :myotherid",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{'myid': 8, 'myotherid': 7}, [8, 7],
{'myid': 5}, {'myid': 5, 'myotherid': 7}, [5, 7]
),
(
select([table1], or_(table1.c.myid ==
bindparam('myid', value=7, unique=True),
table2.c.otherid ==
bindparam('myid', value=8, unique=True))),
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
":myid_1 OR myothertable.otherid = :myid_2",
"SELECT mytable.myid, mytable.name, mytable.description FROM "
"mytable, myothertable WHERE mytable.myid = "
"? OR myothertable.otherid = ?",
{'myid_1': 7, 'myid_2': 8}, [7, 8],
{'myid_1': 5, 'myid_2': 6}, {'myid_1': 5, 'myid_2': 6}, [5, 6]
),
]:
self.assert_compile(stmt, expected_named_stmt,
params=expected_default_params_dict)
self.assert_compile(stmt, expected_positional_stmt,
dialect=sqlite.dialect())
nonpositional = stmt.compile()
positional = stmt.compile(dialect=sqlite.dialect())
pp = positional.params
eq_([pp[k] for k in positional.positiontup],
expected_default_params_list)
eq_(nonpositional.construct_params(test_param_dict),
expected_test_params_dict)
pp = positional.construct_params(test_param_dict)
eq_(
[pp[k] for k in positional.positiontup],
expected_test_params_list
)
# check that params() doesn't modify original statement
s = select([table1], or_(table1.c.myid == bindparam('myid'),
table2.c.otherid ==
bindparam('myotherid')))
s2 = s.params({'myid': 8, 'myotherid': 7})
s3 = s2.params({'myid': 9})
assert s.compile().params == {'myid': None, 'myotherid': None}
assert s2.compile().params == {'myid': 8, 'myotherid': 7}
assert s3.compile().params == {'myid': 9, 'myotherid': 7}
# test using same 'unique' param object twice in one compile
s = select([table1.c.myid]).where(table1.c.myid == 12).as_scalar()
s2 = select([table1, s], table1.c.myid == s)
self.assert_compile(
s2, "SELECT mytable.myid, mytable.name, mytable.description, "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = "
":myid_1) AS anon_1 FROM mytable WHERE mytable.myid = "
"(SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)")
positional = s2.compile(dialect=sqlite.dialect())
pp = positional.params
assert [pp[k] for k in positional.positiontup] == [12, 12]
# check that conflicts with "unique" params are caught
s = select([table1], or_(table1.c.myid == 7,
table1.c.myid == bindparam('myid_1')))
assert_raises_message(exc.CompileError,
"conflicts with unique bind parameter "
"of the same name",
str, s)
s = select([table1], or_(table1.c.myid == 7, table1.c.myid == 8,
table1.c.myid == bindparam('myid_1')))
assert_raises_message(exc.CompileError,
"conflicts with unique bind parameter "
"of the same name",
str, s)
def _test_binds_no_hash_collision(self):
"""test that construct_params doesn't corrupt dict
due to hash collisions"""
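# note: the leading underscore keeps this test out of the normal run;
# it builds 100000 bind parameters, presumably too expensive to run by
# default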
total_params = 100000
in_clause = [':in%d' % i for i in range(total_params)]
params = dict(('in%d' % i, i) for i in range(total_params))
t = text('text clause %s' % ', '.join(in_clause))
eq_(len(t.bindparams), total_params)
c = t.compile()
pp = c.construct_params(params)
eq_(len(set(pp)), total_params, '%s %s' % (len(set(pp)), len(pp)))
eq_(len(set(pp.values())), total_params)
def test_bind_as_col(self):
t = table('foo', column('id'))
s = select([t, literal('lala').label('hoho')])
self.assert_compile(s, "SELECT foo.id, :param_1 AS hoho FROM foo")
assert [str(c) for c in s.c] == ["id", "hoho"]
def test_bind_callable(self):
expr = column('x') == bindparam("key", callable_=lambda: 12)
self.assert_compile(
expr,
"x = :key",
{'x': 12}
)
def test_bind_params_missing(self):
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(
[table1]).where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True)
)
).compile().construct_params,
params=dict(y=5)
)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x'",
select(
[table1]).where(
table1.c.myid == bindparam(
"x",
required=True)).compile().construct_params)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(
[table1]).where(
and_(
table1.c.myid == bindparam("x", required=True),
table1.c.name == bindparam("y", required=True)
)
).compile().construct_params,
params=dict(y=5), _group_number=2)
assert_raises_message(
exc.InvalidRequestError,
r"A value is required for bind parameter 'x', "
"in parameter group 2",
select(
[table1]).where(
table1.c.myid == bindparam(
"x",
required=True)).compile().construct_params,
_group_number=2)
def test_tuple(self):
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[(1, 'foo'), (5, 'bar')]),
"(mytable.myid, mytable.name) IN "
"((:param_1, :param_2), (:param_3, :param_4))"
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
[tuple_(table2.c.otherid, table2.c.othername)]
),
"(mytable.myid, mytable.name) IN "
"((myothertable.otherid, myothertable.othername))"
)
self.assert_compile(
tuple_(table1.c.myid, table1.c.name).in_(
select([table2.c.otherid, table2.c.othername])
),
"(mytable.myid, mytable.name) IN (SELECT "
"myothertable.otherid, myothertable.othername FROM myothertable)"
)
def test_cast(self):
tbl = table('casttest',
column('id', Integer),
column('v1', Float),
column('v2', Float),
column('ts', TIMESTAMP),
)
def check_results(dialect, expected_results, literal):
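# expected_results lists the dialect-specific renderings, in order, for
# Numeric, Numeric(12, 9), Date, Text and String(20); 'literal' is the
# bind parameter marker the dialect uses for the casted plain values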
eq_(len(expected_results), 5,
'Incorrect number of expected results')
eq_(str(cast(tbl.c.v1, Numeric).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[0])
eq_(str(cast(tbl.c.v1, Numeric(12, 9)).compile(dialect=dialect)),
'CAST(casttest.v1 AS %s)' % expected_results[1])
eq_(str(cast(tbl.c.ts, Date).compile(dialect=dialect)),
'CAST(casttest.ts AS %s)' % expected_results[2])
eq_(str(cast(1234, Text).compile(dialect=dialect)),
'CAST(%s AS %s)' % (literal, expected_results[3]))
eq_(str(cast('test', String(20)).compile(dialect=dialect)),
'CAST(%s AS %s)' % (literal, expected_results[4]))
# fixme: shoving all of this dialect-specific stuff in one test
# is now officially completely ridiculous AND non-obviously omits
# coverage on other dialects.
sel = select([tbl, cast(tbl.c.v1, Numeric)]).compile(
dialect=dialect)
if isinstance(dialect, type(mysql.dialect())):
eq_(str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, "
"CAST(casttest.v1 AS DECIMAL) AS anon_1 \nFROM casttest")
else:
eq_(str(sel),
"SELECT casttest.id, casttest.v1, casttest.v2, "
"casttest.ts, CAST(casttest.v1 AS NUMERIC) AS "
"anon_1 \nFROM casttest")
# first test with PostgreSQL engine
check_results(
postgresql.dialect(), [
'NUMERIC', 'NUMERIC(12, 9)', 'DATE', 'TEXT', 'VARCHAR(20)'],
'%(param_1)s')
# then the Oracle engine
check_results(
oracle.dialect(), [
'NUMERIC', 'NUMERIC(12, 9)', 'DATE',
'CLOB', 'VARCHAR2(20 CHAR)'],
':param_1')
# then the sqlite engine
check_results(sqlite.dialect(), ['NUMERIC', 'NUMERIC(12, 9)',
'DATE', 'TEXT', 'VARCHAR(20)'], '?')
# then the MySQL engine
check_results(mysql.dialect(), ['DECIMAL', 'DECIMAL(12, 9)',
'DATE', 'CHAR', 'CHAR(20)'], '%s')
self.assert_compile(cast(text('NULL'), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
self.assert_compile(cast(null(), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
self.assert_compile(cast(literal_column('NULL'), Integer),
'CAST(NULL AS INTEGER)',
dialect=sqlite.dialect())
def test_over(self):
self.assert_compile(
func.row_number().over(),
"row_number() OVER ()"
)
self.assert_compile(
func.row_number().over(
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name],
order_by=[table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=table1.c.description
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=table1.c.name,
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (PARTITION BY mytable.name "
"ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[],
order_by=[table1.c.name, table1.c.description]
),
"row_number() OVER (ORDER BY mytable.name, mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[table1.c.name, table1.c.description],
order_by=[]
),
"row_number() OVER (PARTITION BY mytable.name, "
"mytable.description)"
)
self.assert_compile(
func.row_number().over(
partition_by=[],
order_by=[]
),
"row_number() OVER ()"
)
self.assert_compile(
select([func.row_number().over(
order_by=table1.c.description
).label('foo')]),
"SELECT row_number() OVER (ORDER BY mytable.description) "
"AS foo FROM mytable"
)
# test from_obj generation.
# from func:
self.assert_compile(
select([
func.max(table1.c.name).over(
partition_by=['foo']
)
]),
"SELECT max(mytable.name) OVER (PARTITION BY foo) "
"AS anon_1 FROM mytable"
)
# from partition_by
self.assert_compile(
select([
func.row_number().over(
partition_by=[table1.c.name]
)
]),
"SELECT row_number() OVER (PARTITION BY mytable.name) "
"AS anon_1 FROM mytable"
)
# from order_by
self.assert_compile(
select([
func.row_number().over(
order_by=table1.c.name
)
]),
"SELECT row_number() OVER (ORDER BY mytable.name) "
"AS anon_1 FROM mytable"
)
# this tests that _from_objects
# concatenates OK
self.assert_compile(
select([column("x") + over(func.foo())]),
"SELECT x + foo() OVER () AS anon_1"
)
def test_date_between(self):
import datetime
table = Table('dt', metadata,
Column('date', Date))
self.assert_compile(
table.select(table.c.date.between(datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5))),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={'date_1': datetime.date(2006, 6, 1),
'date_2': datetime.date(2006, 6, 5)})
self.assert_compile(
table.select(sql.between(table.c.date, datetime.date(2006, 6, 1),
datetime.date(2006, 6, 5))),
"SELECT dt.date FROM dt WHERE dt.date BETWEEN :date_1 AND :date_2",
checkparams={'date_1': datetime.date(2006, 6, 1),
'date_2': datetime.date(2006, 6, 5)})
def test_delayed_col_naming(self):
my_str = Column(String)
sel1 = select([my_str])
assert_raises_message(
exc.InvalidRequestError,
"Cannot initialize a sub-selectable with this Column",
lambda: sel1.c
)
# calling label or as_scalar doesn't compile
# anything.
sel2 = select([func.substr(my_str, 2, 3)]).label('my_substr')
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
str, sel2
)
sel3 = select([my_str]).as_scalar()
assert_raises_message(
exc.CompileError,
"Cannot compile Column object until its 'name' is assigned.",
str, sel3
)
my_str.name = 'foo'
self.assert_compile(
sel1,
"SELECT foo",
)
self.assert_compile(
sel2,
'(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)',
)
self.assert_compile(
sel3,
"(SELECT foo)"
)
def test_naming(self):
# TODO: the parts where we check c.keys() are not "compile" tests;
# they probably belong in test_selectable, or some broken-up
# version of that suite
f1 = func.hoho(table1.c.name)
s1 = select([table1.c.myid, table1.c.myid.label('foobar'),
f1,
func.lala(table1.c.name).label('gg')])
eq_(
list(s1.c.keys()),
['myid', 'foobar', str(f1), 'gg']
)
meta = MetaData()
t1 = Table('mytable', meta, Column('col1', Integer))
exprs = (
table1.c.myid == 12,
func.hoho(table1.c.myid),
cast(table1.c.name, Numeric),
literal('x'),
)
for col, key, expr, lbl in (
(table1.c.name, 'name', 'mytable.name', None),
(exprs[0], str(exprs[0]), 'mytable.myid = :myid_1', 'anon_1'),
(exprs[1], str(exprs[1]), 'hoho(mytable.myid)', 'hoho_1'),
(exprs[2], str(exprs[2]),
'CAST(mytable.name AS NUMERIC)', 'anon_1'),
(t1.c.col1, 'col1', 'mytable.col1', None),
(column('some wacky thing'), 'some wacky thing',
'"some wacky thing"', ''),
(exprs[3], exprs[3].key, ":param_1", "anon_1")
):
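# col: the selected expression; key: the expected key in .c;
# expr: its expected SQL text; lbl: the expected generated label, if any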
if getattr(col, 'table', None) is not None:
t = col.table
else:
t = table1
s1 = select([col], from_obj=t)
assert list(s1.c.keys()) == [key], list(s1.c.keys())
if lbl:
self.assert_compile(
s1, "SELECT %s AS %s FROM mytable" %
(expr, lbl))
else:
self.assert_compile(s1, "SELECT %s FROM mytable" % (expr,))
s1 = select([s1])
if lbl:
self.assert_compile(
s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" %
(lbl, expr, lbl))
elif col.table is not None:
# sqlite rule labels subquery columns
self.assert_compile(
s1, "SELECT %s FROM (SELECT %s AS %s FROM mytable)" %
(key, expr, key))
else:
self.assert_compile(s1,
"SELECT %s FROM (SELECT %s FROM mytable)" %
(expr, expr))
def test_hints(self):
s = select([table1.c.myid]).with_hint(table1, "test hint %(name)s")
s2 = select([table1.c.myid]).\
with_hint(table1, "index(%(name)s idx)", 'oracle').\
with_hint(table1, "WITH HINT INDEX idx", 'sybase')
a1 = table1.alias()
s3 = select([a1.c.myid]).with_hint(a1, "index(%(name)s hint)")
subs4 = select([
table1, table2
]).select_from(
table1.join(table2, table1.c.myid == table2.c.otherid)).\
with_hint(table1, 'hint1')
s4 = select([table3]).select_from(
table3.join(
subs4,
subs4.c.othername == table3.c.otherstuff
)
).\
with_hint(table3, 'hint3')
t1 = table('QuotedName', column('col1'))
s6 = select([t1.c.col1]).where(t1.c.col1 > 10).\
with_hint(t1, '%(name)s idx1')
a2 = t1.alias('SomeName')
s7 = select([a2.c.col1]).where(a2.c.col1 > 10).\
with_hint(a2, '%(name)s idx1')
mysql_d, oracle_d, sybase_d = \
mysql.dialect(), \
oracle.dialect(), \
sybase.dialect()
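# each (statement, dialect, expected) triple checks how the hints
# attached above are rendered by that dialect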
for stmt, dialect, expected in [
(s, mysql_d,
"SELECT mytable.myid FROM mytable test hint mytable"),
(s, oracle_d,
"SELECT /*+ test hint mytable */ mytable.myid FROM mytable"),
(s, sybase_d,
"SELECT mytable.myid FROM mytable test hint mytable"),
(s2, mysql_d,
"SELECT mytable.myid FROM mytable"),
(s2, oracle_d,
"SELECT /*+ index(mytable idx) */ mytable.myid FROM mytable"),
(s2, sybase_d,
"SELECT mytable.myid FROM mytable WITH HINT INDEX idx"),
(s3, mysql_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)"),
(s3, oracle_d,
"SELECT /*+ index(mytable_1 hint) */ mytable_1.myid FROM "
"mytable mytable_1"),
(s3, sybase_d,
"SELECT mytable_1.myid FROM mytable AS mytable_1 "
"index(mytable_1 hint)"),
(s4, mysql_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 INNER JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 INNER "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff"),
(s4, sybase_d,
"SELECT thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable "
"hint3 JOIN (SELECT mytable.myid, mytable.name, "
"mytable.description, myothertable.otherid, "
"myothertable.othername FROM mytable hint1 "
"JOIN myothertable ON mytable.myid = myothertable.otherid) "
"ON othername = thirdtable.otherstuff"),
(s4, oracle_d,
"SELECT /*+ hint3 */ thirdtable.userid, thirdtable.otherstuff "
"FROM thirdtable JOIN (SELECT /*+ hint1 */ mytable.myid,"
" mytable.name, mytable.description, myothertable.otherid,"
" myothertable.othername FROM mytable JOIN myothertable ON"
" mytable.myid = myothertable.otherid) ON othername ="
" thirdtable.otherstuff"),
# TODO: figure out dictionary ordering solution here
# (s5, oracle_d,
# "SELECT /*+ hint3 */ /*+ hint1 */ thirdtable.userid, "
# "thirdtable.otherstuff "
# "FROM thirdtable JOIN (SELECT mytable.myid,"
# " mytable.name, mytable.description, myothertable.otherid,"
# " myothertable.othername FROM mytable JOIN myothertable ON"
# " mytable.myid = myothertable.otherid) ON othername ="
# " thirdtable.otherstuff"),
(s6, oracle_d,
"""SELECT /*+ "QuotedName" idx1 */ "QuotedName".col1 """
"""FROM "QuotedName" WHERE "QuotedName".col1 > :col1_1"""),
(s7, oracle_d,
"""SELECT /*+ SomeName idx1 */ "SomeName".col1 FROM """
""""QuotedName" "SomeName" WHERE "SomeName".col1 > :col1_1"""),
]:
self.assert_compile(
stmt,
expected,
dialect=dialect
)
def test_literal_as_text_fromstring(self):
self.assert_compile(
and_("a", "b"),
"a AND b"
)
def test_literal_as_text_nonstring_raise(self):
assert_raises(exc.ArgumentError,
and_, ("a",), ("b",)
)
class UnsupportedTest(fixtures.TestBase):
def test_unsupported_element_str_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
__visit_name__ = 'some_element'
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.SQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile
)
def test_unsupported_element_meth_visit_name(self):
from sqlalchemy.sql.expression import ClauseElement
class SomeElement(ClauseElement):
@classmethod
def __visit_name__(cls):
return "some_element"
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.SQLCompiler .*"
r"can't render element of type <class '.*SomeElement'>",
SomeElement().compile
)
def test_unsupported_operator(self):
from sqlalchemy.sql.expression import BinaryExpression
def myop(x, y):
pass
binary = BinaryExpression(column("foo"), column("bar"), myop)
assert_raises_message(
exc.UnsupportedCompilationError,
r"Compiler <sqlalchemy.sql.compiler.SQLCompiler .*"
r"can't render element of type <function.*",
binary.compile
)
class KwargPropagationTest(fixtures.TestBase):
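# verifies that compile_kwargs (here {"canary": True}) propagate down to
# custom @compiles handlers for column and table elements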
@classmethod
def setup_class(cls):
from sqlalchemy.sql.expression import ColumnClause, TableClause
class CatchCol(ColumnClause):
pass
class CatchTable(TableClause):
pass
cls.column = CatchCol("x")
cls.table = CatchTable("y")
cls.criterion = cls.column == CatchCol('y')
@compiles(CatchCol)
def compile_col(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_column(element)
@compiles(CatchTable)
def compile_table(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_table(element)
def _do_test(self, element):
d = default.DefaultDialect()
d.statement_compiler(d, element,
compile_kwargs={"canary": True})
def test_binary(self):
self._do_test(self.column == 5)
def test_select(self):
s = select([self.column]).select_from(self.table).\
where(self.column == self.criterion).\
order_by(self.column)
self._do_test(s)
def test_case(self):
c = case([(self.criterion, self.column)], else_=self.column)
self._do_test(c)
def test_cast(self):
c = cast(self.column, Integer)
self._do_test(c)
class CRUDTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_correlated_update(self):
# test against a straight text subquery
u = update(
table1,
values={
table1.c.name:
text("(select name from mytable where id=mytable.id)")
}
)
self.assert_compile(
u,
"UPDATE mytable SET name=(select name from mytable "
"where id=mytable.id)")
mt = table1.alias()
u = update(table1, values={
table1.c.name:
select([mt.c.name], mt.c.myid == table1.c.myid)
})
self.assert_compile(
u, "UPDATE mytable SET name=(SELECT mytable_1.name FROM "
"mytable AS mytable_1 WHERE "
"mytable_1.myid = mytable.myid)")
# test against a regular constructed subquery
s = select([table2], table2.c.otherid == table1.c.myid)
u = update(table1, table1.c.name == 'jack', values={table1.c.name: s})
self.assert_compile(
u, "UPDATE mytable SET name=(SELECT myothertable.otherid, "
"myothertable.othername FROM myothertable WHERE "
"myothertable.otherid = mytable.myid) "
"WHERE mytable.name = :name_1")
# test a non-correlated WHERE clause
s = select([table2.c.othername], table2.c.otherid == 7)
u = update(table1, table1.c.name == s)
self.assert_compile(u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = :otherid_1)")
# test one that is actually correlated...
s = select([table2.c.othername], table2.c.otherid == table1.c.myid)
u = table1.update(table1.c.name == s)
self.assert_compile(u,
"UPDATE mytable SET myid=:myid, name=:name, "
"description=:description WHERE mytable.name = "
"(SELECT myothertable.othername FROM myothertable "
"WHERE myothertable.otherid = mytable.myid)")
# test correlated FROM implicit in WHERE and SET clauses
u = table1.update().values(name=table2.c.othername)\
.where(table2.c.otherid == table1.c.myid)
self.assert_compile(
u, "UPDATE mytable SET name=myothertable.othername "
"FROM myothertable WHERE myothertable.otherid = mytable.myid")
u = table1.update().values(name='foo')\
.where(table2.c.otherid == table1.c.myid)
self.assert_compile(
u, "UPDATE mytable SET name=:name "
"FROM myothertable WHERE myothertable.otherid = mytable.myid")
self.assert_compile(u,
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable WHERE "
"myothertable.otherid = mytable.myid",
dialect=mssql.dialect())
self.assert_compile(u.where(table2.c.othername == mt.c.name),
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable, mytable AS mytable_1 "
"WHERE myothertable.otherid = mytable.myid "
"AND myothertable.othername = mytable_1.name",
dialect=mssql.dialect())
def test_binds_that_match_columns(self):
"""test bind params named after column names
replace the normal SET/VALUES generation."""
t = table('foo', column('x'), column('y'))
u = t.update().where(t.c.x == bindparam('x'))
assert_raises(exc.CompileError, u.compile)
self.assert_compile(u, "UPDATE foo SET WHERE foo.x = :x", params={})
assert_raises(exc.CompileError, u.values(x=7).compile)
self.assert_compile(u.values(y=7),
"UPDATE foo SET y=:y WHERE foo.x = :x")
assert_raises(exc.CompileError,
u.values(x=7).compile, column_keys=['x', 'y'])
assert_raises(exc.CompileError, u.compile, column_keys=['x', 'y'])
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x")
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x) WHERE foo.x = :x",
params={
'x': 1})
self.assert_compile(
u.values(
x=3 +
bindparam('x')),
"UPDATE foo SET x=(:param_1 + :x), y=:y WHERE foo.x = :x",
params={
'x': 1,
'y': 2})
i = t.insert().values(x=3 + bindparam('x'))
self.assert_compile(i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x))")
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x), :y)",
params={
'x': 1,
'y': 2})
i = t.insert().values(x=bindparam('y'))
self.assert_compile(i, "INSERT INTO foo (x) VALUES (:y)")
i = t.insert().values(x=bindparam('y'), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam('y'), y=5)
assert_raises(exc.CompileError, i.compile)
i = t.insert().values(x=3 + bindparam('x2'))
self.assert_compile(i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x2))")
self.assert_compile(
i,
"INSERT INTO foo (x) VALUES ((:param_1 + :x2))",
params={})
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={
'x': 1,
'y': 2})
self.assert_compile(
i,
"INSERT INTO foo (x, y) VALUES ((:param_1 + :x2), :y)",
params={
'x2': 1,
'y': 2})
def test_unconsumed_names(self):
t = table("t", column("x"), column("y"))
t2 = table("t2", column("q"), column("z"))
assert_raises_message(
exc.CompileError,
"Unconsumed column names: z",
t.insert().values(x=5, z=5).compile,
)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: z",
t.update().values(x=5, z=5).compile,
)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: j",
t.update().values(x=5, j=7).values({t2.c.z: 5}).
where(t.c.x == t2.c.q).compile,
)
# bindparam names don't get counted
i = t.insert().values(x=3 + bindparam('x2'))
self.assert_compile(
i,
"INSERT INTO t (x) VALUES ((:param_1 + :x2))"
)
# even if in the params list
i = t.insert().values(x=3 + bindparam('x2'))
self.assert_compile(
i,
"INSERT INTO t (x) VALUES ((:param_1 + :x2))",
params={"x2": 1}
)
assert_raises_message(
exc.CompileError,
"Unconsumed column names: j",
t.update().values(x=5, j=7).compile,
column_keys=['j']
)
def test_labels_no_collision(self):
t = table('foo', column('id'), column('foo_id'))
self.assert_compile(
t.update().where(t.c.id == 5),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :id_1"
)
self.assert_compile(
t.update().where(t.c.id == bindparam(key=t.c.id._label)),
"UPDATE foo SET id=:id, foo_id=:foo_id WHERE foo.id = :foo_id_1"
)
class DDLTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def _illegal_type_fixture(self):
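# a type whose @compiles handler always raises; used to check that the
# error is re-raised with the table/column context prepended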
class MyType(types.TypeEngine):
pass
@compiles(MyType)
def compile(element, compiler, **kw):
raise exc.CompileError("Couldn't compile type")
return MyType
def test_reraise_of_column_spec_issue(self):
MyType = self._illegal_type_fixture()
t1 = Table('t', MetaData(),
Column('x', MyType())
)
assert_raises_message(
exc.CompileError,
r"\(in table 't', column 'x'\): Couldn't compile type",
schema.CreateTable(t1).compile
)
def test_reraise_of_column_spec_issue_unicode(self):
MyType = self._illegal_type_fixture()
t1 = Table('t', MetaData(),
Column(u('méil'), MyType())
)
assert_raises_message(
exc.CompileError,
u(r"\(in table 't', column 'méil'\): Couldn't compile type"),
schema.CreateTable(t1).compile
)
def test_system_flag(self):
m = MetaData()
t = Table('t', m, Column('x', Integer),
Column('y', Integer, system=True),
Column('z', Integer))
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t (x INTEGER, z INTEGER)"
)
m2 = MetaData()
t2 = t.tometadata(m2)
self.assert_compile(
schema.CreateTable(t2),
"CREATE TABLE t (x INTEGER, z INTEGER)"
)
class InlineDefaultTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_insert(self):
m = MetaData()
foo = Table('foo', m,
Column('id', Integer))
t = Table('test', m,
Column('col1', Integer, default=func.foo(1)),
Column('col2', Integer, default=select(
[func.coalesce(func.max(foo.c.id))])),
)
self.assert_compile(
t.insert(
inline=True, values={}),
"INSERT INTO test (col1, col2) VALUES (foo(:foo_1), "
"(SELECT coalesce(max(foo.id)) AS coalesce_1 FROM "
"foo))")
def test_update(self):
m = MetaData()
foo = Table('foo', m,
Column('id', Integer))
t = Table('test', m,
Column('col1', Integer, onupdate=func.foo(1)),
Column('col2', Integer, onupdate=select(
[func.coalesce(func.max(foo.c.id))])),
Column('col3', String(30))
)
self.assert_compile(t.update(inline=True, values={'col3': 'foo'}),
"UPDATE test SET col1=foo(:foo_1), col2=(SELECT "
"coalesce(max(foo.id)) AS coalesce_1 FROM foo), "
"col3=:col3")
class SchemaTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_select(self):
self.assert_compile(table4.select(),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable")
self.assert_compile(
table4.select(
and_(
table4.c.datatype_id == 7,
table4.c.value == 'hi')),
"SELECT remote_owner.remotetable.rem_id, "
"remote_owner.remotetable.datatype_id,"
" remote_owner.remotetable.value "
"FROM remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND"
" remote_owner.remotetable.value = :value_1")
s = table4.select(and_(table4.c.datatype_id == 7,
table4.c.value == 'hi'), use_labels=True)
self.assert_compile(
s, "SELECT remote_owner.remotetable.rem_id AS"
" remote_owner_remotetable_rem_id, "
"remote_owner.remotetable.datatype_id AS"
" remote_owner_remotetable_datatype_id, "
"remote_owner.remotetable.value "
"AS remote_owner_remotetable_value FROM "
"remote_owner.remotetable WHERE "
"remote_owner.remotetable.datatype_id = :datatype_id_1 AND "
"remote_owner.remotetable.value = :value_1")
# multi-part schema name
self.assert_compile(table5.select(),
'SELECT "dbo.remote_owner".remotetable.rem_id, '
'"dbo.remote_owner".remotetable.datatype_id, '
'"dbo.remote_owner".remotetable.value '
'FROM "dbo.remote_owner".remotetable'
)
# multi-part schema name labels - convert '.' to '_'
self.assert_compile(table5.select(use_labels=True),
'SELECT "dbo.remote_owner".remotetable.rem_id AS'
' dbo_remote_owner_remotetable_rem_id, '
'"dbo.remote_owner".remotetable.datatype_id'
' AS dbo_remote_owner_remotetable_datatype_id,'
' "dbo.remote_owner".remotetable.value AS '
'dbo_remote_owner_remotetable_value FROM'
' "dbo.remote_owner".remotetable'
)
def test_alias(self):
a = alias(table4, 'remtable')
self.assert_compile(a.select(a.c.datatype_id == 7),
"SELECT remtable.rem_id, remtable.datatype_id, "
"remtable.value FROM"
" remote_owner.remotetable AS remtable "
"WHERE remtable.datatype_id = :datatype_id_1")
def test_update(self):
self.assert_compile(
table4.update(table4.c.value == 'test',
values={table4.c.datatype_id: 12}),
"UPDATE remote_owner.remotetable SET datatype_id=:datatype_id "
"WHERE remote_owner.remotetable.value = :value_1")
def test_insert(self):
self.assert_compile(table4.insert(values=(2, 5, 'test')),
"INSERT INTO remote_owner.remotetable "
"(rem_id, datatype_id, value) VALUES "
"(:rem_id, :datatype_id, :value)")
class CorrelateTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
def test_dont_overcorrelate(self):
self.assert_compile(select([table1], from_obj=[table1,
table1.select()]),
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable, (SELECT "
"mytable.myid AS myid, mytable.name AS "
"name, mytable.description AS description "
"FROM mytable)")
def _fixture(self):
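# two single-column tables plus a subquery of t1 that references t2,
# shared by the correlation tests below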
t1 = table('t1', column('a'))
t2 = table('t2', column('a'))
return t1, t2, select([t1]).where(t1.c.a == t2.c.a)
def _assert_where_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
def _assert_where_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a FROM t1, t2 WHERE t2.a = "
"(SELECT t1.a WHERE t1.a = t2.a)")
# note there's no more "backwards" correlation after
# we've done #2746
# def _assert_where_backwards_correlated(self, stmt):
# self.assert_compile(
# stmt,
# "SELECT t2.a FROM t2 WHERE t2.a = "
# "(SELECT t1.a FROM t2 WHERE t1.a = t2.a)")
# def _assert_column_backwards_correlated(self, stmt):
# self.assert_compile(stmt,
# "SELECT t2.a, (SELECT t1.a FROM t2 WHERE t1.a = t2.a) "
# "AS anon_1 FROM t2")
def _assert_column_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, (SELECT t1.a FROM t1 WHERE t1.a = t2.a) "
"AS anon_1 FROM t2")
def _assert_column_all_correlated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, "
"(SELECT t1.a WHERE t1.a = t2.a) AS anon_1 FROM t1, t2")
def _assert_having_correlated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
def _assert_from_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t2.a, anon_1.a FROM t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
def _assert_from_all_uncorrelated(self, stmt):
self.assert_compile(
stmt,
"SELECT t1.a, t2.a, anon_1.a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
def _assert_where_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 WHERE t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
def _assert_column_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a, (SELECT t1.a FROM t1, t2 "
"WHERE t1.a = t2.a) AS anon_1 FROM t2")
def _assert_having_uncorrelated(self, stmt):
self.assert_compile(stmt,
"SELECT t2.a FROM t2 HAVING t2.a = "
"(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
def _assert_where_single_full_correlated(self, stmt):
self.assert_compile(stmt,
"SELECT t1.a FROM t1 WHERE t1.a = (SELECT t1.a)")
def test_correlate_semiauto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate(t2)))
def test_correlate_semiauto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate(t2).as_scalar()]))
def test_correlate_semiauto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate(t2).alias()]))
def test_correlate_semiauto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate(t2)))
def test_correlate_except_inclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1.correlate_except(t1)))
def test_correlate_except_exclusion_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate_except(t2)))
def test_correlate_except_inclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.correlate_except(t1).as_scalar()]))
def test_correlate_except_exclusion_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate_except(t2).as_scalar()]))
def test_correlate_except_inclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t1).alias()]))
def test_correlate_except_exclusion_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate_except(t2).alias()]))
def test_correlate_except_none(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate_except(None)))
def test_correlate_except_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1.correlate_except(t1)))
def test_correlate_auto_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_correlated(
select([t2]).where(t2.c.a == s1))
def test_correlate_auto_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_correlated(
select([t2, s1.as_scalar()]))
def test_correlate_auto_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.alias()]))
def test_correlate_auto_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_correlated(
select([t2]).having(t2.c.a == s1))
def test_correlate_disabled_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_uncorrelated(
select([t2]).where(t2.c.a == s1.correlate(None)))
def test_correlate_disabled_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_uncorrelated(
select([t2, s1.correlate(None).as_scalar()]))
def test_correlate_disabled_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_uncorrelated(
select([t2, s1.correlate(None).alias()]))
def test_correlate_disabled_having(self):
t1, t2, s1 = self._fixture()
self._assert_having_uncorrelated(
select([t2]).having(t2.c.a == s1.correlate(None)))
def test_correlate_all_where(self):
t1, t2, s1 = self._fixture()
self._assert_where_all_correlated(
select([t1, t2]).where(t2.c.a == s1.correlate(t1, t2)))
def test_correlate_all_column(self):
t1, t2, s1 = self._fixture()
self._assert_column_all_correlated(
select([t1, t2, s1.correlate(t1, t2).as_scalar()]))
def test_correlate_all_from(self):
t1, t2, s1 = self._fixture()
self._assert_from_all_uncorrelated(
select([t1, t2, s1.correlate(t1, t2).alias()]))
def test_correlate_where_all_unintentional(self):
t1, t2, s1 = self._fixture()
assert_raises_message(
exc.InvalidRequestError,
"returned no FROM clauses due to auto-correlation",
select([t1, t2]).where(t2.c.a == s1).compile
)
def test_correlate_from_all_ok(self):
t1, t2, s1 = self._fixture()
self.assert_compile(
select([t1, t2, s1]),
"SELECT t1.a, t2.a, a FROM t1, t2, "
"(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a)"
)
def test_correlate_auto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s)
self.assert_compile(s2,
"SELECT t1.a FROM t1 WHERE t1.a = "
"(SELECT t1.a FROM t1)")
def test_correlate_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate(t1))
self._assert_where_single_full_correlated(s2)
def test_correlate_except_semiauto_where_singlefrom(self):
t1, t2, s1 = self._fixture()
s = select([t1.c.a])
s2 = select([t1]).where(t1.c.a == s.correlate_except(t2))
self._assert_where_single_full_correlated(s2)
def test_correlate_alone_noeffect(self):
# new as of #2668
t1, t2, s1 = self._fixture()
self.assert_compile(s1.correlate(t1, t2),
"SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a")
def test_correlate_except_froms(self):
# new as of #2748
t1 = table('t1', column('a'))
t2 = table('t2', column('a'), column('b'))
s = select([t2.c.b]).where(t1.c.a == t2.c.a)
s = s.correlate_except(t2).alias('s')
s2 = select([func.foo(s.c.b)]).as_scalar()
s3 = select([t1], order_by=s2)
self.assert_compile(
s3, "SELECT t1.a FROM t1 ORDER BY "
"(SELECT foo(s.b) AS foo_1 FROM "
"(SELECT t2.b AS b FROM t2 WHERE t1.a = t2.a) AS s)")
def test_multilevel_froms_correlation(self):
# new as of #2748
p = table('parent', column('id'))
c = table('child', column('id'), column('parent_id'), column('pos'))
s = c.select().where(
c.c.parent_id == p.c.id).order_by(
c.c.pos).limit(1)
s = s.correlate(p)
s = exists().select_from(s).where(s.c.id == 1)
s = select([p]).where(s)
self.assert_compile(
s, "SELECT parent.id FROM parent WHERE EXISTS (SELECT * "
"FROM (SELECT child.id AS id, child.parent_id AS parent_id, "
"child.pos AS pos FROM child WHERE child.parent_id = parent.id "
"ORDER BY child.pos LIMIT :param_1) WHERE id = :id_1)")
def test_no_contextless_correlate_except(self):
# new as of #2748
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1]).where(t1.c.x == t2.c.y).\
where(t2.c.y == t3.c.z).correlate_except(t1)
self.assert_compile(
s,
"SELECT t1.x FROM t1, t2, t3 WHERE t1.x = t2.y AND t2.y = t3.z")
def test_multilevel_implicit_correlation_disabled(self):
# test that implicit correlation with multilevel WHERE correlation
# behaves like 0.8.1, 0.7 (i.e. doesn't happen)
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t3.c.z]).where(t3.c.z == s.as_scalar())
s3 = select([t1]).where(t1.c.x == s2.as_scalar())
self.assert_compile(s3,
"SELECT t1.x FROM t1 "
"WHERE t1.x = (SELECT t3.z "
"FROM t3 "
"WHERE t3.z = (SELECT t1.x "
"FROM t1, t2 "
"WHERE t1.x = t2.y))"
)
def test_from_implicit_correlation_disabled(self):
# test that implicit correlation with immediate and
# multilevel FROM clauses behaves like 0.8.1 (i.e. doesn't happen)
t1 = table('t1', column('x'))
t2 = table('t2', column('y'))
t3 = table('t3', column('z'))
s = select([t1.c.x]).where(t1.c.x == t2.c.y)
s2 = select([t2, s])
s3 = select([t1, s2])
self.assert_compile(s3,
"SELECT t1.x, y, x FROM t1, "
"(SELECT t2.y AS y, x FROM t2, "
"(SELECT t1.x AS x FROM t1, t2 WHERE t1.x = t2.y))"
)
class CoercionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = default.DefaultDialect(supports_native_boolean=True)
def _fixture(self):
m = MetaData()
return Table('foo', m,
Column('id', Integer))
bool_table = table('t', column('x', Boolean))
def test_coerce_bool_where(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x"
)
def test_coerce_bool_where_non_native(self):
self.assert_compile(
select([self.bool_table]).where(self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 1",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
self.assert_compile(
select([self.bool_table]).where(~self.bool_table.c.x),
"SELECT t.x FROM t WHERE t.x = 0",
dialect=default.DefaultDialect(supports_native_boolean=False)
)
def test_null_constant(self):
self.assert_compile(_literal_as_text(None), "NULL")
def test_false_constant(self):
self.assert_compile(_literal_as_text(False), "false")
def test_true_constant(self):
self.assert_compile(_literal_as_text(True), "true")
def test_val_and_false(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, False),
"false")
def test_val_and_true_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, True),
"foo.id = :id_1")
def test_val_is_null_coerced(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == None),
"foo.id IS NULL")
def test_val_and_None(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, None),
"foo.id = :id_1 AND NULL")
def test_None_and_val(self):
t = self._fixture()
self.assert_compile(and_(None, t.c.id == 1),
"NULL AND foo.id = :id_1")
def test_None_and_nothing(self):
# current convention is None in and_()
        # returns None. May want
# to revise this at some point.
self.assert_compile(
and_(None), "NULL")
def test_val_and_null(self):
t = self._fixture()
self.assert_compile(and_(t.c.id == 1, null()),
"foo.id = :id_1 AND NULL")
class ResultMapTest(fixtures.TestBase):
"""test the behavior of the 'entry stack' and the determination
when the result_map needs to be populated.
"""
def test_compound_populates(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
stmt = select([t]).union(select([t]))
comp = stmt.compile()
eq_(
comp.result_map,
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
'b': ('b', (t.c.b, 'b', 'b'), t.c.b.type)}
)
def test_compound_not_toplevel_doesnt_populate(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
subq = select([t]).union(select([t]))
stmt = select([t.c.a]).select_from(t.join(subq, t.c.a == subq.c.a))
comp = stmt.compile()
eq_(
comp.result_map,
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)}
)
def test_compound_only_top_populates(self):
t = Table('t', MetaData(), Column('a', Integer), Column('b', Integer))
stmt = select([t.c.a]).union(select([t.c.b]))
comp = stmt.compile()
eq_(
comp.result_map,
{'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type)},
)
def test_label_plus_element(self):
t = Table('t', MetaData(), Column('a', Integer))
l1 = t.c.a.label('bar')
tc = type_coerce(t.c.a, String)
stmt = select([t.c.a, l1, tc])
comp = stmt.compile()
tc_anon_label = comp.result_map['a_1'][1][0]
eq_(
comp.result_map,
{
'a': ('a', (t.c.a, 'a', 'a'), t.c.a.type),
'bar': ('bar', (l1, 'bar'), l1.type),
'a_1': ('%%(%d a)s' % id(tc), (tc_anon_label, 'a_1'), tc.type),
},
)
def test_label_conflict_union(self):
t1 = Table('t1', MetaData(), Column('a', Integer),
Column('b', Integer))
t2 = Table('t2', MetaData(), Column('t1_a', Integer))
union = select([t2]).union(select([t2])).alias()
t1_alias = t1.alias()
stmt = select([t1, t1_alias]).select_from(
t1.join(union, t1.c.a == union.c.t1_a)).apply_labels()
comp = stmt.compile()
eq_(
set(comp.result_map),
set(['t1_1_b', 't1_1_a', 't1_a', 't1_b'])
)
is_(
comp.result_map['t1_a'][1][2], t1.c.a
)
def test_insert_with_select_values(self):
astring = Column('a', String)
aint = Column('a', Integer)
m = MetaData()
Table('t1', m, astring)
t2 = Table('t2', m, aint)
stmt = t2.insert().values(a=select([astring])).returning(aint)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp.result_map,
{'a': ('a', (aint, 'a', 'a'), aint.type)}
)
def test_insert_from_select(self):
astring = Column('a', String)
aint = Column('a', Integer)
m = MetaData()
Table('t1', m, astring)
t2 = Table('t2', m, aint)
stmt = t2.insert().from_select(['a'], select([astring])).\
returning(aint)
comp = stmt.compile(dialect=postgresql.dialect())
eq_(
comp.result_map,
{'a': ('a', (aint, 'a', 'a'), aint.type)}
)
|
py | 1a41c6b0911a0c07a69cb297cc2049922834829f | import math
import torch as th
from torch import nn
from torch.nn import functional as F
from . import torch_util as tu
from gym3.types import Real, TensorType
REAL = Real()
class Encoder(nn.Module):
"""
Takes in seq of observations and outputs sequence of codes
Encoders can be stateful, meaning that you pass in one observation at a
time and update the state, which is a separate object. (This object
doesn't store any state except parameters)
"""
def __init__(self, obtype, codetype):
super().__init__()
self.obtype = obtype
self.codetype = codetype
def initial_state(self, batchsize):
raise NotImplementedError
def empty_state(self):
return None
def stateless_forward(self, obs):
"""
inputs:
obs: array or dict, all with preshape (B, T)
returns:
codes: array or dict, all with preshape (B, T)
"""
code, _state = self(obs, None, self.empty_state())
return code
def forward(self, obs, first, state_in):
"""
inputs:
obs: array or dict, all with preshape (B, T)
first: float array shape (B, T)
state_in: array or dict, all with preshape (B,)
returns:
codes: array or dict
state_out: array or dict
"""
raise NotImplementedError
class CnnBasicBlock(nn.Module):
"""
Residual basic block (without batchnorm), as in ImpalaCNN
Preserves channel number and shape
"""
def __init__(self, inchan, scale=1, batch_norm=False):
super().__init__()
self.inchan = inchan
self.batch_norm = batch_norm
s = math.sqrt(scale)
self.conv0 = tu.NormedConv2d(self.inchan, self.inchan, 3, padding=1, scale=s)
self.conv1 = tu.NormedConv2d(self.inchan, self.inchan, 3, padding=1, scale=s)
if self.batch_norm:
self.bn0 = nn.BatchNorm2d(self.inchan)
self.bn1 = nn.BatchNorm2d(self.inchan)
def residual(self, x):
# inplace should be False for the first relu, so that it does not change the input,
# which will be used for skip connection.
# getattr is for backwards compatibility with loaded models
if getattr(self, "batch_norm", False):
x = self.bn0(x)
x = F.relu(x, inplace=False)
x = self.conv0(x)
if getattr(self, "batch_norm", False):
x = self.bn1(x)
x = F.relu(x, inplace=True)
x = self.conv1(x)
return x
def forward(self, x):
return x + self.residual(x)
class CnnDownStack(nn.Module):
"""
Downsampling stack from Impala CNN
"""
def __init__(self, inchan, nblock, outchan, scale=1, pool=True, **kwargs):
super().__init__()
self.inchan = inchan
self.outchan = outchan
self.pool = pool
self.firstconv = tu.NormedConv2d(inchan, outchan, 3, padding=1)
s = scale / math.sqrt(nblock)
self.blocks = nn.ModuleList(
[CnnBasicBlock(outchan, scale=s, **kwargs) for _ in range(nblock)]
)
def forward(self, x):
x = self.firstconv(x)
if getattr(self, "pool", True):
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
for block in self.blocks:
x = block(x)
return x
def output_shape(self, inshape):
c, h, w = inshape
assert c == self.inchan
if getattr(self, "pool", True):
return (self.outchan, (h + 1) // 2, (w + 1) // 2)
else:
return (self.outchan, h, w)
class ImpalaCNN(nn.Module):
name = "ImpalaCNN" # put it here to preserve pickle compat
def __init__(
self, inshape, chans, outsize, scale_ob, nblock, final_relu=True, **kwargs
):
super().__init__()
self.scale_ob = scale_ob
h, w, c = inshape
curshape = (c, h, w)
s = 1 / math.sqrt(len(chans)) # per stack scale
self.stacks = nn.ModuleList()
for outchan in chans:
stack = CnnDownStack(
curshape[0], nblock=nblock, outchan=outchan, scale=s, **kwargs
)
self.stacks.append(stack)
curshape = stack.output_shape(curshape)
self.dense = tu.NormedLinear(tu.intprod(curshape), outsize, scale=1.4)
self.outsize = outsize
self.final_relu = final_relu
def forward(self, x):
"""
Forward input through model, x should be of shape [B, T, *state_shape]
"""
x = x.to(dtype=th.float32) / self.scale_ob
b, t = x.shape[:-3]
x = x.reshape(b * t, *x.shape[-3:])
x = tu.transpose(x, "bhwc", "bchw")
x = tu.sequential(self.stacks, x, diag_name=self.name)
x = x.reshape(b, t, *x.shape[1:])
x = tu.flatten_image(x)
x = th.relu(x)
x = self.dense(x)
if self.final_relu:
x = th.relu(x)
return x
class ImpalaEncoder(Encoder):
def __init__(
self,
inshape,
outsize=256,
chans=(16, 32, 32),
scale_ob=255.0,
nblock=2,
**kwargs
):
codetype = TensorType(eltype=REAL, shape=(outsize,))
obtype = TensorType(eltype=REAL, shape=inshape)
super().__init__(codetype=codetype, obtype=obtype)
self.cnn = ImpalaCNN(
inshape=inshape,
chans=chans,
scale_ob=scale_ob,
nblock=nblock,
outsize=outsize,
**kwargs
)
def forward(self, x, first, state_in):
x = self.cnn(x)
return x, state_in
def initial_state(self, batchsize):
return tu.zeros(batchsize, 0)
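# --- Illustrative usage sketch (not part of the original module) ---
# The observation shape, batch and time sizes below are made-up example values;
# they just follow the (B, T, H, W, C) convention documented in
# Encoder.stateless_forward and ImpalaCNN.forward above.
if __name__ == "__main__":
    enc = ImpalaEncoder(inshape=(64, 64, 3), outsize=256)
    obs = th.zeros(2, 4, 64, 64, 3)  # dummy frames: batch=2, time=4, 64x64 RGB
    codes = enc.stateless_forward(obs)
    print(codes.shape)  # expected: torch.Size([2, 4, 256])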
|
py | 1a41c73f35636272055ed938c528a6938144b140 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from urlparse import urlparse
import subprocess
import threading
from git_checkout import local_git_parsers
from libs.gitiles.git_repository import GitRepository
import script_util
_CHANGELOG_FORMAT_STRING = ('commit %H%n'
'author %an%n'
'author-mail %ae%n'
'author-time %ad%n%n'
'committer %cn%n'
'committer-mail %ce%n'
'committer-time %cd%n%n'
'--Message start--%n%B%n--Message end--%n')
_CHANGELOGS_FORMAT_STRING = ('**Changelog start**%%n%s' %
_CHANGELOG_FORMAT_STRING)
CHECKOUT_ROOT_DIR = os.path.join(os.path.expanduser('~'), '.local_checkouts')
def ConvertRemoteCommitToLocal(revision):
"""Converts remote commit from gitile to local git checkout revision."""
return 'HEAD' if revision == 'master' else revision
class LocalGitRepository(GitRepository):
"""Represents local checkout of git repository on chromium host.
Note, to automatically check out internal repos which you have access to,
follow the instructions in 'go/internal-repo-checkout-setup'.
"""
lock = threading.Lock()
# Keep track all the updated repos, so every repo only get updated once.
_updated_repos = set()
def __init__(self, repo_url=None):
self._host = None
self._repo_path = None
self._repo_url = repo_url
if repo_url is not None:
parsed_url = urlparse(repo_url)
self._host = parsed_url.netloc
# Remove the / in the front of path.
self._repo_path = parsed_url.path[1:]
self._CloneOrUpdateRepoIfNeeded()
self.changelog_parser = local_git_parsers.GitChangeLogParser()
self.changelogs_parser = local_git_parsers.GitChangeLogsParser()
self.blame_parser = local_git_parsers.GitBlameParser()
self.diff_parser = local_git_parsers.GitDiffParser()
@classmethod
def Factory(cls): # pragma: no cover
"""Construct a factory for creating ``LocalGitRepository`` instances.
Returns:
A function from repo urls to ``LocalGitRepository`` instances. All
instances produced by the returned function are novel (i.e., newly
allocated).
"""
return lambda repo_url: cls(repo_url) # pylint: disable=W0108
@property
def repo_path(self):
return self._repo_path
@property
def real_repo_path(self):
"""Absolute path of the local repository."""
return os.path.join(CHECKOUT_ROOT_DIR, self._host, self.repo_path)
@property
def repo_url(self):
"""Url of remote repository which the local repo checks out from."""
return self._repo_url
def _CloneOrUpdateRepoIfNeeded(self):
"""Clones repo, or update it if it didn't got updated before."""
with LocalGitRepository.lock:
if self.repo_url in LocalGitRepository._updated_repos:
return
# Clone the repo if needed.
if not os.path.exists(self.real_repo_path):
try:
subprocess.check_call(['git', 'clone',
self.repo_url, self.real_repo_path])
except subprocess.CalledProcessError as e: # pragma: no cover.
raise Exception(
'Exception while cloning %s: %s' % (self.repo_url, e))
# Update repo if it's already cloned.
else:
try:
# Disable verbose of cd and git pull.
with open(os.devnull, 'w') as null_handle:
subprocess.check_call(
'cd %s && git pull' % self.real_repo_path,
stdout=null_handle, stderr=null_handle, shell=True)
except subprocess.CalledProcessError as e: # pragma: no cover.
raise Exception(
'Exception while updating %s: %s' % (self.repo_path, e))
LocalGitRepository._updated_repos.add(self.repo_url)
def _GetFinalCommand(self, command, utc=False):
# Change local time to utc time.
if utc:
command = 'TZ=UTC %s --date=format-local:"%s"' % (
command, local_git_parsers.DATETIME_FORMAT)
return 'cd %s && %s' % (self.real_repo_path, command)
def GetChangeLog(self, revision):
"""Returns the change log of the given revision."""
command = ('git log --pretty=format:"%s" --max-count=1 --raw '
'--no-abbrev %s' % (_CHANGELOG_FORMAT_STRING,
ConvertRemoteCommitToLocal(revision)))
output = script_util.GetCommandOutput(self._GetFinalCommand(command, True))
return self.changelog_parser(output, self.repo_url)
def GetChangeLogs(self, start_revision, end_revision): # pylint: disable=W
"""Returns change log list in (start_revision, end_revision]."""
command = ('git log --pretty=format:"%s" --raw --no-abbrev %s' % (
_CHANGELOGS_FORMAT_STRING,
'%s..%s' % (ConvertRemoteCommitToLocal(start_revision),
ConvertRemoteCommitToLocal(end_revision))))
output = script_util.GetCommandOutput(self._GetFinalCommand(command, True))
return self.changelogs_parser(output, self.repo_url)
def GetChangeDiff(self, revision, path=None): # pylint: disable=W
"""Returns the diff of the given revision."""
command = ('git log --format="" --max-count=1 %s' %
ConvertRemoteCommitToLocal(revision))
if path:
command += ' -p %s' % path
output = script_util.GetCommandOutput(self._GetFinalCommand(command))
return self.diff_parser(output)
def GetBlame(self, path, revision):
"""Returns blame of the file at ``path`` of the given revision."""
command = 'git blame --incremental %s -- %s' % (
ConvertRemoteCommitToLocal(revision), path)
output = script_util.GetCommandOutput(self._GetFinalCommand(command))
return self.blame_parser(output, path, revision)
def GetSource(self, path, revision):
"""Returns source code of the file at ``path`` of the given revision."""
# Check whether the requested file exist or not.
command = 'git show %s:%s' % (ConvertRemoteCommitToLocal(revision), path)
output = script_util.GetCommandOutput(self._GetFinalCommand(command))
return output
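# --- Illustrative usage sketch (not part of the original module) ---
# The URL below is a placeholder: point it at any gitiles-hosted repository you
# can actually clone (it gets checked out under ~/.local_checkouts on first use).
#
#   repo = LocalGitRepository('https://chromium.googlesource.com/some/repo')
#   log = repo.GetChangeLog('master')          # 'master' is mapped to local HEAD
#   blame = repo.GetBlame('some/file.cc', 'master')
#   print(log, blame)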
|
py | 1a41c759e6d23c606cb43416c6aca7e269a38e41 | from typing import Any, NamedTuple
class IntegerSequencedRecord(object):
"""
Encapsulates sequenced item tuple (containing real event object).
"""
def __init__(self, sequenced_item: NamedTuple):
self.sequenced_item = sequenced_item
def __getattr__(self, item: str) -> Any:
return getattr(self.sequenced_item, item)
class SnapshotRecord(IntegerSequencedRecord):
pass
class StoredEventRecord(IntegerSequencedRecord):
"""
Encapsulates sequenced item tuple (containing real event object).
Allows other attributes to be set, such as notification ID.
"""
notification_id = None
application_name = None
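# --- Illustrative sketch (not part of the original module) ---
# Any NamedTuple-like object works here, since attribute access is simply
# delegated to the wrapped item; the field names below are made-up examples.
if __name__ == '__main__':
    from collections import namedtuple
    SequencedItem = namedtuple('SequencedItem', 'originator_id originator_version topic state')
    record = StoredEventRecord(SequencedItem('entity-1', 0, 'example.Topic', b'{}'))
    print(record.originator_id, record.originator_version)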
|
py | 1a41c7c4fe5feac639afb932342e061363d877c2 | from tornado.web import RequestHandler
from swampdragon.default_settings import SwampDragonSettings
from django.conf import settings as django_settings
from .same_origin import set_origin_cookie
def get_host():
host = django_settings.DRAGON_URL
if host.endswith('/'):
return host[:-1]
return host
class SettingsHandler(RequestHandler):
def set_default_headers(self):
self.set_header("Content-Type", "application/javascript")
set_origin_cookie(self)
def get(self, *args, **kwargs):
data = '''window.swampdragon_settings = {settings};
window.swampdragon_host = "{host}";
'''.format(**{
'settings': SwampDragonSettings().to_dict(),
'host': get_host()
})
self.write(data)
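# --- Illustrative wiring sketch (not part of the original module) ---
# SettingsHandler is a plain Tornado RequestHandler, so it can be mounted like
# any other handler; the route below is a hypothetical example, not the URL
# swampdragon actually registers.
#
#   import tornado.web
#   application = tornado.web.Application([(r'/settings.js', SettingsHandler)])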
|
py | 1a41cd3e8f359093e8a476b50575d9bea3355f9e | import os
import ycm_core
from clang_helpers import PrepareClangFlags
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# These are the compilation flags that will be used in case there's no
# compilation database set.
flags = [
'-Wall',
'-std=c++11',
'-stdlib=libc++',
'-x',
'c++',
'-I',
'.',
'-isystem',
'/usr/lib/c++/v1'
]
if compilation_database_folder:
database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
database = None
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return flags
new_flags = []
make_next_absolute = False
path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag):]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def FlagsForFile(filename):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = database.GetCompilationInfoForFile(filename)
final_flags = PrepareClangFlags(
MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_),
filename)
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {
'flags': final_flags,
'do_cache': True}
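# --- Illustrative check (not part of the original config) ---
# YCM calls FlagsForFile() itself; invoking it directly like this only prints
# the final flag dictionary for a hypothetical source file name.
if __name__ == '__main__':
    print(FlagsForFile(os.path.join(DirectoryOfThisScript(), 'example.cc')))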
|
py | 1a41cde10b8de2efef45f5acda90ad3f214c2f82 | from itertools import count
import pytest
import numpy as np
import astropy.units as u
from astropy._erfa import DJM0
from astropy.time import Time, TimeFormat
from astropy.time.utils import day_frac
class SpecificException(ValueError):
pass
@pytest.fixture
def custom_format_name():
for i in count():
if not i:
            custom = "custom_format_name"
else:
custom = f"custom_format_name_{i}"
if custom not in Time.FORMATS:
break
yield custom
Time.FORMATS.pop(custom, None)
def test_custom_time_format_set_jds_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_val_type_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_value_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
raise SpecificException
t = Time.now()
with pytest.raises(SpecificException):
getattr(t, custom_format_name)
def test_custom_time_format_fine(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
return self.jd1 + self.jd2
t = Time.now()
getattr(t, custom_format_name)
t2 = Time(7, 9, format=custom_format_name)
getattr(t2, custom_format_name)
def test_custom_time_format_forgot_property(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
def value(self):
return self.jd1, self.jd2
t = Time.now()
with pytest.raises(AttributeError):
getattr(t, custom_format_name)
t.format = custom_format_name
with pytest.raises(AttributeError):
t.value
with pytest.raises(AttributeError):
Time(7, 9, format=custom_format_name).value
def test_custom_time_format_problematic_name():
assert "sort" not in Time.FORMATS, "problematic name in default FORMATS!"
assert hasattr(Time, "sort")
try:
class Custom(TimeFormat):
name = "sort"
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
return self.jd1, self.jd2
t = Time.now()
assert t.sort() == t, "bogus time format clobbers everyone's Time objects"
t.format = "sort"
if not isinstance(t.value, tuple):
pytest.xfail("No good way to detect that `sort` is invalid")
assert Time(7, 9, format="sort").value == (7, 9)
finally:
Time.FORMATS.pop("sort", None)
def test_mjd_longdouble_preserves_precision(custom_format_name):
class CustomMJD(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
val = np.longdouble(val)
if val2 is not None:
raise ValueError("Only one value permitted")
return val, 0
def set_jds(self, val, val2):
mjd1 = np.float64(np.floor(val))
mjd2 = np.float64(val - mjd1)
self.jd1, self.jd2 = day_frac(mjd1 + DJM0, mjd2)
@property
def value(self):
mjd1, mjd2 = day_frac(self.jd1 - DJM0, self.jd2)
return np.longdouble(mjd1) + np.longdouble(mjd2)
m = 58000.0
t = Time(m, format=custom_format_name)
t2 = Time(m + 2 * m * np.finfo(np.longdouble).eps, format=custom_format_name)
assert t != t2
assert isinstance(getattr(t, custom_format_name), np.longdouble)
assert getattr(t, custom_format_name) != getattr(t2, custom_format_name)
def test_mjd_unit_validation():
with pytest.raises(u.UnitConversionError):
Time(58000 * u.m, format="mjd")
def test_mjd_unit_conversion():
assert Time(58000 * u.day, format="mjd") == Time(58000 * u.day, format="mjd")
assert Time(58000 * u.day, format="mjd") != Time(58000 * u.s, format="mjd")
assert Time(58000 * u.day, format="mjd") == Time(58000 * 86400 * u.s, format="mjd")
@pytest.mark.parametrize("f", ["mjd", "unix", "cxcsec"])
def test_existing_types_refuse_longdoubles(f):
t = np.longdouble(getattr(Time(58000, format="mjd"), f))
t2 = t + np.finfo(np.longdouble).eps * 2 * t
try:
tm = Time(np.longdouble(t), format=f)
except ValueError:
# Time processing makes it always ValueError not TypeError
return
else:
# accepts long doubles, better preserve accuracy!
assert Time(np.longdouble(t2), format=f) != tm
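# --- Illustrative standalone sketch (not part of the original test module) ---
# Minimal version of what the fixtures above exercise: subclassing TimeFormat
# with a ``name`` registers the format, after which Time(..., format=name) and
# the matching attribute both work. The format name here is made up.
if __name__ == "__main__":
    class JdSumDemo(TimeFormat):
        name = "jd_sum_demo"
        def set_jds(self, val, val2):
            self.jd1, self.jd2 = val, val2
        @property
        def value(self):
            return self.jd1 + self.jd2
    try:
        t = Time(7.0, 2.0, format="jd_sum_demo")
        print(getattr(t, "jd_sum_demo"))  # 9.0
    finally:
        Time.FORMATS.pop("jd_sum_demo", None)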
|
py | 1a41ce9720df2ee824e158db368c0138f5d23f26 | from __future__ import division, absolute_import
import os
import math
from numpy.testing import *
from numpy import array
import util
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
class TestSizeSumExample(util.F2PyTest):
sources = [_path('src', 'size', 'foo.f90'),
]
@dec.slow
def test_all(self):
r = self.module.foo([[1,2]])
        assert_equal(r, [3], repr(r))
        r = self.module.foo([[1,2],[3,4]])
        assert_equal(r, [3,7], repr(r))
        r = self.module.foo([[1,2],[3,4],[5,6]])
        assert_equal(r, [3,7,11], repr(r))
    @dec.slow
    def test_transpose(self):
        r = self.module.trans([[1,2]])
        assert_equal(r, [[1],[2]], repr(r))
        r = self.module.trans([[1,2,3],[4,5,6]])
        assert_equal(r, [[1,4],[2,5],[3,6]], repr(r))
    @dec.slow
    def test_flatten(self):
        r = self.module.flatten([[1,2]])
        assert_equal(r, [1,2], repr(r))
        r = self.module.flatten([[1,2,3],[4,5,6]])
        assert_equal(r, [1,2,3,4,5,6], repr(r))
if __name__ == "__main__":
import nose
nose.runmodule()
|
py | 1a41ceeb1b64848631bf6a28c2e46604348d7ade | # -*- coding: utf-8 -*-
import os
import sys
import argparse
from evaluate import evaluate_beam_search
import logging
import numpy as np
import config
import utils
import torch
import torch.nn as nn
from torch import cuda
from beam_search import SequenceGenerator
from train import load_data_vocab, init_model, init_optimizer_criterion
from utils import Progbar, plot_learning_curve
import pykp
from pykp.io import KeyphraseDatasetTorchText
__author__ = "Rui Meng"
__email__ = "[email protected]"
def main():
# load settings for training
parser = argparse.ArgumentParser(
description='predict.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
config.preprocess_opts(parser)
config.model_opts(parser)
config.train_opts(parser)
config.predict_opts(parser)
opt = parser.parse_args()
if opt.seed > 0:
torch.manual_seed(opt.seed)
print(opt.gpuid)
if torch.cuda.is_available() and not opt.gpuid:
opt.gpuid = 0
opt.exp = 'predict.' + opt.exp
if hasattr(opt, 'copy_model') and opt.copy_model:
opt.exp += '.copy'
if hasattr(opt, 'bidirectional'):
if opt.bidirectional:
opt.exp += '.bi-directional'
else:
opt.exp += '.uni-directional'
# fill time into the name
if opt.exp_path.find('%s') > 0:
opt.exp_path = opt.exp_path % (opt.exp, opt.timemark)
opt.pred_path = opt.pred_path % (opt.exp, opt.timemark)
if not os.path.exists(opt.exp_path):
os.makedirs(opt.exp_path)
if not os.path.exists(opt.pred_path):
os.makedirs(opt.pred_path)
logging = config.init_logging('train', opt.exp_path + '/output.log')
logging.info('Parameters:')
[logging.info('%s : %s' % (k, str(v))) for k, v in opt.__dict__.items()]
try:
train_data_loader, valid_data_loader, test_data_loader, word2id, id2word, vocab = load_data_vocab(opt, load_train=False)
model = init_model(opt)
# optimizer, criterion = init_optimizer_criterion(model, opt)
generator = SequenceGenerator(model,
eos_id=opt.word2id[pykp.io.EOS_WORD],
beam_size=opt.beam_size,
max_sequence_length=opt.max_sent_length
)
# import time
# start_time = time.time()
evaluate_beam_search(generator, test_data_loader, opt, title='predict', save_path=opt.pred_path + '/[epoch=%d,batch=%d,total_batch=%d]test_result.csv' % (0, 0, 0))
# print("--- %s seconds --- Complete Beam Search" % (time.time() - start_time))
# predict_greedy(model, test_data_loader, test_examples, opt)
except Exception as e:
logging.exception("message")
if __name__ == '__main__':
main()
|
py | 1a41cf64d135cc62054a9a45ad57d98b68027f84 |
from flask import render_template
from . import main
@main.app_errorhandler(404)
def four_Ow_four(error):
'''
Function to render the 404 error page
'''
return render_template('404.html'),404 |
py | 1a41d039d5cdda170328c3846341db50faea1d56 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cpuinfo(CMakePackage):
"""cpuinfo is a library to detect essential
for performance optimization information about host CPU."""
homepage = "https://github.com/Maratyszcza/cpuinfo/"
git = "https://github.com/Maratyszcza/cpuinfo.git"
version('master')
|
py | 1a41d075b4744fa91fb1e5531ef157d206b99cdc | import logging
import re
import feedparser
from requests.auth import AuthBase
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
log = logging.getLogger('apple_trailers')
class AppleTrailers:
"""
Adds support for Apple.com movie trailers.
Configuration:
quality: Set the desired resolution - 480p, 720p or 1080p. default '720p'
genres: List of genres used to filter the entries. If set, the
trailer must match at least one listed genre to be accepted. Genres
that can be used: Action and Adventure, Comedy, Documentary, Drama,
Family, Fantasy, Foreign, Horror, Musical, Romance, Science Fiction,
Thriller. default '' (all)
apple_trailers:
quality: 720p
genres: ['Action and Adventure']
Alternatively, a simpler configuration format can be used. This uses
the default genre filter, all:
apple_trailers: 720p
This plugin adds the following fields to the entry:
movie_name, movie_year, genres, apple_trailers_name, movie_studio
movie_name: Name of the movie
movie_year: Year the movie was/will be released
genres: Comma-separated list of genres that apply to the movie
apple_trailers_name: Contains the Apple-supplied name of the clip,
such as 'Clip 2', 'Trailer', 'Winter Olympic Preview'
movie_studio: Name of the studio that makes the movie
"""
movie_data_url = 'http://trailers.apple.com/trailers/feeds/data/'
rss_url = 'http://trailers.apple.com/trailers/home/rss/newtrailers.rss'
qualities = {'480p': 'sd', '720p': 'hd720', '1080p': 'hd1080'}
schema = {
'oneOf': [
{
'type': 'object',
'properties': {
'quality': {
'type': 'string',
'enum': list(qualities.keys()),
'default': '720p',
},
'genres': {'type': 'array', 'items': {'type': 'string'}},
},
'additionalProperties': False,
},
{'title': 'justquality', 'type': 'string', 'enum': list(qualities.keys())},
]
}
def broken(self, error_message):
raise plugin.PluginError('Plugin is most likely broken. Got: %s' % error_message)
@plugin.priority(127)
@cached('apple_trailers')
def on_task_input(self, task, config):
# Turn simple config into full config
if isinstance(config, str):
config = {'quality': config}
try:
r = task.requests.get(self.rss_url)
except RequestException as e:
raise plugin.PluginError('Retrieving Apple Trailers RSS feed failed: %s' % e)
rss = feedparser.parse(r.content)
if rss.get('bozo_exception', False):
raise plugin.PluginError('Got bozo_exception (bad feed)')
filmid_regex = re.compile(r'(FilmId\s*\=\s*\')(\d+)(?=\')')
studio_regex = re.compile(r'(?:[0-9]*\s*)(.+)')
# use the following dict to save json object in case multiple trailers have been released for the same movie
# no need to do multiple requests for the same thing!
trailers = {}
entries = []
for item in rss.entries:
entry = Entry()
movie_url = item['link']
entry['title'] = item['title']
entry['movie_name'], entry['apple_trailers_name'] = entry['title'].split(' - ', 1)
if not trailers.get(movie_url):
try:
movie_page = task.requests.get(movie_url).text
match = filmid_regex.search(movie_page)
if match:
json_url = self.movie_data_url + match.group(2) + '.json'
movie_data = task.requests.get(json_url).json()
trailers[movie_url] = {'json_url': json_url, 'json': movie_data}
else:
self.broken('FilmId not found for {0}'.format(entry['movie_name']))
except RequestException as e:
log.error('Failed to get trailer %s: %s', entry['title'], e.args[0])
continue
else:
movie_data = trailers[movie_url]['json']
genres = {genre.get('name') for genre in movie_data.get('details').get('genres')}
config_genres = set(config.get('genres', []))
if genres and config_genres and not set.intersection(config_genres, genres):
log.debug('Config genre(s) do not match movie genre(s)')
continue
desired_quality = config['quality']
# find the trailer url
for clip in movie_data.get('clips'):
if clip.get('title') == entry['apple_trailers_name']:
try:
trailer_url = clip['versions']['enus']['sizes'][
self.qualities[desired_quality]
]
src = trailer_url.get('src')
src_alt = trailer_url.get('srcAlt')
# .mov tends to be a streaming video file, but the real video file is the same url, but
# they prepend 'h' to the quality
if src.split('.')[-1] == 'mov':
entry['url'] = src.replace(desired_quality, 'h' + desired_quality)
elif src_alt.split('.')[-1] == 'mov':
entry['url'] = src_alt.replace(desired_quality, 'h' + desired_quality)
else:
continue # just continue until we reach the else part of the for-else
break
except KeyError as e:
self.broken(e.args[0])
else:
log.error('Trailer "%s" not found', entry['apple_trailers_name'])
continue
# set some entry fields if present
# studio is usually also the copyright holder
studio = studio_regex.match(movie_data.get('page').get('copyright'))
if studio:
entry['movie_studio'] = studio.group(1)
release_date = movie_data.get('page').get('release_date')
if release_date:
entry['release_date'] = release_date
if genres:
entry['genres'] = ', '.join(list(genres))
# set the correct header without modifying the task.requests obj
entry['download_auth'] = AppleTrailersHeader()
entries.append(entry)
return entries
class AppleTrailersHeader(AuthBase):
def __call__(self, request):
request.headers['User-Agent'] = 'QuickTime/7.7'
return request
@event('plugin.register')
def register_plugin():
plugin.register(AppleTrailers, 'apple_trailers', api_ver=2)
|
py | 1a41d0c1895a960dc073fa712b1516f77e86a600 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains constant definitons used by tpRigToolkit-tools-{{cookiecutter.tool_name}}
"""
from __future__ import print_function, division, absolute_import
TOOL_ID = 'tpRigToolkit-tools-{{cookiecutter.tool_name}}'
|
py | 1a41d152e69535cd6caca0f552dbd46c0f1f4615 | import os
import torch
import faiss
from argparse import ArgumentParser
from tqdm import tqdm
from typing import List
from collections import defaultdict
def load_rerank_f(fname):
if not fname:
return None
f = open(fname)
ret = defaultdict(set)
for line in f:
line = line.strip().split()
ret[int(line[0])].add(int(line[1]))
return ret
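# Assumed --rerank_pairs file layout (one whitespace-separated "qid docid" pair
# per line), for example:
#
#   101 2043
#   101 881
#   207 15
#
# load_rerank_f() turns that into {101: {2043, 881}, 207: {15}}, and main()
# below boosts the scores of those documents for their query before re-sorting.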
def main():
parser = ArgumentParser()
parser.add_argument('--score_dir', required=True)
parser.add_argument('--query_lookup', required=True)
parser.add_argument('--depth', type=int, required=True)
parser.add_argument('--num_query', type=int)
parser.add_argument('--save_ranking_to', required=True)
parser.add_argument('--marco_document', action='store_true')
parser.add_argument("--rerank_pairs", default=None)
args = parser.parse_args()
rerank_dic = load_rerank_f(args.rerank_pairs)
if args.num_query:
rh = faiss.ResultHeap(args.num_query, args.depth)
else:
        print("Inferring number of queries from first input")
rh = None
partitions = os.listdir(args.score_dir)
pbar = tqdm(partitions)
for part_name in pbar:
pbar.set_description_str(f'Processing {part_name}')
scores, indices = torch.load(
os.path.join(args.score_dir, part_name)
)
if rh is None:
print(f'Initializing Heap. Assuming {scores.shape[0]} queries.')
rh = faiss.ResultHeap(scores.shape[0], args.depth)
rh.add_result(-scores.numpy(), indices.numpy())
rh.finalize()
corpus_scores, corpus_indices = (-rh.D).tolist(), rh.I.tolist()
q_lookup: List[str] = torch.load(args.query_lookup).tolist()
os.makedirs(os.path.split(args.save_ranking_to)[0], exist_ok=True)
with open(args.save_ranking_to, 'w') as f:
for qid, q_doc_scores, q_doc_indices in zip(q_lookup, corpus_scores, corpus_indices):
_last = None
score_list = [(s, idx) for s, idx in zip(q_doc_scores, q_doc_indices)]
if rerank_dic:
new_l = []
for tp in score_list:
if tp[1] in rerank_dic[qid]:
new_l.append((tp[0]+100000.0, tp[1]))
else:
new_l.append((tp[0], tp[1]))
score_list = new_l
score_list = sorted(score_list, key=lambda x: x[0], reverse=True)
for s, idx in score_list:
if args.marco_document:
_idx = f'D{idx}'
else:
_idx = idx
f.write(f'{qid}\t{_idx}\t{s}\n')
if __name__ == '__main__':
main()
|
py | 1a41d1f18b11b86e87cf2136cfae8db483c4d18c | c = int(input('Digite um número de 1 a 100:'))
n = 0
for d in range(1, 101):
if c % d == 0:
n = n + 1
if n == 2:
    print("It is a prime number")
else:
    print("It is not a prime number") |
py | 1a41d1ffae5c66dc9eb7e739dd19b4f33b861b76 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
# element-wise losses
loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_binary_labels(labels, label_weights, label_channels):
# Caution: this function should only be used in RPN
# in other files such as in ghm_loss, the _expand_binary_labels
# is used for multi-class classification.
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1, as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
if pred.dim() == label.dim() and pred.shape[-1] != label.shape[-1] and label.dtype == torch.long:
num_class = pred.shape[-1]
onehot = torch.nn.functional.one_hot(label, num_classes=num_class + 1)
# import pdb; pdb.set_trace()
onehot = onehot.sum(dim=1)[..., :-1] # remove background/no-attr class
label = onehot
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), weight=class_weight, reduction='none')
# do the reduction for the weighted loss
# if label.shape[-1] > 10:
# import pdb; pdb.set_trace()
if loss.dim() == 2 and loss.shape[-1] > 1:
loss = loss.mean(dim=-1)
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the mask's
            corresponding object. This will be used to select the mask of the
            class which the object belongs to when the mask prediction is not
            class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
loss_weight=1.0):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Defaults to 'mean'. Options are "none", "mean" and "sum".
class_weight (list[float], optional): Weight of each class.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
|
py | 1a41d2112e579b449fc47301d00b17852994dafa | import shapely.geometry
import numpy as np
import fiona.crs
import pyproj
from shapely.geometry.point import Point
UTM_ZONE30 = pyproj.Proj(
proj='utm',
zone=30,
datum='WGS84',
units='m',
errcheck=True)
schema = {'geometry': 'LineString', 'properties': {'PhysID': 'int'}}
crs = fiona.crs.from_string(UTM_ZONE30.srs)
x0, y0, x1, y1 = 0, 0, 640, 320
features = \
[shapely.geometry.LineString([(x0, y0), (x1, y0)]),
shapely.geometry.LineString([(x1, y0), (x1, y1)]),
shapely.geometry.LineString([(x1, y1), (x0, y1)]),
shapely.geometry.LineString([(x0, y1), (x0, y0)])]
with fiona.collection("outline_2.shp", "w", "ESRI Shapefile", schema, crs=crs) as output:
for i in range(len(features)):
output.write({'geometry': shapely.geometry.mapping(features[i]), 'properties': {'PhysID': i}})
# Array coordinates
array_list = np.zeros((7, 2))
array_1 = np.arange(64, 320, 64)
array_2 = np.arange(64 + 32, 320-64, 64)
array_list[0:4, 0] = 640 / 3
array_list[4:, 0] = 640 / 3 + 64
array_list[0:4, 1] = array_1
array_list[4:, 1] = array_2
np.save("Turbine_coords.npy", array_list)
features2 = []
for x, y in array_list:
p = Point(x, y)
circle = shapely.geometry.LineString(list(p.buffer(10).exterior.coords))
features2.append(circle)
with fiona.collection("turbine_circles.shp", "w", "ESRI Shapefile", schema, crs=crs) as output:
for i in range(len(features2)):
output.write({'geometry': shapely.geometry.mapping(features2[i]), 'properties': {'PhysID': 100}})
|
py | 1a41d27104b1c1499dca45097551e038e9e5a1d8 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections import OrderedDict
# External imports
import bs4
from jinja2 import Template
from mock import patch
# Bokeh imports
import bokeh.resources as resources
import bokeh.util.version as buv
from bokeh.document import Document
from bokeh.embed.util import RenderRoot, standalone_docs_json
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.resources import CDN, CSSResources, JSResources
# Module under test
import bokeh.embed.standalone as bes # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
"bokeh._testing.plugins.selenium",
)
def stable_id():
return 'ID'
@pytest.fixture
def test_plot() -> None:
from bokeh.plotting import figure
test_plot = figure(title="'foo'")
test_plot.circle([1, 2], [2, 3])
return test_plot
@pytest.fixture
def test_plot_and_widget() -> None:
from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.models import Div
test_plot = figure(title="'foo'")
test_plot.circle([1, 2], [2, 3])
return column(Div(text="foo"), test_plot)
PAGE = Template("""
<!DOCTYPE html>
<html lang="en">
<head>
</head>
<body>
<script>
{{js}}
</script>
{{tag}}
</body>
""")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_autoload_static(object):
def test_return_type(self, test_plot) -> None:
r = bes.autoload_static(test_plot, CDN, "some/path")
assert len(r) == 2
def test_script_attrs(self, test_plot) -> None:
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
html = bs4.BeautifulSoup(tag, "html.parser")
scripts = html.findAll(name='script')
assert "bokeh-widgets" not in js
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set(['src', 'id'])
assert attrs['src'] == 'some/path'
def test_script_attrs_with_widgets(self, test_plot_and_widget) -> None:
js, tag = bes.autoload_static(test_plot_and_widget, CDN, "some/path")
html = bs4.BeautifulSoup(tag, "html.parser")
scripts = html.findAll(name='script')
assert "bokeh-widgets" in js
assert len(scripts) == 1
attrs = scripts[0].attrs
assert set(attrs) == set(['src', 'id'])
assert attrs['src'] == 'some/path'
@pytest.mark.parametrize("version", ["1.4.0rc1", "2.0.0dev3"])
@pytest.mark.selenium
def test_js_dev_cdn(self, version, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", "1.4.0rc1")
monkeypatch.setattr(resources, "__version__", "1.4.0rc1")
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
@pytest.mark.selenium
def test_js_release_cdn(self, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == "anonymous"
assert script.get_attribute("integrity").startswith("sha384-")
@pytest.mark.selenium
def test_js_release_cdn_with_widgets(self, monkeypatch, driver, test_file_path_and_url, test_plot_and_widget) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
js, tag = bes.autoload_static(test_plot_and_widget, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 2 # 2 to include widgets bundle
for script in scripts:
assert script.get_attribute("crossorigin") == "anonymous"
assert script.get_attribute("integrity").startswith("sha384-")
@pytest.mark.selenium
def test_js_release_dev_cdn(self, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0-foo")
monkeypatch.setattr(resources, "__version__", "2.0.0-foo")
js, tag = bes.autoload_static(test_plot, CDN, "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
for x in scripts:
print(x.get_attribute("src"))
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == "anonymous"
assert script.get_attribute("integrity").startswith("sha384-")
@pytest.mark.selenium
def test_js_release_server(self, monkeypatch, driver, test_file_path_and_url, test_plot) -> None:
monkeypatch.setattr(buv, "__version__", "2.0.0")
monkeypatch.setattr(resources, "__version__", "2.0.0")
js, tag = bes.autoload_static(test_plot, resources.Resources(mode="server"), "some/path")
page = PAGE.render(js=js, tag=tag)
path, url = test_file_path_and_url
with open(path, "w") as f:
f.write(page)
driver.get(url)
scripts = driver.find_elements_by_css_selector('head script')
assert len(scripts) == 1
for script in scripts:
assert script.get_attribute("crossorigin") == None
assert script.get_attribute("integrity") == ""
class Test_components(object):
def test_return_type(self) -> None:
plot1 = figure()
plot1.circle([], [])
plot2 = figure()
plot2.circle([], [])
# This is a testing artefact, users dont' have to do this in practice
curdoc().add_root(plot1)
curdoc().add_root(plot2)
r = bes.components(plot1)
assert len(r) == 2
_, divs = bes.components((plot1, plot2))
assert isinstance(divs, tuple)
_, divs = bes.components([plot1, plot2])
assert isinstance(divs, tuple)
_, divs = bes.components({"Plot 1": plot1, "Plot 2": plot2})
assert isinstance(divs, dict)
assert all(isinstance(x, str) for x in divs.keys())
_, divs = bes.components(OrderedDict([("Plot 1", plot1), ("Plot 2", plot2)]))
assert isinstance(divs, OrderedDict)
assert all(isinstance(x, str) for x in divs.keys())
@patch('bokeh.embed.util.make_globally_unique_id', new_callable=lambda: stable_id)
def test_plot_dict_returned_when_wrap_plot_info_is_false(self, mock_make_id) -> None:
doc = Document()
plot1 = figure()
plot1.circle([], [])
doc.add_root(plot1)
plot2 = figure()
plot2.circle([], [])
doc.add_root(plot2)
expected_plotdict_1 = RenderRoot(elementid="ID", id="ID")
expected_plotdict_2 = RenderRoot(elementid="ID", id="ID")
_, plotdict = bes.components(plot1, wrap_plot_info=False)
assert plotdict == expected_plotdict_1
_, plotids = bes.components((plot1, plot2), wrap_plot_info=False)
assert plotids == (expected_plotdict_1, expected_plotdict_2)
_, plotiddict = bes.components({'p1': plot1, 'p2': plot2}, wrap_plot_info=False)
assert plotiddict == {'p1': expected_plotdict_1, 'p2': expected_plotdict_2}
def test_result_attrs(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.findAll(name='script')
assert len(scripts) == 1
assert scripts[0].attrs == {'type': 'text/javascript'}
@patch('bokeh.embed.util.make_globally_unique_id', new=stable_id)
def test_div_attrs(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(div, "html.parser")
divs = html.findAll(name='div')
assert len(divs) == 1
div = divs[0]
assert set(div.attrs) == set(['class', 'id', 'data-root-id'])
assert div.attrs['class'] == ['bk-root']
assert div.attrs['id'] == 'ID'
assert div.attrs['data-root-id'] == test_plot.id
assert div.string is None
def test_script_is_utf8_encoded(self, test_plot) -> None:
script, div = bes.components(test_plot)
assert isinstance(script, str)
def test_quoting(self, test_plot) -> None:
script, div = bes.components(test_plot)
        assert "&quot;" not in script
        assert "'foo'" not in script
        assert "&#x27;foo&#x27;" in script
def test_output_is_without_script_tag_when_wrap_script_is_false(self, test_plot) -> None:
script, div = bes.components(test_plot)
html = bs4.BeautifulSoup(script, "html.parser")
scripts = html.findAll(name='script')
assert len(scripts) == 1
# XXX: this needs to account for indentation
#script_content = scripts[0].getText()
#rawscript, div = bes.components(test_plot, wrap_script=False)
#self.maxDiff = None
#assert rawscript.strip() == script_content.strip()
class Test_file_html(object):
def test_return_type(self, test_plot) -> None:
class fake_template:
def __init__(self, tester, user_template_variables=None):
self.tester = tester
self.template_variables = {
"title",
"bokeh_js",
"bokeh_css",
"plot_script",
"doc",
"docs",
"base",
}
if user_template_variables is not None:
self.template_variables.update(user_template_variables)
def render(self, template_variables):
assert self.template_variables.issubset(set(template_variables.keys()))
return "template result"
r = bes.file_html(test_plot, CDN, "title")
assert isinstance(r, str)
r = bes.file_html(test_plot, CDN, "title", fake_template(self))
assert isinstance(r, str)
r = bes.file_html(test_plot, CDN, "title",
fake_template(self, {"test_var"}),
{"test_var": "test"})
assert isinstance(r, str)
@patch('bokeh.embed.bundle.warn')
def test_file_html_handles_js_only_resources(self, mock_warn, test_plot) -> None:
js_resources = JSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_js }}</head><body></body>")
output = bes.file_html(test_plot, (js_resources, None), "title", template=template)
html = "<head>%s</head><body></body>" % js_resources.render_js()
assert output == html
@patch('bokeh.embed.bundle.warn')
def test_file_html_provides_warning_if_no_css(self, mock_warn, test_plot) -> None:
js_resources = JSResources()
bes.file_html(test_plot, (js_resources, None), "title")
mock_warn.assert_called_once_with(
'No Bokeh CSS Resources provided to template. If required you will need to provide them manually.'
)
@patch('bokeh.embed.bundle.warn')
def test_file_html_handles_css_only_resources(self, mock_warn, test_plot) -> None:
css_resources = CSSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_css }}</head><body></body>")
output = bes.file_html(test_plot, (None, css_resources), "title", template=template)
html = "<head>%s</head><body></body>" % css_resources.render_css()
assert output == html
@patch('bokeh.embed.bundle.warn')
def test_file_html_provides_warning_if_no_js(self, mock_warn, test_plot) -> None:
css_resources = CSSResources()
bes.file_html(test_plot, (None, css_resources), "title")
mock_warn.assert_called_once_with(
'No Bokeh JS Resources provided to template. If required you will need to provide them manually.'
)
def test_file_html_title_is_escaped(self, test_plot) -> None:
r = bes.file_html(test_plot, CDN, "&<")
        assert "<title>&amp;&lt;</title>" in r
def test_entire_doc_is_not_used(self) -> None:
from bokeh.document import Document
from bokeh.models import Button
fig = figure()
fig.x([0], [0])
button = Button(label="Button")
d = Document()
d.add_root(fig)
d.add_root(button)
out = bes.file_html([fig], CDN)
# this is a very coarse test but it will do
assert "bokeh-widgets" not in out
class Test_json_item(object):
def test_with_target_id(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
assert out['target_id'] == "foo"
def test_without_target_id(self, test_plot) -> None:
out = bes.json_item(test_plot)
assert out['target_id'] == None
def test_doc_json(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
expected = list(standalone_docs_json([test_plot]).values())[0]
assert out['doc'] == expected
def test_doc_title(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
assert out['doc']['title'] == ""
def test_root_id(self, test_plot) -> None:
out = bes.json_item(test_plot, target="foo")
assert out['doc']['roots']['root_ids'][0] == out['root_id']
@patch('bokeh.embed.standalone.OutputDocumentFor')
def test_apply_theme(self, mock_OFD, test_plot) -> None:
# the subsequent call inside ODF will fail since the model was never
# added to a document. Ignoring that since we just want to make sure
# ODF is called with the expected theme arg.
try:
bes.json_item(test_plot, theme="foo")
except ValueError:
pass
mock_OFD.assert_called_once_with([test_plot], apply_theme="foo")
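    # For reference (not part of the original tests): the dict returned by
    # bes.json_item is meant to be JSON-serialized and handed to BokehJS in the
    # browser, e.g. (identifiers here are illustrative):
    #     item = json.dumps(bes.json_item(plot, target="myplot"))
    #     # ...then in JS: Bokeh.embed.embed_item(JSON.parse(item))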
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
class Test__title_from_models(object):
pass
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
py | 1a41d272030231757f44dcf9a0ee50c6d79be54d | from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Delete the application 1 and 3.
APPLICATION_ID = [1, 3]
try:
res = voxapi.del_application(application_id=APPLICATION_ID)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
|
py | 1a41d29af6fd8c7b20c29750d28f9cb00e00afd5 | """Simple example of inserting a variable value into a template."""
from viewdom import html
from viewdom import render
def main() -> str:
"""Main entry point."""
name = "viewdom"
result = render(html("<div>Hello {name}</div>"))
return result
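# A small usage sketch (not part of the original example): running the module
# directly prints the rendered markup; the expected output, per the docstring's
# intent, is something like "<div>Hello viewdom</div>".
if __name__ == "__main__":
    print(main())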
|
py | 1a41d4635de2f81e42b03b441839ef503f4fc627 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: possibility.py
# Purpose: music21 class to define rule checking methods for a possibility
# represented as a tuple.
# Authors: Jose Cabal-Ugaz
#
# Copyright: Copyright © 2011 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
A possibility is a tuple with pitches, and is intended to encapsulate a possible
solution to a :class:`~music21.figuredBass.segment.Segment`. Unlike a :class:`~music21.chord.Chord`,
the ordering of a possibility does matter. The assumption throughout fbRealizer
is that a possibility is always in order from highest part to lowest part, and
the last element of each possibility is the bass.
.. note:: fbRealizer supports voice crossing, so the order of pitches from lowest
to highest may not correspond to the ordering of parts.
Here, a possibility is created. G5 is in the highest part, and C4 is the bass. The highest
part contains the highest Pitch, and the lowest part contains the lowest Pitch. No voice
crossing is present.
>>> from music21 import pitch
>>> G5 = pitch.Pitch('G5')
>>> C5 = pitch.Pitch('C5')
>>> E4 = pitch.Pitch('E4')
>>> C4 = pitch.Pitch('C4')
>>> p1 = (G5, C5, E4, C4)
Here, another possibility is created with the same pitches, but this time, with voice crossing present.
C5 is in the highest part, but the highest Pitch G5 is in the second highest part.
>>> p2 = (C5, G5, E4, C4)
The methods in this module are applied to possibilities, and fall into three main categories:
1) Single Possibility Methods. These methods are applied in finding correct possibilities in
:meth:`~music21.figuredBass.segment.Segment.allCorrectSinglePossibilities`.
2) Consecutive Possibility Methods. These methods are applied to (possibA, possibB) pairs
in :meth:`~music21.figuredBass.segment.Segment.allCorrectConsecutivePossibilities`,
possibA being any correct possibility in segmentA and possibB being any correct possibility
in segmentB.
3) Special Resolution Methods. These methods are applied in :meth:`~music21.figuredBass.segment.Segment.allCorrectConsecutivePossibilities`
as applicable if the pitch names of a Segment correctly spell out an augmented sixth, dominant
seventh, or diminished seventh chord. They are located in :mod:`~music21.figuredBass.resolution`.
The application of these methods is controlled by corresponding instance variables in a
:class:`~music21.figuredBass.rules.Rules` object provided to a Segment.
.. note:: The number of parts and maxPitch are universal for a :class:`~music21.figuredBass.realizer.FiguredBassLine`.
'''
import unittest
from music21 import chord
from music21 import exceptions21
from music21 import interval
from music21 import pitch
from music21 import voiceLeading
from music21.ext import six
izip = six.moves.zip # @UndefinedVariable
# SINGLE POSSIBILITY RULE-CHECKING METHODS
# ----------------------------------------
def voiceCrossing(possibA):
'''
Returns True if there is voice crossing present between any two parts
in possibA. The parts from lowest part to highest part (right to left)
must correspond to increasingly higher pitches in order for there to
be no voice crossing. Comparisons between pitches are done using pitch
comparison methods, which are based on pitch space values
(see :class:`~music21.pitch.Pitch`).
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> E4 = pitch.Pitch('E4')
>>> C5 = pitch.Pitch('C5')
>>> G5 = pitch.Pitch('G5')
>>> possibA1 = (C5, G5, E4)
>>> possibility.voiceCrossing(possibA1) # G5 > C5
True
>>> possibA2 = (C5, E4, C4)
>>> possibility.voiceCrossing(possibA2)
False
'''
hasVoiceCrossing = False
for part1Index in range(len(possibA)):
higherPitch = possibA[part1Index]
for part2Index in range(part1Index + 1, len(possibA)):
lowerPitch = possibA[part2Index]
if higherPitch < lowerPitch:
hasVoiceCrossing = True
return hasVoiceCrossing
return hasVoiceCrossing
def isIncomplete(possibA, pitchNamesToContain):
'''
Returns True if possibA is incomplete, if it doesn't contain at least
one of every pitch name in pitchNamesToContain.
For a Segment, pitchNamesToContain is :attr:`~music21.figuredBass.segment.Segment.pitchNamesInChord`.
If possibA contains excessive pitch names, a PossibilityException is
raised, although this is not a concern with the current implementation
of fbRealizer.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
>>> Bb5 = pitch.Pitch('B-5')
>>> possibA1 = (C5, G4, E4, C3)
>>> pitchNamesA1 = ['C', 'E', 'G', 'B-']
>>> possibility.isIncomplete(possibA1, pitchNamesA1) # Missing B-
True
>>> pitchNamesA2 = ['C', 'E', 'G']
>>> possibility.isIncomplete(possibA1, pitchNamesA2)
False
'''
isIncomplete = False
pitchNamesContained = []
for givenPitch in possibA:
if givenPitch.name not in pitchNamesContained:
pitchNamesContained.append(givenPitch.name)
for pitchName in pitchNamesToContain:
if pitchName not in pitchNamesContained:
isIncomplete = True
if not isIncomplete and (len(pitchNamesContained) > len(pitchNamesToContain)):
isIncomplete = False
#raise PossibilityException(str(possibA) + " contains pitch names not found in pitchNamesToContain.")
return isIncomplete
def upperPartsWithinLimit(possibA, maxSemitoneSeparation = 12):
'''
Returns True if the pitches in the upper parts of possibA
are found within maxSemitoneSeparation of each other. The
upper parts of possibA are all the pitches except the last.
The default value of maxSemitoneSeparation is 12 semitones,
enharmonically equivalent to a perfect octave. If this method
returns True for this default value, then all the notes in
the upper parts can be played by most adult pianists using
just the right hand.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> E3 = pitch.Pitch('E3')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C3)
>>> possibility.upperPartsWithinLimit(possibA1)
True
Here, C5 and E3 are separated by almost two octaves.
>>> possibA2 = (C5, G4, E3, C3)
>>> possibility.upperPartsWithinLimit(possibA2)
False
'''
upperPartsWithinLimit = True
if maxSemitoneSeparation == None:
return upperPartsWithinLimit
upperParts = possibA[0:len(possibA)-1]
for part1Index in range(len(upperParts)):
higherPitch = upperParts[part1Index]
for part2Index in range(part1Index + 1, len(upperParts)):
lowerPitch = upperParts[part2Index]
if abs(higherPitch.ps - lowerPitch.ps) > maxSemitoneSeparation:
upperPartsWithinLimit = False
return upperPartsWithinLimit
return upperPartsWithinLimit
def pitchesWithinLimit(possibA, maxPitch = pitch.Pitch('B5')):
'''
Returns True if all pitches in possibA are less than or equal to
the maxPitch provided. Comparisons between pitches are done using pitch
comparison methods, which are based on pitch space values
(see :class:`~music21.pitch.Pitch`).
Used in :class:`~music21.figuredBass.segment.Segment` to filter
resolutions of special Segments which can have pitches exceeeding
the universal maxPitch of a :class:`~music21.figuredBass.realizer.FiguredBassLine`.
>>> from music21.figuredBass import possibility
>>> from music21.figuredBass import resolution
>>> from music21 import pitch
>>> G2 = pitch.Pitch('G2')
>>> D4 = pitch.Pitch('D4')
>>> F5 = pitch.Pitch('F5')
>>> B5 = pitch.Pitch('B5')
>>> domPossib = (B5, F5, D4, G2)
>>> possibility.pitchesWithinLimit(domPossib)
True
>>> resPossib = resolution.dominantSeventhToMajorTonic(domPossib)
>>> resPossib # Contains C6 > B5
(<music21.pitch.Pitch C6>, <music21.pitch.Pitch E5>, <music21.pitch.Pitch C4>, <music21.pitch.Pitch C3>)
>>> possibility.pitchesWithinLimit(resPossib)
False
'''
for givenPitch in possibA:
if givenPitch > maxPitch:
return False
return True
def limitPartToPitch(possibA, partPitchLimits = {}):
'''
Takes in partPitchLimits containing (partNumber, partPitch) pairs, each
of which limits a part in possibA to a certain :class:`~music21.pitch.Pitch`.
Returns True if all limits are followed in possibA, False otherwise.
>>> from music21.figuredBass import possibility
>>> from music21 import pitch
>>> C4 = pitch.Pitch('C4')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
>>> G5 = pitch.Pitch('G5')
>>> sopranoPitch = pitch.Pitch('G5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibility.limitPartToPitch(possibA1, {1: sopranoPitch})
False
>>> possibA2 = (G5, G4, E4, C4)
>>> possibility.limitPartToPitch(possibA2, {1: sopranoPitch})
True
'''
for (partNumber, partPitch) in partPitchLimits.items():
if not (possibA[partNumber - 1] == partPitch):
return False
return True
# CONSECUTIVE POSSIBILITY RULE-CHECKING METHODS
# ---------------------------------------------
#Speedup tables
parallelFifthsTable = {}
parallelOctavesTable = {}
hiddenFifthsTable = {}
hiddenOctavesTable = {}
def parallelFifths(possibA, possibB):
'''
Returns True if there are parallel fifths between any
two shared parts of possibA and possibB.
If pitchA1 and pitchA2 in possibA are separated by
a simple interval of a perfect fifth, and they move
to a pitchB1 and pitchB2 in possibB also separated
by the simple interval of a perfect fifth, then this
constitutes parallel fifths between these two parts.
If the method returns False, then no two shared parts
have parallel fifths. The method returns True as soon
as two shared parts with parallel fifths are found.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> G3 = pitch.Pitch('G3')
>>> A3 = pitch.Pitch('A3')
>>> A4 = pitch.Pitch('A4')
>>> B4 = pitch.Pitch('B4')
Here, the bass moves from C3 to D3 and the tenor moves
from G3 to A3. The interval between C3 and G3, as well
as between D3 and A3, is a perfect fifth. These two
parts, and therefore the two possibilities, have
parallel fifths.
>>> possibA1 = (B4, G3, C3)
>>> possibB1 = (A4, A3, D3)
>>> possibility.parallelFifths(possibA1, possibB1)
True
Now, the tenor moves instead to F3. The interval between
D3 and F3 is a minor third. The bass and tenor parts
don't form parallel fifths. The soprano part forms parallel
fifths with neither the bass nor tenor parts. The
two possibilities, therefore, have no parallel fifths.
>>> F3 = pitch.Pitch('F3')
>>> possibA2 = (B4, G3, C3)
>>> possibB2 = (A4, F3, D3)
>>> possibility.parallelFifths(possibA2, possibB2)
False
'''
hasParallelFifths = False
pairsList = partPairs(possibA, possibB)
for pair1Index in range(len(pairsList)):
(higherPitchA, higherPitchB) = pairsList[pair1Index]
for pair2Index in range(pair1Index + 1, len(pairsList)):
(lowerPitchA, lowerPitchB) = pairsList[pair2Index]
if not abs(higherPitchA.ps - lowerPitchA.ps) % 12 == 7:
continue
if not abs(higherPitchB.ps - lowerPitchB.ps) % 12 == 7:
continue
#Very high probability of ||5, but still not certain.
pitchQuartet = (lowerPitchA, lowerPitchB, higherPitchA, higherPitchB)
if pitchQuartet in parallelFifthsTable:
hasParallelFifths = parallelFifthsTable[pitchQuartet]
if hasParallelFifths:
return hasParallelFifths
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.parallelFifth():
hasParallelFifths = True
parallelFifthsTable[pitchQuartet] = hasParallelFifths
if hasParallelFifths:
return hasParallelFifths
return hasParallelFifths
def parallelOctaves(possibA, possibB):
'''
Returns True if there are parallel octaves between any
two shared parts of possibA and possibB.
If pitchA1 and pitchA2 in possibA are separated by
a simple interval of a perfect octave, and they move
to a pitchB1 and pitchB2 in possibB also separated
by the simple interval of a perfect octave, then this
constitutes parallel octaves between these two parts.
If the method returns False, then no two shared parts
have parallel octaves. The method returns True as soon
as two shared parts with parallel octaves are found.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> G3 = pitch.Pitch('G3')
>>> A3 = pitch.Pitch('A3')
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
Here, the soprano moves from C4 to D4 and the bass moves
from C3 to D3. The interval between C3 and C4, as well as
between D3 and D4, is a parallel octave. The two parts,
and therefore the two possibilities, have parallel octaves.
>>> possibA1 = (C4, G3, C3)
>>> possibB1 = (D4, A3, D3)
>>> possibility.parallelOctaves(possibA1, possibB1)
True
Now, the soprano moves down to B3. The interval between
D3 and B3 is a major sixth. The soprano and bass parts
no longer have parallel octaves. The tenor part forms
a parallel octave with neither the bass nor soprano,
so the two possibilities do not have parallel octaves.
(Notice, however, the parallel fifth between the bass
and tenor!)
>>> B3 = pitch.Pitch('B3')
>>> possibA2 = (C4, G3, C3)
>>> possibB2 = (B3, A3, D3)
>>> possibility.parallelOctaves(possibA2, possibB2)
False
'''
hasParallelOctaves = False
pairsList = partPairs(possibA, possibB)
for pair1Index in range(len(pairsList)):
(higherPitchA, higherPitchB) = pairsList[pair1Index]
for pair2Index in range(pair1Index + 1, len(pairsList)):
(lowerPitchA, lowerPitchB) = pairsList[pair2Index]
if not abs(higherPitchA.ps - lowerPitchA.ps) % 12 == 0:
continue
if not abs(higherPitchB.ps - lowerPitchB.ps) % 12 == 0:
continue
#Very high probability of ||8, but still not certain.
pitchQuartet = (lowerPitchA, lowerPitchB, higherPitchA, higherPitchB)
if pitchQuartet in parallelOctavesTable:
hasParallelOctaves = parallelOctavesTable[pitchQuartet]
if hasParallelOctaves:
return hasParallelOctaves
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.parallelOctave():
hasParallelOctaves = True
parallelOctavesTable[pitchQuartet] = hasParallelOctaves
if hasParallelOctaves:
return hasParallelOctaves
return hasParallelOctaves
def hiddenFifth(possibA, possibB):
'''
Returns True if there is a hidden fifth between shared outer parts
of possibA and possibB. The outer parts here are the first and last
elements of each possibility.
If sopranoPitchA and bassPitchA in possibA move to a sopranoPitchB
and bassPitchB in possibB in similar motion, and the simple interval
between sopranoPitchB and bassPitchB is that of a perfect fifth,
then this constitutes a hidden octave between the two possibilities.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> E3 = pitch.Pitch('E3')
>>> F3 = pitch.Pitch('F3')
>>> E5 = pitch.Pitch('E5')
>>> A5 = pitch.Pitch('A5')
Here, the bass part moves up from C3 to D3 and the soprano part moves
up from E5 to A5. The simple interval between D3 and A5 is a perfect
fifth. Therefore, there is a hidden fifth between the two possibilities.
>>> possibA1 = (E5, E3, C3)
>>> possibB1 = (A5, F3, D3)
>>> possibility.hiddenFifth(possibA1, possibB1)
True
Here, the soprano and bass parts also move in similar motion, but the
simple interval between D3 and Ab5 is a diminished fifth. Consequently,
there is no hidden fifth.
>>> Ab5 = pitch.Pitch('A-5')
>>> possibA2 = (E5, E3, C3)
>>> possibB2 = (Ab5, F3, D3)
>>> possibility.hiddenFifth(possibA2, possibB2)
False
Now, we have the soprano and bass parts again moving to A5 and D3, whose
simple interval is a perfect fifth. However, the bass moves up while the
soprano moves down. Therefore, there is no hidden fifth.
>>> E6 = pitch.Pitch('E6')
>>> possibA3 = (E6, E3, C3)
>>> possibB3 = (A5, F3, D3)
>>> possibility.hiddenFifth(possibA3, possibB3)
False
'''
hasHiddenFifth = False
pairsList = partPairs(possibA, possibB)
(highestPitchA, highestPitchB) = pairsList[0]
(lowestPitchA, lowestPitchB) = pairsList[-1]
if abs(highestPitchB.ps - lowestPitchB.ps) % 12 == 7:
#Very high probability of hidden fifth, but still not certain.
pitchQuartet = (lowestPitchA, lowestPitchB, highestPitchA, highestPitchB)
if pitchQuartet in hiddenFifthsTable:
hasHiddenFifth = hiddenFifthsTable[pitchQuartet]
return hasHiddenFifth
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.hiddenFifth():
hasHiddenFifth = True
hiddenFifthsTable[pitchQuartet] = hasHiddenFifth
return hasHiddenFifth
def hiddenOctave(possibA, possibB):
'''
Returns True if there is a hidden octave between shared outer parts
of possibA and possibB. The outer parts here are the first and last
elements of each possibility.
If sopranoPitchA and bassPitchA in possibA move to a sopranoPitchB
and bassPitchB in possibB in similar motion, and the simple interval
between sopranoPitchB and bassPitchB is that of a perfect octave,
then this constitutes a hidden octave between the two possibilities.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C3 = pitch.Pitch('C3')
>>> D3 = pitch.Pitch('D3')
>>> E3 = pitch.Pitch('E3')
>>> F3 = pitch.Pitch('F3')
>>> A5 = pitch.Pitch('A5')
>>> D6 = pitch.Pitch('D6')
Here, the bass part moves up from C3 to D3 and the soprano part moves
up from A5 to D6. The simple interval between D3 and D6 is a perfect
octave. Therefore, there is a hidden octave between the two possibilities.
>>> possibA1 = (A5, E3, C3)
>>> possibB1 = (D6, F3, D3) #Perfect octave between soprano and bass.
>>> possibility.hiddenOctave(possibA1, possibB1)
True
Here, the bass part moves up from C3 to D3 but the soprano part moves
down from A6 to D6. There is no hidden octave since the parts move in
contrary motion.
>>> A6 = pitch.Pitch('A6')
>>> possibA2 = (A6, E3, C3)
>>> possibB2 = (D6, F3, D3)
>>> possibility.hiddenOctave(possibA2, possibB2)
False
'''
hasHiddenOctave = False
pairsList = partPairs(possibA, possibB)
(highestPitchA, highestPitchB) = pairsList[0]
(lowestPitchA, lowestPitchB) = pairsList[-1]
if abs(highestPitchB.ps - lowestPitchB.ps) % 12 == 0:
#Very high probability of hidden octave, but still not certain.
pitchQuartet = (lowestPitchA, lowestPitchB, highestPitchA, highestPitchB)
if pitchQuartet in hiddenOctavesTable:
hasHiddenOctave = hiddenOctavesTable[pitchQuartet]
return hasHiddenOctave
vlq = voiceLeading.VoiceLeadingQuartet(*pitchQuartet)
if vlq.hiddenOctave():
hasHiddenOctave = True
hiddenOctavesTable[pitchQuartet] = hasHiddenOctave
return hasHiddenOctave
def voiceOverlap(possibA, possibB):
'''
Returns True if there is voice overlap between any two shared parts
of possibA and possibB.
Voice overlap can occur in two ways:
1) If a pitch in a lower part in possibB is higher than a pitch in
a higher part in possibA. This case is demonstrated below.
2) If a pitch in a higher part in possibB is lower than a pitch in
a lower part in possibA.
.. image:: images/figuredBass/fbPossib_voiceOverlap.*
:width: 75
In the above example, possibA has G4 in the bass and B4 in the soprano.
If the bass moves up to C5 in possibB, that would constitute voice overlap
because the bass in possibB would be higher than the soprano in possibA.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> C5 = pitch.Pitch('C5')
Here, case #2 is demonstrated. There is overlap between the soprano and
alto parts, because F4 in the soprano in possibB1 is lower than the G4
in the alto in possibA1. Note that neither possibility has to have voice
crossing for voice overlap to occur, as shown.
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (F4, F4, D4, D4)
>>> possibility.voiceOverlap(possibA1, possibB1)
True
>>> possibility.voiceCrossing(possibA1)
False
>>> possibility.voiceCrossing(possibB1)
False
Here is the same example as above, except the soprano of the second
possibility is now B4, which does not overlap the G4 of the first.
Now, there is no voice overlap.
>>> B4 = pitch.Pitch('B4')
>>> possibA2 = (C5, G4, E4, C4)
>>> possibB2 = (B4, F4, D4, D4)
>>> possibility.voiceOverlap(possibA2, possibB2)
False
'''
hasVoiceOverlap = False
pairsList = partPairs(possibA, possibB)
for pair1Index in range(len(pairsList)):
(higherPitchA, higherPitchB) = pairsList[pair1Index]
for pair2Index in range(pair1Index + 1, len(pairsList)):
(lowerPitchA, lowerPitchB) = pairsList[pair2Index]
if lowerPitchB > higherPitchA or higherPitchB < lowerPitchA:
hasVoiceOverlap = True
return hasVoiceOverlap
return hasVoiceOverlap
def partMovementsWithinLimits(possibA, possibB, partMovementLimits = []):
'''
Returns True if all movements between shared parts of possibA and possibB
are within limits, as specified by partMovementLimits, which consists of
(partNumber, maxSeparation) tuples.
* partNumber: Specified from 1 to n, where 1 is the soprano or highest part and n is the bass or lowest part.
* maxSeparation: For a given part, the maximum separation to allow between a pitch in possibA and a corresponding pitch in possibB, in semitones.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> A4 = pitch.Pitch('A4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
Here, we limit the soprano part to motion of two semitones, enharmonically equivalent to a major second.
Moving from C5 to B4 is allowed because it constitutes stepwise motion, but moving to A4 is not allowed
because the distance between A4 and C5 is three semitones.
>>> partMovementLimits = [(1, 2)]
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, F4, D4, D4)
>>> possibility.partMovementsWithinLimits(possibA1, possibB1, partMovementLimits)
True
>>> possibB2 = (A4, F4, D4, D4)
>>> possibility.partMovementsWithinLimits(possibA1, possibB2, partMovementLimits)
False
'''
withinLimits = True
for (partNumber, maxSeparation) in partMovementLimits:
pitchA = possibA[partNumber - 1]
pitchB = possibB[partNumber - 1]
if abs(pitchB.ps - pitchA.ps) > maxSeparation:
withinLimits = False
return withinLimits
return withinLimits
def upperPartsSame(possibA, possibB):
'''
Returns True if the upper parts are the same.
False otherwise.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, F4, D4, D4)
>>> possibB2 = (C5, G4, E4, D4)
>>> possibility.upperPartsSame(possibA1, possibB1)
False
>>> possibility.upperPartsSame(possibA1, possibB2)
True
'''
pairsList = partPairs(possibA, possibB)
for (pitchA, pitchB) in pairsList[0:-1]:
if not (pitchA == pitchB):
return False
return True
def partsSame(possibA, possibB, partsToCheck = None):
'''
Takes in partsToCheck, a list of part numbers. Checks if pitches at those part numbers of
possibA and possibB are equal, determined by pitch space.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> E4 = pitch.Pitch('E4')
>>> G4 = pitch.Pitch('G4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, G4, E4, C4)
>>> possibility.partsSame(possibA1, possibB1, [2,3,4])
True
'''
if partsToCheck == None:
return True
pairsList = partPairs(possibA, possibB)
for partIndex in partsToCheck:
(pitchA, pitchB) = pairsList[partIndex - 1]
if not (pitchA == pitchB):
return False
return True
def couldBeItalianA6Resolution(possibA, possibB, threePartChordInfo = None, restrictDoublings = True):
'''
Speed-enhanced but designed to stand alone. Returns True if possibA is an Italian A6 chord
and possibB could possibly be an acceptable resolution. If restrictDoublings is set to True,
only the tonic can be doubled. Setting restrictDoublings to False opens up the chance
that the root or the third can be doubled. Controlled in the :class:`~music21.figuredBass.rules.Rules`
object by :attr:`~music21.figuredBass.rules.Rules.restrictDoublingsInItalianA6Resolution`.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> A2 = pitch.Pitch('A2')
>>> Bb2 = pitch.Pitch('B-2')
>>> Cs4 = pitch.Pitch('C#4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> Fs4 = pitch.Pitch('F#4')
>>> Gs4 = pitch.Pitch('G#4')
>>> A4 = pitch.Pitch('A4')
>>> possibA1 = (Gs4, D4, D4, Bb2)
>>> possibB1 = (A4, Cs4, E4, A2)
>>> possibB2 = (A4, E4, Cs4, A2)
>>> possibB3 = (A4, D4, Fs4, A2)
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB1)
True
    >>> possibility.couldBeItalianA6Resolution(possibA1, possibB2)
True
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB3)
True
A PossibilityException is raised if possibA is not an Italian A6 chord, but this only
applies only if threePartChordInfo = None, because otherwise the chord information is
coming from :class:`~music21.figuredBass.segment.Segment` and the fact that possibA is
an It+6 chord is assumed.
>>> possibA2 = (Gs4, E4, D4, Bb2)
>>> possibB2 = (A4, E4, Cs4, A2)
>>> possibility.couldBeItalianA6Resolution(possibA2, possibB2)
Traceback (most recent call last):
PossibilityException: possibA does not spell out an It+6 chord.
The method is called "couldBeItalianA6Resolution" as opposed
to "isItalianA6Resolution" because it is designed to work in
tandem with :meth:`~music21.figuredBass.possibility.parallelOctaves`
and :meth:`~music21.figuredBass.possibility.isIncomplete` in
a Segment. Consider the following examples with possibA1 above as the
augmented sixth chord to resolve.
>>> possibA1 = (Gs4, D4, D4, Bb2)
>>> possibB4 = (A4, D4, D4, A2) # No 3rd
>>> possibB5 = (A4, Cs4, Cs4, A2) # No 5th
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB4)
True
>>> possibility.couldBeItalianA6Resolution(possibA1, possibB5) # parallel octaves
True
>>> possibA3 = (Gs4, Gs4, D4, Bb2)
>>> possibB6 = (A4, A4, Cs4, A2)
>>> possibility.couldBeItalianA6Resolution(possibA3, possibB6, restrictDoublings = True)
False
>>> possibility.couldBeItalianA6Resolution(possibA3, possibB6, restrictDoublings = False)
True
'''
if threePartChordInfo == None:
augSixthChord = chord.Chord(possibA)
if not augSixthChord.isItalianAugmentedSixth():
raise PossibilityException("possibA does not spell out an It+6 chord.")
bass = augSixthChord.bass()
root = augSixthChord.root()
third = augSixthChord.getChordStep(3)
fifth = augSixthChord.getChordStep(5)
threePartChordInfo = [bass, root, third, fifth]
allowedIntervalNames = ['M3','m3','M2','m-2']
rootResolved = False
[bass, root, third, fifth] = threePartChordInfo
for pitchIndex in range(len(possibA)):
pitchA = possibA[pitchIndex]
pitchB = possibB[pitchIndex]
if pitchA.name == fifth.name:
if pitchA == pitchB:
continue
if abs(pitchA.ps - pitchB.ps) > 4.0:
return False
tt = interval.Interval(pitchA, pitchB)
if not tt.directedSimpleName in allowedIntervalNames:
return False
elif pitchA.name == bass.name and pitchA == bass:
if not (pitchA.ps - pitchB.ps) == 1.0:
return False
i = interval.Interval(pitchA, pitchB)
if not i.directedName == 'm-2':
return False
elif pitchA.name == root.name:
if rootResolved == True and restrictDoublings:
# there can't be more than one root
return False
if not (pitchB.ps - pitchA.ps) == 1.0:
return False
i = interval.Interval(pitchA, pitchB)
if not i.directedName == 'm2':
return False
rootResolved = True
elif pitchA.name == third.name:
if restrictDoublings:
# there can't be more than one third, which is in the bass.
return False
if not (pitchA.ps - pitchB.ps) == 1.0:
return False
i = interval.Interval(pitchA, pitchB)
if not i.directedName == 'm-2':
return False
'''
# Part 1: Check if possibA is A6 chord, and if it is properly formed.
bass = possibA[-1]
root = None
rootIndex = 0
for pitchA in possibA[0:-1]:
if not (pitchA.ps - bass.ps) % 12 == 10:
rootIndex += 1
continue
br = interval.Interval(bass, pitchA)
isAugmentedSixth = (br.directedSimpleName == 'A6')
if isAugmentedSixth:
root = pitchA
break
tonic = bass.transpose('M3')
#Restrict doublings, It+6
for pitchIndex in range(len(possibA) - 1):
if pitchIndex == rootIndex:
continue
pitchA = possibA[pitchIndex]
if not pitchA.name == tonic.name:
return False
#Part 2: If possibA is Italian A6 chord, check that it resolves properly in possibB.
fifth = root.transpose('m2')
pairsList = partPairs(possibA, possibB)
(bassA, bassB) = pairsList[-1]
(rootA, rootB) = pairsList[rootIndex]
if not (bassB.name == fifth.name and rootB.name == fifth.name):
return False
if not (bassB.ps - bassA.ps == -1.0 and rootB.ps - rootA.ps == 1.0):
return False
allowedIntervalNames = ['M3','m3','M2','m-2']
for pitchIndex in range(len(pairsList) - 1):
if pitchIndex == rootIndex:
continue
(tonicA, tonicB) = pairsList[pitchIndex]
if tonicA == tonicB:
continue
tt = interval.Interval(tonicA, tonicB)
if not tt.directedSimpleName in allowedIntervalNames:
return False
'''
return True
# HELPER METHODS
# --------------
def partPairs(possibA, possibB):
'''
Groups together pitches of possibA and possibB which correspond to the same part,
constituting a shared part.
>>> from music21 import pitch
>>> from music21.figuredBass import possibility
>>> C4 = pitch.Pitch('C4')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> B4 = pitch.Pitch('B4')
>>> C5 = pitch.Pitch('C5')
>>> possibA1 = (C5, G4, E4, C4)
>>> possibB1 = (B4, F4, D4, D4)
>>> possibility.partPairs(possibA1, possibA1)
[(<music21.pitch.Pitch C5>, <music21.pitch.Pitch C5>),
(<music21.pitch.Pitch G4>, <music21.pitch.Pitch G4>),
(<music21.pitch.Pitch E4>, <music21.pitch.Pitch E4>),
(<music21.pitch.Pitch C4>, <music21.pitch.Pitch C4>)]
>>> possibility.partPairs(possibA1, possibB1)
[(<music21.pitch.Pitch C5>, <music21.pitch.Pitch B4>),
(<music21.pitch.Pitch G4>, <music21.pitch.Pitch F4>),
(<music21.pitch.Pitch E4>, <music21.pitch.Pitch D4>),
(<music21.pitch.Pitch C4>, <music21.pitch.Pitch D4>)]
'''
return list(izip(possibA, possibB))
# apply a function to one pitch of possibA at a time
# apply a function to two pitches of possibA at a time
# apply a function to one partPair of possibA, possibB at a time
# apply a function to two partPairs of possibA, possibB at a time
# use an iterator that fails when the first false is returned
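# A minimal sketch (not part of the original module) of the short-circuiting idea
# noted above: evaluate single-possibility rule methods lazily and stop at the
# first failing check. The function name and rule selection are illustrative.
def _exampleSingleCheck(possibA, pitchNamesToContain):
    ruleChecks = (
        lambda: not voiceCrossing(possibA),
        lambda: not isIncomplete(possibA, pitchNamesToContain),
        lambda: upperPartsWithinLimit(possibA),
        lambda: pitchesWithinLimit(possibA),
    )
    # all() short-circuits, so later rules are skipped once one check fails.
    return all(check() for check in ruleChecks)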
singlePossibilityMethods = [voiceCrossing, isIncomplete, upperPartsWithinLimit, pitchesWithinLimit]
#singlePossibilityMethods.sort(None, lambda x: x.__name__)
consequentPossibilityMethods = [parallelFifths, parallelOctaves, hiddenFifth, hiddenOctave, voiceOverlap,
partMovementsWithinLimits, upperPartsSame, couldBeItalianA6Resolution]
#consequentPossibilityMethods.sort(None, lambda x: x.__name__)
_DOC_ORDER = singlePossibilityMethods + [partPairs] + consequentPossibilityMethods
class PossibilityException(exceptions21.Music21Exception):
pass
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
pass
if __name__ == "__main__":
import music21
music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof |
py | 1a41d4729cb1b214eefb7cbe4613aca8836039b6 | #!/usr/bin/env python
import pygame # pylint: disable=import-error
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputting the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def printf(self, screen, textString):
textBitmap = self.font.render(textString, True, BLACK)
screen.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
# Set the width and height of the screen [width,height]
size = [500, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
# -------- Main Program Loop -----------
while done==False:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN:
print("Joystick button pressed.")
if event.type == pygame.JOYBUTTONUP:
print("Joystick button released.")
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
textPrint.reset()
# Get count of joysticks
joystick_count = pygame.joystick.get_count()
textPrint.printf(screen, "Number of joysticks: {}".format(joystick_count) )
textPrint.indent()
# For each joystick:
joystick = pygame.joystick.Joystick(0)
joystick.init()
textPrint.printf(screen, "Joystick {}".format(0) )
textPrint.indent()
# Get the name from the OS for the controller/joystick
name = joystick.get_name()
textPrint.printf(screen, "Joystick name: {}".format(name) )
# Usually axis run in pairs, up/down for one, and left/right for
# the other.
axes = joystick.get_numaxes()
textPrint.printf(screen, "Number of axes: {}".format(axes) )
textPrint.indent()
for i in range( axes ):
axis = joystick.get_axis( i )
textPrint.printf(screen, "Axis {} value: {:>6.3f}".format(i, axis) )
textPrint.unindent()
buttons = joystick.get_numbuttons()
textPrint.printf(screen, "Number of buttons: {}".format(buttons) )
textPrint.indent()
for i in range( buttons ):
button = joystick.get_button( i )
textPrint.printf(screen, "Button {:>2} value: {}".format(i,button) )
textPrint.unindent()
textPrint.unindent()
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(20)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit ()
|
py | 1a41d4e849a50888041bf8bcbfdffe1024234516 | import crypt
import getpass
import os
import pwd
import subprocess
from multiprocessing import Process
from .. import onlyunix, run
try:
import spwd
except ImportError:
pass
@onlyunix
def parseargs(p):
"""
Add arguments and `func` to `p`.
:param p: ArgumentParser
:return: ArgumentParser
"""
p.set_defaults(func=func)
p.description = "Begin session on the system"
p.add_argument("username", nargs="?")
return p
def func(args):
while True:
try:
# Get username
if args.username:
username = args.username
else:
username = input("Username: ")
# Get password
password = getpass.getpass("Password: ")
# Authenticate
try:
# Get entry from /etc/shadow
hashed_password = spwd.getspnam(username).sp_pwd
except KeyError:
print("Invalid username or password\n")
pass
else:
if check_password(hashed_password, password):
# Get entry from /etc/passwd
pw = pwd.getpwnam(username)
# Set UID of user
os.setuid(pw.pw_uid)
# Change to homedir
os.chdir(pw.pw_dir)
# Set enviornment
os.environ['USER'] = username
os.environ['LOGNAME'] = username
os.environ['HOME'] = pw.pw_dir
os.environ['SHELL'] = pw.pw_shell
# Start user shell
if pw.pw_shell == 'sh':
p = Process(target=run, args=[['sh']])
p.start()
else:
subprocess.call([pw.pw_shell])
else:
print("Invalid username or password\n")
except BaseException:
print()
def check_password(hashed_password, password):
salt = hashed_password.rsplit('$', 1)[0]
return crypt.crypt(password, salt) == hashed_password
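# A brief usage sketch (not part of the original command), assuming a SHA-512
# shadow-style hash produced with the standard-library crypt module:
#
#     hashed = crypt.crypt("secret", crypt.mksalt(crypt.METHOD_SHA512))
#     check_password(hashed, "secret")   # True
#     check_password(hashed, "wrong")    # False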
|
py | 1a41d5db2bf0d9c8d064a5be48442a406fea8d3e | {
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"pygame 2.0.1 (SDL 2.0.14, Python 3.8.5)\n",
"Hello from the pygame community. https://www.pygame.org/contribute.html\n",
"WARNING:tensorflow:From <ipython-input-1-3ec8a970748d>:153: Sequential.predict_classes (from tensorflow.python.keras.engine.sequential) is deprecated and will be removed after 2021-01-01.\n",
"Instructions for updating:\n",
"Please use instead:* `np.argmax(model.predict(x), axis=-1)`, if your model does multi-class classification (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype(\"int32\")`, if your model does binary classification (e.g. if it uses a `sigmoid` last-layer activation).\n"
]
}
],
"source": [
"import cv2\n",
"import os\n",
"from keras.models import load_model\n",
"import numpy as np\n",
"from pygame import mixer\n",
"import time\n",
"from keras.preprocessing.image import img_to_array\n",
"import imutils\n",
"from keras.models import load_model\n",
"\n",
"mixer.init()\n",
"sound = mixer.Sound('alarm.wav')\n",
"\n",
"# parameters for loading data and images\n",
"detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'\n",
"emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'\n",
"\n",
"face = cv2.CascadeClassifier('haar cascade files\\haarcascade_frontalface_alt.xml')\n",
"leye = cv2.CascadeClassifier('haar cascade files\\haarcascade_lefteye_2splits.xml')\n",
"reye = cv2.CascadeClassifier('haar cascade files\\haarcascade_righteye_2splits.xml')\n",
"\n",
"\n",
"glass_cascade= cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')\n",
"\n",
"def detect(gray,pic):\n",
" fc=cv2.CascadeClassifier('haarcascade_files/haarcascade_frontalface_default.xml')\n",
" face=fc.detectMultiScale(gray,1.3,5)\n",
" for (x,y,w,h) in face:\n",
" cv2.rectangle(pic,(x,y),(x+w,y+h),(255,0,0),2)\n",
" converted_gray=gray[y:y+h,x:x+w]\n",
" converted_color=pic[y:y+h,x:x+w]\n",
" glass = glass_cascade.detectMultiScale(converted_gray,1.04,5)\n",
" \n",
" for(ex,ey,ew,eh) in glass:\n",
" imag=cv2.rectangle(converted_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n",
" cv2.putText(imag,'active',(ex,ey-10),cv2.FONT_HERSHEY_SIMPLEX,0.9,(36,255,12),2)\n",
" return pic\n",
"\n",
"\n",
"# hyper-parameters for bounding boxes shape\n",
"# loading models\n",
"face_detection = cv2.CascadeClassifier(detection_model_path)\n",
"emotion_classifier = load_model(emotion_model_path, compile=False)\n",
"EMOTIONS = [\"Frustrated\" ,\"disgusted\",\"scared\", \"Talking\", \"Depressed\", \"surprised\",\n",
" \"neutral\"]\n",
"\n",
"\n",
"lbl=['Close','Open']\n",
"\n",
"model = load_model('models/cnncat2.h5')\n",
"path = os.getcwd()\n",
"cap = cv2.VideoCapture(0)\n",
"font = cv2.FONT_HERSHEY_COMPLEX_SMALL\n",
"count=0\n",
"score=0\n",
"thicc=2\n",
"rpred=[99]\n",
"lpred=[99]\n",
"\n",
"\n",
"# starting video streaming\n",
"cv2.namedWindow('your_face')\n",
"camera = cv2.VideoCapture(0)\n",
"while True:\n",
" frame = camera.read()[1]\n",
" #reading the frame\n",
" frame = imutils.resize(frame,width=300)\n",
" gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
" draw=detect(gray,frame)\n",
" cv2.imshow('Video',draw)\n",
" faces = face_detection.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)\n",
" \n",
" #left_eye = leye.detectMultiScale(gray)\n",
" #right_eye = reye.detectMultiScale(gray)\n",
" \n",
" \n",
" canvas = np.zeros((250, 300, 3), dtype=\"uint8\")\n",
" frameClone = frame.copy()\n",
" if len(faces) > 0:\n",
" faces = sorted(faces, reverse=True,\n",
" key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]\n",
" (fX, fY, fW, fH) = faces\n",
" # Extract the ROI of the face from the grayscale image, resize it to a fixed 28x28 pixels, and then prepare\n",
" # the ROI for classification via the CNN\n",
" roi = gray[fY:fY + fH, fX:fX + fW]\n",
" roi = cv2.resize(roi, (64, 64))\n",
" roi = roi.astype(\"float\") / 255.0\n",
" roi = img_to_array(roi)\n",
" roi = np.expand_dims(roi, axis=0)\n",
" \n",
" \n",
" preds = emotion_classifier.predict(roi)[0]\n",
" emotion_probability = np.max(preds)\n",
" label = EMOTIONS[preds.argmax()]\n",
" else: continue\n",
"\n",
" \n",
" for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):\n",
" # construct the label text\n",
" text = \"{}: {:.2f}%\".format(emotion, prob * 100)\n",
"\n",
" # draw the label + probability bar on the canvas\n",
" # emoji_face = feelings_faces[np.argmax(preds)]\n",
"\n",
" \n",
" w = int(prob * 300)\n",
" cv2.rectangle(canvas, (7, (i * 35) + 5),\n",
" (w, (i * 35) + 35), (0, 0, 255), -1)\n",
" cv2.putText(canvas, text, (10, (i * 35) + 23),\n",
" cv2.FONT_HERSHEY_SIMPLEX, 0.45,\n",
" (255, 255, 255), 2)\n",
" cv2.putText(frameClone, label, (fX, fY - 10),\n",
" cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n",
" cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),\n",
" (0, 0, 255), 2)\n",
" \n",
" height,width = frame.shape[:2] \n",
"\n",
" gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
" \n",
" faces = face.detectMultiScale(gray,minNeighbors=5,scaleFactor=1.1,minSize=(25,25))\n",
" left_eye = leye.detectMultiScale(gray)\n",
" right_eye = reye.detectMultiScale(gray)\n",
"\n",
" cv2.rectangle(frame, (0,height-50) , (200,height) , (0,0,0) , thickness=cv2.FILLED )\n",
"\n",
" for (x,y,w,h) in faces:\n",
" cv2.rectangle(frame, (x,y) , (x+w,y+h) , (100,100,100) , 1 )\n",
"\n",
" for (x,y,w,h) in right_eye:\n",
" r_eye=frame[y:y+h,x:x+w]\n",
" count=count+1\n",
" r_eye = cv2.cvtColor(r_eye,cv2.COLOR_BGR2GRAY)\n",
" r_eye = cv2.resize(r_eye,(24,24))\n",
" r_eye= r_eye/255\n",
" r_eye= r_eye.reshape(24,24,-1)\n",
" r_eye = np.expand_dims(r_eye,axis=0)\n",
" rpred = model.predict_classes(r_eye)\n",
" if(rpred[0]==1):\n",
" lbl='Open' \n",
" if(rpred[0]==0):\n",
" lbl='Closed'\n",
" break\n",
"\n",
" for (x,y,w,h) in left_eye:\n",
" l_eye=frame[y:y+h,x:x+w]\n",
" count=count+1\n",
" l_eye = cv2.cvtColor(l_eye,cv2.COLOR_BGR2GRAY) \n",
" l_eye = cv2.resize(l_eye,(24,24))\n",
" l_eye= l_eye/255\n",
" l_eye=l_eye.reshape(24,24,-1)\n",
" l_eye = np.expand_dims(l_eye,axis=0)\n",
" lpred = model.predict_classes(l_eye)\n",
" if(lpred[0]==1):\n",
" lbl='Open' \n",
" if(lpred[0]==0):\n",
" lbl='Closed'\n",
" break\n",
"\n",
" if(rpred[0]==0 and lpred[0]==0):\n",
" score=score+1\n",
" cv2.putText(frame,\"Closed\",(10,height-20), font, 1,(255,255,255),1,cv2.LINE_AA)\n",
" # if(rpred[0]==1 or lpred[0]==1):\n",
" else:\n",
" score=score-1\n",
" cv2.putText(frame,\"Open\",(10,height-20), font, 1,(255,255,255),1,cv2.LINE_AA)\n",
" \n",
" \n",
" if(score<0):\n",
" score=0 \n",
" cv2.putText(frame,'Score:'+str(score),(100,height-20), font, 1,(255,255,255),1,cv2.LINE_AA)\n",
" if(score>15):\n",
" #person is feeling sleepy so we beep the alarm\n",
" cv2.imwrite(os.path.join(path,'image.jpg'),frame)\n",
" try:\n",
" sound.play()\n",
" \n",
" except: # isplaying = False\n",
" pass\n",
" if(thicc<16):\n",
" thicc= thicc+2\n",
" else:\n",
" thicc=thicc-2\n",
" if(thicc<2):\n",
" thicc=2\n",
" cv2.rectangle(frame,(0,0),(width,height),(0,0,255),thicc)\n",
" \n",
" gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n",
" draw=detect(gray,frame)\n",
" cv2.imshow('Video',draw)\n",
" #cv2.imshow('frame',frame)\n",
"\n",
" cv2.imshow('your_face', frameClone)\n",
" cv2.imshow(\"Probabilities\", canvas)\n",
" if cv2.waitKey(1) & 0xFF == ord('q'):\n",
" break\n",
"\n",
"camera.release()\n",
"cv2.destroyAllWindows()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
|
py | 1a41d67fe77bb02fe99136b05c9ebe600950693f | import math
from config.config import config
class Plot:
@staticmethod
def line(prices, size=(100, 100), position=(0, 0), draw=None, fill=None):
assert draw
max_price = max(prices)
min_price = min(prices)
normalised_prices = [(price - min_price) / (max_price - min_price) for price in prices]
plot_data = []
for i, element in enumerate(normalised_prices):
x = i * (size[0] / len(normalised_prices)) + position[0]
y = size[1] - (element * size[1]) + position[1]
plot_data.append((x, y))
draw.line(plot_data, fill=fill)
@staticmethod
def y_axis_labels(prices, font, position_first=(0, 0), position_last=(0, 0), draw=None, fill=None, labels_number=3):
def center_x(price):
area_width = position_last[0] - position_first[0]
text_width, _ = draw.textsize(price, font)
if area_width >= text_width:
return position_first[0] + (area_width - text_width) / 2
else:
return position_first[0]
max_price = max(prices)
min_price = min(prices)
price_step = (max_price - min_price) / (labels_number - 1)
y_step = (position_last[1] - position_first[1]) / (labels_number - 1)
for i in range(0, labels_number):
human_price = Plot.human_format(min_price + i * price_step, 5)
draw.text((center_x(human_price), position_last[1] - i * y_step), human_price, font=font, fill=fill)
@staticmethod
def percentage(prices, x_middle, y, font, draw, fill=None):
open = prices[0][0]
close = prices[len(prices) - 1][3]
percentage = ((1 - (close / open)) * -1) * 100
price_text = Plot.human_format(percentage, 4, 0)
price_text = price_text + "%"
if percentage > 0:
price_text = "+" + price_text
text_width, _ = draw.textsize(price_text, font)
price_position = ((x_middle - (text_width / 2)), y)
draw.text(price_position, price_text, font=font, fill=fill)
return text_width
@staticmethod #offset name #offset price
def caption(price, y, screen_width, font, draw, name, fill=None, currency_offset=4, price_offset=4):
#draw.text((currency_offset, y), config.currency[:3], font=font, fill=fill)
draw.text((currency_offset, y), name, font=font, fill=fill)
price_text = Plot.human_format(price, 2, 2)
text_width, _ = draw.textsize(price_text, font)
#price_position = (((screen_width - text_width - price_offset) / 2) + price_offset, y)
price_position = ((screen_width - text_width - price_offset), y)
draw.text(price_position, price_text, font=font, fill=fill)
@staticmethod
def candle(data, size=(100, 100), position=(0, 0), draw=None, fill_neg="#000000", fill_pos=None):
width = size[0]
height = size[1]
candle_width = 9
space = 1
num_of_candles = width // (candle_width + space)
leftover_space = width % (candle_width + space)
windows_per_candle = len(data) // num_of_candles
data_offset = len(data) % num_of_candles
candle_data = []
for i in range(data_offset, len(data), windows_per_candle):
window = data[i:i + windows_per_candle]
open = window[0][0]
close = window[len(window) - 1][3]
high = max([i[1] for i in window])
low = min([i[2] for i in window])
candle_data.append((open, high, low, close))
all_values = [item for sublist in candle_data for item in sublist]
max_price = max(all_values)
min_price = min(all_values)
normalised_data = []
for line in candle_data:
normalised_line = []
normalised_data.append(normalised_line)
for i in range(len(line)):
price = line[i]
normalised_line.append((price - min_price) / (max_price - min_price))
def y_flip(y):
return height - (y * height) + position[1]
for i, element in enumerate(normalised_data):
open = element[0]
close = element[3]
high = element[1]
low = element[2]
x = candle_width * i + space * i + leftover_space / 2 + position[0]
# high price
wick_x = x + (candle_width // 2)
draw.line([wick_x, y_flip(high), wick_x, y_flip(max(open, close))], fill=fill_pos)
# low price
draw.line([wick_x, y_flip(low), wick_x, y_flip(min(open, close))], fill=fill_pos)
open_y = math.floor(y_flip(open))
close_y = math.floor(y_flip(close))
if open_y == close_y:
draw.line([x, open_y, x + candle_width - 1, close_y], fill=fill_pos)
else:
if open < close:
draw.rectangle([x, open_y, x + candle_width - 1, close_y], fill=fill_pos)
else:
draw.rectangle([x, open_y, x + candle_width - 1, close_y], fill=fill_neg)
# TODO: Adapt for big numbers 1k, 1m, etc
@staticmethod
def human_format(number, length, fractional_minimal=0):
magnitude = 0
num = number
while abs(num) >= 10:
magnitude += 1
num /= 10.0
format_string = f'%.{fractional_minimal}f'
if length >= magnitude + fractional_minimal + 2:
fractional_length = length - magnitude - 2
format_string = f'%.{fractional_length}f'
return format_string % number
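# A minimal usage sketch (not part of the original module), assuming Pillow is
# available: render a line chart of made-up prices into an in-memory image.
if __name__ == "__main__":
    from PIL import Image, ImageDraw
    prices = [10.0, 10.4, 10.1, 10.8, 11.2, 10.9, 11.5]
    image = Image.new("RGB", (120, 120), "white")
    draw = ImageDraw.Draw(image)
    Plot.line(prices, size=(100, 100), position=(10, 10), draw=draw, fill="black")
    image.save("plot_example.png")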
|
py | 1a41d76863d2ca7b7ef39ece0662415354eab2e4 | """
Explores the kbase draft to see if any metabolic genes are present which are not present in iSG3
"""
import os
from settings import INTERMEDIATE_MODEL_ROOT
import pandas as pd
import re
import cobra as cb
df = pd.read_excel(os.path.join(INTERMEDIATE_MODEL_ROOT,'kbase-draft', 'draft_dsm.xls'),
sheet_name='ModelReactions')
df = df.replace(pd.np.nan, '', regex=True)
draft_genes = []
for ind, row in df.iterrows():
match = re.findall(r'(CLO1313_RS[0-9]+)', row['gpr'])
draft_genes.extend(match)
isg = cb.io.load_json_model(os.path.join(INTERMEDIATE_MODEL_ROOT, 'iSG_3.json'))
isg_genes = [gene.id for gene in isg.genes]
not_in_isg = set(draft_genes) - set(isg_genes)
print('The draft model contains {} metabolic genes which are not in iSG'.format(len(not_in_isg)))
pattern = '|'.join(list(not_in_isg))
df2 = df[df['gpr'].str.contains(pattern)]
df2.to_csv(os.path.join(INTERMEDIATE_MODEL_ROOT, 'kbase-draft', 'not_in_isg3.csv'),
index=False)
print('These genes span {} metabolic reactions'.format(len(df2))) |
py | 1a41d7ba57b9580164bd0b6fd2154985d7f34a89 | """xception in pytorch
[1] François Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/abs/1610.02357
"""
import torch
import torch.nn as nn
__all__ = ['xception']
class SeperableConv2d(nn.Module):
#***Figure 4. An “extreme” version of our Inception module,
#with one spatial convolution per output channel of the 1x1
#convolution."""
def __init__(self, input_channels, output_channels, kernel_size, **kwargs):
super().__init__()
self.depthwise = nn.Conv2d(
input_channels,
input_channels,
kernel_size,
groups=input_channels,
bias=False,
**kwargs
)
self.pointwise = nn.Conv2d(input_channels, output_channels, 1, bias=False)
def forward(self, x):
x = self.depthwise(x)
x = self.pointwise(x)
return x
class EntryFlow(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(3, 32, 3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(32, 64, 3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.conv3_residual = nn.Sequential(
SeperableConv2d(64, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
SeperableConv2d(128, 128, 3, padding=1),
nn.BatchNorm2d(128),
nn.MaxPool2d(3, stride=2, padding=1),
)
self.conv3_shortcut = nn.Sequential(
nn.Conv2d(64, 128, 1, stride=2),
nn.BatchNorm2d(128),
)
self.conv4_residual = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(128, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
SeperableConv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.MaxPool2d(3, stride=2, padding=1)
)
self.conv4_shortcut = nn.Sequential(
nn.Conv2d(128, 256, 1, stride=2),
nn.BatchNorm2d(256),
)
#no downsampling
self.conv5_residual = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(256, 728, 3, padding=1),
nn.BatchNorm2d(728),
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728),
nn.MaxPool2d(3, 1, padding=1)
)
#no downsampling
self.conv5_shortcut = nn.Sequential(
nn.Conv2d(256, 728, 1),
nn.BatchNorm2d(728)
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
residual = self.conv3_residual(x)
shortcut = self.conv3_shortcut(x)
x = residual + shortcut
residual = self.conv4_residual(x)
shortcut = self.conv4_shortcut(x)
x = residual + shortcut
residual = self.conv5_residual(x)
shortcut = self.conv5_shortcut(x)
x = residual + shortcut
return x
class MiddleFLowBlock(nn.Module):
def __init__(self):
super().__init__()
self.shortcut = nn.Sequential()
self.conv1 = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728)
)
self.conv2 = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728)
)
self.conv3 = nn.Sequential(
nn.ReLU(inplace=True),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728)
)
def forward(self, x):
residual = self.conv1(x)
residual = self.conv2(residual)
residual = self.conv3(residual)
shortcut = self.shortcut(x)
return shortcut + residual
class MiddleFlow(nn.Module):
def __init__(self, block):
super().__init__()
#"""then through the middle flow which is repeated eight times"""
self.middel_block = self._make_flow(block, 8)
def forward(self, x):
x = self.middel_block(x)
return x
def _make_flow(self, block, times):
flows = []
for i in range(times):
flows.append(block())
return nn.Sequential(*flows)
class ExitFLow(nn.Module):
def __init__(self):
super().__init__()
self.residual = nn.Sequential(
nn.ReLU(),
SeperableConv2d(728, 728, 3, padding=1),
nn.BatchNorm2d(728),
nn.ReLU(),
SeperableConv2d(728, 1024, 3, padding=1),
nn.BatchNorm2d(1024),
nn.MaxPool2d(3, stride=2, padding=1)
)
self.shortcut = nn.Sequential(
nn.Conv2d(728, 1024, 1, stride=2),
nn.BatchNorm2d(1024)
)
self.conv = nn.Sequential(
SeperableConv2d(1024, 1536, 3, padding=1),
nn.BatchNorm2d(1536),
nn.ReLU(inplace=True),
SeperableConv2d(1536, 2048, 3, padding=1),
nn.BatchNorm2d(2048),
nn.ReLU(inplace=True)
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def forward(self, x):
shortcut = self.shortcut(x)
residual = self.residual(x)
output = shortcut + residual
output = self.conv(output)
output = self.avgpool(output)
return output
class Xception(nn.Module):
def __init__(self, block, num_classes=100):
super().__init__()
self.entry_flow = EntryFlow()
self.middel_flow = MiddleFlow(block)
self.exit_flow = ExitFLow()
self.fc = nn.Linear(2048, num_classes)
def forward(self, x):
x = self.entry_flow(x)
x = self.middel_flow(x)
x = self.exit_flow(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def xception(num_classes=10):
return Xception(MiddleFLowBlock, num_classes=num_classes)
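# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). It assumes the entry
# flow's first convolution, defined earlier in this module, takes 3-channel
# input (e.g. CIFAR-style 32x32 RGB images); adjust the input shape otherwise.
if __name__ == '__main__':
    import torch
    net = xception(num_classes=100)
    logits = net(torch.randn(4, 3, 32, 32))
    print(logits.shape)  # expected: torch.Size([4, 100])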
|
py | 1a41d8bf5ec4225af0c6695a91f4fe9a93aee2ff | import unittest
import numpy as np
import openmdao.api as om
import numpy.testing as npt
import wisdem.commonse.wind_wave_drag as wwd
from openmdao.utils.assert_utils import assert_check_partials
npts = 100
myones = np.ones((npts,))
class TestDrag(unittest.TestCase):
def setUp(self):
self.params = {}
self.unknowns = {}
self.resid = None
# variables
self.params["U"] = 2.0 * myones
self.params["A"] = 4.0 * myones
self.params["p"] = 3.0 * myones
self.params["cm"] = 1.0
self.params["d"] = 10.0 * myones
self.params["rho_water"] = 0.5
self.params["mu_water"] = 1e-3
self.params["z"] = -100.0 * myones
self.params["beta_wave"] = 0.0
self.params["cd_usr"] = -1.0
self.wave = wwd.CylinderWaveDrag(nPoints=npts)
def testRegular(self):
U = 2.0
A = 4.0
# cm = 1.0
r = 5.0
rho = 0.5
# mu = 1e-3
# Re = rho*U*2*r/mu
q = 0.5 * rho * U * U
cd = 1.11
area = 2 * r
D = q * area * cd
Fi = rho * A * np.pi * r * r
Fp = Fi + D
self.wave.compute(self.params, self.unknowns)
npt.assert_equal(self.unknowns["waveLoads_Px"], Fp)
npt.assert_equal(self.unknowns["waveLoads_Py"], 0.0)
npt.assert_equal(self.unknowns["waveLoads_Pz"], 0.0)
npt.assert_equal(self.unknowns["waveLoads_qdyn"], q)
npt.assert_equal(self.unknowns["waveLoads_pt"], q + 3.0)
npt.assert_equal(self.unknowns["waveLoads_z"], -100.0)
npt.assert_equal(self.unknowns["waveLoads_beta"], 0.0)
npt.assert_equal(self.unknowns["waveLoads_d"], 10.0)
def testCDset(self):
self.params["cd_usr"] = 2.0
U = 2.0
A = 4.0
r = 5.0
rho = 0.5
q = 0.5 * rho * U * U
area = 2 * r
D = q * area * 2.0
Fi = rho * A * np.pi * r * r
Fp = Fi + D
self.wave.compute(self.params, self.unknowns)
npt.assert_equal(self.unknowns["waveLoads_Px"], Fp)
def test_wave_derivs(self):
nPoints = 5
prob = om.Problem()
comp = wwd.CylinderWaveDrag(nPoints=nPoints)
prob.model.add_subsystem("comp", comp, promotes=["*"])
prob.setup(force_alloc_complex=True)
# Add some arbitrary inputs
prob.set_val("U", np.arange(nPoints), units="m/s")
prob.set_val("A", np.ones(nPoints), units="m/s**2")
prob.set_val("p", np.ones(nPoints) * 0.5, units="N/m**2")
prob.set_val("z", np.linspace(0.0, 10.0, nPoints), units="m")
prob.set_val("d", np.ones(nPoints), units="m")
prob.set_val("beta_wave", 1.2, units="deg")
prob.set_val("rho_water", 1.0, units="kg/m**3")
prob.set_val("mu_water", 0.001, units="kg/(m*s)")
prob.set_val("cm", 10.0)
prob.set_val("cd_usr", 0.01)
prob.run_model()
check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
assert_check_partials(check, rtol=5e-5, atol=1e-1)
def test_wind_derivs(self):
nPoints = 5
prob = om.Problem()
comp = wwd.CylinderWindDrag(nPoints=nPoints)
prob.model.add_subsystem("comp", comp, promotes=["*"])
prob.setup(force_alloc_complex=True)
# Add some arbitrary inputs
prob.set_val("U", np.arange(nPoints), units="m/s")
prob.set_val("z", np.linspace(0.0, 10.0, nPoints), units="m")
prob.set_val("d", np.ones(nPoints), units="m")
prob.set_val("beta_wind", 1.2, units="deg")
prob.set_val("rho_air", 1.0, units="kg/m**3")
prob.set_val("mu_air", 0.001, units="kg/(m*s)")
prob.set_val("cd_usr", 0.01)
prob.run_model()
check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
assert_check_partials(check, rtol=5e-5, atol=1e-1)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestDrag))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
py | 1a41da8c8b4dbde8c4ef50e44d218ff40b3437bd | #!/usr/bin/env python3
import json
import sys
TYPE_PARQUET_CONVERTED_TO_CLICKHOUSE = {
"TIMESTAMP_MICROS": "DateTime",
"TIMESTAMP_MILLIS": "DateTime",
"UTF8": "String",
}
TYPE_PARQUET_PHYSICAL_TO_CLICKHOUSE = {
"BOOLEAN": "UInt8",
"INT32": "Int32",
"INT64": "Int64",
"FLOAT": "Float32",
"DOUBLE": "Float64",
"BYTE_ARRAY": "String",
"INT96": "Int64", # TODO!
}
def read_file(filename):
with open(filename, "rb") as f:
return f.read().decode("raw_unicode_escape")
def get_column_name(column):
return column["Name"].split(".", 1)[0]
def resolve_clickhouse_column_type(column):
column_name = get_column_name(column)
logical_type = column.get("LogicalType", {})
converted_type = column.get("ConvertedType", "").upper()
physical_type = column.get("PhysicalType", "").upper()
if logical_type and logical_type.get("Type", "").upper() == "DECIMAL":
precision = int(logical_type["precision"])
scale = int(logical_type["scale"])
if precision < 1 or precision > 76:
raise RuntimeError("Column {} has invalid Decimal precision {}".format(column_name, precision))
if precision > 38:
raise RuntimeError("Column {} has unsupported Decimal precision {}".format(column_name, precision))
if scale < 0 or scale > precision:
raise RuntimeError("Column {} has invalid Decimal scale {} for precision {}".format(column_name, scale, precision))
return "Decimal({}, {})".format(precision, scale)
if converted_type and converted_type != "NONE":
result_type = TYPE_PARQUET_CONVERTED_TO_CLICKHOUSE.get(converted_type)
if result_type:
return result_type
raise RuntimeError("Column {} has unknown ConvertedType: {}".format(column_name, converted_type))
if physical_type and physical_type != "NONE":
result_type = TYPE_PARQUET_PHYSICAL_TO_CLICKHOUSE.get(physical_type)
if result_type:
return result_type
raise RuntimeError("Column {} has unknown PhysicalType: {}".format(column_name, physical_type))
raise RuntimeError("Column {} has invalid types: ConvertedType={}, PhysicalType={}".format(column_name, converted_type, physical_type))
def dump_columns(obj):
descr_by_column_name = {}
columns_descr = []
for column in obj["Columns"]:
column_name = get_column_name(column)
column_type = resolve_clickhouse_column_type(column)
result_type = "Nullable({})".format(column_type)
if column_name in descr_by_column_name:
descr = descr_by_column_name[column_name]
descr["types"].append(result_type)
else:
descr = {
"name": column_name,
"types": [result_type],
}
descr_by_column_name[column_name] = descr
columns_descr.append(descr)
# Make tuples from nested types. CH Server doesn't support such Arrow type but it makes Server Exceptions more relevant.
def _format_type(types):
if len(types) == 1:
return types[0]
else:
return "Tuple({})".format(", ".join(types))
print(", ".join(map(lambda descr: "`{}` {}".format(descr["name"], _format_type(descr["types"])), columns_descr)))
def dump_columns_from_file(filename):
dump_columns(json.loads(read_file(filename), strict=False))
if __name__ == "__main__":
filename = sys.argv[1]
dump_columns_from_file(filename)
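# Hedged example (not part of the original script) of the metadata layout that
# dump_columns() expects; the column names and types below are illustrative:
#
#   dump_columns({"Columns": [
#       {"Name": "id", "PhysicalType": "INT64"},
#       {"Name": "name", "ConvertedType": "UTF8", "PhysicalType": "BYTE_ARRAY"},
#       {"Name": "price", "LogicalType": {"Type": "DECIMAL", "precision": 10, "scale": 2}},
#   ]})
#   # prints: `id` Nullable(Int64), `name` Nullable(String), `price` Nullable(Decimal(10, 2))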
|
py | 1a41db4e50f3d4e1aa379c28b5a75bef42387cfd | import sys
import pickle
import numpy as np
from scipy.stats import bernoulli
sys.path.append('./../')
sys.path.append('./../../')
from src.FullModel.model import Model as parent_model
from src.LocalGlobalAttentionModel.model import Model as super_model
from .vel_param import VelParam as vel_param
from src.HMC.hmc import HMC
delta = 10 ** -200
class Model(parent_model):
"""
This class implements the Fixed Choice model as described in the paper.
It has the same local and global policies as the full model; the difference is in the calculation of rho.
Here rho has a fixed value.
"""
def __init__(self, saliencies, rho, epsilon, xi, cov_ratio=1):
# epsilon and xi should be the objects from parent_model, with fix_dist_ind = 0
# rho should be from this model
super_model.__init__(self, saliencies)
self.rho = rho
self.epsilon = epsilon
self.xi = xi
self.cov_ratio = cov_ratio
self.fix_dist_ind = 0
def calc_ros(self, *args):
return self.rho.value
# Methods for generating data
def generate_gamma(self, s_t):
"""
This method generates gamma according to a Bernoulli distribution with p = rho
:param s_t: here just to be compatible with the parent class.
:return: gamma \sim Ber(rho)
"""
return bernoulli.rvs(self.rho.value)
# Methods for parameters inference via Gibbs sampling
def sample_gamma(self):
"""
This method samples from the conditional posterior distribution of gamma.
For details see the paper.
:return: a sample \gamma_i for each data point
"""
BF = self.calc_BF()
gammas = []
for i, sal_ts in enumerate(self.saliencies_ts):
gammas.append([])
for s, subject in enumerate(sal_ts):
ros = self.rho.value / (self.rho.value + BF[i][s] * (1 - self.rho.value))
gammas[-1].append(bernoulli.rvs(ros))
return gammas
def sample(self, num_samples, save_steps, file_path, sample_gammas=True):
"""
This method performs Gibbs sampling for the model parameters.
:param num_samples: number of samples in the chain
:param save_steps: whether to save the chains.
:param file_path: path to a file to save the chains
:param sample_gammas: whether to sample gamma or not.
:return: arrays with samples for each of the model parameters - rho, epsilon, xi
"""
# initialize the arrays that will hold the samples.
samples_rho = np.zeros(num_samples)
samples_epsilon = np.zeros((num_samples, 2))
samples_xi = np.zeros((num_samples, 2))
# set variables needed for the HMC inference of epsilon and xi
vel_eps = vel_param([1, 1])
vel_xi = vel_param([1, 1])
delta_xi = 0.5
delta_eps = 0.03
n = 10
m = 1
hmc_eps = HMC(self.epsilon, vel_eps, delta_eps, n, m)
hmc_xi = HMC(self.xi, vel_xi, delta_xi, n, m)
if not sample_gammas:
self.remove_first_gamma()
for i in range(num_samples):
if sample_gammas:
self.gammas = self.sample_gamma()
if i == 0:
if not self.rho.is_fixed:
self.rho.set_num_time_steps(self.gammas)
rho_samp = self.rho.conditional_posterior(self.gammas)
self.rho.set_value(rho_samp)
if not self.epsilon.is_fixed:
hmc_eps.HMC(self.xi.value, self.cov_ratio, self.saliencies, self.gammas, self.fix_dists_2,
self.dist_mat_per_fix,
self.xi.alpha, self.xi.betta)
epsilon_samp = hmc_eps.state_param.value
if not self.xi.is_fixed:
hmc_xi.HMC(self.epsilon.value, self.cov_ratio, self.saliencies, self.gammas, self.fix_dists_2,
self.dist_mat_per_fix,
self.epsilon.alpha, self.epsilon.betta)
xi_samp = hmc_xi.state_param.value
samples_rho[i] = rho_samp
samples_epsilon[i] = epsilon_samp
samples_xi[i] = xi_samp
if save_steps and not i % 50:
with open(file_path, 'wb') as f:
pickle.dump([samples_rho[:i], samples_epsilon[:i], samples_xi[:i]], f)
if save_steps:
with open(file_path, 'wb') as f:
pickle.dump([samples_rho, samples_epsilon, samples_xi], f)
return samples_rho, samples_epsilon, samples_xi
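# Hedged usage sketch (not part of the original file). The constructors of the
# rho/epsilon/xi parameter objects live in the accompanying modules and are not
# shown here, so the variable names below are assumptions:
#
#   model = Model(saliencies, rho_param, eps_param, xi_param)
#   samples_rho, samples_eps, samples_xi = model.sample(
#       num_samples=1000, save_steps=True, file_path='chain.pkl')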
|
py | 1a41db6dd0462bf4796ab791a51b516c9dd66bf2 | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ReleaseStatus',
'RepositoryOpts',
]
@pulumi.output_type
class ReleaseStatus(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "appVersion":
suggest = "app_version"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ReleaseStatus. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ReleaseStatus.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ReleaseStatus.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status: str,
app_version: Optional[str] = None,
chart: Optional[str] = None,
name: Optional[str] = None,
namespace: Optional[str] = None,
revision: Optional[int] = None,
version: Optional[str] = None):
"""
:param str status: Status of the release.
:param str app_version: The version number of the application being deployed.
:param str chart: The name of the chart.
:param str name: Name is the name of the release.
:param str namespace: Namespace is the kubernetes namespace of the release.
:param int revision: Version is an int32 which represents the version of the release.
:param str version: A SemVer 2 conformant version string of the chart.
"""
pulumi.set(__self__, "status", status)
if app_version is not None:
pulumi.set(__self__, "app_version", app_version)
if chart is not None:
pulumi.set(__self__, "chart", chart)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if revision is not None:
pulumi.set(__self__, "revision", revision)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def status(self) -> str:
"""
Status of the release.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="appVersion")
def app_version(self) -> Optional[str]:
"""
The version number of the application being deployed.
"""
return pulumi.get(self, "app_version")
@property
@pulumi.getter
def chart(self) -> Optional[str]:
"""
The name of the chart.
"""
return pulumi.get(self, "chart")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name is the name of the release.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
"""
Namespace is the kubernetes namespace of the release.
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def revision(self) -> Optional[int]:
"""
Version is an int32 which represents the version of the release.
"""
return pulumi.get(self, "revision")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
A SemVer 2 conformant version string of the chart.
"""
return pulumi.get(self, "version")
@pulumi.output_type
class RepositoryOpts(dict):
"""
Specification defining the Helm chart repository to use.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "caFile":
suggest = "ca_file"
elif key == "certFile":
suggest = "cert_file"
elif key == "keyFile":
suggest = "key_file"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RepositoryOpts. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RepositoryOpts.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RepositoryOpts.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ca_file: Optional[str] = None,
cert_file: Optional[str] = None,
key_file: Optional[str] = None,
password: Optional[str] = None,
repo: Optional[str] = None,
username: Optional[str] = None):
"""
Specification defining the Helm chart repository to use.
:param str ca_file: The Repository's CA File
:param str cert_file: The repository's cert file
:param str key_file: The repository's cert key file
:param str password: Password for HTTP basic authentication
:param str repo: Repository where to locate the requested chart. If it is a URL, the chart is installed without installing the repository.
:param str username: Username for HTTP basic authentication
"""
if ca_file is not None:
pulumi.set(__self__, "ca_file", ca_file)
if cert_file is not None:
pulumi.set(__self__, "cert_file", cert_file)
if key_file is not None:
pulumi.set(__self__, "key_file", key_file)
if password is not None:
pulumi.set(__self__, "password", password)
if repo is not None:
pulumi.set(__self__, "repo", repo)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter(name="caFile")
def ca_file(self) -> Optional[str]:
"""
The Repository's CA File
"""
return pulumi.get(self, "ca_file")
@property
@pulumi.getter(name="certFile")
def cert_file(self) -> Optional[str]:
"""
The repository's cert file
"""
return pulumi.get(self, "cert_file")
@property
@pulumi.getter(name="keyFile")
def key_file(self) -> Optional[str]:
"""
The repository's cert key file
"""
return pulumi.get(self, "key_file")
@property
@pulumi.getter
def password(self) -> Optional[str]:
"""
Password for HTTP basic authentication
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def repo(self) -> Optional[str]:
"""
Repository where to locate the requested chart. If it is a URL, the chart is installed without installing the repository.
"""
return pulumi.get(self, "repo")
@property
@pulumi.getter
def username(self) -> Optional[str]:
"""
Username for HTTP basic authentication
"""
return pulumi.get(self, "username")
|
py | 1a41dcaa97874832b99c7cac721866e863c13083 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class AuthenticationError(Exception):
pass
class AuthenticationTypes:
TOKEN = "Token"
INTERNAL_TOKEN = "Internaltoken"
EPHEMERAL_TOKEN = "EphemeralToken"
VALUES = {TOKEN, INTERNAL_TOKEN, EPHEMERAL_TOKEN}
|
py | 1a41dcefad66371d3467ffcc29968d18b0179977 | # vse kar se izpise na zaslonu |
py | 1a41dcf3d05f8ee9ee107f63641a3c062fdab194 | """
Augmenters that overlay two images with each other.
Do not import directly from this file, as the categorization is not final.
Use instead
`from imgaug import augmenters as iaa`
and then e.g. ::
seq = iaa.Sequential([
iaa.Alpha(0.5, iaa.Add((-5, 5)))
])
List of augmenters:
* Alpha
* AlphaElementwise
* SimplexNoiseAlpha
* FrequencyNoiseAlpha
"""
from __future__ import print_function, division, absolute_import
from .. import imgaug as ia
# TODO replace these imports with iap.XYZ
from ..parameters import StochasticParameter, Deterministic, Binomial, Uniform
from .. import parameters as iap
import numpy as np
import six.moves as sm
from .meta import Augmenter, Sequential, handle_children_list
# TODO tests
class Alpha(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Augmenter to overlay two image sources with each other using an
alpha/transparency value.
The image sources can be imagined as branches.
If a source is not given, it is automatically the same as the input.
Let A be the first branch and B be the second branch.
Then the result images are defined as
factor * A + (1-factor) * B,
where `factor` is an overlay factor.
For keypoint augmentation this augmenter will pick the keypoints either
from the first or the second branch. The first one is picked if
`factor >= 0.5` is true (per image). It is recommended to *not* use
augmenters that change keypoint positions with this class.
Parameters
----------
factor : int or float or iterable of two floats or StochasticParameter, optional(default=0)
Weighting of the results of the first branch. Values close to 0 mean
that the results from the second branch (see parameter `second`)
make up most of the final image.
* If float, then that value will be used for all images.
* If tuple (a, b), then a random value from range a <= x <= b will
be sampled per image.
* If StochasticParameter, then that parameter will be used to
sample a value per image.
first : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the first of the two
branches.
* If None, then the input images will be reused as the output
of the first branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
second : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the second of the two
branches.
* If None, then the input images will be reused as the output
of the second branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
per_channel : bool or float, optional(default=False)
Whether to use the same factor for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.Alpha(0.5, iaa.Grayscale(1.0))
Converts each image to grayscale and overlays it by 50 percent with the
original image, thereby removing about 50 percent of all color. This
is equivalent to iaa.Grayscale(0.5).
>>> aug = iaa.Alpha((0.0, 1.0), iaa.Grayscale(1.0))
Converts each image to grayscale and overlays it by a random percentage
(sampled per image) with the original image, thereby removing a random
percentage of all colors. This is equivalent to iaa.Grayscale((0.0, 1.0)).
>>> aug = iaa.Alpha((0.0, 1.0), iaa.Affine(rotate=(-20, 20)), per_channel=0.5)
Rotates each image by a random degree from the range [-20, 20]. Then
overlays that new image with the original one by a random factor from the
range [0.0, 1.0]. In 50 percent of all cases, the overlay happens
channel-wise and the factor is sampled independently per channel. As a
result, e.g. the red channel may look visible rotated (factor near 1.0),
while the green and blue channels may not look rotated (factors near 0.0).
NOTE: It is not recommended to use Alpha with augmenters that change the
positions of pixels if you *also* want to augment keypoints, as it is
unclear which of the two keypoint results (first or second branch) should
be used as the final result.
>>> aug = iaa.Alpha((0.0, 1.0), first=iaa.Add(10), second=iaa.Multiply(0.8))
(A) Adds 10 to each image and (B) multiplies each image by 0.8. Then per
image an overlay factor is sampled from the range [0.0, 1.0]. If it is
close to 1.0, the results from (A) are mostly used, otherwise the ones
from (B). This is equivalent to
`iaa.Sequential([iaa.Multiply(0.8), iaa.Alpha((0.0, 1.0), iaa.Add(10))])`.
>>> aug = iaa.Alpha(iap.Choice([0.25, 0.75]), iaa.MedianBlur((3, 7)))
Applies a random median blur to each image and overlays the result with
the original image by either 25 or 75 percent strength.
"""
def __init__(self, factor=0, first=None, second=None, per_channel=False,
name=None, deterministic=False, random_state=None):
super(Alpha, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
if ia.is_single_number(factor):
ia.do_assert(0.0 <= factor <= 1.0, "Expected factor to have range [0, 1.0], got value %.2f." % (factor,))
self.factor = Deterministic(factor)
elif ia.is_iterable(factor):
ia.do_assert(len(factor) == 2, "Expected tuple/list with 2 entries, got %d entries." % (len(factor),))
self.factor = Uniform(factor[0], factor[1])
elif isinstance(factor, StochasticParameter):
self.factor = factor
else:
raise Exception("Expected float or int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(factor),))
ia.do_assert(first is not None or second is not None, "Expected 'first' and/or 'second' to not be None (i.e. at least one Augmenter), but got two None values.")
self.first = handle_children_list(first, self.name, "first")
self.second = handle_children_list(second, self.name, "second")
if per_channel in [True, False, 0, 1, 0.0, 1.0]:
self.per_channel = Deterministic(int(per_channel))
elif ia.is_single_number(per_channel):
ia.do_assert(0 <= per_channel <= 1.0)
self.per_channel = Binomial(per_channel)
else:
raise Exception("Expected per_channel to be boolean or number or StochasticParameter")
self.epsilon = 0.01
def _augment_images(self, images, random_state, parents, hooks):
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
if hooks.is_propagating(images, augmenter=self, parents=parents, default=True):
if self.first is None:
images_first = images
else:
images_first = self.first.augment_images(
images=images,
parents=parents + [self],
hooks=hooks
)
if self.second is None:
images_second = images
else:
images_second = self.second.augment_images(
images=images,
parents=parents + [self],
hooks=hooks
)
else:
images_first = images
images_second = images
for i in sm.xrange(nb_images):
image = images[i]
image_first = images_first[i]
image_second = images_second[i]
rs_image = ia.new_random_state(seeds[i])
per_channel = self.per_channel.draw_sample(random_state=rs_image)
input_dtype = image.dtype
if per_channel == 1:
nb_channels = image.shape[2]
samples = self.factor.draw_samples((nb_channels,), random_state=rs_image)
for c, sample in enumerate(samples):
ia.do_assert(0 <= sample <= 1.0)
# if the value is nearly 1.0 or 0.0 skip the computation
# and just use only the first/second image
if sample >= 1.0 - self.epsilon:
image[..., c] = image_first[..., c]
elif sample <= 0.0 + self.epsilon:
image[..., c] = image_second[..., c]
else:
image[..., c] = sample * image_first[..., c] + (1 - sample) * image_second[..., c]
# TODO change this to meta.clip_* and meta.restore_*
np.clip(image, 0, 255, out=image)
result[i] = image.astype(input_dtype)
else:
sample = self.factor.draw_sample(random_state=rs_image)
ia.do_assert(0 <= sample <= 1.0)
# if the value is nearly 1.0 or 0.0 skip the computation
# and just use only the first/second image
if sample >= 1.0 - self.epsilon:
image = image_first
elif sample <= 0.0 + self.epsilon:
image = image_second
else:
image = sample * image_first + (1 - sample) * image_second
# TODO change this to meta.clip_* and meta.restore_*
np.clip(image, 0, 255, out=image)
result[i] = image.astype(input_dtype)
return result
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
result = keypoints_on_images
nb_images = len(keypoints_on_images)
seeds = random_state.randint(0, 10**6, (nb_images,))
if hooks.is_propagating(keypoints_on_images, augmenter=self, parents=parents, default=True):
if self.first is None:
kps_ois_first = keypoints_on_images
else:
kps_ois_first = self.first.augment_keypoints(
keypoints_on_images=keypoints_on_images,
parents=parents + [self],
hooks=hooks
)
if self.second is None:
kps_ois_second = keypoints_on_images
else:
kps_ois_second = self.second.augment_keypoints(
keypoints_on_images=keypoints_on_images,
parents=parents + [self],
hooks=hooks
)
else:
kps_ois_first = keypoints_on_images
kps_ois_second = keypoints_on_images
for i in sm.xrange(nb_images):
kps_oi_first = kps_ois_first[i]
kps_oi_second = kps_ois_second[i]
rs_image = ia.new_random_state(seeds[i])
# keypoint augmentation also works channel-wise, even though
# keypoints do not have channels, in order to keep the random
# values properly synchronized with the image augmentation
per_channel = self.per_channel.draw_sample(random_state=rs_image)
if per_channel == 1:
nb_channels = keypoints_on_images[i].shape[2]
samples = self.factor.draw_samples((nb_channels,), random_state=rs_image)
sample = np.average(samples)
else:
sample = self.factor.draw_sample(random_state=rs_image)
ia.do_assert(0 <= sample <= 1.0)
# We can't choose "just a bit" of one keypoint augmentation result
# without messing up the positions (interpolation doesn't make much
# sense here), so if the alpha is >= 0.5 (branch A is more visible
# than branch B), we use the result of branch A, otherwise that of
# branch B.
if sample >= 0.5:
result[i] = kps_oi_first
else:
result[i] = kps_oi_second
return result
def _to_deterministic(self):
aug = self.copy()
aug.first = aug.first.to_deterministic() if aug.first is not None else None
aug.second = aug.second.to_deterministic() if aug.second is not None else None
aug.deterministic = True
aug.random_state = ia.new_random_state()
return aug
def get_parameters(self):
return [self.factor, self.per_channel]
def get_children_lists(self):
return [self.first, self.second]
class AlphaElementwise(Alpha): # pylint: disable=locally-disabled, unused-variable, line-too-long
"""
Augmenter to overlay two image sources with each other using pixelwise
alpha values.
This is the same as `Alpha`, except that the transparency factor is
sampled per pixel instead of once per image (or a few times per image, if
per_channel is True).
See `Alpha` for more description.
Parameters
----------
factor : float or iterable of two floats or StochasticParameter, optional(default=0)
Weighting of the results of the first branch. Values close to 0 mean
that the results from the second branch (see parameter `second`)
make up most of the final image.
* If float, then that value will be used for all images.
* If tuple (a, b), then a random value from range a <= x <= b will
be sampled per image.
* If StochasticParameter, then that parameter will be used to
sample a value per image.
first : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the first of the two
branches.
* If None, then the input images will be reused as the output
of the first branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
second : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the second of the two
branches.
* If None, then the input images will be reused as the output
of the second branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
per_channel : bool or float, optional(default=False)
Whether to use the same factor for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.AlphaElementwise(0.5, iaa.Grayscale(1.0))
Converts each image to grayscale and overlays it by 50 percent with the
original image, thereby removing about 50 percent of all color. This
is equivalent to iaa.Grayscale(0.5). This is also equivalent to
iaa.Alpha(0.5, iaa.Grayscale(1.0)), as the transparency factor is the
same for all pixels.
>>> aug = iaa.AlphaElementwise((0, 1.0), iaa.Grayscale(1.0))
Converts each image to grayscale and overlays it by a random percentage
(sampled per pixel) with the original image, thereby removing a random
percentage of all colors per pixel.
>>> aug = iaa.AlphaElementwise((0.0, 1.0), iaa.Affine(rotate=(-20, 20)), per_channel=0.5)
Rotates each image by a random degree from the range [-20, 20]. Then
overlays that new image with the original one by a random factor from the
range [0.0, 1.0], sampled per pixel. In 50 percent of all cases, the
overlay happens channel-wise and the factor is sampled independently per
channel. As a result, e.g. the red channel may look visible rotated (factor
near 1.0), while the green and blue channels may not look rotated (factors
near 0.0). NOTE: It is not recommended to use Alpha with augmenters that
change the positions of pixels if you *also* want to augment keypoints, as
it is unclear which of the two keypoint results (first or second branch)
should be used as the final result.
>>> aug = iaa.AlphaElementwise((0.0, 1.0), first=iaa.Add(10), second=iaa.Multiply(0.8))
(A) Adds 10 to each image and (B) multiplies each image by 0.8. Then per
pixel an overlay factor is sampled from the range [0.0, 1.0]. If it is
close to 1.0, the results from (A) are mostly used, otherwise the ones
from (B).
>>> aug = iaa.AlphaElementwise(iap.Choice([0.25, 0.75]), iaa.MedianBlur((3, 7)))
Applies a random median blur to each image and overlays the result with
the original image by either 25 or 75 percent strength (sampled per pixel).
"""
def __init__(self, factor=0, first=None, second=None, per_channel=False,
name=None, deterministic=False, random_state=None):
super(AlphaElementwise, self).__init__(
factor=factor,
first=first,
second=second,
per_channel=per_channel,
name=name,
deterministic=deterministic,
random_state=random_state
)
def _augment_images(self, images, random_state, parents, hooks):
result = images
nb_images = len(images)
seeds = random_state.randint(0, 10**6, (nb_images,))
if hooks.is_propagating(images, augmenter=self, parents=parents, default=True):
if self.first is None:
images_first = images
else:
images_first = self.first.augment_images(
images=images,
parents=parents + [self],
hooks=hooks
)
if self.second is None:
images_second = images
else:
images_second = self.second.augment_images(
images=images,
parents=parents + [self],
hooks=hooks
)
else:
images_first = images
images_second = images
for i in sm.xrange(nb_images):
image = images[i]
h, w, nb_channels = image.shape[0:3]
image_first = images_first[i]
image_second = images_second[i]
per_channel = self.per_channel.draw_sample(random_state=ia.new_random_state(seeds[i]))
input_dtype = image.dtype
if per_channel == 1:
for c in sm.xrange(nb_channels):
samples_c = self.factor.draw_samples((h, w), random_state=ia.new_random_state(seeds[i]+1+c))
ia.do_assert(0 <= samples_c.item(0) <= 1.0) # validate only first value
image[..., c] = samples_c * image_first[..., c] + (1.0 - samples_c) * image_second[..., c]
# TODO change this to meta.clip_* and meta.restore_*
np.clip(image, 0, 255, out=image)
result[i] = image.astype(input_dtype)
else:
samples = self.factor.draw_samples((h, w), random_state=ia.new_random_state(seeds[i]))
samples = np.tile(samples[..., np.newaxis], (1, 1, nb_channels))
ia.do_assert(0.0 <= samples.item(0) <= 1.0)
image = samples * image_first + (1.0 - samples) * image_second
# TODO change this to meta.clip_* and meta.restore_*
np.clip(image, 0, 255, out=image)
result[i] = image.astype(input_dtype)
return result
def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
result = keypoints_on_images
nb_images = len(keypoints_on_images)
seeds = random_state.randint(0, 10**6, (nb_images,))
if hooks.is_propagating(keypoints_on_images, augmenter=self, parents=parents, default=True):
if self.first is None:
kps_ois_first = keypoints_on_images
else:
kps_ois_first = self.first.augment_keypoints(
keypoints_on_images=keypoints_on_images,
parents=parents + [self],
hooks=hooks
)
if self.second is None:
kps_ois_second = keypoints_on_images
else:
kps_ois_second = self.second.augment_keypoints(
keypoints_on_images=keypoints_on_images,
parents=parents + [self],
hooks=hooks
)
else:
kps_ois_first = keypoints_on_images
kps_ois_second = keypoints_on_images
# FIXME this is essentially the same behaviour as Alpha, requires inclusion of (x, y)
# coordinates to estimate new keypoint coordinates
for i in sm.xrange(nb_images):
kps_oi_first = kps_ois_first[i]
kps_oi_second = kps_ois_second[i]
#rs_image = ia.new_random_state(seeds[i])
ia.do_assert(
len(kps_oi_first.shape) == 3,
"Keypoint augmentation in AlphaElementwise requires " \
"KeypointsOnImage.shape to have channel information (i.e. " \
"tuple with 3 entries), which you did not provide (input " \
"shape: %s). The channels must match the corresponding " \
"image channels." % (kps_oi_first.shape,)
)
h, w, nb_channels = kps_oi_first.shape[0:3]
# keypoint augmentation also works channel-wise, even though
# keypoints do not have channels, in order to keep the random
# values properly synchronized with the image augmentation
per_channel = self.per_channel.draw_sample(random_state=ia.new_random_state(seeds[i]))
if per_channel == 1:
#samples = self.factor.draw_samples((h, w, nb_channels,), random_state=rs_image)
samples = np.zeros((h, w, nb_channels), dtype=np.float32)
for c in sm.xrange(nb_channels):
samples_c = self.factor.draw_samples((h, w), random_state=ia.new_random_state(seeds[i]+1+c))
samples[:, :, c] = samples_c
else:
samples = self.factor.draw_samples((h, w), random_state=ia.new_random_state(seeds[i]))
ia.do_assert(0.0 <= samples.item(0) <= 1.0)
sample = np.average(samples)
# We can't choose "just a bit" of one keypoint augmentation result
# without messing up the positions (interpolation doesn't make much
# sense here), so if the alpha is >= 0.5 (branch A is more visible
# than branch B), we use the result of branch A, otherwise that of
# branch B.
if sample >= 0.5:
result[i] = kps_oi_first
else:
result[i] = kps_oi_second
return result
def SimplexNoiseAlpha(first=None, second=None, per_channel=False,
size_px_max=(2, 16), upscale_method=None,
iterations=(1, 3), aggregation_method="max",
sigmoid=True, sigmoid_thresh=None,
name=None, deterministic=False, random_state=None):
"""
Augmenter to overlay two image sources with each other using alpha values
that follow noisy patterns.
The alpha masks are sampled using a simplex noise method, roughly creating
connected blobs of 1s surrounded by 0s. If nearest neighbour upsampling
is used, these blobs can be rectangular with sharp edges.
Parameters
----------
first : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the first of the two
branches.
* If None, then the input images will be reused as the output
of the first branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
second : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the second of the two
branches.
* If None, then the input images will be reused as the output
of the second branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
per_channel : bool or float, optional(default=False)
Whether to use the same factor for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
size_px_max : int or tuple of ints or list of ints or StochasticParameter, optional(default=(2, 16))
The simplex noise is always generated in a low resolution environment.
This parameter defines the maximum size of that environment (in
pixels). The environment is initialized at the same size as the input
image and then downscaled, so that no side exceeds `size_px_max`
(aspect ratio is kept).
* If int, then that number will be used as the size for all
iterations.
* If tuple of two ints (a, b), then a value will be sampled
per iteration from the discrete range [a..b].
* If a list of ints, then a value will be picked per iteration at
random from that list.
* If a StochasticParameter, then a value will be sampled from
that parameter per iteration.
upscale_method : None or ia.ALL or string or list of string or StochasticParameter, optional(default=None)
After generating the noise maps in low resolution environments, they
have to be upscaled to the input image size. This parameter controls
the upscaling method.
* If None, then either 'nearest' or 'linear' or 'cubic' is picked.
Most weight is put on linear, followed by cubic.
* If ia.ALL, then either 'nearest' or 'linear' or 'area' or 'cubic'
is picked per iteration (all same probability).
* If string, then that value will be used as the method (must be
'nearest' or 'linear' or 'area' or 'cubic').
* If list of string, then a random value will be picked from that
list per iteration.
* If StochasticParameter, then a random value will be sampled
from that parameter per iteration.
iterations : int or tuple of ints or list of ints or StochasticParameter, optional(default=(1, 3))
How often to repeat the simplex noise generation process per
image.
* If int, then that number will be used as the iterations for all
images.
* If tuple of two ints (a, b), then a value will be sampled
per image from the discrete range [a..b].
* If a list of ints, then a value will be picked per image at
random from that list.
* If a StochasticParameter, then a value will be sampled from
that parameter per image.
aggregation_method : ia.ALL or string or list of string or StochasticParameter, optional(default="max")
The noise maps (from each iteration) are combined to one noise map
using an aggregation process. This parameter defines the method used
for that process. Valid methods are 'min', 'max' or 'avg',
where 'min' combines the noise maps by taking the (elementwise) minimum
over all iterations' results, 'max' the (elementwise) maximum and
'avg' the (elementwise) average.
* If ia.ALL, then a random value will be picked per image from the
valid ones.
* If a string, then that value will always be used as the method.
* If a list of string, then a random value will be picked from
that list per image.
* If a StochasticParameter, then a random value will be sampled
from that parameter per image.
sigmoid : bool or number, optional(default=True)
Whether to apply a sigmoid function to the final noise maps, resulting
in maps that have more extreme values (close to 0.0 or 1.0).
* If bool, then a sigmoid will always (True) or never (False) be
applied.
* If a number p with 0<=p<=1, then a sigmoid will be applied to
p percent of all final noise maps.
sigmoid_thresh : None or number or tuple of number or StochasticParameter, optional(default=None)
Threshold of the sigmoid, when applied. Thresholds above zero
(e.g. 5.0) will move the saddle point towards the right, leading to
more values close to 0.0.
* If None, then Normal(0, 5.0) will be used.
* If number, then that threshold will be used for all images.
* If tuple of two numbers (a, b), then a random value will
be sampled per image from the range [a, b].
* If StochasticParameter, then a random value will be sampled from
that parameter per image.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.SimplexNoiseAlpha(iaa.EdgeDetect(1.0))
Detects per image all edges, marks them in a black and white image and
then overlays the result with the original image using simplex noise masks.
>>> aug = iaa.SimplexNoiseAlpha(iaa.EdgeDetect(1.0), upscale_method="linear")
Same as the first example, but uses only (smooth) linear upscaling to
scale the simplex noise masks to the final image sizes, i.e. no nearest
neighbour upsampling is used, which would result in rectangles with hard
edges.
>>> aug = iaa.SimplexNoiseAlpha(iaa.EdgeDetect(1.0), sigmoid_thresh=iap.Normal(10.0, 5.0))
Same as the first example, but uses a threshold for the sigmoid function
that is further to the right. This is more conservative, i.e. the generated
noise masks will be mostly black (values around 0.0), which means that
most of the original images (parameter/branch `second`) will be kept,
rather than using the results of the augmentation (parameter/branch
`first`).
"""
upscale_method_default = iap.Choice(["nearest", "linear", "cubic"], p=[0.05, 0.6, 0.35])
sigmoid_thresh_default = iap.Normal(0.0, 5.0)
noise = iap.SimplexNoise(
size_px_max=size_px_max,
upscale_method=upscale_method if upscale_method is not None else upscale_method_default
)
if iterations != 1:
noise = iap.IterativeNoiseAggregator(
noise,
iterations=iterations,
aggregation_method=aggregation_method
)
if sigmoid != False or (ia.is_single_number(sigmoid) and sigmoid <= 0.01):
noise = iap.Sigmoid.create_for_noise(
noise,
threshold=sigmoid_thresh if sigmoid_thresh is not None else sigmoid_thresh_default,
activated=sigmoid
)
return AlphaElementwise(
factor=noise, first=first, second=second, per_channel=per_channel,
name=name, deterministic=deterministic, random_state=random_state
)
def FrequencyNoiseAlpha(exponent=(-4, 4),
first=None, second=None, per_channel=False,
size_px_max=(4, 16), upscale_method=None,
iterations=(1, 3), aggregation_method=["avg", "max"], # pylint: disable=locally-disabled, dangerous-default-value, line-too-long
sigmoid=0.5, sigmoid_thresh=None,
name=None, deterministic=False, random_state=None):
"""
Augmenter to overlay two image sources with each other using alpha values
that follow noisy patterns.
The alpha masks are sampled using frequency noise of varying scales,
which can sometimes create large connected blobs of 1s surrounded by 0s
and other times results in smaller patterns. If nearest neighbour
upsampling is used, these blobs can be rectangular with sharp edges.
Parameters
----------
exponent : number or tuple of numbers of list of numbers or StochasticParameter, optional(default=(-4, 4))
Exponent to use when scaling in the frequency domain.
Sane values are in the range -4 (large blobs) to 4 (small patterns).
To generate cloud-like structures, use roughly -2.
* If number, then that number will be used as the exponent for all
iterations.
* If tuple of two numbers (a, b), then a value will be sampled
per iteration from the range [a, b].
* If a list of numbers, then a value will be picked per iteration
at random from that list.
* If a StochasticParameter, then a value will be sampled from
that parameter per iteration.
first : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the first of the two
branches.
* If None, then the input images will be reused as the output
of the first branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
second : None or Augmenter or iterable of Augmenter, optional(default=None)
Augmenter(s) that make up the second of the two
branches.
* If None, then the input images will be reused as the output
of the second branch.
* If Augmenter, then that augmenter will be used as the branch.
* If iterable of Augmenter, then that iterable will be converted
into a Sequential and used as the augmenter.
per_channel : bool or float, optional(default=False)
Whether to use the same factor for all channels (False)
or to sample a new value for each channel (True).
If this value is a float p, then for p percent of all images
`per_channel` will be treated as True, otherwise as False.
size_px_max : int or tuple of ints or list of ints or StochasticParameter, optional(default=(4, 16))
The noise is generated in a low resolution environment.
This parameter defines the maximum size of that environment (in
pixels). The environment is initialized at the same size as the input
image and then downscaled, so that no side exceeds `size_px_max`
(aspect ratio is kept).
* If int, then that number will be used as the size for all
iterations.
* If tuple of two ints (a, b), then a value will be sampled
per iteration from the discrete range [a..b].
* If a list of ints, then a value will be picked per iteration at
random from that list.
* If a StochasticParameter, then a value will be sampled from
that parameter per iteration.
upscale_method : None or ia.ALL or string or list of string or StochasticParameter, optional(default=None)
After generating the noise maps in low resolution environments, they
have to be upscaled to the input image size. This parameter controls
the upscaling method.
* If None, then either 'nearest' or 'linear' or 'cubic' is picked.
Most weight is put on linear, followed by cubic.
* If ia.ALL, then either 'nearest' or 'linear' or 'area' or 'cubic'
is picked per iteration (all same probability).
* If string, then that value will be used as the method (must be
'nearest' or 'linear' or 'area' or 'cubic').
* If list of string, then a random value will be picked from that
list per iteration.
* If StochasticParameter, then a random value will be sampled
from that parameter per iteration.
iterations : int or tuple of ints or list of ints or StochasticParameter, optional(default=(1, 3))
How often to repeat the frequency noise generation process per
image.
* If int, then that number will be used as the iterations for all
images.
* If tuple of two ints (a, b), then a value will be sampled
per image from the discrete range [a..b].
* If a list of ints, then a value will be picked per image at
random from that list.
* If a StochasticParameter, then a value will be sampled from
that parameter per image.
aggregation_method : ia.ALL or string or list of string or StochasticParameter, optional(default=["avg", "max"])
The noise maps (from each iteration) are combined to one noise map
using an aggregation process. This parameter defines the method used
for that process. Valid methods are 'min', 'max' or 'avg',
where 'min' combines the noise maps by taking the (elementwise) minimum
over all iterations' results, 'max' the (elementwise) maximum and
'avg' the (elementwise) average.
* If ia.ALL, then a random value will be picked per image from the
valid ones.
* If a string, then that value will always be used as the method.
* If a list of string, then a random value will be picked from
that list per image.
* If a StochasticParameter, then a random value will be sampled
from that parameter per image.
sigmoid : bool or number, optional(default=0.5)
Whether to apply a sigmoid function to the final noise maps, resulting
in maps that have more extreme values (close to 0.0 or 1.0).
* If bool, then a sigmoid will always (True) or never (False) be
applied.
* If a number p with 0<=p<=1, then a sigmoid will be applied to
p percent of all final noise maps.
sigmoid_thresh : None or number or tuple of number or StochasticParameter, optional(default=None)
Threshold of the sigmoid, when applied. Thresholds above zero
(e.g. 5.0) will move the saddle point towards the right, leading to
more values close to 0.0.
* If None, then Normal(0, 5.0) will be used.
* If number, then that threshold will be used for all images.
* If tuple of two numbers (a, b), then a random value will
be sampled per image from the range [a, b].
* If StochasticParameter, then a random value will be sampled from
that parameter per image.
name : string, optional(default=None)
See `Augmenter.__init__()`
deterministic : bool, optional(default=False)
See `Augmenter.__init__()`
random_state : int or np.random.RandomState or None, optional(default=None)
See `Augmenter.__init__()`
Examples
--------
>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0))
Detects per image all edges, marks them in a black and white image and
then overlays the result with the original image using frequency noise
masks.
>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), upscale_method="linear")
Same as the first example, but uses only (smooth) linear upscaling to
scale the frequency noise masks to the final image sizes, i.e. no nearest
neighbour upsampling is used, which would result in rectangles with hard
edges.
>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), upscale_method="linear", exponent=-2, sigmoid=False)
Same as the previous example, but also limits the exponent to -2 and
deactivates the sigmoid, resulting in cloud-like patterns without sharp
edges.
>>> aug = iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0), sigmoid_thresh=iap.Normal(10.0, 5.0))
Same as the first example, but uses a threshold for the sigmoid function
that is further to the right. This is more conservative, i.e. the generated
noise masks will be mostly black (values around 0.0), which means that
most of the original images (parameter/branch `second`) will be kept,
rather than using the results of the augmentation (parameter/branch
`first`).
"""
upscale_method_default = iap.Choice(["nearest", "linear", "cubic"], p=[0.05, 0.6, 0.35])
sigmoid_thresh_default = iap.Normal(0.0, 5.0)
noise = iap.FrequencyNoise(
exponent=exponent,
size_px_max=size_px_max,
upscale_method=upscale_method if upscale_method is not None else upscale_method_default
)
if iterations != 1:
noise = iap.IterativeNoiseAggregator(
noise,
iterations=iterations,
aggregation_method=aggregation_method
)
if sigmoid != False or (ia.is_single_number(sigmoid) and sigmoid <= 0.01):
noise = iap.Sigmoid.create_for_noise(
noise,
threshold=sigmoid_thresh if sigmoid_thresh is not None else sigmoid_thresh_default,
activated=sigmoid
)
return AlphaElementwise(
factor=noise, first=first, second=second, per_channel=per_channel,
name=name, deterministic=deterministic, random_state=random_state
)
|
py | 1a41dd04a74337a536106973e2fccf835bf91f60 | from django.contrib import admin
from .models import Editor,Article,tag
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
filter_horizontal =('tag',)
admin.site.register(Editor)
admin.site.register(Article,ArticleAdmin)
admin.site.register(tag)
|
py | 1a41dd51889d583047651bec7c28b18bfb86b27b | #!/usr/bin/env python3
import functools
import os.path
import numpy as np
class CExample(object):
def __init__(self, x, y, w, z=1):
self.x = x
self.y = y
self.w = w
self.z = z
def copy(self):
return CExample(self.x, self.y, self.w, self.z)
class CDataSet(object):
def __init__(self, all_data=None, train=None, test=None, log_data=None, online_data=None, r=None):
self.all_data = [] if all_data is None else [x.copy() for x in all_data]
self.train_data = [] if train is None else [x.copy() for x in train]
self.test_data = [] if test is None else [x.copy() for x in test]
self.log_data = None if log_data is None else [x.copy() for x in log_data]
self.online_data = None if online_data is None else [x.copy() for x in online_data]
def load_data(self, filename, handler):
self.all_data = []
with open(filename) as file:
if handler == data_parser_libsvm:
self.all_data = data_parser_libsvm([line for line in file])
else:
self.all_data = [handler(line.strip().split(',')) for line in file]
def copy_all(self):
return CDataSet(self.all_data, self.train_data, self.test_data)
def copy(self):
return CDataSet(self.all_data, self.train_data, self.test_data, self.log_data, self.online_data)
def random_split(self, prop, r):
self.train_data = [x for x in self.all_data]
r.shuffle(self.train_data)
cnt = int(len(self.all_data)*prop)
self.test_data = self.train_data[cnt:]
self.train_data = self.train_data[:cnt]
def split_log(self, prop):
cnt = int(len(self.train_data)*prop)
self.log_data = self.train_data[:cnt]
self.online_data = self.train_data[cnt:]
def to_binary_label(dataset, rule):
return CDataSet([CExample(d.x, rule(d.y), d.w, d.z) for d in dataset.all_data if rule(d.y)!=0])
def normalize(dataset):
lb = functools.reduce(np.minimum, [e.x for e in dataset.all_data])
ub = functools.reduce(np.maximum, [e.x for e in dataset.all_data])
mid = (lb+ub)/2
diff = np.array([x if x>0 else 1 for x in ub-lb])
return CDataSet([CExample((e.x-mid)/diff*2, e.y, e.w, e.z) for e in dataset.all_data])
def gen_synthetic_uniform(n, d, r):
w = r.rand(d) - r.rand(d)
X = r.rand(n, d) - r.rand(n, d)
return w, [CExample(x, (1 if np.inner(x,w)>=0 else -1)*(-1 if r.rand()<0.05 else 1), 1, 1) for x in X]
def gen_synthetic_bandit(data, Q, r):
prop = [Q(dp.x) for dp in data]
tmp = [CExample(dp[0].x, dp[0].y, 1.0/dp[1], 1 if r.rand()<dp[1] else 0) for dp in zip(data, prop)]
return [CExample(tmp[i].x, tmp[i].y, tmp[i].w, i+1) for i in range(0, len(tmp)) if tmp[i].z==1]
def data_parser_rear(l):
features = np.array([float(x) for x in l[:-1]])
return CExample(features, l[-1], 1, 1)
def data_parser_front(l):
features = np.array([float(x) for x in l[1:]])
return CExample(features, l[0], 1, 1)
def data_parser_libsvm(ls):
split_ls = [l.strip().split() for l in ls]
num_features = max([max([0] + [int(e.split(":")[0]) for e in l[1:]]) for l in split_ls])
examples = []
for l in split_ls:
f = [0]*num_features
for e in l[1:]:
idx, val = e.split(":")
f[int(idx)-1] = float(val)
examples.append(CExample(np.array(f), l[0].strip(), 1, 1))
return examples
DATA_COLLECTION_PATHS = ["../data/", "../../data/", "../../../data/", \
"/media/songbai/Files/research/observational/logged data/code/data/", \
"N:\\research\\observational\\logged data\\code\\data\\"]
LibsvmBinaryRule = lambda s: 1 if float(s) > 0.5 else -1
DatasetInfo = {"skin": ("skin.txt", data_parser_rear, lambda s: 1 if s=="1" else -1),\
"magic": ("magic04.data", data_parser_rear, lambda s: 1 if s=="g" else -1),\
"eeg": ("eeg.data", data_parser_rear, lambda s: 1 if s=="1" else -1),\
"covtype": ("covtype.data", data_parser_rear, lambda s: 1 if s=="1" else (-1 if s=="2" else 0)),\
"letter": ("letter.data", data_parser_front, lambda s: 1 if s=="U" else (-1 if s=="P" else 0)),\
"a9a": ("a9a.txt", data_parser_libsvm, LibsvmBinaryRule),\
"a5a": ("a5a", data_parser_libsvm, LibsvmBinaryRule),\
"cod-rna": ("cod-rna.txt", data_parser_libsvm, LibsvmBinaryRule),\
"german": ("german.numer_scale", data_parser_libsvm, LibsvmBinaryRule),\
"ijcnn1": ("ijcnn1.tr", data_parser_libsvm, LibsvmBinaryRule),\
"mushrooms": ("mushrooms.txt", data_parser_libsvm, lambda s: 1 if int(s)==1 else -1),\
"phishing": ("phishing.txt", data_parser_libsvm, LibsvmBinaryRule),\
"splice": ("splice.t", data_parser_libsvm, LibsvmBinaryRule),\
"svmguide1": ("svmguide1.t", data_parser_libsvm, LibsvmBinaryRule),\
"w7a": ("w7a", data_parser_libsvm, LibsvmBinaryRule),}
def load_data(dataset_name, r, max_sz = None):
if dataset_name == "synthetic":
return CDataSet(gen_synthetic_uniform(6000 if max_sz is None else max_sz, 30, r)[1])
if dataset_name not in DatasetInfo:
print("dataset " + dataset_name +" is unknown")
return None
dataset_path = None
info = DatasetInfo[dataset_name]
for path in DATA_COLLECTION_PATHS:
#print(path+"/"+dataset_name)
if os.path.isfile(path+"/"+info[0]):
dataset_path = path+"/"+info[0]
break
if dataset_path is None:
print("data file for " + dataset_name +" does not exist")
return None
dataset = CDataSet()
dataset.load_data(dataset_path, info[1])
dataset = normalize(to_binary_label(dataset, info[2]))
if max_sz != None:
r.shuffle(dataset.all_data)
dataset = CDataSet(dataset.all_data[:max_sz])
return dataset
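# --- Usage sketch (editor's addition, a hedged illustration only) ---
# Shows how the helpers above can be combined: load the built-in "synthetic"
# dataset (the only option that needs no data files on disk), split it, carve
# out a logged portion, and convert that portion to bandit feedback with a
# hypothetical uniform logging policy Q. Assumes numpy is imported as np at
# the top of this module, as the functions above already require.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    ds = load_data("synthetic", rng, max_sz=1000)
    if ds is not None:
        ds.random_split(0.7, rng)           # 70% train / 30% test
        ds.split_log(0.5)                   # half of train becomes logged data
        Q = lambda x: 0.5                   # hypothetical logging policy
        bandit_log = gen_synthetic_bandit(ds.log_data, Q, rng)
        print(len(ds.train_data), len(ds.test_data), len(bandit_log))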
|
py | 1a41de08a17964be2b1ba33f4a0db34b538a4b21 | """
Copyright (C) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.ops.concat import Concat
from openvino.tools.mo.utils.error import Error
class ConcatExtractor(FrontExtractorOp):
op = 'Concat'
enabled = True
@classmethod
def extract(cls, node):
attrs = {
'axis': node.module.dim,
}
Concat.update_node_stat(node, attrs)
return cls.enabled
|
py | 1a41de386d2bb732c1e2fc4d1c32945b064c8458 | import json
def load_json(json_file):
try:
with open(json_file) as f:
params, training_stats = json.load(f)
training_curve = training_stats['training_curve']
_, train_loss, test_loss = training_curve[-1]
_, halfway_train_loss, _ = training_curve[len(training_curve)/2]
return {'params' : params,
'varied_params' : params['varied_params'],
'train_loss' : train_loss,
'test_loss' : test_loss,
'training_curve' : training_curve,
'halfway_train_loss' : halfway_train_loss}
except ValueError:
return None
def get_losses(loss_name):
return map(lambda x : x[loss_name], jobs_data)
def get_hypers(hyper_name):
return map(lambda x : x['varied_params'][hyper_name], jobs_data)
def get_jobs_data(data_file_names):
return filter(bool, map(load_json, data_file_names))
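# --- Usage sketch (editor's addition, illustrative only) ---
# The file names below are hypothetical; they stand in for the per-job JSON
# result files this module expects. Missing files are filtered out so the
# sketch stays runnable even without real results on disk.
if __name__ == '__main__':
    import os
    candidate_files = ['job_0.json', 'job_1.json']   # hypothetical paths
    jobs_data = get_jobs_data([f for f in candidate_files if os.path.exists(f)])
    # get_losses/get_hypers read the module-level name jobs_data set above
    print(list(get_losses('test_loss')))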
|
py | 1a41de588c36b4c9a6c15bf2cd306dd29ba07c29 | # Copyright 2019 James Brown
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import logging
import functools
from typing import TYPE_CHECKING
from contextlib import contextmanager
from torpy.guard import TorGuard
from torpy.utils import retry, log_retry
from torpy.circuit import TorCircuit
from torpy.cell_socket import TorSocketConnectError
from torpy.consesus import TorConsensus
from torpy.cache_storage import TorCacheDirStorage
if TYPE_CHECKING:
from typing import ContextManager
logger = logging.getLogger(__name__)
class TorClient:
def __init__(self, consensus=None, auth_data=None):
self._consensus = consensus or TorConsensus()
self._auth_data = auth_data or {}
@classmethod
def create(cls, authorities=None, cache_class=None, cache_kwargs=None, auth_data=None):
cache_class = cache_class or TorCacheDirStorage
cache_kwargs = cache_kwargs or {}
consensus = TorConsensus(authorities=authorities, cache_storage=cache_class(**cache_kwargs))
return cls(consensus, auth_data)
@retry(3, BaseException, log_func=functools.partial(log_retry,
msg='Retry with another guard...',
no_traceback=(socket.timeout, TorSocketConnectError,))
)
def get_guard(self, by_flags=None):
# TODO: add another stuff to filter guards
guard_router = self._consensus.get_random_guard_node(by_flags)
return TorGuard(guard_router, purpose='TorClient', consensus=self._consensus, auth_data=self._auth_data)
@contextmanager
def create_circuit(self, hops_count=3, guard_by_flags=None) -> 'ContextManager[TorCircuit]':
with self.get_guard(guard_by_flags) as guard:
yield guard.create_circuit(hops_count)
def __enter__(self):
"""Start using the tor client."""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Close the tor client."""
self.close()
def close(self):
self._consensus.close()
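# --- Usage sketch (editor's addition, a hedged illustration only) ---
# Demonstrates the public surface defined above: build a client with default
# consensus/auth settings and open a 3-hop circuit. Reaching the Tor network
# is required, so this is a guarded sketch rather than a test.
if __name__ == '__main__':
    with TorClient() as client:
        with client.create_circuit(hops_count=3) as circuit:
            # higher-level streams/requests would be layered on `circuit` here
            logger.info('circuit established: %r', circuit)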
|
py | 1a41df3da86df7bf7adbb72eb9a22c9001e6bc45 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import django.db.models.deletion
import utils.time
class Migration(migrations.Migration):
dependencies = [
('events', '0037_merge'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HasVoted',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
],
options={
'verbose_name': 'deltagare i omröstningen',
'verbose_name_plural': 'deltagarna i omröstningen',
},
),
migrations.CreateModel(
name='Options',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(verbose_name='alternativ', max_length=255)),
],
options={
'verbose_name': 'alternativ',
'verbose_name_plural': 'alternativen',
},
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(verbose_name='namn', max_length=255)),
('body', models.TextField(verbose_name='utförlig information', help_text='Utförligare information till frågan.')),
('result', models.CharField(default='p', choices=[('d', 'Publik tillgång till detaljerad information om röstingen.'), ('l', 'Publik tillgång till begränsad information om röstningen.'), ('p', 'Privat åtkomst enbart för administratörer')], max_length=1)),
('question_status', models.CharField(default='c', choices=[('o', 'Öppen'), ('c', 'Stängd')], max_length=1)),
('nr_of_picks', models.IntegerField(default=1, verbose_name='Antal val en användare kan kryssa i på frågan.')),
('anonymous', models.BooleanField(default=True, verbose_name='namn')),
('modified_by', models.ForeignKey(help_text='Användaren som ändrat på frågan.', on_delete=django.db.models.deletion.SET_NULL, verbose_name='användare', to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'verbose_name': 'fråga',
'verbose_name_plural': 'frågor',
},
),
migrations.CreateModel(
name='QuestionGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('question_status', models.CharField(default='e', choices=[('e', 'Incheckade deltagare på ett event kan rösta.'), ('a', 'Alla medlemmar kan rösta')], max_length=1)),
('visible_from', models.DateTimeField(default=utils.time.now, verbose_name='publicering', help_text='Publiceringsdatum')),
('visible_to', models.DateTimeField(default=utils.time.now_plus_one_month, verbose_name='avpublicering', help_text='Avpubliceringsdatum')),
('event', models.ForeignKey(verbose_name='event', to='events.Event', blank=True, null=True)),
],
options={
'verbose_name': 'frågegrupp',
'verbose_name_plural': 'frågegrupper',
},
),
migrations.CreateModel(
name='Votes',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('option', models.ForeignKey(verbose_name='alternativ', to='votings.Options')),
('question', models.ForeignKey(verbose_name='fråga', to='votings.Question')),
('user', models.ForeignKey(verbose_name='användare', to=settings.AUTH_USER_MODEL, blank=True, null=True)),
],
options={
'verbose_name': 'röst',
'verbose_name_plural': 'röster',
},
),
migrations.AddField(
model_name='question',
name='question_group',
field=models.ForeignKey(verbose_name='frågegrupp', to='votings.QuestionGroup'),
),
migrations.AddField(
model_name='options',
name='question',
field=models.ForeignKey(verbose_name='fråga', to='votings.Question'),
),
migrations.AddField(
model_name='hasvoted',
name='question',
field=models.ForeignKey(verbose_name='fråga', to='votings.Question'),
),
migrations.AddField(
model_name='hasvoted',
name='user',
field=models.ForeignKey(verbose_name='användare', to=settings.AUTH_USER_MODEL),
),
]
|
py | 1a41e0917086cbfc5e2973091965f1b1f8babfec | #!/usr/bin/env python
"""This module serializes AFF4 objects in various ways."""
import yaml
from grr.lib import aff4
from grr.lib import rdfvalue
def YamlDumper(aff4object):
"""Dumps the given aff4object into a yaml representation."""
aff4object.Flush()
result = {}
for attribute, values in aff4object.synced_attributes.items():
result[attribute.predicate] = []
for value in values:
# This value is really a LazyDecoder() instance. We need to get at the
# real data here.
value = value.ToRDFValue()
result[attribute.predicate].append([
value.__class__.__name__, value.SerializeToString(), str(value.age)
])
return yaml.dump(
dict(
aff4_class=aff4object.__class__.__name__,
_urn=aff4object.urn.SerializeToString(),
attributes=result,
age_policy=aff4object.age_policy,))
def YamlLoader(string):
"""Load an AFF4 object from a serialized YAML representation."""
representation = yaml.load(string)
result_cls = aff4.FACTORY.AFF4Object(representation["aff4_class"])
aff4_attributes = {}
for predicate, values in representation["attributes"].items():
attribute = aff4.Attribute.PREDICATES[predicate]
tmp = aff4_attributes[attribute] = []
for rdfvalue_cls_name, value, age in values:
rdfvalue_cls = aff4.FACTORY.RDFValue(rdfvalue_cls_name)
value = rdfvalue_cls(value, age=rdfvalue.RDFDatetime(age))
tmp.append(value)
# Ensure the object is dirty so when we save it, it can be written to the data
# store.
result = result_cls(
urn=representation["_urn"],
clone=aff4_attributes,
mode="rw",
age=representation["age_policy"])
result.new_attributes, result.synced_attributes = result.synced_attributes, {}
result._dirty = True # pylint: disable=protected-access
return result
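# --- Usage sketch (editor's addition, illustrative only) ---
# A minimal round trip under the assumption that `fd` is an AFF4 object that
# was opened through aff4.FACTORY elsewhere; no URN or data store access is
# hard-coded here.
def _yaml_roundtrip_example(fd):
  """Serialize an open AFF4 object to YAML and rebuild a dirty copy of it."""
  serialized = YamlDumper(fd)        # flushes fd and dumps its attributes
  restored = YamlLoader(serialized)  # returns a dirty object ready to be saved
  return restored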
|
py | 1a41e114cf8d72a75a3e722e621918fb97913a90 | import streamlit as st
import pandas as pd
import pickle
import numpy as np
st.write("""
## Forest Fires
""")
st.sidebar.header('User Input')
st.sidebar.subheader('Please enter your data:')
# -- Define function to display widgets and store data
def get_input():
# Display widgets and store their values in variables
v_X = st.sidebar.radio('X', ['1', '2', '3', '4', '5', '6', '7', '8', '9'])
v_Y = st.sidebar.radio('Y', ['2', '3', '4', '5', '6', '7', '8', '9'])
v_month = st.sidebar.radio('month', ['February','March','April','June','July','August','September','October','December'])
v_day = st.sidebar.radio('Day', ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'])
    # pass step= by keyword: the third positional argument of st.slider is the
    # default value, and 0.1 / 1 fall below several of these ranges
    v_FFMC = st.sidebar.slider('FFMC', 18.7, 96.2, step=0.1)
    v_DMC = st.sidebar.slider('DMC', 1.1, 291.3, step=0.1)
    v_DC = st.sidebar.slider('DC', 7.9, 860.6, step=0.1)
    v_ISI = st.sidebar.slider('ISI', 0.0, 56.1, step=0.1)
    v_temp = st.sidebar.slider('temp', 2.2, 33.3, step=0.1)
    v_RH = st.sidebar.slider('RH', 15, 100, step=1)
    v_wind = st.sidebar.slider('wind', 0.4, 9.4, step=0.1)
    v_rain = st.sidebar.slider('rain', 0.0, 6.4, step=0.1)
# Month
if v_month == 'February':
v_month = '2'
elif v_month == 'March':
v_month = '3'
elif v_month == 'April':
v_month = '4'
elif v_month == 'June':
v_month = '6'
elif v_month == 'July':
v_month = '7'
elif v_month == 'August':
v_month = '8'
elif v_month == 'September':
v_month = '9'
elif v_month == 'October':
v_month = '10'
elif v_month == 'December':
v_month = '12'
# Day
if v_day == 'Monday':
v_day = '1'
elif v_day == 'Tuesday':
v_day = '2'
elif v_day == 'Wednesday':
v_day = '3'
elif v_day == 'Thursday':
v_day = '4'
elif v_day == 'Friday':
v_day = '5'
elif v_day == 'Saturday':
v_day = '6'
elif v_day == 'Sunday':
v_day = '7'
# Store user input data in a dictionary
data = {'X': v_X,
'Y': v_Y,
'month': v_month,
'day': v_day,
'FFMC': v_FFMC,
'DMC': v_DMC,
'DC': v_DC,
'ISI': v_ISI,
'temp': v_temp,
'RH': v_RH,
'wind': v_wind,
'rain': v_rain,}
# Create a data frame from the above dictionary
data_df = pd.DataFrame(data, index=[0])
return data_df
# -- Call function to display widgets and get data from user
df = get_input()
st.header('Application of Status Prediction:')
# -- Display new data from user inputs:
st.subheader('User Input:')
st.write(df)
# -- Data Pre-processing for New Data:
# Combines user input data with sample dataset
# The sample data contains unique values for each nominal features
# This will be used for the One-hot encoding
data_sample = pd.read_csv('ML_A.csv')
df = pd.concat([df, data_sample],axis=0)
###Data Cleaning & Feature Engineering###
#drop
df = df.drop(columns=['area'])
df = df.drop(columns=['Unnamed: 0'])
df_num=df
# -- Display pre-processed new data:
st.subheader('Pre-Processed Input:')
st.write(df_num)
# -- Reads the saved normalization model
load_nor = pickle.load(open('normalization_ML1.pkl', 'rb'))
#Apply the normalization model to new data
x_new = load_nor.transform(df)
x_new = x_new[:1]
st.subheader('Normalization Input:')
st.write(x_new)
# -- Reads the saved classification model
load_LR = pickle.load(open('LR_ML1.pkl', 'rb'))
# Apply model for prediction
prediction = load_LR.predict(x_new)
prediction = prediction[:1]
st.subheader('Prediction:')
st.write(prediction)
|
py | 1a41e19bf5773c67952cb7f95f47a1aaf39bc0c5 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/awesome_test")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from pkg_resources import parse_version
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if parse_version(sphinx.__version__) >= parse_version('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
extensions.append('recommonmark')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# To configure AutoStructify
def setup(app):
from recommonmark.transform import AutoStructify
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Contents',
'enable_eval_rst': True,
'enable_math': True,
'enable_inline_math': True
}, True)
app.add_transform(AutoStructify)
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'awesome_test'
copyright = u'2020, shuailiu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'sidebar_width': '300px',
'page_width': '1200px'
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from awesome_test import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'awesome_test-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'awesome_test Documentation',
u'shuailiu', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
} |
py | 1a41e1f0651b8f1c49038fadb81d93cc09e83c1e | """Tests downloading and reading of the GO annotation file from NCBI Gene.
python test_NCBI_Entrez_annotations.py
"""
__copyright__ = "Copyright (C) 2016, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import sys
from goatools.associations import get_assoc_ncbi_taxids
from collections import defaultdict
from goatools.test_data.genes_NCBI_9606_ProteinCoding import GeneID2nt as GeneID2nt_hsa
from goatools.test_data.genes_NCBI_7227_ProteinCoding import GeneID2nt as GeneID2nt_dme
def test_ncbi_gene2go(log=sys.stdout):
"""Return GO associations to Entrez GeneIDs. Download if necessary.
Example report generated with Feb 22, 2013 download of:
NCBI Gene tables and associations in gene2go
49672 items found in gene2go from NCBI's ftp server
taxid GOs GeneIDs Description
----- ------ ------- -----------
10090 16,807 18,971 all DNA items
7227 7,022 12,019 all DNA items
7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
9606 16,299 18,680 all DNA items
9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
"""
# Get associations for human(9606), mouse(10090), and fly(7227)
# (optional) multi-level dictionary separate associations by taxid
taxid2asscs = defaultdict(lambda: defaultdict(lambda: defaultdict(set)))
# Simple dictionary containing id2gos
id2gos = get_assoc_ncbi_taxids(taxids=[9606, 10090, 7227], taxid2asscs=taxid2asscs)
log.write(" {N} items found in gene2go from NCBI's ftp server\n".format(N=len(id2gos)))
taxid2pc = {9606:GeneID2nt_hsa, 7227:GeneID2nt_dme}
# Report findings
log.write(" taxid GOs GeneIDs Description\n")
log.write(" ----- ------ ------- -----------\n")
for taxid, asscs in taxid2asscs.items():
num_gene2gos_all = len(asscs['GeneID2GOs'])
num_go2genes_all = len(asscs['GO2GeneIDs'])
log.write(" {TAXID:>6} {N:>6,} {M:>7,} all DNA items\n".format(
TAXID=taxid, N=num_go2genes_all, M=num_gene2gos_all))
# Basic check to ensure gene2go was downloaded and data was returned.
assert num_gene2gos_all > 11000
assert num_go2genes_all > 6000
if taxid in taxid2pc.keys():
rpt_coverage(taxid, asscs, taxid2pc[taxid], log)
def rpt_coverage(taxid, asscs, pc2nt, log):
"""Calculate and report GO coverage on protein-coding genes.
Example report generated with Feb 22, 2013 download of:
NCBI Gene tables and associations in gene2go
taxid GOs GeneIDs Description
----- ------ ------- -----------
7227 6,956 10,590 76% GO coverage of 13,919 protein-coding genes
9606 16,296 18,253 87% GO coverage of 20,913 protein-coding genes
"""
# List of all protein-coding genes have GO terms associated with them
geneid2gos = asscs['GeneID2GOs']
pcgene_w_gos = set(geneid2gos.keys()).intersection(set(pc2nt.keys()))
num_pcgene_w_gos = len(pcgene_w_gos)
num_pc_genes = len(pc2nt)
perc_cov = 100.0*num_pcgene_w_gos/num_pc_genes
# Get list of GOs associated with protein-coding genes
gos_pcgenes = set()
for geneid in pcgene_w_gos:
gos_pcgenes |= geneid2gos[geneid]
log.write(" {TAXID:>6} {N:>6,} {M:>7,} {COV:2.0f}% GO coverage of {TOT:,} protein-coding genes\n".format(
TAXID=taxid, N=len(gos_pcgenes), M=num_pcgene_w_gos, COV=perc_cov, TOT=num_pc_genes))
if __name__ == '__main__':
test_ncbi_gene2go()
|
py | 1a41e2342adf54708fd68570c918e73e2e712ad3 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateTensorboardExperiment
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_sync]
from google.cloud import aiplatform_v1
def sample_create_tensorboard_experiment():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CreateTensorboardExperimentRequest(
parent="parent_value",
tensorboard_experiment_id="tensorboard_experiment_id_value",
)
# Make the request
response = client.create_tensorboard_experiment(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_TensorboardService_CreateTensorboardExperiment_sync]
|
py | 1a41e258550c8fb0d60f5905b8d8be2905215865 | # ==============================================================================
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import pytest
import tensorflow as tf
import ngraph_bridge
# Test ngraph_bridge config options
def test_set_backend():
ngraph_bridge.enable()
backend_cpu = 'CPU'
backend_interpreter = 'INTERPRETER'
found_cpu = False
found_interpreter = False
# These will only print when running pytest with flag "-s"
print("Number of supported backends ", ngraph_bridge.backends_len())
supported_backends = ngraph_bridge.list_backends()
print(" ****** Supported Backends ****** ")
for backend_name in supported_backends:
print(backend_name)
if backend_name == backend_cpu:
found_cpu = True
if backend_name == backend_interpreter:
found_interpreter = True
print(" ******************************** ")
assert (found_cpu and found_interpreter) == True
# Create Graph
val = tf.placeholder(tf.float32)
out1 = tf.abs(val)
out2 = tf.abs(out1)
# set INTERPRETER backend
assert ngraph_bridge.is_supported_backend(backend_interpreter) == True
ngraph_bridge.set_backend(backend_interpreter)
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_interpreter
# create new session to execute graph
# If you want to re-confirm which backend the graph was executed
# currently the only way is to enable NGRAPH_TF_VLOG_LEVEL=5
with tf.Session() as sess:
sess.run((out2,), feed_dict={val: ((1.4, -0.5, -1))})
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_interpreter
# set CPU backend
assert ngraph_bridge.is_supported_backend(backend_cpu) == True
ngraph_bridge.set_backend(backend_cpu)
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_cpu
# create new session to execute graph
with tf.Session() as sess:
sess.run((out2,), feed_dict={val: ((1.4, -0.5, -1))})
currently_set_backend = ngraph_bridge.get_currently_set_backend_name()
assert currently_set_backend == backend_cpu
|
py | 1a41e265e8628fd6af8385cc4c3c5f1cca2ecfc7 | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.18.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# mysite/setting.py
import os
# Heroku setup
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dyas%*11!on_a5@0k+h8sh$qs#w*i0kr3k@2r1*%+e5hgbd!v_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Heroku setting: ALLOWED_HOSTS = ['127.0.0.1', '.herokuapp.com']
# Local setting: ALLOWED_HOSTS = [ ]
ALLOWED_HOSTS = [ ]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'trips',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates').replace('\\', '/')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
        # Local database configuration
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# Heroku cloud configuration
# db_from_env = dj_database_url.config(conn_max_age=500)
# DATABASES['default'].update(db_from_env) |
py | 1a41e3459518ca95d457e227cad5bf4e3558fded | # Echo server program
import socket
#!/usr/bin/env python2
import socket
import logging
import threading
import time
import signal
import threading
import time
import select
stop_threads = False
def main_func():
def signal_handler(sig, frame):
global stop_threads
stop_threads = True
global stop_threads
address = '127.0.0.1'
port = 50008
signal.signal(signal.SIGINT, signal_handler)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
print('Server setup')
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((address, port))
sock.listen(1)
while not stop_threads:
try:
conn, addr = sock.accept()
print 'Connected by', addr
while not stop_threads:
try:
data = conn.recv(1024)
if not data: break
out = '\r%s\r\nOK\r\n' % data
conn.sendall(out)
print("recv-send %s" % data)
except Exception as e:
print("Failed to send back data. Exception is %s" % e)
break
print 'Closing client', addr
conn.close()
except Exception as e:
print("Failed to accept. Exception is %s" % e)
except Exception as e:
print("Failed to setup server %s:%d. Exception is %s" % (address, port, e))
print('main exiting.')
sock.close()
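# --- Client sketch (editor's addition, illustrative only) ---
# A hedged example of exercising the echo server above from another process:
# connect to the hard-coded address/port, send one message and print the
# reply. Only meaningful while the server loop in main_func() is running.
def client_example(message='hello'):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect(('127.0.0.1', 50008))
    c.sendall(message)
    reply = c.recv(1024)
    c.close()
    print(reply)
    return reply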
if __name__ == '__main__':
main_func()
|
py | 1a41e4862107bd9926419fcfa31821f8af6a3d04 | from collections import namedtuple
from itertools import chain
from django.conf.urls import url
from django.contrib.auth.models import User
from django.forms import ValidationError
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.urls import reverse
from django.utils.translation import ugettext_noop
from memoized import memoized_property
from tastypie import fields, http
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.exceptions import BadRequest, ImmediateHttpResponse, NotFound
from tastypie.http import HttpForbidden, HttpUnauthorized
from tastypie.resources import ModelResource, Resource, convert_post_to_patch
from tastypie.utils import dict_strip_unicode_keys
from casexml.apps.stock.models import StockTransaction
from corehq.apps.api.resources.serializers import ListToSingleObjectSerializer
from corehq.apps.sms.models import MessagingEvent
from phonelog.models import DeviceReportEntry
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.api.odata.serializers import (
ODataCaseSerializer,
ODataFormSerializer,
)
from corehq.apps.api.odata.utils import record_feed_access_in_datadog
from corehq.apps.api.odata.views import (
add_odata_headers,
raise_odata_permissions_issues,
)
from corehq.apps.api.resources.auth import (
AdminAuthentication,
ODataAuthentication,
RequirePermissionAuthentication,
LoginAuthentication)
from corehq.apps.api.resources.meta import CustomResourceMeta
from corehq.apps.api.util import get_obj
from corehq.apps.app_manager.models import Application
from corehq.apps.domain.auth import HQApiKeyAuthentication
from corehq.apps.domain.forms import clean_password
from corehq.apps.domain.models import Domain
from corehq.apps.es import UserES
from corehq.apps.export.esaccessors import (
get_case_export_base_query,
get_form_export_base_query,
)
from corehq.apps.export.models import CaseExportInstance, FormExportInstance
from corehq.apps.groups.models import Group
from corehq.apps.locations.permissions import location_safe
from corehq.apps.reports.analytics.esaccessors import (
get_case_types_for_domain_es,
)
from corehq.apps.reports.standard.cases.utils import (
query_location_restricted_cases,
query_location_restricted_forms,
)
from corehq.apps.sms.util import strip_plus
from corehq.apps.userreports.columns import UCRExpandDatabaseSubcolumn
from corehq.apps.userreports.models import (
ReportConfiguration,
StaticReportConfiguration,
report_config_id_is_static,
)
from corehq.apps.userreports.reports.data_source import (
ConfigurableReportDataSource,
)
from corehq.apps.userreports.reports.view import (
get_filter_values,
query_dict_to_dict,
)
from corehq.apps.users.dbaccessors.all_commcare_users import (
get_all_user_id_username_pairs_by_domain,
)
from corehq.apps.users.models import (
CommCareUser,
CouchUser,
Permissions,
UserRole,
WebUser,
)
from corehq.apps.users.util import raw_username
from corehq.const import USER_CHANGE_VIA_API
from corehq.util import get_document_or_404
from corehq.util.couch import DocumentNotFound, get_document_or_not_found
from corehq.util.model_log import ModelAction, log_model_change
from corehq.util.timer import TimingContext
from . import (
CouchResourceMixin,
DomainSpecificResourceMixin,
HqBaseResource,
v0_1,
v0_4,
CorsResourceMixin)
from .pagination import DoesNothingPaginator, NoCountingPaginator
MOCK_BULK_USER_ES = None
def user_es_call(domain, q, fields, size, start_at):
query = (UserES()
.domain(domain)
.fields(fields)
.size(size)
.start(start_at))
if q is not None:
query.set_query({"query_string": {"query": q}})
return query.run().hits
def _set_role_for_bundle(kwargs, bundle):
# check for roles associated with the domain
domain_roles = UserRole.by_domain_and_name(kwargs['domain'], bundle.data.get('role'))
if domain_roles:
qualified_role_id = domain_roles[0].get_qualified_id()
bundle.obj.set_role(kwargs['domain'], qualified_role_id)
else:
# check for preset roles and now create them for the domain
permission_preset_name = UserRole.get_preset_permission_by_name(bundle.data.get('role'))
if permission_preset_name:
bundle.obj.set_role(kwargs['domain'], permission_preset_name)
class BulkUserResource(HqBaseResource, DomainSpecificResourceMixin):
"""
A read-only user data resource based on elasticsearch.
Supported Params: limit offset q fields
"""
type = "bulk-user"
id = fields.CharField(attribute='id', readonly=True, unique=True)
email = fields.CharField(attribute='email')
username = fields.CharField(attribute='username', unique=True)
first_name = fields.CharField(attribute='first_name', null=True)
last_name = fields.CharField(attribute='last_name', null=True)
phone_numbers = fields.ListField(attribute='phone_numbers', null=True)
@staticmethod
def to_obj(user):
'''
Takes a flat dict and returns an object
'''
if '_id' in user:
user['id'] = user.pop('_id')
return namedtuple('user', list(user))(**user)
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.edit_commcare_users)
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
object_class = object
resource_name = 'bulk-user'
def dehydrate(self, bundle):
fields = bundle.request.GET.getlist('fields')
data = {}
if not fields:
return bundle
for field in fields:
data[field] = bundle.data[field]
bundle.data = data
return bundle
def obj_get_list(self, bundle, **kwargs):
request_fields = bundle.request.GET.getlist('fields')
for field in request_fields:
if field not in self.fields:
raise BadRequest('{0} is not a valid field'.format(field))
params = bundle.request.GET
param = lambda p: params.get(p, None)
fields = list(self.fields)
fields.remove('id')
fields.append('_id')
fn = MOCK_BULK_USER_ES or user_es_call
users = fn(
domain=kwargs['domain'],
q=param('q'),
fields=fields,
size=param('limit'),
start_at=param('offset'),
)
return list(map(self.to_obj, users))
def detail_uri_kwargs(self, bundle_or_obj):
return {
'pk': get_obj(bundle_or_obj).id
}
class CommCareUserResource(v0_1.CommCareUserResource):
class Meta(v0_1.CommCareUserResource.Meta):
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post']
always_return_data = True
def serialize(self, request, data, format, options=None):
if not isinstance(data, dict) and request.method == 'POST':
data = {'id': data.obj._id}
return self._meta.serializer.serialize(data, format, options)
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_detail'):
if bundle_or_obj is None:
return super(CommCareUserResource, self).get_resource_uri(bundle_or_obj, url_name)
elif isinstance(bundle_or_obj, Bundle):
obj = bundle_or_obj.obj
else:
obj = bundle_or_obj
return reverse('api_dispatch_detail', kwargs=dict(resource_name=self._meta.resource_name,
domain=obj.domain,
api_name=self._meta.api_name,
pk=obj._id))
def _update(self, bundle):
should_save = False
for key, value in bundle.data.items():
if getattr(bundle.obj, key, None) != value:
if key == 'phone_numbers':
bundle.obj.phone_numbers = []
for idx, phone_number in enumerate(bundle.data.get('phone_numbers', [])):
bundle.obj.add_phone_number(strip_plus(phone_number))
if idx == 0:
bundle.obj.set_default_phone_number(strip_plus(phone_number))
should_save = True
elif key == 'groups':
bundle.obj.set_groups(bundle.data.get("groups", []))
should_save = True
elif key in ['email', 'username']:
setattr(bundle.obj, key, value.lower())
should_save = True
elif key == 'password':
domain = Domain.get_by_name(bundle.obj.domain)
if domain.strong_mobile_passwords:
try:
clean_password(bundle.data.get("password"))
except ValidationError as e:
if not hasattr(bundle.obj, 'errors'):
bundle.obj.errors = []
bundle.obj.errors.append(str(e))
return False
bundle.obj.set_password(bundle.data.get("password"))
should_save = True
elif key == 'user_data':
try:
bundle.obj.update_metadata(value)
except ValueError as e:
raise BadRequest(str(e))
else:
setattr(bundle.obj, key, value)
should_save = True
return should_save
def obj_create(self, bundle, request=None, **kwargs):
try:
bundle.obj = CommCareUser.create(
domain=kwargs['domain'],
username=bundle.data['username'].lower(),
password=bundle.data['password'],
created_by=bundle.request.user,
created_via=USER_CHANGE_VIA_API,
email=bundle.data.get('email', '').lower(),
)
del bundle.data['password']
self._update(bundle)
bundle.obj.save()
except Exception:
if bundle.obj._id:
bundle.obj.retire(deleted_by=request.user, deleted_via=USER_CHANGE_VIA_API)
try:
django_user = bundle.obj.get_django_user()
except User.DoesNotExist:
pass
else:
django_user.delete()
log_model_change(request.user, django_user, message=f"deleted_via: {USER_CHANGE_VIA_API}",
action=ModelAction.DELETE)
return bundle
def obj_update(self, bundle, **kwargs):
bundle.obj = CommCareUser.get(kwargs['pk'])
assert bundle.obj.domain == kwargs['domain']
if self._update(bundle):
assert bundle.obj.domain == kwargs['domain']
bundle.obj.save()
return bundle
else:
raise BadRequest(''.join(chain.from_iterable(bundle.obj.errors)))
def obj_delete(self, bundle, **kwargs):
user = CommCareUser.get(kwargs['pk'])
if user:
user.retire(deleted_by=bundle.request.user, deleted_via=USER_CHANGE_VIA_API)
return ImmediateHttpResponse(response=http.HttpAccepted())
class WebUserResource(v0_1.WebUserResource):
class Meta(v0_1.WebUserResource.Meta):
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post']
always_return_data = True
def serialize(self, request, data, format, options=None):
if not isinstance(data, dict) and request.method == 'POST':
data = {'id': data.obj._id}
return self._meta.serializer.serialize(data, format, options)
def dispatch(self, request_type, request, **kwargs):
"""
Override dispatch to check for proper params for user create : role and admin permissions
"""
if request.method == 'POST':
details = self._meta.serializer.deserialize(request.body)
if details.get('is_admin', False):
if self._admin_assigned_another_role(details):
raise BadRequest("An admin can have only one role : Admin")
else:
if not details.get('role', None):
raise BadRequest("Please assign role for non admin user")
elif self._invalid_user_role(request, details):
raise BadRequest("Invalid User Role %s" % details.get('role', None))
return super(WebUserResource, self).dispatch(request_type, request, **kwargs)
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_detail'):
if isinstance(bundle_or_obj, Bundle):
domain = bundle_or_obj.request.domain
obj = bundle_or_obj.obj
elif bundle_or_obj is None:
return None
return reverse('api_dispatch_detail', kwargs=dict(resource_name=self._meta.resource_name,
domain=domain,
api_name=self._meta.api_name,
pk=obj._id))
def _update(self, bundle):
should_save = False
for key, value in bundle.data.items():
if getattr(bundle.obj, key, None) != value:
if key == 'phone_numbers':
bundle.obj.phone_numbers = []
for idx, phone_number in enumerate(bundle.data.get('phone_numbers', [])):
bundle.obj.add_phone_number(strip_plus(phone_number))
if idx == 0:
bundle.obj.set_default_phone_number(strip_plus(phone_number))
should_save = True
elif key in ['email', 'username']:
setattr(bundle.obj, key, value.lower())
should_save = True
else:
setattr(bundle.obj, key, value)
should_save = True
return should_save
def obj_create(self, bundle, request=None, **kwargs):
try:
self._meta.domain = kwargs['domain']
bundle.obj = WebUser.create(
domain=kwargs['domain'],
username=bundle.data['username'].lower(),
password=bundle.data['password'],
created_by=bundle.request.user,
created_via=USER_CHANGE_VIA_API,
email=bundle.data.get('email', '').lower(),
is_admin=bundle.data.get('is_admin', False)
)
del bundle.data['password']
self._update(bundle)
# is_admin takes priority over role
if not bundle.obj.is_admin and bundle.data.get('role'):
_set_role_for_bundle(kwargs, bundle)
bundle.obj.save()
except Exception:
bundle.obj.delete()
return bundle
def obj_update(self, bundle, **kwargs):
bundle.obj = WebUser.get(kwargs['pk'])
assert kwargs['domain'] in bundle.obj.domains
if self._update(bundle):
assert kwargs['domain'] in bundle.obj.domains
bundle.obj.save()
return bundle
def _invalid_user_role(self, request, details):
return details.get('role') not in UserRole.preset_and_domain_role_names(request.domain)
def _admin_assigned_another_role(self, details):
# default value Admin since that will be assigned later anyway since is_admin is True
return details.get('role', 'Admin') != 'Admin'
class AdminWebUserResource(v0_1.UserResource):
domains = fields.ListField(attribute='domains')
def obj_get(self, bundle, **kwargs):
return WebUser.get(kwargs['pk'])
def obj_get_list(self, bundle, **kwargs):
if 'username' in bundle.request.GET:
return [WebUser.get_by_username(bundle.request.GET['username'])]
return [WebUser.wrap(u) for u in UserES().web_users().run().hits]
class Meta(WebUserResource.Meta):
authentication = AdminAuthentication()
detail_allowed_methods = ['get']
list_allowed_methods = ['get']
class GroupResource(v0_4.GroupResource):
class Meta(v0_4.GroupResource.Meta):
detail_allowed_methods = ['get', 'put', 'delete']
list_allowed_methods = ['get', 'post', 'patch']
always_return_data = True
def serialize(self, request, data, format, options=None):
if not isinstance(data, dict):
if 'error_message' in data.data:
data = {'error_message': data.data['error_message']}
elif request.method == 'POST':
data = {'id': data.obj._id}
return self._meta.serializer.serialize(data, format, options)
def patch_list(self, request=None, **kwargs):
"""
Exactly copied from https://github.com/toastdriven/django-tastypie/blob/v0.9.14/tastypie/resources.py#L1466
(BSD licensed) and modified to pass the kwargs to `obj_create` and support only create method
"""
request = convert_post_to_patch(request)
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
collection_name = self._meta.collection_name
if collection_name not in deserialized:
raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
bundles_seen = []
status = http.HttpAccepted
for data in deserialized[collection_name]:
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
try:
self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
except AssertionError as e:
status = http.HttpBadRequest
bundle.data['_id'] = str(e)
bundles_seen.append(bundle)
to_be_serialized = [bundle.data['_id'] for bundle in bundles_seen]
return self.create_response(request, to_be_serialized, response_class=status)
def post_list(self, request, **kwargs):
"""
Exactly copied from https://github.com/toastdriven/django-tastypie/blob/v0.9.14/tastypie/resources.py#L1314
(BSD licensed) and modified to catch Exception and not returning traceback
"""
deserialized = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
try:
updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
except AssertionError as e:
bundle.data['error_message'] = str(e)
return self.create_response(request, bundle, response_class=http.HttpBadRequest)
def _update(self, bundle):
should_save = False
for key, value in bundle.data.items():
if key == 'name' and getattr(bundle.obj, key, None) != value:
if not Group.by_name(bundle.obj.domain, value):
setattr(bundle.obj, key, value or '')
should_save = True
else:
raise Exception("A group with this name already exists")
if key == 'users' and getattr(bundle.obj, key, None) != value:
users_to_add = set(value) - set(bundle.obj.users)
users_to_remove = set(bundle.obj.users) - set(value)
for user in users_to_add:
bundle.obj.add_user(user)
should_save = True
for user in users_to_remove:
bundle.obj.remove_user(user)
should_save = True
elif getattr(bundle.obj, key, None) != value:
setattr(bundle.obj, key, value)
should_save = True
return should_save
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_detail'):
if bundle_or_obj is None:
return super(GroupResource, self).get_resource_uri(bundle_or_obj, url_name)
elif isinstance(bundle_or_obj, Bundle):
obj = bundle_or_obj.obj
else:
obj = bundle_or_obj
return self._get_resource_uri(obj)
def _get_resource_uri(self, obj):
# This function is called up to 1000 times per request
# so build url from a known string template
# to avoid calling the expensive `reverse` function each time
return self._get_resource_uri_template.format(domain=obj.domain, pk=obj._id)
@memoized_property
def _get_resource_uri_template(self):
"""Returns the literal string "/a/{domain}/api/v0.5/group/{pk}/" in a DRY way"""
return reverse('api_dispatch_detail', kwargs=dict(
resource_name=self._meta.resource_name,
api_name=self._meta.api_name,
domain='__domain__',
pk='__pk__')).replace('__pk__', '{pk}').replace('__domain__', '{domain}')
def obj_create(self, bundle, request=None, **kwargs):
if not Group.by_name(kwargs['domain'], bundle.data.get("name")):
bundle.obj = Group(bundle.data)
bundle.obj.name = bundle.obj.name or ''
bundle.obj.domain = kwargs['domain']
bundle.obj.save()
for user in bundle.obj.users:
CommCareUser.get(user).set_groups([bundle.obj._id])
else:
raise AssertionError("A group with name %s already exists" % bundle.data.get("name"))
return bundle
def obj_update(self, bundle, **kwargs):
bundle.obj = Group.get(kwargs['pk'])
assert bundle.obj.domain == kwargs['domain']
if self._update(bundle):
assert bundle.obj.domain == kwargs['domain']
bundle.obj.save()
return bundle
def obj_delete(self, bundle, **kwargs):
group = self.obj_get(bundle, **kwargs)
group.soft_delete()
return bundle
class DomainAuthorization(ReadOnlyAuthorization):
def __init__(self, domain_key='domain', *args, **kwargs):
self.domain_key = domain_key
def read_list(self, object_list, bundle):
return object_list.filter(**{self.domain_key: bundle.request.domain})
class DeviceReportResource(HqBaseResource, ModelResource):
class Meta(object):
queryset = DeviceReportEntry.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'device-log'
authentication = RequirePermissionAuthentication(Permissions.edit_data)
authorization = DomainAuthorization()
paginator_class = NoCountingPaginator
filtering = {
# this is needed for the domain filtering but any values passed in via the URL get overridden
"domain": ('exact',),
"date": ('exact', 'gt', 'gte', 'lt', 'lte', 'range'),
"user_id": ('exact',),
"username": ('exact',),
"type": ('exact',),
"xform_id": ('exact',),
"device_id": ('exact',),
}
class StockTransactionResource(HqBaseResource, ModelResource):
class Meta(object):
queryset = StockTransaction.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'stock_transaction'
authentication = RequirePermissionAuthentication(Permissions.view_reports)
paginator_class = NoCountingPaginator
authorization = DomainAuthorization(domain_key='report__domain')
filtering = {
"case_id": ('exact',),
"section_id": ('exact'),
}
fields = ['case_id', 'product_id', 'type', 'section_id', 'quantity', 'stock_on_hand']
include_resource_uri = False
def build_filters(self, filters=None):
orm_filters = super(StockTransactionResource, self).build_filters(filters)
if 'start_date' in filters:
orm_filters['report__date__gte'] = filters['start_date']
if 'end_date' in filters:
orm_filters['report__date__lte'] = filters['end_date']
return orm_filters
def dehydrate(self, bundle):
bundle.data['product_name'] = bundle.obj.sql_product.name
bundle.data['transaction_date'] = bundle.obj.report.date
return bundle
ConfigurableReportData = namedtuple("ConfigurableReportData", [
"data", "columns", "id", "domain", "total_records", "get_params", "next_page"
])
class ConfigurableReportDataResource(HqBaseResource, DomainSpecificResourceMixin):
"""
A resource that replicates the behavior of the ajax part of the
ConfigurableReportView view.
"""
data = fields.ListField(attribute="data", readonly=True)
columns = fields.ListField(attribute="columns", readonly=True)
total_records = fields.IntegerField(attribute="total_records", readonly=True)
next_page = fields.CharField(attribute="next_page", readonly=True)
LIMIT_DEFAULT = 50
LIMIT_MAX = 50
def _get_start_param(self, bundle):
try:
start = int(bundle.request.GET.get('offset', 0))
if start < 0:
raise ValueError
except (ValueError, TypeError):
raise BadRequest("start must be a positive integer.")
return start
def _get_limit_param(self, bundle):
try:
limit = int(bundle.request.GET.get('limit', self.LIMIT_DEFAULT))
if limit < 0:
raise ValueError
except (ValueError, TypeError):
raise BadRequest("limit must be a positive integer.")
if limit > self.LIMIT_MAX:
raise BadRequest("Limit may not exceed {}.".format(self.LIMIT_MAX))
return limit
def _get_next_page(self, domain, id_, start, limit, total_records, get_query_dict):
if total_records > start + limit:
start += limit
new_get_params = get_query_dict.copy()
new_get_params["offset"] = start
# limit has not changed, but it may not have been present in get params before.
new_get_params["limit"] = limit
return reverse('api_dispatch_detail', kwargs=dict(
api_name=self._meta.api_name,
resource_name=self._meta.resource_name,
domain=domain,
pk=id_,
)) + "?" + new_get_params.urlencode()
else:
return ""
def _get_report_data(self, report_config, domain, start, limit, get_params):
report = ConfigurableReportDataSource.from_spec(report_config, include_prefilters=True)
string_type_params = [
filter.name
for filter in report_config.ui_filters
if getattr(filter, 'datatype', 'string') == "string"
]
filter_values = get_filter_values(
report_config.ui_filters,
query_dict_to_dict(get_params, domain, string_type_params)
)
report.set_filter_values(filter_values)
page = list(report.get_data(start=start, limit=limit))
columns = []
for column in report.columns:
simple_column = {
"header": column.header,
"slug": column.slug,
}
if isinstance(column, UCRExpandDatabaseSubcolumn):
simple_column['expand_column_value'] = column.expand_value
columns.append(simple_column)
total_records = report.get_total_records()
return page, columns, total_records
def obj_get(self, bundle, **kwargs):
domain = kwargs['domain']
pk = kwargs['pk']
start = self._get_start_param(bundle)
limit = self._get_limit_param(bundle)
report_config = self._get_report_configuration(pk, domain)
page, columns, total_records = self._get_report_data(
report_config, domain, start, limit, bundle.request.GET)
return ConfigurableReportData(
data=page,
columns=columns,
total_records=total_records,
id=report_config._id,
domain=domain,
get_params=bundle.request.GET,
next_page=self._get_next_page(
domain,
report_config._id,
start,
limit,
total_records,
bundle.request.GET,
)
)
def _get_report_configuration(self, id_, domain):
"""
Fetch the required ReportConfiguration object
:param id_: The id of the ReportConfiguration
:param domain: The domain of the ReportConfiguration
:return: A ReportConfiguration
"""
try:
if report_config_id_is_static(id_):
return StaticReportConfiguration.by_id(id_, domain=domain)
else:
return get_document_or_not_found(ReportConfiguration, domain, id_)
except DocumentNotFound:
raise NotFound
def detail_uri_kwargs(self, bundle_or_obj):
return {
'domain': get_obj(bundle_or_obj).domain,
'pk': get_obj(bundle_or_obj).id,
}
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
uri = super(ConfigurableReportDataResource, self).get_resource_uri(bundle_or_obj, url_name)
if bundle_or_obj is not None and uri:
get_params = get_obj(bundle_or_obj).get_params.copy()
if "offset" not in get_params:
get_params["offset"] = 0
if "limit" not in get_params:
get_params["limit"] = self.LIMIT_DEFAULT
uri += "?{}".format(get_params.urlencode())
return uri
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.view_reports, allow_session_auth=True)
list_allowed_methods = []
detail_allowed_methods = ["get"]
class SimpleReportConfigurationResource(CouchResourceMixin, HqBaseResource, DomainSpecificResourceMixin):
id = fields.CharField(attribute='get_id', readonly=True, unique=True)
title = fields.CharField(readonly=True, attribute="title", null=True)
filters = fields.ListField(readonly=True)
columns = fields.ListField(readonly=True)
def dehydrate_filters(self, bundle):
obj_filters = bundle.obj.filters
return [{
"type": f["type"],
"datatype": f["datatype"],
"slug": f["slug"]
} for f in obj_filters]
def dehydrate_columns(self, bundle):
obj_columns = bundle.obj.columns
return [{
"column_id": c['column_id'],
"display": c['display'],
"type": c["type"],
} for c in obj_columns]
def obj_get(self, bundle, **kwargs):
domain = kwargs['domain']
pk = kwargs['pk']
try:
report_configuration = get_document_or_404(ReportConfiguration, domain, pk)
except Http404 as e:
raise NotFound(str(e))
return report_configuration
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
return ReportConfiguration.by_domain(domain)
def detail_uri_kwargs(self, bundle_or_obj):
return {
'domain': get_obj(bundle_or_obj).domain,
'pk': get_obj(bundle_or_obj)._id,
}
class Meta(CustomResourceMeta):
list_allowed_methods = ["get"]
detail_allowed_methods = ["get"]
paginator_class = DoesNothingPaginator
UserDomain = namedtuple('UserDomain', 'domain_name project_name')
UserDomain.__new__.__defaults__ = ('', '')
class UserDomainsResource(CorsResourceMixin, Resource):
domain_name = fields.CharField(attribute='domain_name')
project_name = fields.CharField(attribute='project_name')
class Meta(object):
resource_name = 'user_domains'
authentication = LoginAuthentication(allow_session_auth=True)
object_class = UserDomain
include_resource_uri = False
def dispatch_list(self, request, **kwargs):
try:
return super(UserDomainsResource, self).dispatch_list(request, **kwargs)
except ImmediateHttpResponse as immediate_http_response:
if isinstance(immediate_http_response.response, HttpUnauthorized):
raise ImmediateHttpResponse(
response=HttpUnauthorized(
content='Username or API Key is incorrect', content_type='text/plain'
)
)
else:
raise
def obj_get_list(self, bundle, **kwargs):
return self.get_object_list(bundle.request)
def get_object_list(self, request):
couch_user = CouchUser.from_django_user(request.user)
results = []
for domain in couch_user.get_domains():
if not domain_has_privilege(domain, privileges.ZAPIER_INTEGRATION):
continue
domain_object = Domain.get_by_name(domain)
results.append(UserDomain(
domain_name=domain_object.name,
project_name=domain_object.hr_name or domain_object.name
))
return results
class IdentityResource(CorsResourceMixin, Resource):
id = fields.CharField(attribute='get_id', readonly=True)
username = fields.CharField(attribute='username', readonly=True)
first_name = fields.CharField(attribute='first_name', readonly=True)
last_name = fields.CharField(attribute='last_name', readonly=True)
email = fields.CharField(attribute='email', readonly=True)
def obj_get_list(self, bundle, **kwargs):
return [bundle.request.couch_user]
class Meta(object):
resource_name = 'identity'
authentication = LoginAuthentication()
serializer = ListToSingleObjectSerializer()
detail_allowed_methods = []
list_allowed_methods = ['get']
object_class = CouchUser
include_resource_uri = False
Form = namedtuple('Form', 'form_xmlns form_name')
Form.__new__.__defaults__ = ('', '')
class DomainForms(Resource):
"""
Returns: list of forms for a given domain with form name formatted for display in Zapier
"""
form_xmlns = fields.CharField(attribute='form_xmlns')
form_name = fields.CharField(attribute='form_name')
class Meta(object):
resource_name = 'domain_forms'
authentication = RequirePermissionAuthentication(Permissions.access_api)
object_class = Form
include_resource_uri = False
allowed_methods = ['get']
limit = 200
max_limit = 1000
def obj_get_list(self, bundle, **kwargs):
application_id = bundle.request.GET.get('application_id')
if not application_id:
raise NotFound('application_id parameter required')
results = []
application = Application.get(docid=application_id)
if not application:
return []
forms_objects = application.get_forms(bare=False)
for form_object in forms_objects:
form = form_object['form']
module = form_object['module']
form_name = '{} > {} > {}'.format(application.name, module.default_name(), form.default_name())
results.append(Form(form_xmlns=form.xmlns, form_name=form_name))
return results
# Zapier requires id and name; case_type has no obvious id, placeholder inserted instead.
CaseType = namedtuple('CaseType', 'case_type placeholder')
CaseType.__new__.__defaults__ = ('', '')
class DomainCases(Resource):
"""
Returns: list of case types for a domain
Note: only returns case types for which at least one case has been made
"""
placeholder = fields.CharField(attribute='placeholder')
case_type = fields.CharField(attribute='case_type')
class Meta(object):
resource_name = 'domain_cases'
authentication = RequirePermissionAuthentication(Permissions.access_api)
object_class = CaseType
include_resource_uri = False
allowed_methods = ['get']
limit = 100
max_limit = 1000
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
case_types = get_case_types_for_domain_es(domain)
results = [CaseType(case_type=case_type) for case_type in case_types]
return results
UserInfo = namedtuple('UserInfo', 'user_id user_name')
UserInfo.__new__.__defaults__ = ('', '')
class DomainUsernames(Resource):
"""
Returns: list of usernames for a domain.
"""
user_id = fields.CharField(attribute='user_id')
user_name = fields.CharField(attribute='user_name')
class Meta(object):
resource_name = 'domain_usernames'
authentication = RequirePermissionAuthentication(Permissions.view_commcare_users)
object_class = User
include_resource_uri = False
allowed_methods = ['get']
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
user_ids_username_pairs = get_all_user_id_username_pairs_by_domain(domain)
results = [UserInfo(user_id=user_pair[0], user_name=raw_username(user_pair[1]))
for user_pair in user_ids_username_pairs]
return results
class BaseODataResource(HqBaseResource, DomainSpecificResourceMixin):
config_id = None
table_id = None
def dispatch(self, request_type, request, **kwargs):
if not domain_has_privilege(request.domain, privileges.ODATA_FEED):
raise ImmediateHttpResponse(
response=HttpResponseNotFound('Feature flag not enabled.')
)
self.config_id = kwargs['config_id']
self.table_id = int(kwargs.get('table_id', 0))
with TimingContext() as timer:
response = super(BaseODataResource, self).dispatch(
request_type, request, **kwargs
)
record_feed_access_in_datadog(request, self.config_id, timer.duration, response)
return response
def create_response(self, request, data, response_class=HttpResponse,
**response_kwargs):
data['domain'] = request.domain
data['config_id'] = self.config_id
data['api_path'] = request.path
data['table_id'] = self.table_id
response = super(BaseODataResource, self).create_response(
request, data, response_class, **response_kwargs)
return add_odata_headers(response)
def detail_uri_kwargs(self, bundle_or_obj):
# Not sure why this is required but the feed 500s without it
return {
'pk': get_obj(bundle_or_obj)['_id']
}
def determine_format(self, request):
# Results should be sent as JSON
return 'application/json'
@location_safe
class ODataCaseResource(BaseODataResource):
def obj_get_list(self, bundle, domain, **kwargs):
config = get_document_or_404(CaseExportInstance, domain, self.config_id)
if raise_odata_permissions_issues(bundle.request.couch_user, domain, config):
raise ImmediateHttpResponse(
HttpForbidden(ugettext_noop(
"You do not have permission to view this feed."
))
)
query = get_case_export_base_query(domain, config.case_type)
for filter in config.get_filters():
query = query.filter(filter.to_es_filter())
if not bundle.request.couch_user.has_permission(
domain, 'access_all_locations'
):
query = query_location_restricted_cases(query, bundle.request)
return query
class Meta(v0_4.CommCareCaseResource.Meta):
authentication = ODataAuthentication()
resource_name = 'odata/cases'
serializer = ODataCaseSerializer()
limit = 2000
max_limit = 10000
def prepend_urls(self):
return [
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/(?P<table_id>[\d]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
]
@location_safe
class ODataFormResource(BaseODataResource):
def obj_get_list(self, bundle, domain, **kwargs):
config = get_document_or_404(FormExportInstance, domain, self.config_id)
if raise_odata_permissions_issues(bundle.request.couch_user, domain, config):
raise ImmediateHttpResponse(
HttpForbidden(ugettext_noop(
"You do not have permission to view this feed."
))
)
query = get_form_export_base_query(domain, config.app_id, config.xmlns, include_errors=False)
for filter in config.get_filters():
query = query.filter(filter.to_es_filter())
if not bundle.request.couch_user.has_permission(
domain, 'access_all_locations'
):
query = query_location_restricted_forms(query, bundle.request)
return query
class Meta(v0_4.XFormInstanceResource.Meta):
authentication = ODataAuthentication()
resource_name = 'odata/forms'
serializer = ODataFormSerializer()
limit = 2000
max_limit = 10000
def prepend_urls(self):
return [
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/(?P<table_id>[\d]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
url(r"^(?P<resource_name>{})/(?P<config_id>[\w\d_.-]+)/feed".format(
self._meta.resource_name), self.wrap_view('dispatch_list')),
]
class MessagingEventResource(HqBaseResource, ModelResource):
class Meta(object):
queryset = MessagingEvent.objects.all()
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
resource_name = 'messaging-event'
authentication = RequirePermissionAuthentication(Permissions.edit_data)
authorization = DomainAuthorization()
paginator_class = NoCountingPaginator
filtering = {
# this is needed for the domain filtering but any values passed in via the URL get overridden
"domain": ('exact',),
"date": ('exact', 'gt', 'gte', 'lt', 'lte', 'range'),
"source": ('exact',),
"content_type": ('exact',),
"status": ('exact',),
}
ordering = [
'date',
]
|
py | 1a41e4f9363cbb60fbb0c1fc152bb22bf9fbe1cc | from shared import readAssets
def doExtract(args):
print("Larkstongue v0.0.1-alpha")
def readGfx():
readLine = line.strip("\n")
if len(readLine) > 0:
areaGfx.append(readLine)
def readGff():
readLine = line.strip("\n")
if len(readLine) > 0:
areaGff.append(readLine)
def readMap():
readLine = line.strip("\n")
if len(readLine) > 0:
areaMap.append(readLine)
def readSfx():
readLine = line.strip("\n")
if len(readLine) > 0:
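            # Each __sfx__ line is an 8-character header followed by 5 hex
            # digits per note (2 pitch, 1 waveform, 1 volume, 1 effect); each
            # note is re-packed into a 4-hex-digit value and the header is
            # moved from the front of the line to the end.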
encodedLine = ""
header = readLine[:8]
for i in range(8, len(readLine), 5):
pitchHex = readLine[i : i + 2]
waveformHex = readLine[i + 2]
volumeHex = readLine[i + 3]
effectHex = readLine[i + 4]
pitchDec = int(pitchHex, 16)
pitchBinary = format(pitchDec, "06b")
waveformDec = int(waveformHex, 16)
waveformBinary = format(waveformDec, "04b")
instrumentBit = waveformBinary[0]
waveformBinary = waveformBinary[1:]
volumeDec = int(volumeHex, 16)
volumeBinary = format(volumeDec, "03b")
effectDec = int(effectHex, 16)
effectBinary = format(effectDec, "03b")
noteBinary = waveformBinary[1:] + pitchBinary + instrumentBit + effectBinary + volumeBinary + waveformBinary[0]
noteDec = int(noteBinary, 2)
noteHex = format(noteDec, "04x")
encodedLine = encodedLine + noteHex
encodedLine = encodedLine + header
areaSfx.append(encodedLine)
def readMusic():
readLine = line.strip("\n")
if len(readLine) > 0:
flagHex = readLine[1]
flagDec = int(flagHex, 16)
flagBinary = format(flagDec, "04b")
channelsHex = []
for i in range(3, len(readLine), 2):
channelsHex.append(readLine[i : i+2])
encodedBinary = []
for i in range(3, -1, -1):
encodedBinary.append(flagBinary[i])
encodedLine = ""
for i in range(0, 4):
channelDec = int(channelsHex[i], 16)
channelBinary = format(channelDec, "07b")
encodedBinary[i] = encodedBinary[i] + channelBinary
encodedDec = int(encodedBinary[i], 2)
encodedHex = format(encodedDec, "02x")
encodedLine = encodedLine + encodedHex
areaMusic.append(encodedLine)
def cropBitmap():
if len(areaGfx) == 0:
print("Bitmap not found on cart!")
quit()
cropScanline = ""
while len(cropScanline) < 128:
cropScanline += args.bgcolor
while areaGfx[0] == cropScanline:
areaGfx.pop(0)
while areaGfx[-1] == cropScanline:
areaGfx.pop(-1)
marginFound = False
for leftMargin in range(0, 127):
for y in range(0, len(areaGfx)):
if areaGfx[y][leftMargin] != args.bgcolor:
marginFound = True
break
if marginFound == True:
break
marginFound = False
for rightMargin in range(128, 0, -1):
for y in range(0, len(areaGfx)):
if areaGfx[y][rightMargin - 1] != args.bgcolor:
marginFound = True
break
if marginFound == True:
break
cropWidth = rightMargin - leftMargin
if cropWidth % 2 != 0:
if rightMargin < 128:
rightMargin += 1
else:
leftMargin -= 1
for i in range(0, len(areaGfx)):
areaGfx[i] = areaGfx[i][leftMargin : rightMargin]
def swapGfxNibbles():
for i in range(0, len(areaGfx)):
line = areaGfx[i]
swappedLine = ""
for j in range(0, len(line), 2):
swappedLine += line[j + 1] + line[j]
areaGfx[i] = swappedLine
def writeBitmap():
for line in areaGfx:
outputFile.write("bitmap=" + line + "\n")
def areaToString(areaID):
if areaID == "gfx":
readArea = areaGfx
areaLength = 16384
elif areaID == "gff":
readArea = areaGff
areaLength = 512
elif areaID == "map":
readArea = areaMap
areaLength = 8192
elif areaID == "sfx":
readArea = areaSfx
areaLength = 8704
elif areaID == "music":
readArea = areaMusic
areaLength = 512
outputString = ""
for line in readArea:
outputString += line
while len(outputString) < areaLength:
if readArea == areaMusic:
outputString += "40"
else:
outputString += "0"
return outputString
def writeSoundtrack():
fullString = ""
fullString += areaToString("music")
fullString += areaToString("sfx")
outputFile.write("data=" + fullString + "\n")
def writeAllData():
fullString = ""
fullString += areaToString("gfx")
fullString += areaToString("map")
fullString += areaToString("gff")
fullString += areaToString("music")
fullString += areaToString("sfx")
outputFile.write("data=" + fullString + "\n")
areaGfx = []
areaGff = []
areaMap = []
areaSfx = []
areaMusic = []
try:
file = open(args.input, "r")
except FileNotFoundError:
print(args.input + " not found!")
quit()
cartContent = file.readlines()
file.close()
if args.source == "bitmap":
acceptedBgColorInputs = "0123456789abcdef"
if len(args.bgcolor) != 1 or args.bgcolor not in acceptedBgColorInputs:
print("Error: Background color input must be a single hexadecimal digit in lowercase!")
quit()
readMode = 0
readModes = { 1: readGfx,
2: readGff,
3: readMap,
4: readSfx,
5: readMusic
}
for line in cartContent:
if len(line) > 1:
if line.startswith("__gfx__") and args.source in ["bitmap", "gfx", "all"]:
readMode = 1
elif line.startswith("__gff__") and args.source in ["gff", "all"]:
readMode = 2
elif line.startswith("__map__") and args.source in ["map", "all"]:
readMode = 3
elif line.startswith("__sfx__") and args.source in ["soundtrack", "sfx", "all"]:
readMode = 4
elif line.startswith("__music__") and args.source in ["soundtrack", "music", "all"]:
readMode = 5
elif line.startswith("__label__"):
readMode = 0
elif readMode != 0:
readModes[readMode]()
if args.source == "bitmap":
cropBitmap()
elif len(areaGfx) > 0:
swapGfxNibbles()
assetList = readAssets(args.output, False)
dupeFound = False
for asset in assetList:
if asset.name == args.assetname:
dupeFound = True
if dupeFound:
print("An asset named \"" + args.assetname + "\" already found in " + args.output + ", overwrite?")
while True:
userInput = input("(y/n): ")
if userInput == "y":
break
if userInput == "n":
print("Extract cancelled!")
quit()
assetList = list(filter(lambda a: a.name != args.assetname, assetList))
try:
outputFile = open(args.output, "w")
except PermissionError:
print("Error! Cannot write to " + args.output + " due to a permission error")
for a in assetList:
outputFile.write("name=" + a.name + "\n")
if a.bitmap != None:
for line in a.bitmap:
outputFile.write("bitmap=" + line + "\n")
if a.data != None:
outputFile.write("data=" + a.data + "\n")
outputFile.write("-\n")
outputFile.write("name=" + args.assetname + "\n")
if args.source == "bitmap":
writeBitmap()
elif args.source == "soundtrack":
writeSoundtrack()
elif args.source == "all":
writeAllData()
else:
outputString = areaToString(args.source)
outputFile.write("data=" + outputString + "\n")
outputFile.write("-\n")
    outputFile.close()
print("Asset extracted successfully!") |
py | 1a41e69b5d07fb1b5264ecb87ad6b24923816295 | import logging
import pandas as pd
from bots import imps
from openbb_terminal.decorators import log_start_end
from openbb_terminal.economy import wsj_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def futures_coms_command():
"""Futures and commodities overview [Wall St. Journal]"""
# Debug user input
if imps.DEBUG:
logger.debug("econ-futures")
# Retrieve data
df = wsj_model.top_commodities()
# Check for argument
if df.empty:
raise Exception("No available data found")
df["Last Price"] = pd.to_numeric(df["Price"].astype(float))
df["Change"] = pd.to_numeric(df["Chg"].astype(float))
df["%Chg"] = pd.to_numeric(df["%Chg"].astype(float))
# Debug user output
if imps.DEBUG:
logger.debug(df.to_string())
formats = {
"Last Price": "${:.2f}",
"Change": "${:.2f}",
"%Chg": "<b>{:.2f}%</b>",
}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df["Change"] = df.apply(lambda x: f"{x['Change']} (<b>{x['%Chg']}</b>)", axis=1)
df = df.fillna("")
df.set_index(" ", inplace=True)
font_color = ["white"] * 2 + [
["#e4003a" if boolv else "#00ACFF" for boolv in df["%Chg"].str.contains("-")]
]
df = df.drop(columns=["Price", "Chg", "%Chg"])
fig = imps.plot_df(
df,
fig_size=(620, (40 + (40 * len(df.index)))),
col_width=[4, 2.4, 3],
tbl_header=imps.PLT_TBL_HEADER,
tbl_cells=imps.PLT_TBL_CELLS,
font=imps.PLT_TBL_FONT,
row_fill_color=imps.PLT_TBL_ROW_COLORS,
paper_bgcolor="rgba(0, 0, 0, 0)",
)
fig.update_traces(
cells=(
dict(
align=["center", "right"],
font=dict(color=font_color),
)
)
)
imagefile = imps.save_image("econ-futures.png", fig)
return {"title": "Economy: [WSJ] Futures/Commodities", "imagefile": imagefile}
|
py | 1a41e7373fd5a068347b9fd34243210abb43ba3d | """Support for Tasmota lights."""
from hatasmota.light import (
LIGHT_TYPE_COLDWARM,
LIGHT_TYPE_NONE,
LIGHT_TYPE_RGB,
LIGHT_TYPE_RGBCW,
LIGHT_TYPE_RGBW,
)
from homeassistant.components import light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from .const import DATA_REMOVE_DISCOVER_COMPONENT, DOMAIN as TASMOTA_DOMAIN
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
DEFAULT_BRIGHTNESS_MAX = 255
TASMOTA_BRIGHTNESS_MAX = 100
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Tasmota light dynamically through discovery."""
@callback
def async_discover(tasmota_entity, discovery_hash):
"""Discover and add a Tasmota light."""
async_add_entities(
[TasmotaLight(tasmota_entity=tasmota_entity, discovery_hash=discovery_hash)]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(light.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(light.DOMAIN, TASMOTA_DOMAIN),
async_discover,
)
class TasmotaLight(
TasmotaAvailability,
TasmotaDiscoveryUpdate,
LightEntity,
):
"""Representation of a Tasmota light."""
def __init__(self, **kwds):
"""Initialize Tasmota light."""
self._state = False
self._supported_features = 0
self._brightness = None
self._color_temp = None
self._effect = None
self._hs = None
self._white_value = None
self._flash_times = None
super().__init__(
discovery_update=self.discovery_update,
**kwds,
)
self._setup_from_entity()
async def discovery_update(self, update, write_state=True):
"""Handle updated discovery message."""
await super().discovery_update(update, write_state=False)
self._setup_from_entity()
self.async_write_ha_state()
def _setup_from_entity(self):
"""(Re)Setup the entity."""
supported_features = 0
light_type = self._tasmota_entity.light_type
if light_type != LIGHT_TYPE_NONE:
supported_features |= SUPPORT_BRIGHTNESS
supported_features |= SUPPORT_TRANSITION
if light_type in [LIGHT_TYPE_COLDWARM, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_COLOR_TEMP
if light_type in [LIGHT_TYPE_RGB, LIGHT_TYPE_RGBW, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_COLOR
supported_features |= SUPPORT_EFFECT
if light_type in [LIGHT_TYPE_RGBW, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_WHITE_VALUE
self._supported_features = supported_features
@callback
def state_updated(self, state, **kwargs):
"""Handle state updates."""
self._state = state
attributes = kwargs.get("attributes")
if attributes:
if "brightness" in attributes:
brightness = float(attributes["brightness"])
percent_bright = brightness / TASMOTA_BRIGHTNESS_MAX
self._brightness = percent_bright * 255
if "color" in attributes:
color = attributes["color"]
self._hs = color_util.color_RGB_to_hs(*color)
if "color_temp" in attributes:
self._color_temp = attributes["color_temp"]
if "effect" in attributes:
self._effect = attributes["effect"]
if "white_value" in attributes:
white_value = float(attributes["white_value"])
percent_white = white_value / TASMOTA_BRIGHTNESS_MAX
self._white_value = percent_white * 255
self.async_write_ha_state()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the color temperature in mired."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._tasmota_entity.min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._tasmota_entity.max_mireds
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._tasmota_entity.effect_list
@property
def hs_color(self):
"""Return the hs color value."""
return self._hs
@property
def white_value(self):
"""Return the white property."""
return self._white_value
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
supported_features = self._supported_features
attributes = {}
if ATTR_HS_COLOR in kwargs and supported_features & SUPPORT_COLOR:
hs_color = kwargs[ATTR_HS_COLOR]
attributes["color"] = {}
rgb = color_util.color_hsv_to_RGB(hs_color[0], hs_color[1], 100)
attributes["color"] = [rgb[0], rgb[1], rgb[2]]
if ATTR_TRANSITION in kwargs:
attributes["transition"] = kwargs[ATTR_TRANSITION]
if ATTR_BRIGHTNESS in kwargs and supported_features & SUPPORT_BRIGHTNESS:
brightness_normalized = kwargs[ATTR_BRIGHTNESS] / DEFAULT_BRIGHTNESS_MAX
device_brightness = min(
round(brightness_normalized * TASMOTA_BRIGHTNESS_MAX),
TASMOTA_BRIGHTNESS_MAX,
)
# Make sure the brightness is not rounded down to 0
device_brightness = max(device_brightness, 1)
attributes["brightness"] = device_brightness
if ATTR_COLOR_TEMP in kwargs and supported_features & SUPPORT_COLOR_TEMP:
attributes["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
if ATTR_EFFECT in kwargs:
attributes["effect"] = kwargs[ATTR_EFFECT]
if ATTR_WHITE_VALUE in kwargs:
white_value_normalized = kwargs[ATTR_WHITE_VALUE] / DEFAULT_BRIGHTNESS_MAX
device_white_value = min(
round(white_value_normalized * TASMOTA_BRIGHTNESS_MAX),
TASMOTA_BRIGHTNESS_MAX,
)
attributes["white_value"] = device_white_value
self._tasmota_entity.set_state(True, attributes)
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
attributes = {"state": "OFF"}
if ATTR_TRANSITION in kwargs:
attributes["transition"] = kwargs[ATTR_TRANSITION]
self._tasmota_entity.set_state(False, attributes)
|
py | 1a41e9f70472e75ce696b8741e66967c0c9bc6b2 | #!/usr/bin/env python
# This is a helper used by `update-pdfjs` to update the Mustache template for
# serving PDFs with PDF.js through the local dev server.
import os
import sys
# Header to insert at the top of the generated PDF.js viewer template
FILE_HEADER = """
<!-- AUTO-GENERATED BY {}. DO NOT EDIT. -->
""".format(
sys.argv[0]
)
# Header to insert after the original `<title>` tag in the PDF viewer HTML
# mustache template.
#
# This header is responsible for:
#
# - Adding a `<base>` tag so that relative URLs in the pre-built viewer HTML
# resolve to the right URL.
# - Injecting custom PDF.js viewer configuration
# - Injecting the Hypothesis client entry point and configuration
#
# The header needs to be inserted after the `<title>` tag so we can override it,
# but before any relative asset links which will be affected by the `<base>`
# tag.
#
HYPOTHESIS_HEADER = """
<!-- Begin Hypothesis modifications -->
<base href="/scripts/pdfjs/web/">
<title>via Hypothesis</title>
<!--
It's worth noting that this link tag is *not* currently used by the
Hypothesis client to determine the URL of this page. For consistency with
how these pages are served on via, however, we serve it with the PDF.js
viewer application.
-->
<link rel="canonical" href="{{{ documentUrl }}}"/>
<script>
window.DOCUMENT_URL = '{{{documentUrl}}}';
window.PDF_URL = '{{{ url }}}';
window.CLIENT_URL = '{{{clientUrl}}}'.replace('{current_host}', document.location.hostname);
</script>
<script src="/scripts/pdfjs-init.js"></script>
<!-- Configure Hypothesis client. -->
{{{hypothesisConfig}}}
<!-- End Hypothesis modifications -->
"""
def insert_after(str_, search_str, insert_str):
return str_.replace(search_str, search_str + insert_str)
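# e.g. insert_after("<title>PDF</title><body>", "</title>", FILE_HEADER)
# returns "<title>PDF</title>" + FILE_HEADER + "<body>".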
input_file_path = sys.argv[1]
output_file_path = sys.argv[2]
input_file = open(input_file_path, "r")
output_file = open(output_file_path, "w")
base_dir = os.path.dirname(input_file_path)
viewer_html = input_file.read()
viewer_html = insert_after(viewer_html, "<!DOCTYPE html>", FILE_HEADER)
viewer_html = insert_after(
viewer_html, "</title>", HYPOTHESIS_HEADER.replace("$BASEDIR", base_dir)
)
output_file.write(viewer_html) |
py | 1a41ea8679e13041b9966e5fe48260dfdbe843bc | """ Simple Python class to access the JLR Remote Car API
https://github.com/ardevd/jlrpy
"""
from urllib.request import Request, build_opener
import json
import datetime
import calendar
import uuid
import time
class Connection(object):
"""Connection to the JLR Remote Car API"""
def __init__(self,
email='',
password='',
device_id='', ):
"""Init the connection object
        The email address and password associated with your Jaguar InControl account are required.
"""
self.email = email
if device_id:
self.device_id = device_id
else:
self.device_id = str(uuid.uuid4())
self.oauth = {
"grant_type": "password",
"username": email,
"password": password}
self.expiration = 0 # force credential refresh
self.connect()
self.vehicles = []
try:
for v in self.get_vehicles(self.head)['vehicles']:
self.vehicles.append(Vehicle(v, self))
except TypeError:
print("[-] No vehicles associated with this account")
def get(self, command, url, headers):
"""GET data from API"""
return self.post(command, url, headers, None)
def post(self, command, url, headers, data=None):
"""POST data to API"""
now = calendar.timegm(datetime.datetime.now().timetuple())
if now > self.expiration:
# Auth expired, reconnect
self.connect()
return self.__open("%s/%s" % (url, command), headers=headers, data=data)
def connect(self):
print("[*] Connecting...")
auth = self.__authenticate(data=self.oauth)
self.__register_auth(auth)
print("[*] 1/3 authenticated")
self.__setheader(auth['access_token'], auth['expires_in'])
self.__register_device(self.head)
print("[*] 2/3 device id registered")
self.__login_user(self.head)
print("[*] 3/3 user logged in, user id retrieved")
def __open(self, url, headers=None, data=None):
req = Request(url, headers=headers)
if data:
req.data = bytes(json.dumps(data), encoding="utf8")
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
return json.loads(resp.read().decode(charset))
def __register_auth(self, auth):
self.access_token = auth['access_token']
now = calendar.timegm(datetime.datetime.now().timetuple())
self.expiration = now + int(auth['expires_in'])
self.auth_token = auth['authorization_token']
self.refresh_token = auth['refresh_token']
def __setheader(self, access_token, expiration=float('inf')):
"""Set HTTP header fields"""
self.head = {
"Authorization": "Bearer %s" % access_token,
"X-Device-Id": self.device_id,
"Content-Type": "application/json"}
def __authenticate(self, data=None):
"""Raw urlopen command to the auth url"""
url = "https://jlp-ifas.wirelesscar.net/ifas/jlr/tokens"
auth_headers = {
"Authorization": "Basic YXM6YXNwYXNz",
"Content-Type": "application/json",
"X-Device-Id": self.device_id}
req = Request(url, headers=auth_headers)
# Convert data to json
req.data = bytes(json.dumps(data), encoding="utf8")
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
return json.loads(resp.read().decode(charset))
def __register_device(self, headers=None):
"""Register the device Id"""
url = "https://jlp-ifop.wirelesscar.net/ifop/jlr/users/%s/clients" % self.email
data = {
"access_token": self.access_token,
"authorization_token": self.auth_token,
"expires_in": "86400",
"deviceID": self.device_id
}
req = Request(url, headers=headers)
req.data = bytes(json.dumps(data), encoding="utf8")
opener = build_opener()
resp = opener.open(req)
# TODO: Check for response code
def __login_user(self, headers=None):
"""Login the user"""
url = "https://jlp-ifoa.wirelesscar.net/if9/jlr/users?loginName=%s" % self.email
user_login_header = headers.copy()
user_login_header["Accept"] = "application/vnd.wirelesscar.ngtp.if9.User-v3+json"
req = Request(url, headers=user_login_header)
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
"""Register user id"""
userdata = json.loads(resp.read().decode(charset))
self.user_id = userdata['userId']
return userdata
def get_vehicles(self, headers):
"""Get vehicles for user"""
url = "https://jlp-ifoa.wirelesscar.net/if9/jlr/users/%s/vehicles?primaryOnly=true" % self.user_id
req = Request(url, headers=headers)
opener = build_opener()
resp = opener.open(req)
charset = resp.info().get('charset', 'utf-8')
return json.loads(resp.read().decode(charset))
def get_user_info(self):
"""Get user information"""
return self.get(self.user_id, "https://jlp-ifoa.wirelesscar.net/if9/jlr/users", self.head)
class Vehicle(dict):
"""Vehicle class.
    You can request data or send commands to the vehicle. Consult the JLR API documentation for details.
"""
def __init__(self, data, connection):
"""Initialize the vehicle class."""
super().__init__(data)
self.connection = connection
self.vin = data['vin']
        # Authenticate to VHS
self.__authenticate_vhs()
def get_attributes(self):
"""Get vehicle attributes"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.ngtp.org.VehicleAttributes-v3+json"
result = self.get('attributes', headers)
return result
def get_status(self):
"""Get vehicle status"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.ngtp.org.if9.healthstatus-v2+json"
result = self.get('status', headers)
return result
def get_health_status(self):
"""Get vehicle health status"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json"
headers["Content-Type"] = "application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8"
return self.post('healthstatus', headers, self.vhs_data)
def get_departure_timers(self):
"""Get vehicle departure timers"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.DepartureTimerSettings-v1+json"
return self.get("departuretimers", headers)
def get_wakeup_time(self):
"""Get configured wakeup time for vehicle"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.VehicleWakeupTime-v2+json"
return self.get("wakeuptime", headers)
def get_subscription_packages(self):
"""Get vehicle status"""
result = self.get('subscriptionpackages', self.connection.head)
return result
def get_trips(self):
"""Get the last 1000 trips associated with vehicle"""
return self.get('trips?count=1000', self.connection.head)
def get_position(self):
"""Get current vehicle position"""
return self.get('position', self.connection.head)
def honk_blink(self):
"""Sound the horn and blink lights"""
headers = self.connection.head.copy()
headers["Accept"] = "application/vnd.wirelesscar.ngtp.if9.ServiceStatus-v4+json"
headers["Content-Type"] = "application/vnd.wirelesscar.ngtp.if9.StartServiceConfiguration-v3+json; charset=utf-8"
return self.post("honkBlink", headers, self.vhs_data)
def __authenticate_vhs(self):
"""Authenticate to vhs and get token"""
data = {
"serviceName": "VHS",
"pin": ""}
headers = self.connection.head.copy()
headers["Content-Type"] = "application/vnd.wirelesscar.ngtp.if9.AuthenticateRequest-v2+json; charset=utf-8"
vhs_auth_data = self.post("users/%s/authenticate" % self.connection.user_id, headers, data)
self.vhs_data = {
"token": vhs_auth_data['token']}
def post(self, command, headers, data):
"""Utility command to post data to VHS"""
return self.connection.post(command, 'https://jlp-ifoa.wirelesscar.net/if9/jlr/vehicles/%s' % self.vin,
headers, data)
def get(self, command, headers):
"""Utility command to get vehicle data from API"""
return self.connection.get(command, 'https://jlp-ifoa.wirelesscar.net/if9/jlr/vehicles/%s' % self.vin, headers)
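# Minimal usage sketch (placeholder credentials; running this performs live API
# calls against the JLR InControl service):
if __name__ == "__main__":
    connection = Connection(email="user@example.com", password="secret")
    for vehicle in connection.vehicles:
        print(vehicle.get_status())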
|
py | 1a41ec2303e361fdfa7da639fe3d3796e3ffc42a | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""BC networks definition."""
import dataclasses
from typing import Optional, Tuple
from acme import specs
from acme.agents.jax import actor_core, actors
from acme.jax import networks as networks_lib
from acme.jax import utils
import gin
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jrl.utils.networks import procgen_networks
distributional = networks_lib.distributional
atari = networks_lib.atari
@dataclasses.dataclass
class BCNetworks:
"""Network and pure functions for the BC agent."""
policy_network: networks_lib.FeedForwardNetwork
log_prob: networks_lib.LogProbFn
sample: networks_lib.SampleFn
sample_eval: Optional[networks_lib.SampleFn] = None
img_encoder: Optional[networks_lib.FeedForwardNetwork] = None
def apply_policy_and_sample(
networks, eval_mode = False):
"""Returns a function that computes actions."""
sample_fn = networks.sample if not eval_mode else networks.sample_eval
if not sample_fn:
raise ValueError('sample function is not provided')
def apply_and_sample(params, key, obs):
return sample_fn(networks.policy_network.apply(params, obs), key)
return actor_core.batched_feed_forward_to_actor_core(apply_and_sample)
def apply_policy_and_sample_with_img_encoder(
networks, eval_mode = False):
"""Returns a function that computes actions."""
sample_fn = networks.sample if not eval_mode else networks.sample_eval
if not sample_fn:
raise ValueError('sample function is not provided')
def apply_and_sample(params, key, obs):
img = obs['state_image']
img_embedding = networks.img_encoder.apply(params[1], img)
x = dict(state_image=img_embedding, state_dense=obs['state_dense'])
return sample_fn(networks.policy_network.apply(params[0], x), key)
return actor_core.batched_feed_forward_to_actor_core(apply_and_sample)
w_init = hk.initializers.VarianceScaling(1.0, "fan_avg", "truncated_normal")
b_init = jnp.zeros
dist_w_init = hk.initializers.VarianceScaling(1.0, "fan_avg", "truncated_normal")
dist_b_init = jnp.zeros
@gin.register
def build_standard_actor_fn(
num_dimensions,
actor_hidden_layer_sizes = (256, 256, 256),):
def _actor_fn(obs):
# # for matching Ilya's codebase
# relu_orthogonal = hk.initializers.Orthogonal(scale=2.0**0.5)
# near_zero_orthogonal = hk.initializers.Orthogonal(1e-2)
# x = obs
# for hid_dim in actor_hidden_layer_sizes:
# x = hk.Linear(hid_dim, w_init=relu_orthogonal, b_init=jnp.zeros)(x)
# x = jax.nn.relu(x)
# dist = networks_lib.NormalTanhDistribution(
# num_dimensions,
# w_init=near_zero_orthogonal,
# b_init=jnp.zeros)(x)
# return dist
network = hk.Sequential([
hk.nets.MLP(
list(actor_hidden_layer_sizes),
# w_init=hk.initializers.VarianceScaling(1.0, 'fan_in', 'uniform'),
# w_init=hk.initializers.VarianceScaling(1.0, "fan_avg", "truncated_normal"),
w_init=w_init,
b_init=b_init,
activation=jax.nn.relu,
activate_final=True),
# networks_lib.NormalTanhDistribution(num_dimensions),
networks_lib.NormalTanhDistribution(
num_dimensions,
w_init=dist_w_init,
b_init=dist_b_init,
min_scale=1e-2,
),
])
return network(obs)
return _actor_fn
def make_networks(
spec,
build_actor_fn=build_standard_actor_fn,
img_encoder_fn=None,
):
"""Creates networks used by the agent."""
# Create dummy observations and actions to create network parameters.
dummy_action = utils.zeros_like(spec.actions)
dummy_obs = utils.zeros_like(spec.observations)
dummy_action = utils.add_batch_dim(dummy_action)
dummy_obs = utils.add_batch_dim(dummy_obs)
if isinstance(spec.actions, specs.DiscreteArray):
num_dimensions = spec.actions.num_values
# _actor_fn = procgen_networks.build_procgen_actor_fn(num_dimensions)
else:
num_dimensions = np.prod(spec.actions.shape, dtype=int)
_actor_fn = build_actor_fn(num_dimensions)
if img_encoder_fn is not None:
img_encoder = hk.without_apply_rng(
hk.transform(img_encoder_fn, apply_rng=True))
key = jax.random.PRNGKey(seed=42)
temp_encoder_params = img_encoder.init(key, dummy_obs['state_image'])
dummy_hidden = img_encoder.apply(temp_encoder_params, dummy_obs['state_image'])
img_encoder_network = networks_lib.FeedForwardNetwork(
lambda key: img_encoder.init(key, dummy_hidden), img_encoder.apply)
dummy_policy_input = dict(
state_image=dummy_hidden,
state_dense=dummy_obs['state_dense'],)
else:
img_encoder_fn = None
dummy_policy_input = dummy_obs
img_encoder_network = None
policy = hk.without_apply_rng(hk.transform(_actor_fn, apply_rng=True))
return BCNetworks(
policy_network=networks_lib.FeedForwardNetwork(
lambda key: policy.init(key, dummy_policy_input), policy.apply),
log_prob=lambda params, actions: params.log_prob(actions),
sample=lambda params, key: params.sample(seed=key),
sample_eval=lambda params, key: params.mode(),
img_encoder=img_encoder_network,)
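# Usage sketch (assumes `spec` is an acme specs.EnvironmentSpec for a
# continuous-control task):
#   networks = make_networks(spec)
#   eval_actor_core = apply_policy_and_sample(networks, eval_mode=True)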
|
py | 1a41ec5cd4a3a0b26d2d99274f9632cc75e73dea | # Copyright (c) 2013 Tencent Inc.
# All rights reserved.
#
# Author: LI Yi <[email protected]>
# Created: September 27, 2013
"""
This module defines cu_library, cu_binary and cu_test rules
for cuda development.
"""
from __future__ import absolute_import
import os
from blade import build_manager
from blade import build_rules
from blade import config
from blade.blade_util import var_to_list
from blade.cc_targets import CcTarget
class CuTarget(CcTarget):
"""This class is derived from CcTarget and is the base class
of cu_library, cu_binary etc.
"""
def __init__(self,
name,
target_type,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs):
srcs = var_to_list(srcs)
deps = var_to_list(deps)
extra_cppflags = var_to_list(extra_cppflags)
extra_linkflags = var_to_list(extra_linkflags)
CcTarget.__init__(self,
name,
target_type,
srcs,
deps,
None,
warning,
defs,
incs,
[], [],
extra_cppflags,
extra_linkflags,
blade,
kwargs)
def _get_cu_flags(self):
"""Return the nvcc flags according to the BUILD file and other configs. """
nvcc_flags = []
# Warnings
if self.data.get('warning', '') == 'no':
nvcc_flags.append('-w')
# Defs
defs = self.data.get('defs', [])
nvcc_flags += [('-D' + macro) for macro in defs]
# Optimize flags
if (self.blade.get_options().profile == 'release' or
self.data.get('always_optimize')):
nvcc_flags += self._get_optimize_flags()
# Incs
incs = self._get_incs_list()
return nvcc_flags, incs
def _cu_objects_rules(self):
env_name = self._env_name()
flags_from_option, incs_list = self._get_cu_flags()
incs_string = " -I".join(incs_list)
flags_string = " ".join(flags_from_option)
objs = []
for src in self.srcs:
obj = 'obj_%s' % self._var_name_of(src)
target_path = os.path.join(
self.build_path, self.path, '%s.objs' % self.name, src)
self._write_rule(
'%s = %s.NvccObject(NVCCFLAGS="-I%s %s", target="%s" + top_env["OBJSUFFIX"]'
', source="%s")' % (obj,
env_name,
incs_string,
flags_string,
target_path,
self._target_file_path(src)))
objs.append(obj)
self._write_rule('%s = [%s]' % (self._objs_name(), ','.join(objs)))
class CuLibrary(CuTarget):
"""This class is derived from CuTarget and generates the cu_library
rules according to user options.
"""
def __init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs):
CuTarget.__init__(self,
name,
'cu_library',
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs)
def scons_rules(self):
"""Generate scons rules according to user options. """
self._prepare_to_generate_rule()
self._cu_objects_rules()
self._cc_library()
def cu_library(name,
srcs=[],
deps=[],
warning='yes',
defs=[],
incs=[],
extra_cppflags=[],
extra_linkflags=[],
**kwargs):
target = CuLibrary(name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
build_manager.instance,
kwargs)
build_manager.instance.register_target(target)
build_rules.register_function(cu_library)
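# Example BUILD file usage (target and dependency names are illustrative):
# cu_library(
#     name='vector_add_kernels',
#     srcs=['vector_add.cu'],
#     deps=[':common'],
#     defs=['USE_FAST_MATH'],
# )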
class CuBinary(CuTarget):
"""This class is derived from CuTarget and generates the cu_binary
rules according to user options.
"""
def __init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs):
CuTarget.__init__(self,
name,
'cu_binary',
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs)
def _cc_binary(self):
env_name = self._env_name()
var_name = self._var_name()
(link_all_symbols_lib_list,
lib_str,
whole_link_flags) = self._get_static_deps_lib_list()
if whole_link_flags:
self._write_rule(
'%s.Append(LINKFLAGS=[%s])' % (env_name, whole_link_flags))
if self.data.get('export_dynamic'):
self._write_rule(
'%s.Append(LINKFLAGS="-rdynamic")' % env_name)
self._setup_link_flags()
self._write_rule('{0}.Replace('
'CC={0}["NVCC"], '
'CPP={0}["NVCC"], '
'CXX={0}["NVCC"], '
'LINK={0}["NVCC"])'.format(env_name))
self._write_rule('%s = %s.Program("%s", %s, %s)' % (
var_name,
env_name,
self._target_file_path(),
self._objs_name(),
lib_str))
self._write_rule('%s.Depends(%s, %s)' % (
env_name,
var_name,
self._objs_name()))
if link_all_symbols_lib_list:
self._write_rule('%s.Depends(%s, [%s])' % (
env_name, var_name, ', '.join(link_all_symbols_lib_list)))
# self._write_rule('%s.Append(LINKFLAGS=str(version_obj[0]))' % env_name)
self._write_rule('%s.Requires(%s, version_obj)' % (
env_name, var_name))
def scons_rules(self):
"""Generate scons rules according to user options. """
self._prepare_to_generate_rule()
self._cu_objects_rules()
self._cc_binary()
def cu_binary(name,
srcs=[],
deps=[],
warning='yes',
defs=[],
incs=[],
extra_cppflags=[],
extra_linkflags=[],
**kwargs):
target = CuBinary(name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
build_manager.instance,
kwargs)
build_manager.instance.register_target(target)
build_rules.register_function(cu_binary)
class CuTest(CuBinary):
"""This class is derived from CuBinary and generates the cu_test
rules according to user options.
"""
def __init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
testdata,
always_run,
exclusive,
blade,
kwargs):
# pylint: disable=too-many-locals
CuBinary.__init__(self,
name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
blade,
kwargs)
self.type = 'cu_test'
self.data['testdata'] = var_to_list(testdata)
self.data['always_run'] = always_run
self.data['exclusive'] = exclusive
cc_test_config = config.get_section('cc_test_config')
gtest_lib = var_to_list(cc_test_config['gtest_libs'])
gtest_main_lib = var_to_list(cc_test_config['gtest_main_libs'])
# Hardcode deps rule to thirdparty gtest main lib.
self._add_hardcode_library(gtest_lib)
self._add_hardcode_library(gtest_main_lib)
def cu_test(name,
srcs=[],
deps=[],
warning='yes',
defs=[],
incs=[],
extra_cppflags=[],
extra_linkflags=[],
testdata=[],
always_run=False,
exclusive=False,
**kwargs):
target = CuTest(name,
srcs,
deps,
warning,
defs,
incs,
extra_cppflags,
extra_linkflags,
testdata,
always_run,
exclusive,
build_manager.instance,
kwargs)
build_manager.instance.register_target(target)
build_rules.register_function(cu_test)
|
py | 1a41ecdff834838e155dc78e36ed572e700369a9 | from collections import deque
class RollingAverage:
def __init__(self, n=100):
self._value = 0.
self._deque = deque(maxlen=n)
@property
def value(self):
return self._value
def update(self, observed_value):
if len(self._deque) == self._deque.maxlen:
self._value += (observed_value - self._deque.popleft()) / self._deque.maxlen
self._deque.append(observed_value)
else:
self._deque.append(observed_value)
self._value += (observed_value - self._value) / len(self._deque)
return self._value
class ExponentialAverage:
def __init__(self, n=100):
self._value = 0.
self._len = 0
self._maxlen = n
@property
def value(self):
return self._value
def update(self, observed_value):
if self._len < self._maxlen:
self._len += 1
self._value += (observed_value - self._value) / self._len
return self._value
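# Quick usage sketch: average the last three observations of a noisy metric.
if __name__ == "__main__":
    avg = RollingAverage(n=3)
    for sample in [1.0, 2.0, 3.0, 4.0]:
        avg.update(sample)
    print(avg.value)  # (2.0 + 3.0 + 4.0) / 3 = 3.0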
|
py | 1a41ecfce2607f7c90aada282742f9fa687760d3 | # Working test of textblob
# https://www.geeksforgeeks.org/spelling-checker-in-python/
from textblob import TextBlob
message = "Hello confsion houes"
print("entered: "+str(message))
corrected = TextBlob(message)
# prints the corrected spelling
print("corrected: "+str(corrected.correct()))
|
py | 1a41ed37b8326139b988f455bf40d364e9cee314 | import argparse
import os
import random
import shutil
import time
import warnings
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.nn.functional as F
from datasets import dataset
from networks import nets
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
model_names += ['resnext50_32x4d_reslt','resnet10_reslt', 'resnext101_32x4d_reslt']
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--root_path', type=str, default='data')
parser.add_argument('-dataset', type=str,
help='path to dataset')
parser.add_argument('--data_path', type=str, default=None)
parser.add_argument('-a', '--arch', metavar='ARCH', default='ResNeXt152',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=None, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.2, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=5e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=100, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
# ResLT
parser.add_argument('--mark', default=None, type=str, help='prefix of log file')
parser.add_argument('--beta', default=None, type=float)
parser.add_argument('--num_works', default=None, type=int)
parser.add_argument('--dropout', default=False, type=bool)
parser.add_argument('--lsm', default=0, type=float)
parser.add_argument('--warmup_epochs', default=5, type=int)
parser.add_argument('--after_1x1conv', action='store_true')
parser.add_argument('--gamma', default=0.5, type=float)
parser.add_argument('--num_classes', default=1000, type=int)
best_acc1 = 0
args = parser.parse_args()
args.root_model = f'{args.root_path}/{args.dataset}/{args.mark}'
os.makedirs(args.root_model, exist_ok=True)
def crossEntropy(softmax, logit, label, weight, num_classes):
label = F.one_hot(label, num_classes=num_classes)
target = label_smoothing(label, num_classes, delta=args.lsm)
loss = - (weight * (target * torch.log(softmax(logit)+1e-7)).sum(dim=1)).sum()
return loss
def disable_conv(model):
for module in model.modules():
if isinstance(module, nn.Conv2d):
module.weight.requires_grad=False
def label_smoothing(y_batch_tensor, num_classes, delta):
y_batch_smooth = (1 - delta - delta / (num_classes - 1)) * y_batch_tensor + delta / (num_classes - 1)
return y_batch_smooth
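# e.g. with num_classes=3 and delta=0.1 a one-hot [1, 0, 0] becomes
# [0.9, 0.05, 0.05]: the true class gets (1 - 0.1 - 0.05) * 1 + 0.05 = 0.9 and
# each other class gets 0.1 / (3 - 1) = 0.05.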
def main():
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = getattr(nets, args.arch)(dropout=args.dropout, after_1x1conv=args.after_1x1conv, gamma=args.gamma)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
filename = "checkpoints/ImageNet/"+args.mark+'_checkpoint.pth.tar'
if os.path.exists(filename):
args.resume = filename
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
print(args.dataset," ",type(args.dataset))
data=getattr(dataset,args.dataset)(batch_size=args.batch_size, num_works=args.num_works, root=args.data_path)
train_loader=data.train
val_loader=data.test
if args.evaluate:
validate(val_loader, model, criterion, args)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
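            # NOTE: train_sampler is never defined in this script (the dataset helper above
            # builds its own loaders); this branch assumes a DistributedSampler is exposed
            # and would raise a NameError with the loaders as written.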
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
F_losses = AverageMeter('F_Loss', ':.4e')
I_losses = AverageMeter('I_Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, F_losses, I_losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
softmax = nn.Softmax(dim=1)
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
logitH, logitM, logitT = model(images)
######## ResLT
labelH=F.one_hot(target, num_classes=args.num_classes).sum(dim=1)
labelM=F.one_hot(target, num_classes=args.num_classes)[:,:825].sum(dim=1)
labelT=F.one_hot(target, num_classes=args.num_classes)[:,:220].sum(dim=1)
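        # labelH/labelM/labelT act as per-sample 0/1 weights: labelH is 1 for every sample,
        # labelM only when the class index is below 825, and labelT only below 220 (assumed
        # to select progressively rarer class ranges of the long-tailed ordering, cf. the
        # head/medium/tail slices in validate()).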
I_loss=(crossEntropy(softmax, logitH, target, labelH, args.num_classes) + crossEntropy(softmax, logitM, target, labelM, args.num_classes) \
+ crossEntropy(softmax, logitT, target, labelT, args.num_classes)) / (labelH.sum() + labelM.sum() + labelT.sum())
logit = (logitH + logitM + logitT)
F_loss = crossEntropy(softmax, logit, target, labelH, args.num_classes) / labelH.sum()
loss= (1-args.beta) * F_loss + args.beta * I_loss
# measure accuracy and record loss
acc1, acc5 = accuracy(logit, target, topk=(1, 5))
F_losses.update(F_loss.detach().item(), images.size(0))
I_losses.update(I_loss.detach().item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i, args)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('All_Acc@1', ':6.2f')
top5 = AverageMeter('All_Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
class_num=torch.zeros(1000).cuda()
correct=torch.zeros(1000).cuda()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
logitH, logitM, logitT = model(images)
output = logitH + logitM + logitT
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
_, predicted = output.max(1)
target_one_hot = F.one_hot(target, num_classes=args.num_classes)
predict_one_hot = F.one_hot(predicted, num_classes=args.num_classes)
class_num = class_num + target_one_hot.sum(dim=0).to(torch.float)
correct=correct + (target_one_hot + predict_one_hot==2).sum(dim=0).to(torch.float)
if i % args.print_freq == 0:
progress.display(i, args)
# TODO: this should also be done with the ProgressMeter
acc_classes = correct / class_num
head_acc = acc_classes[610:].mean()
medium_acc = acc_classes[165:610].mean()
tail_acc = acc_classes[:165].mean()
open(args.root_model+"/"+"train.log","a+").write((' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f} HAcc {head_acc:.3f} MAcc {medium_acc:.3f} TAcc {tail_acc:.3f} \n').format(top1=top1, top5=top5, head_acc=head_acc, medium_acc=medium_acc, tail_acc=tail_acc))
return top1.avg
def save_checkpoint(state, is_best, filename=args.root_model+'/checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, args.root_model+'/model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch, args):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
open(args.root_model+"/train.log","a+").write('\t'.join(entries)+"\n")
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr_min = 0
lr_max = args.lr
if epoch < args.warmup_epochs:
lr = args.lr / args.warmup_epochs * (epoch+1)
else:
lr= lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos( (epoch - args.warmup_epochs + 1) / (args.epochs - args.warmup_epochs + 1) * 3.1415926))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
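# Example with args.lr=0.1 and warmup_epochs=5: epoch 0 -> lr 0.02, epoch 4 -> lr 0.1,
# after which the learning rate follows a cosine decay from 0.1 towards 0.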
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
py | 1a41ed6bfbf381728c63ad84ab1b2f65a29b4414 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'sheet_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
sys.exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
sys.exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
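# For example, find_format_specifiers('%1 of %2') yields ['1', '2'], which
# split_format_specifiers maps to ({'1', '2'}, []); a strprintf-style string such as
# '%s: %d' maps to (set(), ['s', 'd']) instead.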
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
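# The pattern above strips every C0 control character except LF (\x0a) and CR (\x0d),
# which stay untouched so line breaks survive in the XML.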
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disabled by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
|
py | 1a41ee25cb6dcf6d95a31b0eb02c74153d44d611 | # Copyright 2018 The Exoplanet ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Postprocessing utility functions for CLIF."""
# CLIF postprocessor for a C++ function with signature:
# bool MyFunc(input_arg1, ..., *output_arg1, *output_arg2, ..., *error)
#
# If MyFunc returns True, returns (output_arg1, output_arg2, ...)
# If MyFunc returns False, raises ValueError(error).
def ValueErrorOnFalse(ok, *output_args):
"""Raises ValueError if not ok, otherwise returns the output arguments."""
n_outputs = len(output_args)
if n_outputs < 2:
raise ValueError(
"Expected 2 or more output_args. Got: {}".format(n_outputs))
if not ok:
error = output_args[-1]
raise ValueError(error)
if n_outputs == 2:
output = output_args[0]
else:
output = output_args[0:-1]
return output
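# Illustrative use with a hypothetical CLIF-wrapped function my_func:
#   result = ValueErrorOnFalse(*my_func(input_arg))
# If my_func returns (True, value, error), result is value; if it returns
# (False, ..., error), ValueError(error) is raised instead.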
# CLIF postprocessor for a C++ function with signature:
# *result MyFactory(input_arg1, ..., *error)
#
# If result is not null, returns result.
# If result is null, raises ValueError(error).
def ValueErrorOnNull(result, error):
"""Raises ValueError(error) if result is None, otherwise returns result."""
if result is None:
raise ValueError(error)
return result
|
py | 1a41ee69e964f13a4ca5f62daf6ce34f237b90f3 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
GPIO.output(11, GPIO.HIGH)
print("LED is on: Green")
time.sleep(3)
GPIO.cleanup()
|
py | 1a41ee71744d951542df63f2767975cc772a5645 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq_mod.data.encoders import register_tokenizer
@register_tokenizer('moses')
class MosesTokenizer(object):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--moses-source-lang', metavar='SRC',
help='source language')
parser.add_argument('--moses-target-lang', metavar='TARGET',
help='target language')
parser.add_argument('--moses-no-dash-splits', action='store_true', default=False,
help='don\'t apply dash split rules')
parser.add_argument('--moses-no-escape', action='store_true', default=False,
help='don\'t perform HTML escaping on apostrophy, quotes, etc.')
# fmt: on
def __init__(self, args):
self.args = args
if getattr(args, 'moses_source_lang', None) is None:
args.moses_source_lang = getattr(args, 'source_lang', 'en')
if getattr(args, 'moses_target_lang', None) is None:
args.moses_target_lang = getattr(args, 'target_lang', 'en')
try:
from sacremoses import MosesTokenizer, MosesDetokenizer
self.tok = MosesTokenizer(args.moses_source_lang)
self.detok = MosesDetokenizer(args.moses_target_lang)
except ImportError:
raise ImportError('Please install Moses tokenizer with: pip install sacremoses')
def encode(self, x: str) -> str:
return self.tok.tokenize(
x,
aggressive_dash_splits=(not self.args.moses_no_dash_splits),
return_str=True,
escape=(not self.args.moses_no_escape),
)
def decode(self, x: str) -> str:
return self.detok.detokenize(x.split())
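# Sketch of the expected behaviour (assuming sacremoses is installed and `args` carries the
# moses_* fields registered above): MosesTokenizer(args).encode('Hello, world!') should give
# 'Hello , world !', and decode() reverses that segmentation.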
|
py | 1a41eea3f5f611718af3e79fed496a9709ed68f7 | from django.contrib.auth.backends import ModelBackend
import re
from .models import User
def jwt_response_payload_handler(token, user=None, request=None):
"""
    Custom payload returned on successful JWT authentication
"""
return {
'token': token,
'user_id': user.id,
'username': user.username
}
def get_user_by_account(account):
"""
    Look up the user object by account information
    :param account: may be a mobile phone number or a username
    :return: the User object if it exists, otherwise None
"""
try:
        # Check whether account is a mobile phone number
        if re.match(r'((13[0-9])|(14[5,7])|(15[0-3,5-9])|(17[0,1,3,5-8])|(18[0-9])|166|198|199|(147))\d{8}', account):
            # Query by mobile phone number
            user = User.objects.get(mobile=account)
        else:
            # Query by username
            user = User.objects.get(username=account)
except User.DoesNotExist:
return None
else:
return user
class UsernameMobileAuthBackend(ModelBackend):
"""自定义的认证方法后端"""
def authenticate(self, request, username=None, password=None, **kwargs):
"""
        Custom authentication method
        :param request: the current request object
        :param username: the user account, which may be a username or a mobile phone number
        :param password: the password passed from the frontend
        :param kwargs: extra parameters
:return:
"""
        # Look up the user object by username
        user = get_user_by_account(username)
        # If the user object exists, call its check_password method to verify the password
        if user and user.check_password(password):
            # Authentication succeeded, return the user object
return user
|
py | 1a41eed101813d7614baafc1e6aed775b1b00263 | """
Default exit plugin
"""
import shutil
import logging
import os
class ExitPlugin(object):
""" Removes temporary files and exits the program """
def __init__(self, skye):
self.skye = skye
def close_program(self):
""" Closes the program """
self.skye.speak("Goodbye")
logging.debug("Removing temporary folders")
if os.path.exists("temp"):
shutil.rmtree("temp", ignore_errors=True)
logging.info("Exiting")
quit()
def setup(skye):
"""Called when the plugin is set up. Used to register commands and other
initializations
Arguments:
skye {Skye} -- The singleton Skye instance
"""
exit_plugin = ExitPlugin(skye)
skye.register_command(("exit", "leave", "quit", "stop"),
exit_plugin.close_program)
|
py | 1a41ef99ae817c6ed28424c99238887ffaea2c74 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from seq2seq.models.decoder import Classifier
class Stage3(torch.nn.Module):
def __init__(self):
super(Stage3, self).__init__()
self.layer5 = torch.nn.LSTM(2048, 1024)
self.layer8 = Classifier(1024, 32320)
def forward(self, input1, input3, input0):
input2 = [None]
out0 = input0.clone()
out1 = input1.clone()
out2 = input2[0]
out3 = input3.clone()
out4 = torch.cat([out0, out1], 2)
out5 = self.layer5(out4, out2)
out6 = out5[0]
out6 = out6 + out3
out8 = self.layer8(out6)
return out8
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
|
py | 1a41f07d017f31480840d406b732b6090b03859f | # https://stackoverflow.com/questions/33533148/how-do-i-specify-that-the-return-type-of-a-method-is-the-same-as-the-class-itsel
from __future__ import annotations
import hashlib
import uuid
from types import GeneratorType
from typing import Optional, Iterable
from pypadre.core.base import MetadataMixin, ChildMixin
from pypadre.core.model.computation.run import Run
from pypadre.core.model.generic.i_model_mixins import ProgressableMixin
from pypadre.core.model.generic.i_platform_info_mixin import PlatformInfoMixin
from pypadre.core.model.generic.i_storable_mixin import StoreableMixin
from pypadre.core.util.utils import persistent_hash
class Computation(StoreableMixin, ProgressableMixin, PlatformInfoMixin, MetadataMixin, ChildMixin):
COMPONENT_ID = "component_id"
COMPONENT_CLASS = "component_class"
RUN_ID = "run_id"
PREDECESSOR_ID = "predecessor_computation_id"
METRICS_IDS = "metrics_ids"
@classmethod
def _tablefy_register_columns(cls):
super()._tablefy_register_columns()
cls.tablefy_register("format", "type", "parameters", "initial_hyperparameters", "metrics", "result")
def __init__(self, *, component, run: Run, predecessor: Optional[Computation] = None, result_format=None, result,
parameters=None, initial_hyperparameters=None, branch=False, metrics=None, **kwargs):
if parameters is None:
parameters = {}
# Add defaults
defaults = {}
# Merge defaults
metadata = {**defaults, **kwargs.pop("metadata", {}), **{self.COMPONENT_ID: component.id,
self.COMPONENT_CLASS: str(component.__class__),
self.RUN_ID: str(run.id),
"id": uuid.uuid4().__str__() + "-" + str(persistent_hash(run.id, algorithm=hashlib.md5))
}}
if predecessor is not None:
metadata[self.PREDECESSOR_ID] = predecessor.id
if metrics is not None and len(metrics) > 0:
metadata[self.METRICS_IDS] = [m.id for m in metrics]
self._metrics = {m.name: m.result for m in metrics}
else:
self._metrics = None
self._format = result_format
self._component = component
self._result = result
        # TODO: add a result schema (this has to be provided by the component); ideally a
        # component could directly return a Computation object
        # TODO: allow for multiple predecessors
self._predecessor = predecessor
self._parameters = parameters
self._initial_hyperparameters = initial_hyperparameters
self._branch = branch
super().__init__(parent=run, metadata=metadata, **kwargs)
if self.branch and not isinstance(self.result, GeneratorType) and not isinstance(self.result, Iterable):
raise ValueError("Can only branch if the computation produces a list or generator of data")
@property
def type(self):
# TODO this should be done via ontology
return str(self.__class__)
@property
def format(self):
# TODO Use Ontology here (Maybe even get this by looking at owlready2)
return self._format if self._format is not None else str(self.__class__)
@property
def run(self):
return self.parent
@property
def component(self):
return self._component
@property
def predecessor(self):
return self._predecessor
@property
def parameters(self):
return self._parameters
@property
def initial_hyperparameters(self):
return self._initial_hyperparameters
@property
def branch(self):
return self._branch
@property
def metrics(self):
return self._metrics
@metrics.setter
def metrics(self, metrics):
self._metrics = metrics
@property
def result(self):
return self._result
@result.setter
def result(self, result):
self._result = result
def iter_result(self):
if self.branch:
return self.result
else:
return [self.result]
|
py | 1a41f2483a10e4c16fd64fbbe138d00c69f1f1fa | #chainer good bc of ACER
#https://github.com/chainer/chainerrl
import numpy as np
import gym
import h4rm0ny
import chainer
import chainer.functions as F
import chainer.links as L
import chainerrl
from chainerrl.action_value import DiscreteActionValue
from chainerrl import links
from chainerrl.agents import acer
from chainerrl.distribution import SoftmaxDistribution
from chainerrl import misc
from chainerrl.optimizers import rmsprop_async
from chainerrl import policies
from chainerrl import q_functions
from chainerrl.replay_buffer import EpisodicReplayBuffer
from chainerrl import v_functions
from chainerrl.initializers import LeCunNormal
from tqdm import tqdm
#creates an ACER agent
def create_acer_agent(env):
#our observation space dimension of malware
obs_dim = env.observation_space.shape[0]
#the list of actions that we can perform on the malware
n_actions = env.action_space.n
#our acer network
#consists of pi (our policy) and our q (our q function)
model = acer.ACERSeparateModel(
pi=links.Sequence(
L.Linear( obs_dim, 1024, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 1024, 512, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 512, n_actions, initialW=LeCunNormal(1e-3)),
SoftmaxDistribution),
q=links.Sequence(
L.Linear( obs_dim, 1024, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 1024, 512, initialW=LeCunNormal(1e-3)),
F.relu,
L.Linear( 512, n_actions, initialW=LeCunNormal(1e-3)),
DiscreteActionValue),
)
#optimizer for the acer
opt = rmsprop_async.RMSpropAsync( lr=7e-4, eps=1e-2, alpha=0.99)
opt.setup( model )
#hook to the chainer model
opt.add_hook( chainer.optimizer.GradientClipping(40) )
replay_buffer = EpisodicReplayBuffer( 128 )
#the agent itself, params from original file
agent = acer.ACER( model, opt,
gamma=0.95, # reward discount factor
t_max=32, # update the model after this many local steps
replay_buffer=replay_buffer,
n_times_replay=4, # number of times experience replay is repeated for each update
replay_start_size=64, # don't start replay unless we have this many experiences in the buffer
disable_online_update=True, # rely only on experience buffer
                       use_trust_region=True, # enable trust region policy optimization
trust_region_delta=0.1, # a parameter for TRPO
truncation_threshold=5.0, # truncate large importance weights
beta=1e-2, # entropy regularization parameter
phi= lambda obs: obs.astype(np.float32, copy=False) )
return agent
class QFunction(chainer.Chain):
def __init__(self, obs_size, n_actions, n_hidden_channels=[1024,256]):
super(QFunction,self).__init__()
net = []
inpdim = obs_size
for i,n_hid in enumerate(n_hidden_channels):
net += [ ('l{}'.format(i), L.Linear( inpdim, n_hid ) ) ]
net += [ ('norm{}'.format(i), L.BatchNormalization( n_hid ) ) ]
net += [ ('_act{}'.format(i), F.relu ) ]
inpdim = n_hid
net += [('output', L.Linear( inpdim, n_actions) )]
with self.init_scope():
for n in net:
if not n[0].startswith('_'):
setattr(self, n[0], n[1])
self.forward = net
def __call__(self, x, test=False):
"""
Args:
x (ndarray or chainer.Variable): An observation
test (bool): a flag indicating whether it is in test mode
"""
for n, f in self.forward:
if not n.startswith('_'):
x = getattr(self, n)(x)
else:
x = f(x)
return chainerrl.action_value.DiscreteActionValue(x)
def create_ddqn_agent(env):
obs_dim = env.observation_space.shape[0]
n_actions = env.action_space.n
q_func = QFunction(obs_dim, n_actions)
optimizer = chainer.optimizers.Adam(eps=1e-2)
optimizer.setup(q_func)
# Set the discount factor that discounts future rewards.
gamma = 0.95
# Use epsilon-greedy for exploration
explorer = chainerrl.explorers.Boltzmann()
# DQN uses Experience Replay.
# Specify a replay buffer and its capacity.
replay_buffer = chainerrl.replay_buffer.ReplayBuffer(capacity=1000)
    # Chainer only accepts numpy.float32 by default, so supply
    # a converter as the feature extractor function phi.
phi = lambda x: x.astype(np.float32, copy=False)
# Now create an agent that will interact with the environment.
# DQN agent as described in Mnih (2013) and Mnih (2015).
# http://arxiv.org/pdf/1312.5602.pdf
# http://arxiv.org/abs/1509.06461
agent = chainerrl.agents.DoubleDQN(
q_func, optimizer, replay_buffer, gamma, explorer,
replay_start_size=32, update_interval=1,
target_update_interval=100, phi=phi)
return agent
import os
def get_latest_model_from(basedir):
dirs = os.listdir(basedir)
lastmodel = -1
for d in dirs:
try:
if int(d) > lastmodel:
lastmodel = int(d)
except ValueError:
continue
assert lastmodel >= 0, "No saved models!"
return os.path.join(basedir, str(lastmodel))
from h4rm0ny.envs.utils import interface, malconv
from h4rm0ny.envs.controls import modifier
import random
ACTION_LOOKUP = {
i: act for i, act in enumerate(modifier.ACTION_TABLE.keys())
}
def gen_dataset(train_path, test_path, agent=None):
if not os.path.exists(test_path):
os.makedirs(test_path)
if not os.path.exists(train_path):
os.makedirs(train_path)
mc = malconv.MalConv()
sha256_train = interface.get_available_sha256('/home/jovyan/Research/malware_rl/rl_train_exp.csv')[:700]
sha256_test = interface.get_available_sha256('/home/jovyan/Research/malware_rl/rl_test_exp.csv')[:300]
print(sha256_train)
def __gen(sha, ouput_path):
for s in tqdm(sha):
if not agent:
action = random.choice(ACTION_LOOKUP)
else:
mal = np.array(mc.extract(interface.fetch_file(s)))
action = ACTION_LOOKUP[agent.act(mal)]
bytez = interface.fetch_file(s)
bytez = modifier.modify_sample(bytez, action)
evade_path = os.path.join(ouput_path, os.path.basename(s))
with open(evade_path, 'wb') as out:
out.write(bytez)
__gen(sha256_train, train_path, )
__gen(sha256_test, test_path, )
#training the ACER agent
def train_agent(rounds=10000, use_score=False, name='result_dir', test_set = "/home/jovyan/Research/malware_rl/sets/test_set", train_set = "/home/jovyan/Research/malware_rl/sets/train_set", create_agent=create_acer_agent, gym_env = "malconv-train-v0", train=True):
if(train):
print("inside train")
if(name == "random"):
gen_dataset(train_set, test_set, agent=None)
return 1
#we are training on the malconv gym
env = gym.make( gym_env )
#setting random seeds so we can reproduce results
np.random.seed(41)
env.seed(41)
#creating our agent
agent = create_agent(env)
#run through training, evaluate and give reward based on outcome
chainerrl.experiments.train_agent_with_evaluation(
agent, env,
steps=rounds, # Train the agent for this many rounds steps
train_max_episode_len=600, # Maximum length of each episodes
eval_interval=10, # Evaluate the agent after every step
eval_n_episodes = 10, #eval every episode
eval_n_steps = None,
save_best_so_far_agent = False,
outdir=name) # Save everything to 'result' directory
gen_dataset(train_set, test_set, agent)
else:
print("not in train")
env = gym.make(gym_env)
agent = create_acer_agent(env)
# pull latest stored model
#last_model_dir = get_latest_model_from("/home/jovyan/Research/malware_rl/" +name + "_1/500_finish")
agent.load( "/home/jovyan/Research/malware_rl/" +name + "_1/500_finish" )
gen_dataset(train_set, test_set, agent)
# evaluating a trained ACER agent
def test(rounds=10, use_score=False, name='result_dir', create_agent=create_acer_agent, gym_env = "malconv-train-v0"):
#we are training on the malconv gym
env = gym.make( gym_env )
#setting random seeds so we can reproduce results
np.random.seed(42)
env.seed(42)
#creating our agent
agent = create_agent(env)
# pull latest stored model
last_model_dir = get_latest_model_from('models/')
agent.load( last_model_dir )
chainerrl.experiments.collect_demonstrations(agent,env,steps = rounds,episodes = 1,outdir = name)
# Save everything to 'result' directory
if __name__ == '__main__':
print("We go")
agent_score = train_agent(rounds=50000, use_score=True, name='models/', create_agent=create_acer_agent) # allow agent to see scores
# models are automatically saved
print("done score")
#use this model if you want to see if the RL can learn against a black box model
    agent_blackbox = train_agent( rounds=50000, use_score=False, name='models/acer_chainer', create_agent=create_acer_agent) # black box
# models are automatically saved
|
py | 1a41f2b3073f3fb9ece0b6e1446078a40a1cdbbc | #!/usr/bin/python
# -*- coding: utf-8 -*-
# for loop iterating over a list
l=range(10)
for x in l:
    print x
# for loop over a dict iterates over the keys by default
d = {'a': 1, 'b': 2, 'c': 3}
for key in d:
    print key
# for loop over a dict, iterating over values
for value in d.itervalues():
    print value
# for loop over a dict, iterating over key and value
for k, v in d.iteritems():
    print k,'--',v
# for loop iterating over a string
for ch in 'ABC':
    print ch
|
py | 1a41f3d1123928090abd729abceb071786fc382f |
import logging, itertools, os
from datetime import date
import astropy.io.ascii as at
import matplotlib.pyplot as plt
from k2spin.config import *
from k2spin import plot
today = date.today().isoformat()
def plot_list(results_list):
"""
"""
res = at.read(base_path+"tables/"+results_list)
f = open("/home/stephanie/my_papers/hyadesk2/figure_sets/f8.tbl","w")
count = 1
for i, epic in enumerate(res["EPIC"]):
logging.info(epic)
outfilename = "ktwo{0}-c04_lc_analysis.png".format(epic)
plot.paper_lcs(epic,res[i])
plt.savefig(base_path+"plot_outputs/"+outfilename,bbox_inches="tight")
if ((epic==2107361051) or (epic==2107361050) or
(epic==210963067) or (epic==2109630670) or
(epic==210675409)):
# Use lc from smaller centroiding box for 210735105
# but daofind lc for 210963067
# 210675409 is too bright but still in my list somehow
# note I never ran 211037886
continue
elif epic==2109630671:
save_epic = 210963067
else:
save_epic = epic
figsetname = "f8_{0}.eps".format(count)
f.write("{0} & EPIC {1}\n".format(figsetname,save_epic))
plt.savefig("/home/stephanie/my_papers/hyadesk2/figure_sets/"+figsetname,bbox_inches="tight")
plt.close("all")
count += 1
f.close()
if __name__=="__main__":
plot_list("c4_lcs_aps_results_2015-12-18_comments.csv")
"""
lc_file = "ktwo210408563-c04.csv"
epic = "210408563"
ap = 5
res = at.read(base_path+"tables/c4_lcs_aps_results_2015-12-18.csv")
plot.paper_lcs(epic,res[4])
plt.savefig("/home/stephanie/my_papers/hyadesk2/sample_lc.eps",
bbox_inches="tight")
plt.savefig("/home/stephanie/Dropbox/plots_for_sharing/sample_lc.png",
bbox_inches="tight")
"""
|
py | 1a41f3f991e678adbf9347d7fbc69b367e5c351e | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
import pytest
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.ext import six
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from tests.support.case import ModuleCase
from tests.support.helpers import slowTest, with_tempdir
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import create_sminion
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
@pytest.mark.windows_whitelisted
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
"""
Validate the state module
"""
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
"""
Normalize the line endings of a file.
"""
with salt.utils.files.fopen(path, "rb") as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, "wb") as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(RUNTIME_VARS.BASE_FILES, "testappend", "firstif")
_reline(destpath)
destpath = os.path.join(RUNTIME_VARS.BASE_FILES, "testappend", "secondif")
_reline(destpath)
if salt.utils.platform.is_windows():
cls.TIMEOUT = 600
# Be sure to have everything sync'ed
sminion = create_sminion()
sminion.functions.saltutil.sync_all()
else:
cls.TIMEOUT = 10
@slowTest
def test_show_highstate(self):
"""
state.show_highstate
"""
high = self.run_function("state.show_highstate")
destpath = os.path.join(RUNTIME_VARS.TMP, "testfile")
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]["__env__"], "base")
@slowTest
def test_show_lowstate(self):
"""
state.show_lowstate
"""
low = self.run_function("state.show_lowstate")
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
@slowTest
def test_show_states(self):
"""
state.show_states
"""
states = self.run_function("state.show_states")
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
states = self.run_function("state.show_states", sorted=False)
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
@slowTest
def test_show_states_missing_sls(self):
"""
Test state.show_states with a sls file
defined in a top file is missing
"""
topfile = os.path.join(RUNTIME_VARS.TMP_STATE_TREE, "top.sls")
with salt.utils.files.fopen(topfile, "w") as top_file:
top_file.write(
textwrap.dedent(
"""\
base:
'*':
- doesnotexist
"""
)
)
states = self.run_function("state.show_states")
assert isinstance(states, list)
assert states == ["No matching sls found for 'doesnotexist' in env 'base'"]
@slowTest
def test_catch_recurse(self):
"""
state.show_sls used to catch a recursive ref
"""
err = self.run_function("state.sls", mods="recurse_fail")
self.assertIn("recursive", err[0])
@slowTest
def test_no_recurse(self):
"""
verify that a sls structure is NOT a recursive ref
"""
sls = self.run_function("state.show_sls", mods="recurse_ok")
self.assertIn("snmpd", sls)
@slowTest
def test_no_recurse_two(self):
"""
verify that a sls structure is NOT a recursive ref
"""
sls = self.run_function("state.show_sls", mods="recurse_ok_two")
self.assertIn("/etc/nagios/nrpe.cfg", sls)
@slowTest
def test_running_dictionary_consistency(self):
"""
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
"""
running_dict_fields = [
"__id__",
"__run_num__",
"__sls__",
"changes",
"comment",
"duration",
"name",
"result",
"start_time",
]
sls = self.run_function(
"state.single", fun="test.succeed_with_changes", name="gndn"
)
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
@slowTest
def test_running_dictionary_key_sls(self):
"""
Ensure the __sls__ key is either null or a string
"""
sls1 = self.run_function(
"state.single", fun="test.succeed_with_changes", name="gndn"
)
sls2 = self.run_function("state.sls", mods="gndn")
for state, ret in sls1.items():
self.assertTrue(isinstance(ret["__sls__"], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret["__sls__"], six.string_types))
def _remove_request_cache_file(self):
"""
remove minion state request file
"""
cache_file = os.path.join(self.get_config("minion")["cachedir"], "req_state.p")
if os.path.exists(cache_file):
os.remove(cache_file)
@slowTest
def test_request(self):
"""
verify sending a state request to the minion(s)
"""
self._remove_request_cache_file()
ret = self.run_function("state.request", mods="modules.state.requested")
result = ret["cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run"]["result"]
self.assertEqual(result, None)
@slowTest
def test_check_request(self):
"""
verify checking a state request sent to the minion(s)
"""
self._remove_request_cache_file()
self.run_function("state.request", mods="modules.state.requested")
ret = self.run_function("state.check_request")
result = ret["default"]["test_run"][
"cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run"
]["result"]
self.assertEqual(result, None)
@slowTest
def test_clear_request(self):
"""
verify clearing a state request sent to the minion(s)
"""
self._remove_request_cache_file()
self.run_function("state.request", mods="modules.state.requested")
ret = self.run_function("state.clear_request")
self.assertTrue(ret)
@slowTest
def test_run_request_succeeded(self):
"""
verify running a state request sent to the minion(s)
"""
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function("state.request", mods="modules.state.requested_win")
else:
self.run_function("state.request", mods="modules.state.requested")
ret = self.run_function("state.run_request")
if salt.utils.platform.is_windows():
key = "cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run"
else:
key = "cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run"
result = ret[key]["result"]
self.assertTrue(result)
@slowTest
def test_run_request_failed_no_request_staged(self):
"""
verify not running a state request sent to the minion(s)
"""
self._remove_request_cache_file()
self.run_function("state.request", mods="modules.state.requested")
self.run_function("state.clear_request")
ret = self.run_function("state.run_request")
self.assertEqual(ret, {})
@with_tempdir()
@slowTest
def test_issue_1896_file_append_source(self, base_dir):
"""
Verify that we can append a file's contents
"""
testfile = os.path.join(base_dir, "test.append")
ret = self.run_state("file.touch", name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
"file.append", name=testfile, source="salt://testappend/firstif"
)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
"file.append", name=testfile, source="salt://testappend/secondif"
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, "r") as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent(
"""\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
"""
)
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
"file.append", name=testfile, source="salt://testappend/secondif"
)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
"file.append", name=testfile, source="salt://testappend/firstif"
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, "r") as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
@slowTest
def test_issue_1876_syntax_error(self):
"""
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
"""
testfile = os.path.join(RUNTIME_VARS.TMP, "issue-1876")
sls = self.run_function("state.sls", mods="issue-1876")
self.assertIn(
"ID '{0}' in SLS 'issue-1876' contains multiple state "
"declarations of the same type".format(testfile),
sls,
)
@slowTest
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent(
"""\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
"""
)
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(RUNTIME_VARS.TMP, "issue-1879")
        # Delete if existing
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function("state.sls", mods="issue-1879", timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function("state.sls", mods="issue-1879.step-1", timeout=120)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function("state.sls", mods="issue-1879.step-2", timeout=120)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, "r") as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function("state.sls", mods="issue-1879.step-1", timeout=120)
self.assertSaltTrueReturn(ret)
ret = self.run_function("state.sls", mods="issue-1879.step-2", timeout=120)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, "r") as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception: # pylint: disable=broad-except
if os.path.exists(testfile):
shutil.copy(testfile, testfile + ".bak")
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
@slowTest
def test_include(self):
tempdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ("include-test", "to-include-test", "exclude-test"):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function("state.sls", mods="include-test", pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar["include-test"]))
self.assertTrue(os.path.isfile(pillar["to-include-test"]))
self.assertFalse(os.path.isfile(pillar["exclude-test"]))
@slowTest
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ("include-test", "exclude-test", "to-include-test"):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function("state.sls", mods="exclude-test", pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar["include-test"]))
self.assertTrue(os.path.isfile(pillar["exclude-test"]))
self.assertFalse(os.path.isfile(pillar["to-include-test"]))
@skipIf(
salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None,
"virtualenv not installed",
)
@slowTest
def test_issue_2068_template_str(self):
venv_dir = os.path.join(RUNTIME_VARS.TMP, "issue-2068-template-str")
try:
ret = self.run_function(
"state.sls", mods="issue-2068-template-str-no-dot", timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"files",
"file",
"base",
"issue-2068-template-str-no-dot.sls",
)
with salt.utils.files.fopen(template_path, "r") as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function("state.template_str", [template], timeout=120)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function("state.template", [template_path], timeout=120)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
"state.sls", mods="issue-2068-template-str", timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"files",
"file",
"base",
"issue-2068-template-str.sls",
)
with salt.utils.files.fopen(template_path, "r") as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function("state.template_str", [template], timeout=120)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function("state.template", [template_path], timeout=120)
self.assertSaltTrueReturn(ret)
@slowTest
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent(
"""\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
"""
)
for item in ("include", "exclude", "extends"):
ret = self.run_function("state.template_str", [TEMPLATE.format(item)])
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
[
"The '{0}' declaration found on '<template-str>' is "
"invalid when rendering single templates".format(item)
],
ret,
)
@slowTest
def test_pydsl(self):
"""
Test the basics of the pydsl
"""
ret = self.run_function("state.sls", mods="pydsl-1")
self.assertSaltTrueReturn(ret)
@slowTest
def test_issues_7905_and_8174_sls_syntax_error(self):
"""
Call sls file with yaml syntax error.
Ensure theses errors are detected and presented to the user without
stack traces.
"""
ret = self.run_function("state.sls", mods="syntax.badlist")
self.assertEqual(
ret, ["State 'A' in SLS 'syntax.badlist' is not formed as a list"]
)
ret = self.run_function("state.sls", mods="syntax.badlist2")
self.assertEqual(
ret, ["State 'C' in SLS 'syntax.badlist2' is not formed as a list"]
)
@slowTest
def test_requisites_mixed_require_prereq_use(self):
"""
Call sls file containing several requisites.
"""
expected_simple_result = {
"cmd_|-A_|-echo A_|-run": {
"__run_num__": 2,
"comment": 'Command "echo A" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B_|-run": {
"__run_num__": 1,
"comment": 'Command "echo B" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C_|-run": {
"__run_num__": 0,
"comment": 'Command "echo C" run',
"result": True,
"changes": True,
},
}
expected_result = {
"cmd_|-A_|-echo A fifth_|-run": {
"__run_num__": 4,
"comment": 'Command "echo A fifth" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B third_|-run": {
"__run_num__": 2,
"comment": 'Command "echo B third" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo C second" run',
"result": True,
"changes": True,
},
"cmd_|-D_|-echo D first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo D first" run',
"result": True,
"changes": True,
},
"cmd_|-E_|-echo E fourth_|-run": {
"__run_num__": 3,
"comment": 'Command "echo E fourth" run',
"result": True,
"changes": True,
},
}
expected_req_use_result = {
"cmd_|-A_|-echo A_|-run": {
"__run_num__": 1,
"comment": 'Command "echo A" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B_|-run": {
"__run_num__": 4,
"comment": 'Command "echo B" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C_|-run": {
"__run_num__": 0,
"comment": 'Command "echo C" run',
"result": True,
"changes": True,
},
"cmd_|-D_|-echo D_|-run": {
"__run_num__": 5,
"comment": 'Command "echo D" run',
"result": True,
"changes": True,
},
"cmd_|-E_|-echo E_|-run": {
"__run_num__": 2,
"comment": 'Command "echo E" run',
"result": True,
"changes": True,
},
"cmd_|-F_|-echo F_|-run": {
"__run_num__": 3,
"comment": 'Command "echo F" run',
"result": True,
"changes": True,
},
}
ret = self.run_function("state.sls", mods="requisites.mixed_simple")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
# ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
# self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
# )
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
# ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
# self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
# )
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
# ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
# self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
# )
# undetected infinite loopS prevents this test from running...
# TODO: this is actually failing badly
# ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
# result = self.normalize_ret(ret)
# self.assertEqual(expected_result, result)
@slowTest
def test_watch_in(self):
"""
test watch_in requisite when there is a success
"""
ret = self.run_function("state.sls", mods="requisites.watch_in")
changes = "test_|-return_changes_|-return_changes_|-succeed_with_changes"
watch = "test_|-watch_states_|-watch_states_|-succeed_without_changes"
self.assertEqual(ret[changes]["__run_num__"], 0)
self.assertEqual(ret[watch]["__run_num__"], 2)
self.assertEqual("Watch statement fired.", ret[watch]["comment"])
self.assertEqual(
"Something pretended to change", ret[changes]["changes"]["testing"]["new"]
)
@slowTest
def test_watch_in_failure(self):
"""
test watch_in requisite when there is a failure
"""
ret = self.run_function("state.sls", mods="requisites.watch_in_failure")
fail = "test_|-return_changes_|-return_changes_|-fail_with_changes"
watch = "test_|-watch_states_|-watch_states_|-succeed_without_changes"
self.assertEqual(False, ret[fail]["result"])
self.assertEqual(
"One or more requisite failed: requisites.watch_in_failure.return_changes",
ret[watch]["comment"],
)
def normalize_ret(self, ret):
"""
Normalize the return to the format that we'll use for result checking
"""
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
"__run_num__": descr["__run_num__"],
"comment": descr["comment"],
"result": descr["result"],
"changes": descr["changes"] != {}, # whether there where any changes
}
return result
@slowTest
def test_requisites_require_ordering_and_errors(self):
"""
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
"""
expected_result = {
"cmd_|-A_|-echo A fifth_|-run": {
"__run_num__": 4,
"comment": 'Command "echo A fifth" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo B second" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C third_|-run": {
"__run_num__": 2,
"comment": 'Command "echo C third" run',
"result": True,
"changes": True,
},
"cmd_|-D_|-echo D first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo D first" run',
"result": True,
"changes": True,
},
"cmd_|-E_|-echo E fourth_|-run": {
"__run_num__": 3,
"comment": 'Command "echo E fourth" run',
"result": True,
"changes": True,
},
"cmd_|-F_|-echo F_|-run": {
"__run_num__": 5,
"comment": "The following requisites were not found:\n"
+ " require:\n"
+ " foobar: A\n",
"result": False,
"changes": False,
},
"cmd_|-G_|-echo G_|-run": {
"__run_num__": 6,
"comment": "The following requisites were not found:\n"
+ " require:\n"
+ " cmd: Z\n",
"result": False,
"changes": False,
},
"cmd_|-H_|-echo H_|-run": {
"__run_num__": 7,
"comment": "The following requisites were not found:\n"
+ " require:\n"
+ " cmd: Z\n",
"result": False,
"changes": False,
},
}
ret = self.run_function("state.sls", mods="requisites.require")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function("state.sls", mods="requisites.require_error1")
self.assertEqual(
ret,
[
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
],
)
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
# And why preventing it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function("state.sls", mods="requisites.require_simple_nolist")
self.assertEqual(
ret,
[
"The require statement in state 'B' in SLS "
+ "'requisites.require_simple_nolist' needs to be formed as a list"
],
)
# commented until a fix is made for issue #8772
# TODO: this test actually fails
# ret = self.run_function('state.sls', mods='requisites.require_error2')
# self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
# ])
ret = self.run_function("state.sls", mods="requisites.require_recursion_error1")
self.assertEqual(
ret,
[
'A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"'
],
)
@slowTest
def test_requisites_require_any(self):
"""
Call sls file containing several states wired together with require_any.
Ensure that the dependent state runs when at least one requisite succeeds and that the order is right.
"""
expected_result = {
"cmd_|-A_|-echo A_|-run": {
"__run_num__": 3,
"comment": 'Command "echo A" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B_|-run": {
"__run_num__": 0,
"comment": 'Command "echo B" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-$(which false)_|-run": {
"__run_num__": 1,
"comment": 'Command "$(which false)" run',
"result": False,
"changes": True,
},
"cmd_|-D_|-echo D_|-run": {
"__run_num__": 2,
"comment": 'Command "echo D" run',
"result": True,
"changes": True,
},
}
ret = self.run_function("state.sls", mods="requisites.require_any")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
@slowTest
def test_requisites_require_any_fail(self):
"""
Call sls file containing require_any where all of the required states fail.
Ensure that the dependent state reports the requisite failure.
"""
ret = self.run_function("state.sls", mods="requisites.require_any_fail")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn(
"One or more requisite failed", result["cmd_|-D_|-echo D_|-run"]["comment"]
)
@slowTest
def test_requisites_watch_any(self):
"""
Call sls file containing several states wired together with watch_any.
Ensure that some of the watched states are failing and that the order is right.
"""
if salt.utils.platform.is_windows():
cmd_true = "exit"
cmd_false = "exit /B 1"
else:
cmd_true = "true"
cmd_false = "false"
expected_result = {
"cmd_|-A_|-{0}_|-wait".format(cmd_true): {
"__run_num__": 4,
"comment": 'Command "{0}" run'.format(cmd_true),
"result": True,
"changes": True,
},
"cmd_|-B_|-{0}_|-run".format(cmd_true): {
"__run_num__": 0,
"comment": 'Command "{0}" run'.format(cmd_true),
"result": True,
"changes": True,
},
"cmd_|-C_|-{0}_|-run".format(cmd_false): {
"__run_num__": 1,
"comment": 'Command "{0}" run'.format(cmd_false),
"result": False,
"changes": True,
},
"cmd_|-D_|-{0}_|-run".format(cmd_true): {
"__run_num__": 2,
"comment": 'Command "{0}" run'.format(cmd_true),
"result": True,
"changes": True,
},
"cmd_|-E_|-{0}_|-wait".format(cmd_true): {
"__run_num__": 9,
"comment": 'Command "{0}" run'.format(cmd_true),
"result": True,
"changes": True,
},
"cmd_|-F_|-{0}_|-run".format(cmd_true): {
"__run_num__": 5,
"comment": 'Command "{0}" run'.format(cmd_true),
"result": True,
"changes": True,
},
"cmd_|-G_|-{0}_|-run".format(cmd_false): {
"__run_num__": 6,
"comment": 'Command "{0}" run'.format(cmd_false),
"result": False,
"changes": True,
},
"cmd_|-H_|-{0}_|-run".format(cmd_false): {
"__run_num__": 7,
"comment": 'Command "{0}" run'.format(cmd_false),
"result": False,
"changes": True,
},
}
ret = self.run_function("state.sls", mods="requisites.watch_any")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
@slowTest
def test_requisites_watch_any_fail(self):
"""
Call sls file containing watch_any where all of the watched states fail.
Ensure that the dependent state reports the requisite failure.
"""
ret = self.run_function("state.sls", mods="requisites.watch_any_fail")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn(
"One or more requisite failed", result["cmd_|-A_|-true_|-wait"]["comment"]
)
@slowTest
def test_requisites_onchanges_any(self):
"""
Call sls file containing several states wired together with onchanges_any.
Ensure that dependents run only when at least one watched state reports changes and that the order is right.
"""
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
"__run_num__": 1,
"changes": True,
"comment": 'Command "echo "Changed!"" run',
"result": True,
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
"__run_num__": 0,
"changes": True,
"comment": 'Command "echo "Changed!"" run',
"result": True,
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
"__run_num__": 4,
"changes": True,
"comment": 'Command "echo "Success!"" run',
"result": True,
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
"__run_num__": 5,
"changes": False,
"comment": "State was not run because none of the onchanges reqs changed",
"result": True,
},
"pip_|-another_non_changing_state_|-mock_|-installed": {
"__run_num__": 3,
"changes": False,
"comment": "Python package mock was already installed\nAll specified packages are already installed",
"result": True,
},
"pip_|-non_changing_state_|-mock_|-installed": {
"__run_num__": 2,
"changes": False,
"comment": "Python package mock was already installed\nAll specified packages are already installed",
"result": True,
},
}
ret = self.run_function("state.sls", mods="requisites.onchanges_any")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
@slowTest
def test_requisites_onfail_any(self):
"""
Call sls file containing several states wired together with onfail_any.
Ensure that dependents run only when at least one watched state fails and that the order is right.
"""
expected_result = {
"cmd_|-a_|-exit 0_|-run": {
"__run_num__": 0,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-b_|-exit 1_|-run": {
"__run_num__": 1,
"changes": True,
"comment": 'Command "exit 1" run',
"result": False,
},
"cmd_|-c_|-exit 0_|-run": {
"__run_num__": 2,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-d_|-echo itworked_|-run": {
"__run_num__": 3,
"changes": True,
"comment": 'Command "echo itworked" run',
"result": True,
},
"cmd_|-e_|-exit 0_|-run": {
"__run_num__": 4,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-f_|-exit 0_|-run": {
"__run_num__": 5,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-g_|-exit 0_|-run": {
"__run_num__": 6,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-h_|-echo itworked_|-run": {
"__run_num__": 7,
"changes": False,
"comment": "State was not run because onfail req did not change",
"result": True,
},
}
ret = self.run_function("state.sls", mods="requisites.onfail_any")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
@slowTest
def test_requisites_onfail_all(self):
"""
Call sls file containing several onfail_all requisites.
Ensure that dependents run only when all of the watched states fail and that the order is right.
"""
expected_result = {
"cmd_|-a_|-exit 0_|-run": {
"__run_num__": 0,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-b_|-exit 0_|-run": {
"__run_num__": 1,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-c_|-exit 0_|-run": {
"__run_num__": 2,
"changes": True,
"comment": 'Command "exit 0" run',
"result": True,
},
"cmd_|-d_|-exit 1_|-run": {
"__run_num__": 3,
"changes": True,
"comment": 'Command "exit 1" run',
"result": False,
},
"cmd_|-e_|-exit 1_|-run": {
"__run_num__": 4,
"changes": True,
"comment": 'Command "exit 1" run',
"result": False,
},
"cmd_|-f_|-exit 1_|-run": {
"__run_num__": 5,
"changes": True,
"comment": 'Command "exit 1" run',
"result": False,
},
"cmd_|-reqs also met_|-echo itonfailed_|-run": {
"__run_num__": 9,
"changes": True,
"comment": 'Command "echo itonfailed" run',
"result": True,
},
"cmd_|-reqs also not met_|-echo italsodidnonfail_|-run": {
"__run_num__": 7,
"changes": False,
"comment": "State was not run because onfail req did not change",
"result": True,
},
"cmd_|-reqs met_|-echo itonfailed_|-run": {
"__run_num__": 8,
"changes": True,
"comment": 'Command "echo itonfailed" run',
"result": True,
},
"cmd_|-reqs not met_|-echo itdidntonfail_|-run": {
"__run_num__": 6,
"changes": False,
"comment": "State was not run because onfail req did not change",
"result": True,
},
}
ret = self.run_function("state.sls", mods="requisites.onfail_all")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
@slowTest
def test_requisites_full_sls(self):
"""
Test the sls special form in requisites
"""
expected_result = {
"cmd_|-A_|-echo A_|-run": {
"__run_num__": 2,
"comment": 'Command "echo A" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B_|-run": {
"__run_num__": 0,
"comment": 'Command "echo B" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C_|-run": {
"__run_num__": 1,
"comment": 'Command "echo C" run',
"result": True,
"changes": True,
},
}
ret = self.run_function("state.sls", mods="requisites.fullsls_require")
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
# ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
# self.assertEqual(['sls command can only be used with require requisite'], ret)
@slowTest
def test_requisites_require_no_state_module(self):
"""
Call sls file containing several require_in and require that reference states by ID only (no state module prefix).
Ensure that some of them are failing and that the order is right.
"""
expected_result = {
"cmd_|-A_|-echo A fifth_|-run": {
"__run_num__": 4,
"comment": 'Command "echo A fifth" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo B second" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C third_|-run": {
"__run_num__": 2,
"comment": 'Command "echo C third" run',
"result": True,
"changes": True,
},
"cmd_|-D_|-echo D first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo D first" run',
"result": True,
"changes": True,
},
"cmd_|-E_|-echo E fourth_|-run": {
"__run_num__": 3,
"comment": 'Command "echo E fourth" run',
"result": True,
"changes": True,
},
"cmd_|-G_|-echo G_|-run": {
"__run_num__": 5,
"comment": "The following requisites were not found:\n"
+ " require:\n"
+ " id: Z\n",
"result": False,
"changes": False,
},
"cmd_|-H_|-echo H_|-run": {
"__run_num__": 6,
"comment": "The following requisites were not found:\n"
+ " require:\n"
+ " id: Z\n",
"result": False,
"changes": False,
},
}
ret = self.run_function("state.sls", mods="requisites.require_no_state_module")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
@slowTest
def test_requisites_prereq_simple_ordering_and_errors(self):
"""
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
"""
expected_result_simple = {
"cmd_|-A_|-echo A third_|-run": {
"__run_num__": 2,
"comment": 'Command "echo A third" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo B first" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo C second" run',
"result": True,
"changes": True,
},
"cmd_|-I_|-echo I_|-run": {
"__run_num__": 3,
"comment": "The following requisites were not found:\n"
+ " prereq:\n"
+ " cmd: Z\n",
"result": False,
"changes": False,
},
"cmd_|-J_|-echo J_|-run": {
"__run_num__": 4,
"comment": "The following requisites were not found:\n"
+ " prereq:\n"
+ " foobar: A\n",
"result": False,
"changes": False,
},
}
expected_result_simple_no_state_module = {
"cmd_|-A_|-echo A third_|-run": {
"__run_num__": 2,
"comment": 'Command "echo A third" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo B first" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo C second" run',
"result": True,
"changes": True,
},
"cmd_|-I_|-echo I_|-run": {
"__run_num__": 3,
"comment": "The following requisites were not found:\n"
+ " prereq:\n"
+ " id: Z\n",
"result": False,
"changes": False,
},
}
expected_result_simple2 = {
"cmd_|-A_|-echo A_|-run": {
"__run_num__": 1,
"comment": 'Command "echo A" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B_|-run": {
"__run_num__": 2,
"comment": 'Command "echo B" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C_|-run": {
"__run_num__": 0,
"comment": 'Command "echo C" run',
"result": True,
"changes": True,
},
"cmd_|-D_|-echo D_|-run": {
"__run_num__": 3,
"comment": 'Command "echo D" run',
"result": True,
"changes": True,
},
"cmd_|-E_|-echo E_|-run": {
"__run_num__": 4,
"comment": 'Command "echo E" run',
"result": True,
"changes": True,
},
}
expected_result_simple3 = {
"cmd_|-A_|-echo A first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo A first" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo B second" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C third_|-wait": {
"__run_num__": 2,
"comment": "",
"result": True,
"changes": False,
},
}
expected_result_complex = {
"cmd_|-A_|-echo A fourth_|-run": {
"__run_num__": 3,
"comment": 'Command "echo A fourth" run',
"result": True,
"changes": True,
},
"cmd_|-B_|-echo B first_|-run": {
"__run_num__": 0,
"comment": 'Command "echo B first" run',
"result": True,
"changes": True,
},
"cmd_|-C_|-echo C second_|-run": {
"__run_num__": 1,
"comment": 'Command "echo C second" run',
"result": True,
"changes": True,
},
"cmd_|-D_|-echo D third_|-run": {
"__run_num__": 2,
"comment": 'Command "echo D third" run',
"result": True,
"changes": True,
},
}
ret = self.run_function("state.sls", mods="requisites.prereq_simple")
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
# expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
# expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
# ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
# result = self.normalize_ret(ret)
# self.assertEqual(expected_result_simple, result)
ret = self.run_function("state.sls", mods="requisites.prereq_simple2")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function("state.sls", mods="requisites.prereq_simple3")
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
# ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
# self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
# )
ret = self.run_function("state.sls", mods="requisites.prereq_compile_error1")
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret["cmd_|-B_|-echo B_|-run"]["comment"],
"The following requisites were not found:\n"
+ " prereq:\n"
+ " foobar: A\n",
)
ret = self.run_function("state.sls", mods="requisites.prereq_compile_error2")
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret["cmd_|-B_|-echo B_|-run"]["comment"],
"The following requisites were not found:\n"
+ " prereq:\n"
+ " foobar: C\n",
)
ret = self.run_function("state.sls", mods="requisites.prereq_complex")
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
# ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
# self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
# )
ret = self.run_function(
"state.sls", mods="requisites.prereq_simple_no_state_module"
)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
@slowTest
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function(
"state.sls", mods="requisites.prereq_sls_infinite_recursion"
)
self.assertSaltTrueReturn(ret)
@slowTest
def test_requisites_use(self):
"""
Call sls file containing several use_in and use.
"""
# TODO issue #8235 & #8774 some examples are still commented in the test file
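# Illustrative sketch of use: the using state inherits the non-requisite arguments of
# the referenced state -- here an onlyif that evaluates false, which is why every
# state below reports "onlyif condition is false". IDs are examples only:
#
#   A:
#     cmd.run:
#       - name: echo A
#       - onlyif: 'false'
#   B:
#     cmd.run:
#       - name: echo B
#       - use:
#         - cmd: A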
ret = self.run_function("state.sls", mods="requisites.use")
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr["comment"], "onlyif condition is false")
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
# if chain-use is added after #8774 resolution these tests would maybe become useful
# ret = self.run_function('state.sls', mods='requisites.use_recursion')
# self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
# ])
# ret = self.run_function('state.sls', mods='requisites.use_recursion2')
# self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
# ])
# ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
# self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
# ])
@slowTest
def test_requisites_use_no_state_module(self):
"""
Call sls file containing several use_in and use.
"""
ret = self.run_function("state.sls", mods="requisites.use_no_state_module")
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr["comment"], "onlyif condition is false")
@slowTest
def test_onlyif_req(self):
ret = self.run_function(
"state.single",
fun="test.succeed_with_changes",
name="onlyif test",
onlyif=[{}],
)["test_|-onlyif test_|-onlyif test_|-succeed_with_changes"]
self.assertTrue(ret["result"])
self.assertEqual(ret["comment"], "Success!")
ret = self.run_function(
"state.single",
fun="test.fail_with_changes",
name="onlyif test",
onlyif=[{"fun": "test.false"}],
)["test_|-onlyif test_|-onlyif test_|-fail_with_changes"]
self.assertTrue(ret["result"])
self.assertFalse(ret["changes"])
self.assertEqual(ret["comment"], "onlyif condition is false")
ret = self.run_function(
"state.single",
fun="test.fail_with_changes",
name="onlyif test",
onlyif=[{"fun": "test.true"}],
)["test_|-onlyif test_|-onlyif test_|-fail_with_changes"]
self.assertFalse(ret["result"])
self.assertTrue(ret["changes"])
self.assertEqual(ret["comment"], "Failure!")
ret = self.run_function(
"state.single",
fun="test.succeed_without_changes",
name="onlyif test",
onlyif=[{"fun": "test.true"}],
)["test_|-onlyif test_|-onlyif test_|-succeed_without_changes"]
self.assertTrue(ret["result"])
self.assertFalse(ret["changes"])
self.assertEqual(ret["comment"], "Success!")
@slowTest
def test_onlyif_req_retcode(self):
ret = self.run_function(
"state.single",
fun="test.succeed_with_changes",
name="onlyif test",
onlyif=[{"fun": "test.retcode"}],
)["test_|-onlyif test_|-onlyif test_|-succeed_with_changes"]
self.assertTrue(ret["result"])
self.assertFalse(ret["changes"])
self.assertEqual(ret["comment"], "onlyif condition is false")
ret = self.run_function(
"state.single",
fun="test.succeed_with_changes",
name="onlyif test",
onlyif=[{"fun": "test.retcode", "code": 0}],
)["test_|-onlyif test_|-onlyif test_|-succeed_with_changes"]
self.assertTrue(ret["result"])
self.assertTrue(ret["changes"])
self.assertEqual(ret["comment"], "Success!")
@slowTest
def test_unless_req(self):
ret = self.run_function(
"state.single",
fun="test.succeed_with_changes",
name="unless test",
unless=[{}],
)["test_|-unless test_|-unless test_|-succeed_with_changes"]
self.assertTrue(ret["result"])
self.assertEqual(ret["comment"], "Success!")
ret = self.run_function(
"state.single",
fun="test.fail_with_changes",
name="unless test",
unless=[{"fun": "test.true"}],
)["test_|-unless test_|-unless test_|-fail_with_changes"]
self.assertTrue(ret["result"])
self.assertFalse(ret["changes"])
self.assertEqual(ret["comment"], "unless condition is true")
ret = self.run_function(
"state.single",
fun="test.fail_with_changes",
name="unless test",
unless=[{"fun": "test.false"}],
)["test_|-unless test_|-unless test_|-fail_with_changes"]
self.assertFalse(ret["result"])
self.assertTrue(ret["changes"])
self.assertEqual(ret["comment"], "Failure!")
ret = self.run_function(
"state.single",
fun="test.succeed_without_changes",
name="unless test",
unless=[{"fun": "test.false"}],
)["test_|-unless test_|-unless test_|-succeed_without_changes"]
self.assertTrue(ret["result"])
self.assertFalse(ret["changes"])
self.assertEqual(ret["comment"], "Success!")
@slowTest
def test_unless_req_retcode(self):
ret = self.run_function(
"state.single",
fun="test.succeed_with_changes",
name="unless test",
unless=[{"fun": "test.retcode"}],
)["test_|-unless test_|-unless test_|-succeed_with_changes"]
self.assertTrue(ret["result"])
self.assertTrue(ret["changes"])
self.assertEqual(ret["comment"], "Success!")
ret = self.run_function(
"state.single",
fun="test.succeed_with_changes",
name="unless test",
unless=[{"fun": "test.retcode", "code": 0}],
)["test_|-unless test_|-unless test_|-succeed_with_changes"]
self.assertTrue(ret["result"])
self.assertFalse(ret["changes"])
self.assertEqual(ret["comment"], "unless condition is true")
@slowTest
@pytest.mark.usefixtures("salt_sub_minion")
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(RUNTIME_VARS.TMP, "prod-cheese-file")
try:
ret = self.run_function("state.highstate", minion_tgt="sub_minion")
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, "r") as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn("Gromit", data)
self.assertIn("Comte", data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
@slowTest
def test_onchanges_requisite(self):
"""
Tests a simple state using the onchanges requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.onchanges_simple")
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run[
'cmd_|-test_non_changing_state_|-echo "Should not run"_|-run'
]["comment"]
expected_result = "State was not run because none of the onchanges reqs changed"
self.assertIn(expected_result, test_data)
@slowTest
def test_onchanges_requisite_multiple(self):
"""
Tests a state using the onchanges requisite with multiple watched states
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.onchanges_multiple")
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run[
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run'
]["comment"]
expected_result = "State was not run because none of the onchanges reqs changed"
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
@slowTest
def test_onchanges_in_requisite(self):
"""
Tests a simple state using the onchanges_in requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function(
"state.sls", mods="requisites.onchanges_in_simple"
)
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run[
'cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run'
]["comment"]
expected_result = "State was not run because none of the onchanges reqs changed"
self.assertIn(expected_result, test_data)
@slowTest
def test_onchanges_requisite_no_state_module(self):
"""
Tests a simple state using the onchanges requisite without state modules
"""
# Only run the state once and keep the return data
state_run = self.run_function(
"state.sls", mods="requisites.onchanges_simple_no_state_module"
)
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
@slowTest
def test_onchanges_requisite_with_duration(self):
"""
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.onchanges_simple")
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run[
'cmd_|-test_non_changing_state_|-echo "Should not run"_|-run'
]
self.assertIn("duration", test_data)
# onfail tests
@slowTest
def test_onfail_requisite(self):
"""
Tests a simple state using the onfail requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.onfail_simple")
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run[
'cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'
]["comment"]
expected_result = "State was not run because onfail req did not change"
self.assertIn(expected_result, test_data)
@slowTest
def test_multiple_onfail_requisite(self):
"""
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
"""
state_run = self.run_function(
"state.sls", mods="requisites.onfail_multiple", timeout=self.TIMEOUT
)
retcode = state_run["cmd_|-c_|-echo itworked_|-run"]["changes"]["retcode"]
self.assertEqual(retcode, 0)
stdout = state_run["cmd_|-c_|-echo itworked_|-run"]["changes"]["stdout"]
self.assertEqual(stdout, "itworked")
@slowTest
def test_onfail_in_requisite(self):
"""
Tests a simple state using the onfail_in requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.onfail_in_simple")
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run[
'cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'
]["comment"]
expected_result = "State was not run because onfail req did not change"
self.assertIn(expected_result, test_data)
@slowTest
def test_onfail_requisite_no_state_module(self):
"""
Tests a simple state using the onfail requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function(
"state.sls", mods="requisites.onfail_simple_no_state_module"
)
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run'][
"comment"
]
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run[
'cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'
]["comment"]
expected_result = "State was not run because onfail req did not change"
self.assertIn(expected_result, test_data)
@slowTest
def test_onfail_requisite_with_duration(self):
"""
Tests a simple state using the onfail requisite and ensures duration is included in the results
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.onfail_simple")
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run[
'cmd_|-test_non_failing_state_|-echo "Should not run"_|-run'
]
self.assertIn("duration", test_data)
@slowTest
def test_multiple_onfail_requisite_with_required(self):
"""
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
"""
state_run = self.run_function(
"state.sls", mods="requisites.onfail_multiple_required"
)
retcode = state_run["cmd_|-b_|-echo b_|-run"]["changes"]["retcode"]
self.assertEqual(retcode, 0)
retcode = state_run["cmd_|-c_|-echo c_|-run"]["changes"]["retcode"]
self.assertEqual(retcode, 0)
retcode = state_run["cmd_|-d_|-echo d_|-run"]["changes"]["retcode"]
self.assertEqual(retcode, 0)
stdout = state_run["cmd_|-b_|-echo b_|-run"]["changes"]["stdout"]
self.assertEqual(stdout, "b")
stdout = state_run["cmd_|-c_|-echo c_|-run"]["changes"]["stdout"]
self.assertEqual(stdout, "c")
stdout = state_run["cmd_|-d_|-echo d_|-run"]["changes"]["stdout"]
self.assertEqual(stdout, "d")
comment = state_run["cmd_|-e_|-echo e_|-run"]["comment"]
self.assertEqual(comment, "State was not run because onfail req did not change")
stdout = state_run["cmd_|-f_|-echo f_|-run"]["changes"]["stdout"]
self.assertEqual(stdout, "f")
@slowTest
def test_multiple_onfail_requisite_with_required_no_run(self):
"""
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
"""
state_run = self.run_function(
"state.sls", mods="requisites.onfail_multiple_required_no_run"
)
expected = "State was not run because onfail req did not change"
stdout = state_run["cmd_|-b_|-echo b_|-run"]["comment"]
self.assertEqual(stdout, expected)
stdout = state_run["cmd_|-c_|-echo c_|-run"]["comment"]
self.assertEqual(stdout, expected)
stdout = state_run["cmd_|-d_|-echo d_|-run"]["comment"]
self.assertEqual(stdout, expected)
# listen tests
@slowTest
def test_listen_requisite(self):
"""
Tests a simple state using the listen requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.listen_simple")
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
@slowTest
def test_listen_in_requisite(self):
"""
Tests a simple state using the listen_in requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.listen_in_simple")
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
@slowTest
def test_listen_in_requisite_resolution(self):
"""
Verify listen_in requisite lookups use ID declaration to check for changes
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.listen_in_simple")
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
@slowTest
def test_listen_requisite_resolution(self):
"""
Verify listen requisite lookups use ID declaration to check for changes
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.listen_simple")
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
@slowTest
def test_listen_requisite_no_state_module(self):
"""
Tests a simple state using the listen requisite
"""
# Only run the state once and keep the return data
state_run = self.run_function(
"state.sls", mods="requisites.listen_simple_no_state_module"
)
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
@slowTest
def test_listen_in_requisite_resolution_names(self):
"""
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
"""
# Only run the state once and keep the return data
state_run = self.run_function("state.sls", mods="requisites.listen_in_names")
self.assertIn("test_|-listener_service_|-nginx_|-mod_watch", state_run)
self.assertIn("test_|-listener_service_|-crond_|-mod_watch", state_run)
@slowTest
def test_listen_requisite_resolution_names(self):
"""
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
"""
# Only run the state once and keep the return data
state_run = self.run_function(
"state.sls", mods="requisites.listen_names", timeout=self.TIMEOUT
)
self.assertIn("test_|-listener_service_|-nginx_|-mod_watch", state_run)
self.assertIn("test_|-listener_service_|-crond_|-mod_watch", state_run)
@slowTest
def test_issue_30820_requisite_in_match_by_name(self):
"""
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
"""
state_run = self.run_function(
"state.sls", mods="requisites.requisite_in_match_by_name"
)
bar_state = "cmd_|-bar state_|-echo bar_|-wait"
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]["comment"], 'Command "echo bar" run')
@slowTest
def test_retry_option_defaults(self):
"""
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
"""
state_run = self.run_function("state.sls", mods="retry.retry_defaults")
retry_state = "file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists"
expected_comment = (
'Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
"Specified path /path/to/a/non-existent/file.txt does not exist"
)
self.assertEqual(state_run[retry_state]["comment"], expected_comment)
self.assertTrue(state_run[retry_state]["duration"] > 30)
self.assertEqual(state_run[retry_state]["result"], False)
@slowTest
def test_retry_option_custom(self):
"""
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
"""
state_run = self.run_function("state.sls", mods="retry.retry_custom")
retry_state = "file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists"
expected_comment = (
'Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
" /path/to/a/non-existent/file.txt does not exist"
)
self.assertEqual(state_run[retry_state]["comment"], expected_comment)
self.assertTrue(state_run[retry_state]["duration"] > 40)
self.assertEqual(state_run[retry_state]["result"], False)
@slowTest
def test_retry_option_success(self):
"""
test a state with the retry option that should return True immediately (i.e. no retries)
"""
testfile = os.path.join(RUNTIME_VARS.TMP, "retry_file_option_success")
state_run = self.run_function("state.sls", mods="retry.retry_success")
os.unlink(testfile)
retry_state = "file_|-file_test_|-{0}_|-exists".format(testfile)
self.assertNotIn("Attempt", state_run[retry_state]["comment"])
def run_create(self, testfile):
"""
helper function to wait 30 seconds and then create the temp retry file
"""
# Wait for the requisite state 'file_test_a' to complete before creating
# test_file
while True:
if os.path.exists(testfile + "_a"):
break
time.sleep(1)
time.sleep(30)
with salt.utils.files.fopen(testfile, "a"):
pass
@slowTest
def test_retry_option_eventual_success(self):
"""
test a state with the retry option that should return True after at least 4 retry attempts
but never reach 15 attempts
"""
testfile = os.path.join(RUNTIME_VARS.TMP, "retry_file_eventual_success")
assert not os.path.exists(testfile + "_a")
assert not os.path.exists(testfile)
create_thread = threading.Thread(target=self.run_create, args=(testfile,))
create_thread.start()
state_run = self.run_function("state.sls", mods="retry.retry_success2")
retry_state = "file_|-file_test_b_|-{0}_|-exists".format(testfile)
self.assertIn("Attempt 1:", state_run[retry_state]["comment"])
self.assertIn("Attempt 2:", state_run[retry_state]["comment"])
self.assertIn("Attempt 3:", state_run[retry_state]["comment"])
self.assertIn("Attempt 4:", state_run[retry_state]["comment"])
self.assertNotIn("Attempt 15:", state_run[retry_state]["comment"])
self.assertEqual(state_run[retry_state]["result"], True)
@slowTest
def test_issue_38683_require_order_failhard_combination(self):
"""
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, which used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
"""
state_run = self.run_function(
"state.sls", mods="requisites.require_order_failhard_combo"
)
state_id = "test_|-b_|-b_|-fail_with_changes"
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]["comment"], "Failure!")
self.assertFalse(state_run[state_id]["result"])
@slowTest
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
"""
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
"""
state_run = self.run_function("state.sls", mods="issue-46762")
state_id = "test_|-a_|-a_|-fail_without_changes"
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]["comment"], "Failure!")
self.assertFalse(state_run[state_id]["result"])
state_id = "test_|-b_|-b_|-nop"
self.assertIn(state_id, state_run)
self.assertEqual(
state_run[state_id]["comment"],
"One or more requisite failed: issue-46762.c",
)
self.assertFalse(state_run[state_id]["result"])
state_id = "test_|-c_|-c_|-nop"
self.assertIn(state_id, state_run)
self.assertEqual(
state_run[state_id]["comment"],
"One or more requisite failed: issue-46762.a",
)
self.assertFalse(state_run[state_id]["result"])
@slowTest
def test_state_nonbase_environment(self):
"""
test state.sls with saltenv using a nonbase environment
with a salt source
"""
filename = os.path.join(RUNTIME_VARS.TMP, "nonbase_env")
try:
ret = self.run_function("state.sls", mods="non-base-env", saltenv="prod")
ret = ret[next(iter(ret))]
assert ret["result"]
assert ret["comment"] == "File {0} updated".format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(
sys.platform.startswith("win"),
"Skipped until parallel states can be fixed on Windows",
)
@skipIf(
salt.utils.platform.is_darwin() and six.PY2, "This test hangs on OS X on Py2"
)
@slowTest
def test_parallel_state_with_long_tag(self):
"""
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
"""
short_command = "helloworld"
long_command = short_command * 25
ret = self.run_function(
"state.sls",
mods="issue-49738",
pillar={"short_command": short_command, "long_command": long_command},
)
comments = sorted([x["comment"] for x in six.itervalues(ret)])
expected = sorted(
['Command "{0}" run'.format(x) for x in (short_command, long_command)]
)
assert comments == expected, "{0} != {1}".format(comments, expected)
def _add_runtime_pillar(self, pillar):
"""
helper method to add pillar data at runtime
"""
import salt.utils.yaml
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_PILLAR_TREE, "pillar.sls"), "w"
) as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_PILLAR_TREE, "top.sls"), "w"
) as fp:
fp.write(
textwrap.dedent(
"""\
base:
'*':
- pillar
"""
)
)
self.run_function("saltutil.refresh_pillar")
self.run_function("test.sleep", [5])
@slowTest
def test_state_sls_id_test(self):
"""
test state.sls_id when test is set
to true in pillar data
"""
self._add_runtime_pillar(pillar={"test": True})
testfile = os.path.join(RUNTIME_VARS.TMP, "testfile")
comment = "The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.".format(
testfile
)
ret = self.run_function("state.sls", ["core"])
for key, val in ret.items():
self.assertEqual(val["comment"], comment)
self.assertEqual(val["changes"], {"newfile": testfile})
@slowTest
def test_state_sls_id_test_state_test_post_run(self):
"""
test state.sls_id when test is set to
true post the state already being run previously
"""
file_name = os.path.join(RUNTIME_VARS.TMP, "testfile")
ret = self.run_function("state.sls", ["core"])
for key, val in ret.items():
self.assertEqual(val["comment"], "File {0} updated".format(file_name))
self.assertEqual(val["changes"]["diff"], "New file")
self._add_runtime_pillar(pillar={"test": True})
ret = self.run_function("state.sls", ["core"])
for key, val in ret.items():
self.assertEqual(
val["comment"], "The file {0} is in the correct state".format(file_name)
)
self.assertEqual(val["changes"], {})
@slowTest
def test_state_sls_id_test_true(self):
"""
test state.sls_id when test=True is passed as arg
"""
file_name = os.path.join(RUNTIME_VARS.TMP, "testfile")
ret = self.run_function("state.sls", ["core"], test=True)
for key, val in ret.items():
self.assertEqual(
val["comment"],
"The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.".format(
file_name
),
)
self.assertEqual(val["changes"], {"newfile": file_name})
@slowTest
def test_state_sls_id_test_true_post_run(self):
"""
test state.sls_id when test is set to true as an
arg post the state already being run previously
"""
file_name = os.path.join(RUNTIME_VARS.TMP, "testfile")
ret = self.run_function("state.sls", ["core"])
for key, val in ret.items():
self.assertEqual(val["comment"], "File {0} updated".format(file_name))
self.assertEqual(val["changes"]["diff"], "New file")
ret = self.run_function("state.sls", ["core"], test=True)
for key, val in ret.items():
self.assertEqual(
val["comment"], "The file {0} is in the correct state".format(file_name)
)
self.assertEqual(val["changes"], {})
@slowTest
def test_state_sls_id_test_false_pillar_true(self):
"""
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
"""
file_name = os.path.join(RUNTIME_VARS.TMP, "testfile")
self._add_runtime_pillar(pillar={"test": True})
ret = self.run_function("state.sls", ["core"], test=False)
for key, val in ret.items():
self.assertEqual(val["comment"], "File {0} updated".format(file_name))
self.assertEqual(val["changes"]["diff"], "New file")
def test_state_test_pillar_false(self):
"""
test state.test forces test kwarg to True even when pillar is set to False
"""
self._add_runtime_pillar(pillar={"test": False})
testfile = os.path.join(RUNTIME_VARS.TMP, "testfile")
comment = "The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.".format(
testfile
)
ret = self.run_function("state.test", ["core"])
self.assertIsInstance(ret, dict)
for key, val in ret.items():
self.assertEqual(val["comment"], comment)
self.assertEqual(val["changes"], {"newfile": testfile})
def test_state_test_test_false_pillar_false(self):
"""
test state.test forces test kwarg to True even when pillar and kwarg are set
to False
"""
self._add_runtime_pillar(pillar={"test": False})
testfile = os.path.join(RUNTIME_VARS.TMP, "testfile")
comment = "The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.".format(
testfile
)
ret = self.run_function("state.test", ["core"], test=False)
for key, val in ret.items():
self.assertEqual(val["comment"], comment)
self.assertEqual(val["changes"], {"newfile": testfile})
@skipIf(
six.PY3 and salt.utils.platform.is_darwin(), "Test is broken on macosx and PY3"
)
@slowTest
def test_issue_30161_unless_and_onlyif_together(self):
"""
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
"""
sls = self.run_function("state.sls", mods="issue-30161")
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless condition is true", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
_expected = {
"file_|-unless_false_onlyif_false_|-{0}{1}test.txt_|-managed".format(
RUNTIME_VARS.TMP, os.path.sep
): {
"comment": "onlyif condition is false\nunless condition is false",
"name": "{0}{1}test.txt".format(RUNTIME_VARS.TMP, os.path.sep),
"skip_watch": True,
"changes": {},
"result": True,
},
"file_|-unless_false_onlyif_true_|-{0}{1}test.txt_|-managed".format(
RUNTIME_VARS.TMP, os.path.sep
): {
"comment": "Empty file",
"name": "{0}{1}test.txt".format(RUNTIME_VARS.TMP, os.path.sep),
"start_time": "18:10:20.341753",
"result": True,
"changes": {
"new": "file {0}{1}test.txt created".format(
RUNTIME_VARS.TMP, os.path.sep
)
},
},
"file_|-unless_true_onlyif_false_|-{0}{1}test.txt_|-managed".format(
RUNTIME_VARS.TMP, os.path.sep
): {
"comment": "onlyif condition is false\nunless condition is true",
"name": "{0}{1}test.txt".format(RUNTIME_VARS.TMP, os.path.sep),
"start_time": "18:10:22.936446",
"skip_watch": True,
"changes": {},
"result": True,
},
"file_|-unless_true_onlyif_true_|-{0}{1}test.txt_|-managed".format(
RUNTIME_VARS.TMP, os.path.sep
): {
"comment": "onlyif condition is true\nunless condition is true",
"name": "{0}{1}test.txt".format(RUNTIME_VARS.TMP, os.path.sep),
"skip_watch": True,
"changes": {},
"result": True,
},
}
for id in _expected:
self.assertEqual(sls[id]["comment"], _expected[id]["comment"])
@skipIf(
six.PY3 and salt.utils.platform.is_darwin(), "Test is broken on macosx and PY3"
)
@slowTest
def test_state_sls_unicode_characters(self):
"""
test state.sls when state file contains non-ascii characters
"""
ret = self.run_function("state.sls", ["issue-46672"])
log.debug("== ret %s ==", type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
@skipIf(
six.PY3 and salt.utils.platform.is_darwin(), "Test is broken on macosx and PY3"
)
@slowTest
def test_state_sls_unicode_characters_cmd_output(self):
"""
test the output from running and echo command with non-ascii
characters.
"""
ret = self.run_function("state.sls", ["issue-46672-a"], timeout=60)
key = list(ret.keys())[0]
log.debug("== ret %s ==", type(ret))
_expected = "This is Æ test!"
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]["changes"]["stdout"])
def tearDown(self):
rm_files = [
os.path.join(RUNTIME_VARS.TMP, "nonbase_env"),
os.path.join(RUNTIME_VARS.TMP, "testfile"),
os.path.join(RUNTIME_VARS.TMP, "test.txt"),
os.path.join(RUNTIME_VARS.TMP_STATE_TREE, "top.sls"),
]
for file_ in rm_files:
if os.path.isfile(file_):
os.remove(file_)
# remove old pillar data
for filename in os.listdir(RUNTIME_VARS.TMP_PILLAR_TREE):
os.remove(os.path.join(RUNTIME_VARS.TMP_PILLAR_TREE, filename))
self.run_function("saltutil.refresh_pillar")
self.run_function("test.sleep", [5])
@slowTest
def test_state_sls_integer_name(self):
"""
This tests the case where the state file is named
only with integers
"""
state_run = self.run_function("state.sls", mods="12345")
state_id = "test_|-always-passes_|-always-passes_|-succeed_without_changes"
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]["comment"], "Success!")
self.assertTrue(state_run[state_id]["result"])
@slowTest
def test_state_sls_lazyloader_allows_recursion(self):
"""
This tests that referencing dunders like __salt__ work
context: https://github.com/saltstack/salt/pull/51499
"""
state_run = self.run_function("state.sls", mods="issue-51499")
state_id = "test_|-always-passes_|-foo_|-succeed_without_changes"
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]["comment"], "Success!")
self.assertTrue(state_run[state_id]["result"])
@slowTest
def test_issue_56131(self):
module_path = os.path.join(RUNTIME_VARS.CODE_DIR, "pip.py")
if six.PY3:
modulec_path = os.path.join(RUNTIME_VARS.CODE_DIR, "__pycache__", "pip.pyc")
else:
modulec_path = os.path.join(RUNTIME_VARS.CODE_DIR, "pip.pyc")
unzip_path = os.path.join(RUNTIME_VARS.TMP, "issue-56131.txt")
def clean_paths(paths):
for path in paths:
try:
os.remove(path)
except OSError:
log.warn("Path not found: %s", path)
with salt.utils.files.fopen(module_path, "w") as fp:
fp.write('raise ImportError("No module named pip")')
self.addCleanup(clean_paths, [unzip_path, module_path, modulec_path])
assert not os.path.exists(unzip_path)
state_run = self.run_function(
"state.sls",
mods="issue-56131",
pillar={"unzip_to": RUNTIME_VARS.TMP},
timeout=30,
)
assert state_run is not False
assert os.path.exists(unzip_path)
@slowTest
def test_jinja_renderer_argline(self):
"""
This is a test case for https://github.com/saltstack/salt/issues/55124
Renderer for this is in tests/integration/files/file/base/_renderers/issue55124.py
"""
result = self.run_function("state.sls", mods="issue-55124")
assert isinstance(result, dict), result
result = result[next(iter(result))]
assert result["result"], result