<filename>Basic Operations On Images/opencv_basic_operations_on_images.py
import numpy as np
import cv2
image = cv2.imread('../images/messi5.jpg')
logo = cv2.imread('../images/opencv-logo.png')
print(image.shape)  # returns a tuple of (rows, columns, channels)
print(image.size)  # returns the total number of pixel values (rows * columns * channels)
print(image.dtype)  # returns the image datatype (e.g. uint8)
b, g, r = cv2.split(image) # splits the channels of the image
image = cv2.merge((b, g, r)) # merges the channels of the image
'''
Coordinates of the ball (ROI - Region Of Interest):
numpy slicing is image[y1:y2, x1:x2], so this selects
rows (Y) 280-340 and columns (X) 330-390,
i.e. upper left (X: 330, Y: 280) and lower right (X: 390, Y: 340)
'''
ball = image[280:340, 330:390]
image[273:333, 100:160] = ball  # paste a copy of the ball at rows 273-333, columns 100-160
# images must be resized to the same dimensions before adding
image = cv2.resize(image, (512, 512))
logo = cv2.resize(logo, (512, 512))
# dst = cv2.add(image, logo)  # add two images with equal weight (saturating addition)
dst = cv2.addWeighted(image, 0.9, logo, 0.1, 0)  # blend two images with specific weights (image is dominant)
cv2.imshow('image', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
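# --- Illustrative aside (not part of the original script) ---
# A minimal sketch of why cv2.add / cv2.addWeighted are used above instead of
# plain numpy '+': OpenCV addition saturates at 255 for uint8 images, while
# numpy addition wraps around modulo 256. The arrays x and y below are made
# up purely for this demonstration.
x = np.uint8([250])
y = np.uint8([10])
print(cv2.add(x, y))  # [[255]] -> 250 + 10 is clipped to the uint8 maximum
print(x + y)          # [4]     -> 250 + 10 = 260 wraps around to 4
# cv2.addWeighted(src1, alpha, src2, beta, gamma) computes
# src1*alpha + src2*beta + gamma with the same saturation, which is why the
# blend above keeps 'image' dominant with alpha=0.9 and beta=0.1.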
<filename>_2020/d13-bustimetable.py
"""
https://adventofcode.com/2020/day/13
"""
from base import base
import math
import unittest
class MyTestCase(unittest.TestCase):
fInput1 = "input2020_13a.txt"
fTest1a = "test2020_13a.txt"
fTest1b = "test2020_13b.txt"
fTest1c = "test2020_13c.txt"
fTest1d = "test2020_13d.txt"
fTest1e = "test2020_13e.txt"
fTest1f = "test2020_13f.txt"
def test_firstTimestamp_fInput1(self):
result = findTimestamp(self.fInput1)
print("Part 2: {}".format(result))
self.assertEqual(534035653563227, result)
def test_firstTimestamp_fTest1a(self):
self.assertEqual(1068781, findTimestamp(self.fTest1a))
def test_firstTimestamp_fTest1b(self):
self.assertEqual(3417, findTimestamp(self.fTest1b))
def test_firstTimestamp_fTest1c(self):
self.assertEqual(754018, findTimestamp(self.fTest1c))
def test_firstTimestamp_fTest1d(self):
self.assertEqual(779210, findTimestamp(self.fTest1d))
def test_firstTimestamp_fTest1e(self):
self.assertEqual(1261476, findTimestamp(self.fTest1e))
def test_firstTimestamp_fTest1f(self):
self.assertEqual(1202161486, findTimestamp(self.fTest1f))
def test_nextBus_Input1(self):
result = nextBus(self.fInput1)[0]
print("Part 1: {}".format(result))
self.assertEqual(2165, result)
def test_nextBus_fTest1(self):
self.assertEqual(295, nextBus(self.fTest1a)[0])
def chineseRemainderSolution(modulos):
"""
https://mathworld.wolfram.com/ChineseRemainderTheorem.html
https://en.wikipedia.org/wiki/Chinese_remainder_theorem
:param modulos: list of tuples
:return:
"""
pass
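# --- Illustrative aside (hypothetical helper, not called anywhere in this module) ---
# A minimal sketch of the sieving approach that findTimestamp() below relies on:
# keep a candidate that satisfies the congruences seen so far and step by the
# lcm of their moduli until the next congruence is also satisfied.
def crt_by_sieving(congruences):
    """congruences: list of (modulus, remainder) tuples; returns the smallest
    non-negative x with x % modulus == remainder for every pair."""
    candidate, step = 0, 1
    for modulus, remainder in congruences:
        while candidate % modulus != remainder % modulus:
            candidate += step
        step = math.lcm(step, modulus)
    return candidate
# e.g. crt_by_sieving([(3, 2), (5, 3), (7, 2)]) returns 23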
def nextBus(fileName):
shortestWait = float('inf')
idNearest = None
timestamp = None
    for stamp in base.getInputLines(fileName, delimiter=','):
        if stamp == 'x':
            pass
        elif timestamp:
            # timestamp holds the negated earliest departure time, so
            # (-earliest) % bus_id is the wait until this bus next departs
            delay = timestamp % int(stamp)
            if delay < shortestWait:
                shortestWait = delay
                idNearest = int(stamp)
        else:
            # the first token of the input is the earliest departure time;
            # store it negated so the modulo above yields the wait directly
            timestamp = -int(stamp)
return (shortestWait * idNearest, shortestWait, idNearest)
def findTimestamp(fileName):
ids = readIDs(fileName)
buses = list(zip(*ids))[0]
candidate = ids[0][0] - ids[0][1]
increment = ids[0][0]
    for bus, offset in ids[1:]:
        # step the candidate by the lcm of the moduli satisfied so far until
        # this bus also departs 'offset' minutes after the candidate timestamp
        while -candidate % bus != offset % bus:
            candidate += increment
        increment = math.lcm(increment, bus)
return candidate
def readIDs(fileName):
firstIgnored = False
ids = []
offset = 0
for stamp in base.getInputLines(fileName, delimiter=','):
if not firstIgnored:
firstIgnored = True
elif stamp == 'x':
offset += 1
else:
ids.append((int(stamp), offset))
offset += 1
return ids
if __name__ == '__main__':
unittest.main()
| """
https://adventofcode.com/2020/day/13
"""
from base import base
import math
import unittest
class MyTestCase(unittest.TestCase):
fInput1 = "input2020_13a.txt"
fTest1a = "test2020_13a.txt"
fTest1b = "test2020_13b.txt"
fTest1c = "test2020_13c.txt"
fTest1d = "test2020_13d.txt"
fTest1e = "test2020_13e.txt"
fTest1f = "test2020_13f.txt"
def test_firstTimestamp_fInput1(self):
result = findTimestamp(self.fInput1)
print("Part 2: {}".format(result))
self.assertEqual(534035653563227, result)
def test_firstTimestamp_fTest1a(self):
self.assertEqual(1068781, findTimestamp(self.fTest1a))
def test_firstTimestamp_fTest1b(self):
self.assertEqual(3417, findTimestamp(self.fTest1b))
def test_firstTimestamp_fTest1c(self):
self.assertEqual(754018, findTimestamp(self.fTest1c))
def test_firstTimestamp_fTest1d(self):
self.assertEqual(779210, findTimestamp(self.fTest1d))
def test_firstTimestamp_fTest1e(self):
self.assertEqual(1261476, findTimestamp(self.fTest1e))
def test_firstTimestamp_fTest1f(self):
self.assertEqual(1202161486, findTimestamp(self.fTest1f))
def test_nextBus_Input1(self):
result = nextBus(self.fInput1)[0]
print("Part 1: {}".format(result))
self.assertEqual(2165, result)
def test_nextBus_fTest1(self):
self.assertEqual(295, nextBus(self.fTest1a)[0])
def chineseRemainderSolution(modulos):
"""
https://mathworld.wolfram.com/ChineseRemainderTheorem.html
https://en.wikipedia.org/wiki/Chinese_remainder_theorem
:param modulos: list of tuples
:return:
"""
pass
def nextBus(fileName):
shortestWait = float('inf')
idNearest = None
timestamp = None
for stamp in base.getInputLines(fileName, delimiter=','):
if stamp == 'x':
pass
elif timestamp:
delay = timestamp % int(stamp)
if delay < shortestWait:
shortestWait = delay
idNearest = int(stamp)
else:
timestamp = -int(stamp)
return (shortestWait * idNearest, shortestWait, idNearest)
def findTimestamp(fileName):
ids = readIDs(fileName)
buses = list(zip(*ids))[0]
candidate = ids[0][0] - ids[0][1]
increment = ids[0][0]
for bus, offset in ids[1:]:
while -candidate % bus != offset % bus:
oldOffset = -candidate % bus
candidate += increment
newOffset = -candidate % bus
increment = math.lcm(increment, bus)
return candidate
def readIDs(fileName):
firstIgnored = False
ids = []
offset = 0
for stamp in base.getInputLines(fileName, delimiter=','):
if not firstIgnored:
firstIgnored = True
elif stamp == 'x':
offset += 1
else:
ids.append((int(stamp), offset))
offset += 1
return ids
if __name__ == '__main__':
unittest.main()
| en | 0.530562 | https://adventofcode.com/2020/day/13 https://mathworld.wolfram.com/ChineseRemainderTheorem.html https://en.wikipedia.org/wiki/Chinese_remainder_theorem :param modulos: list of tuples :return: | 3.49893 | 3 |
<filename>cython_idx/idX.py
#
# Copyright © 2019 <NAME>
# Licensed under Apache License, Version 2.0, January 2004
#
# Identifies kernels corresponding to spectra
#
# idX version 2019.08.10.02
#
import ujson
import time
import gzip
import sys
import datetime
# import the method that deals with spectrum file formats
from spectraX import load_spectra
# import the method for the output of results to a file
from reportX import report_ids
from kernelX import index_kernel
from createX import create_ids
version = '2019.09.01'
#
# Coordinate the identification process, print job stats and progress
#
def main():
if len(sys.argv) < 4:
print('usage:\n\t>python3 idX.py SPECTRA_FILE KERNEL_FILE OUTPUT_FILE (high|medium|low*)')
exit()
start = time.time()
    # record relevant parameters
param = {}
#fragment tolerance in millidaltons
param['fragment mass tolerance'] = float(400)
try:
if sys.argv[4] == 'high':
param['fragment mass tolerance'] = float(20)
elif sys.argv[4] == 'low':
param['fragment mass tolerance'] = float(400)
elif sys.argv[4] == 'medium':
param['fragment mass tolerance'] = float(100)
else:
            print('ERROR: argument 4 must be high, medium or low, not "%s"' % (sys.argv[4]))
exit()
except:
pass
param['maximum spectra'] = -1
try:
param['maximum spectra'] = int(sys.argv[5])
except:
pass
# parent tolerance in ppm
param['parent mass tolerance'] = float(20)
spectra = []
# report files named on command line
print('\nstart ...\nidX parameters')
if param['maximum spectra'] != -1:
        print('\t max spectra: %i' % (param['maximum spectra']))
else:
print('\t max spectra: unlimited')
print('\t fragment tol: %i mDa' % (param['fragment mass tolerance']))
print('\t parent tol: %i ppm' % (param['parent mass tolerance']))
print('\t spectrum file: %s' % (sys.argv[1]))
print('\t kernel file: %s' % (sys.argv[2]))
print('\t output file: %s' % (sys.argv[3]))
print('\t version: %s' % (version))
print('\t run time: %s' % (str(datetime.datetime.now())))
param['spectrum file'] = sys.argv[1]
print('load & index spectra')
# read the spectrum file and perform all necessary spectrum conditioning
spectra = load_spectra(param['spectrum file'],param)
if param['maximum spectra'] != -1:
spectra = spectra[0:param['maximum spectra']]
if len(spectra) == 0:
print('exiting: 0 spectra found')
print('done')
exit()
param['spectra'] = len(spectra)
delta = time.time()-start
start = time.time()
print('\n\t spectra = %i' % (len(spectra)))
print('\tspectra ΔT = %.1f s' % (delta))
param['kernel file'] = sys.argv[2]
print('load & index kernel')
# read the kernel file and create an index of peptide fragmentation patterns
(ki,mi) = index_kernel(param,spectra)
delta = time.time()-start
start = time.time()
print('\n\t kernels = %i' % (len(ki)))
print('\t ΔT = %.1f s' % (delta))
print('perform ids')
# generate a list of identifications for the spectra using the kernel index
ids = create_ids(ki,mi,spectra,param)
# free memory associated with indexes and spectra
delta = time.time()-start
start = time.time()
print('\tid ΔT = %.3f s' % (delta))
if len(spectra) > 0:
print('\t δT = %.0f microseconds' % (1.0e06*delta/len(spectra)))
else:
pass
# simple reporting of the kernels assigned to spectra
print('release memory')
ki = None
spectra = None
print('\tdone')
param['output file'] = sys.argv[3]
print('create report')
report_ids(ids,param)
print('... done')
if __name__== "__main__":
main()
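# --- Illustrative usage (hypothetical file names, not part of the module) ---
# Invoked from the command line, for example:
#   python3 idX.py spectra.mgf kernels.json results.tsv medium
# i.e. sys.argv = ['idX.py', 'spectra.mgf', 'kernels.json', 'results.tsv', 'medium'],
# which selects the 100 mDa fragment tolerance branch above. The spectrum and
# kernel formats actually accepted depend on spectraX.load_spectra and
# kernelX.index_kernel, which are not shown here.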
<filename>api_tests/nodes/views/test_node_contributors_detail.py
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
from rest_framework import exceptions
from tests.utils import assert_latest_log, assert_latest_log_not
from website.util import permissions, disconnected_from_listeners
from website.project.signals import contributor_removed
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestContributorDetail:
@pytest.fixture()
def title(self):
return 'Cool Project'
@pytest.fixture()
def description(self):
return 'A Properly Cool Project'
@pytest.fixture()
def category(self):
return 'data'
@pytest.fixture()
def project_public(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=True,
creator=user
)
@pytest.fixture()
def project_private(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=False,
creator=user
)
@pytest.fixture()
def url_public(self, user, project_public):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_public._id, user._id)
@pytest.fixture()
def url_private_base(self, project_private):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_private._id, '{}')
@pytest.fixture()
def url_private(self, user, url_private_base):
return url_private_base.format(user._id)
def test_get_contributor_detail_valid_response(
self, app, user, project_public,
project_private, url_public, url_private):
# test_get_public_contributor_detail
res = app.get(url_public)
assert res.status_code == 200
assert res.json['data']['id'] == '{}-{}'.format(
project_public._id, user._id)
# regression test
# test_get_public_contributor_detail_is_viewable_through_browsable_api
res = app.get(url_public + '?format=api')
assert res.status_code == 200
# test_get_private_node_contributor_detail_contributor_auth
res = app.get(url_private, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['id'] == '{}-{}'.format(
project_private._id, user._id)
def test_get_contributor_detail_errors(
self, app, user, url_private_base, url_private):
non_contrib = AuthUserFactory()
# test_get_private_node_contributor_detail_non_contributor
res = app.get(url_private, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_get_private_node_contributor_detail_not_logged_in
res = app.get(url_private, expect_errors=True)
assert res.status_code == 401
# test_get_private_node_non_contributor_detail_contributor_auth
res = app.get(
url_private_base.format(
non_contrib._id),
auth=user.auth,
expect_errors=True)
assert res.status_code == 404
# test_get_private_node_invalid_user_detail_contributor_auth
res = app.get(
url_private_base.format('invalid'),
auth=user.auth,
expect_errors=True)
assert res.status_code == 404
def test_unregistered_contributor_detail_show_up_as_name_associated_with_project(
self,
app,
user):
project = ProjectFactory(creator=user, is_public=True)
project.add_unregistered_contributor(
'<NAME>',
'<EMAIL>',
auth=Auth(user),
save=True)
unregistered_contributor = project.contributors[1]
url = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, unregistered_contributor._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
assert res.json['data']['embeds']['users']['data']['attributes']['full_name'] == '<NAME>'
assert res.json['data']['attributes'].get(
'unregistered_contributor') == '<NAME>'
project_two = ProjectFactory(creator=user, is_public=True)
project_two.add_unregistered_contributor(
'<NAME>', '<EMAIL>', auth=Auth(user), save=True)
url = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_two._id, unregistered_contributor._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
assert res.json['data']['embeds']['users']['data']['attributes']['full_name'] == '<NAME>'
assert res.json['data']['attributes'].get(
'unregistered_contributor') == '<NAME>'
def test_detail_includes_index(
self,
app,
user,
project_public,
url_public):
res = app.get(url_public)
data = res.json['data']
assert 'index' in data['attributes'].keys()
assert data['attributes']['index'] == 0
other_contributor = AuthUserFactory()
project_public.add_contributor(
other_contributor, auth=Auth(user), save=True)
other_contributor_detail = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_public._id, other_contributor._id)
res = app.get(other_contributor_detail)
assert res.json['data']['attributes']['index'] == 1
@pytest.mark.django_db
class TestNodeContributorOrdering:
@pytest.fixture()
def contribs(self, user):
return [user] + [AuthUserFactory() for _ in range(9)]
@pytest.fixture()
def project(self, user, contribs):
project = ProjectFactory(creator=user)
for contrib in contribs:
if contrib._id != user._id:
project.add_contributor(
contrib,
permissions=[permissions.READ, permissions.WRITE],
visible=True,
save=True
)
return project
@pytest.fixture()
def url_contrib_base(self, project):
return '/{}nodes/{}/contributors/'.format(API_BASE, project._id)
@pytest.fixture()
def url_creator(self, user, project):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
def urls_contrib(self, contribs, project):
return [
'/{}nodes/{}/contributors/{}/'.format(
API_BASE,
project._id,
contrib._id) for contrib in contribs]
@pytest.fixture()
def last_position(self, contribs):
return len(contribs) - 1
@staticmethod
@pytest.fixture()
def contrib_user_id():
def get_contrib_user_id(contributor):
return contributor['embeds']['users']['data']['id']
return get_contrib_user_id
def test_initial_order(
self, app, user, contribs, project, contrib_user_id):
res = app.get('/{}nodes/{}/contributors/'.format(
API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
found_contributors = False
for i in range(len(contribs)):
assert contribs[i]._id == contrib_user_id(contributor_list[i])
assert i == contributor_list[i]['attributes']['index']
found_contributors = True
assert found_contributors, 'Did not compare any contributors.'
def test_move_top_contributor_down_one_and_also_log(
self, app, user, contribs, project, contrib_user_id, url_contrib_base):
with assert_latest_log(NodeLog.CONTRIB_REORDERED, project):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get(
'/{}nodes/{}/contributors/'.format(API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[1]) == contributor_to_move
assert contrib_user_id(
contributor_list[0]) == former_second_contributor._id
def test_move_second_contributor_up_one_to_top(
self, app, user, contribs, project,
contrib_user_id, url_contrib_base):
contributor_to_move = contribs[1]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_first_contributor = contribs[0]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 0
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(
API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert contrib_user_id(
contributor_list[1]) == former_first_contributor._id
def test_move_top_contributor_down_to_bottom(
self, app, user, contribs, project,
contrib_user_id, last_position,
url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': last_position
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(
contributor_list[last_position]) == contributor_to_move
assert contrib_user_id(
contributor_list[0]) == former_second_contributor._id
def test_move_bottom_contributor_up_to_top(
self, app, user, contribs, project,
contrib_user_id, last_position,
url_contrib_base):
contributor_to_move = contribs[last_position]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_to_last_contributor = contribs[last_position - 1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 0
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert (
contrib_user_id(contributor_list[last_position]) ==
former_second_to_last_contributor._id)
def test_move_second_to_last_contributor_down_past_bottom(
self, app, user, contribs, project,
contrib_user_id, last_position,
url_contrib_base):
contributor_to_move = contribs[last_position - 1]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_last_contributor = contribs[last_position]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': last_position + 10
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(
contributor_list[last_position]) == contributor_to_move
assert (
contrib_user_id(contributor_list[last_position - 1]) ==
former_last_contributor._id)
def test_move_top_contributor_down_to_second_to_last_position_with_negative_numbers(
self, app, user, contribs, project, contrib_user_id, last_position, url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': -1
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(
contributor_list[last_position - 1]) == contributor_to_move
assert contrib_user_id(
contributor_list[0]) == former_second_contributor._id
def test_write_contributor_fails_to_move_top_contributor_down_one(
self, app, user, contribs, project, contrib_user_id, url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = app.patch_json_api(
url, data,
auth=former_second_contributor.auth,
expect_errors=True)
assert res_patch.status_code == 403
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert contrib_user_id(
contributor_list[1]) == former_second_contributor._id
def test_non_authenticated_fails_to_move_top_contributor_down_one(
self, app, user, contribs, project, contrib_user_id, url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = app.patch_json_api(url, data, expect_errors=True)
assert res_patch.status_code == 401
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(
API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert contrib_user_id(
contributor_list[1]) == former_second_contributor._id
@pytest.mark.django_db
class TestNodeContributorUpdate:
@pytest.fixture()
def contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, user, contrib):
project = ProjectFactory(creator=user)
project.add_contributor(
contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
return project
@pytest.fixture()
def url_creator(self, user, project):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
def url_contrib(self, project, contrib):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, contrib._id)
def test_change_contrib_errors(
self, app, user, contrib, project, url_contrib):
# test_change_contributor_no_id
data = {
'data': {
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib,
data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
# test_change_contributor_incorrect_id
data = {
'data': {
'id': '12345',
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib,
data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
# test_change_contributor_no_type
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
# test_change_contributor_incorrect_type
data = {
'data': {
'id': contrib._id,
'type': 'Wrong type.',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
# test_invalid_change_inputs_contributor
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': 'invalid',
'bibliographic': 'invalid'
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
# test_change_contributor_not_logged_in
data = {
'data': {
'id': contrib._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, expect_errors=True)
assert res.status_code == 401
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
# test_change_contributor_non_admin_auth
data = {
'data': {
'id': contrib._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=contrib.auth,
expect_errors=True)
assert res.status_code == 403
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
def test_change_admin_self_without_other_admin(
self, app, user, project, url_creator):
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_creator, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
def test_node_update_invalid_data(self, app, user, url_creator):
res = app.put_json_api(
url_creator,
'Incorrect data',
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
res = app.put_json_api(
url_creator,
['Incorrect data'],
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
def test_change_contributor_correct_id(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 200
def test_remove_all_bibliographic_statuses_contributors(
self, app, user, contrib, project, url_creator):
project.set_visible(contrib, False, save=True)
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = app.put_json_api(
url_creator, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
project.reload()
assert project.get_visible(user)
def test_change_contributor_permissions(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.ADMIN
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.READ
project.reload()
assert project.get_permissions(contrib) == [permissions.READ]
def test_change_contributor_bibliographic(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
with assert_latest_log(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert not attributes['bibliographic']
project.reload()
assert not project.get_visible(contrib)
with assert_latest_log(NodeLog.MADE_CONTRIBUTOR_VISIBLE, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['bibliographic']
project.reload()
assert project.get_visible(contrib)
def test_change_contributor_permission_and_bibliographic(
self, app, user, contrib, project, url_contrib):
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project, 1), assert_latest_log(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, project):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.READ
assert not attributes['bibliographic']
project.reload()
assert project.get_permissions(contrib) == [permissions.READ]
assert not project.get_visible(contrib)
# @assert_not_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
def test_not_change_contributor(
self, app, user, contrib, project, url_contrib):
with assert_latest_log_not(NodeLog.PERMISSIONS_UPDATED, project):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': None,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
assert attributes['bibliographic']
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
def test_change_admin_self_with_other_admin(
self, app, user, contrib, project, url_creator):
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
project.add_permission(contrib, permissions.ADMIN, save=True)
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(url_creator, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE]
@pytest.mark.django_db
class TestNodeContributorPartialUpdate:
@pytest.fixture()
def contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, user, contrib):
project = ProjectFactory(creator=user)
project.add_contributor(
contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
return project
@pytest.fixture()
def url_creator(self, user, project):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
    def url_contrib(self, contrib, project):
        return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, contrib._id)
def test_patch_bibliographic_only(self, app, user, project, url_creator):
creator_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': creator_id,
'type': 'contributors',
'attributes': {
'bibliographic': False,
}
}
}
res = app.patch_json_api(url_creator, data, auth=user.auth)
assert res.status_code == 200
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
assert not project.get_visible(user)
def test_patch_permission_only(self, app, user, project):
user_read_contrib = AuthUserFactory()
project.add_contributor(
user_read_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=False,
save=True)
url_read_contrib = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user_read_contrib._id)
contributor_id = '{}-{}'.format(project._id, user_read_contrib._id)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
}
}
}
res = app.patch_json_api(url_read_contrib, data, auth=user.auth)
assert res.status_code == 200
project.reload()
assert project.get_permissions(user_read_contrib) == [permissions.READ]
assert not project.get_visible(user_read_contrib)
@pytest.mark.django_db
class TestNodeContributorDelete:
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, user, user_write_contrib):
project = ProjectFactory(creator=user)
project.add_contributor(
user_write_contrib,
permissions=[permissions.READ, permissions.WRITE],
visible=True, save=True)
return project
@pytest.fixture()
def url_user(self, project, user):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
def url_user_write_contrib(self, project, user_write_contrib):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user_write_contrib._id)
@pytest.fixture()
def url_user_non_contrib(self, project, user_non_contrib):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user_non_contrib._id)
def test_remove_errors(
self, app, user, user_write_contrib,
user_non_contrib, project, url_user,
url_user_write_contrib, url_user_non_contrib):
# test_remove_contributor_non_contributor
res = app.delete(
url_user_write_contrib,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
project.reload()
assert user_write_contrib in project.contributors
# test_remove_contributor_not_logged_in
res = app.delete(url_user_write_contrib, expect_errors=True)
assert res.status_code == 401
project.reload()
assert user_write_contrib in project.contributors
# test_remove_non_contributor_admin
assert user_non_contrib not in project.contributors
res = app.delete(
url_user_non_contrib,
auth=user.auth,
expect_errors=True)
assert res.status_code == 404
project.reload()
assert user_non_contrib not in project.contributors
# test_remove_non_existing_user_admin
url_user_fake = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, 'fake')
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user_fake, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_remove_self_contributor_unique_admin
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user, auth=user.auth, expect_errors=True)
assert res.status_code == 400
project.reload()
assert user in project.contributors
def test_can_not_remove_only_bibliographic_contributor(
self, app, user, project, user_write_contrib, url_user):
project.add_permission(
user_write_contrib,
permissions.ADMIN,
save=True)
project.set_visible(user_write_contrib, False, save=True)
res = app.delete(url_user, auth=user.auth, expect_errors=True)
assert res.status_code == 400
project.reload()
assert user in project.contributors
def test_remove_contributor_non_admin_is_forbidden(
self, app, user_write_contrib,
user_non_contrib, project,
url_user_non_contrib):
project.add_contributor(
user_non_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
res = app.delete(
url_user_non_contrib,
auth=user_write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
project.reload()
assert user_non_contrib in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_contributor_admin(
self, app, user, user_write_contrib,
project, url_user_write_contrib):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user_write_contrib, auth=user.auth)
assert res.status_code == 204
project.reload()
assert user_write_contrib not in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_self_non_admin(
self, app, user_non_contrib,
project, url_user_non_contrib):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
project.add_contributor(
user_non_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(
url_user_non_contrib,
auth=user_non_contrib.auth)
assert res.status_code == 204
project.reload()
assert user_non_contrib not in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_self_contributor_not_unique_admin(
self, app, user, user_write_contrib, project, url_user):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
project.add_permission(
user_write_contrib,
permissions.ADMIN,
save=True)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user, auth=user.auth)
assert res.status_code == 204
project.reload()
assert user not in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_can_remove_self_as_contributor_not_unique_admin(
self, app, user_write_contrib, project, url_user_write_contrib):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
project.add_permission(
user_write_contrib,
permissions.ADMIN,
save=True)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(
url_user_write_contrib,
auth=user_write_contrib.auth)
assert res.status_code == 204
project.reload()
assert user_write_contrib not in project.contributors
| <filename>api_tests/nodes/views/test_node_contributors_detail.py
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
from rest_framework import exceptions
from tests.utils import assert_latest_log, assert_latest_log_not
from website.util import permissions, disconnected_from_listeners
from website.project.signals import contributor_removed
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestContributorDetail:
@pytest.fixture()
def title(self):
return 'Cool Project'
@pytest.fixture()
def description(self):
return 'A Properly Cool Project'
@pytest.fixture()
def category(self):
return 'data'
@pytest.fixture()
def project_public(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=True,
creator=user
)
@pytest.fixture()
def project_private(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=False,
creator=user
)
@pytest.fixture()
def url_public(self, user, project_public):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_public._id, user._id)
@pytest.fixture()
def url_private_base(self, project_private):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_private._id, '{}')
@pytest.fixture()
def url_private(self, user, url_private_base):
return url_private_base.format(user._id)
def test_get_contributor_detail_valid_response(
self, app, user, project_public,
project_private, url_public, url_private):
# test_get_public_contributor_detail
res = app.get(url_public)
assert res.status_code == 200
assert res.json['data']['id'] == '{}-{}'.format(
project_public._id, user._id)
# regression test
# test_get_public_contributor_detail_is_viewable_through_browsable_api
res = app.get(url_public + '?format=api')
assert res.status_code == 200
# test_get_private_node_contributor_detail_contributor_auth
res = app.get(url_private, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['id'] == '{}-{}'.format(
project_private._id, user._id)
def test_get_contributor_detail_errors(
self, app, user, url_private_base, url_private):
non_contrib = AuthUserFactory()
# test_get_private_node_contributor_detail_non_contributor
res = app.get(url_private, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
# test_get_private_node_contributor_detail_not_logged_in
res = app.get(url_private, expect_errors=True)
assert res.status_code == 401
# test_get_private_node_non_contributor_detail_contributor_auth
res = app.get(
url_private_base.format(
non_contrib._id),
auth=user.auth,
expect_errors=True)
assert res.status_code == 404
# test_get_private_node_invalid_user_detail_contributor_auth
res = app.get(
url_private_base.format('invalid'),
auth=user.auth,
expect_errors=True)
assert res.status_code == 404
def test_unregistered_contributor_detail_show_up_as_name_associated_with_project(
self,
app,
user):
project = ProjectFactory(creator=user, is_public=True)
project.add_unregistered_contributor(
'<NAME>',
'<EMAIL>',
auth=Auth(user),
save=True)
unregistered_contributor = project.contributors[1]
url = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, unregistered_contributor._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
assert res.json['data']['embeds']['users']['data']['attributes']['full_name'] == '<NAME>'
assert res.json['data']['attributes'].get(
'unregistered_contributor') == '<NAME>'
project_two = ProjectFactory(creator=user, is_public=True)
project_two.add_unregistered_contributor(
'<NAME>', '<EMAIL>', auth=Auth(user), save=True)
url = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_two._id, unregistered_contributor._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
assert res.json['data']['embeds']['users']['data']['attributes']['full_name'] == '<NAME>'
assert res.json['data']['attributes'].get(
'unregistered_contributor') == '<NAME>'
def test_detail_includes_index(
self,
app,
user,
project_public,
url_public):
res = app.get(url_public)
data = res.json['data']
assert 'index' in data['attributes'].keys()
assert data['attributes']['index'] == 0
other_contributor = AuthUserFactory()
project_public.add_contributor(
other_contributor, auth=Auth(user), save=True)
other_contributor_detail = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project_public._id, other_contributor._id)
res = app.get(other_contributor_detail)
assert res.json['data']['attributes']['index'] == 1
@pytest.mark.django_db
class TestNodeContributorOrdering:
@pytest.fixture()
def contribs(self, user):
return [user] + [AuthUserFactory() for _ in range(9)]
@pytest.fixture()
def project(self, user, contribs):
project = ProjectFactory(creator=user)
for contrib in contribs:
if contrib._id != user._id:
project.add_contributor(
contrib,
permissions=[permissions.READ, permissions.WRITE],
visible=True,
save=True
)
return project
@pytest.fixture()
def url_contrib_base(self, project):
return '/{}nodes/{}/contributors/'.format(API_BASE, project._id)
@pytest.fixture()
def url_creator(self, user, project):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
def urls_contrib(self, contribs, project):
return [
'/{}nodes/{}/contributors/{}/'.format(
API_BASE,
project._id,
contrib._id) for contrib in contribs]
@pytest.fixture()
def last_position(self, contribs):
return len(contribs) - 1
@staticmethod
@pytest.fixture()
def contrib_user_id():
def get_contrib_user_id(contributor):
return contributor['embeds']['users']['data']['id']
return get_contrib_user_id
def test_initial_order(
self, app, user, contribs, project, contrib_user_id):
res = app.get('/{}nodes/{}/contributors/'.format(
API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
found_contributors = False
for i in range(len(contribs)):
assert contribs[i]._id == contrib_user_id(contributor_list[i])
assert i == contributor_list[i]['attributes']['index']
found_contributors = True
assert found_contributors, 'Did not compare any contributors.'
def test_move_top_contributor_down_one_and_also_log(
self, app, user, contribs, project, contrib_user_id, url_contrib_base):
with assert_latest_log(NodeLog.CONTRIB_REORDERED, project):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get(
'/{}nodes/{}/contributors/'.format(API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[1]) == contributor_to_move
assert contrib_user_id(
contributor_list[0]) == former_second_contributor._id
def test_move_second_contributor_up_one_to_top(
self, app, user, contribs, project,
contrib_user_id, url_contrib_base):
contributor_to_move = contribs[1]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_first_contributor = contribs[0]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 0
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(
API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert contrib_user_id(
contributor_list[1]) == former_first_contributor._id
def test_move_top_contributor_down_to_bottom(
self, app, user, contribs, project,
contrib_user_id, last_position,
url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': last_position
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(
contributor_list[last_position]) == contributor_to_move
assert contrib_user_id(
contributor_list[0]) == former_second_contributor._id
def test_move_bottom_contributor_up_to_top(
self, app, user, contribs, project,
contrib_user_id, last_position,
url_contrib_base):
contributor_to_move = contribs[last_position]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_to_last_contributor = contribs[last_position - 1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 0
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert (
contrib_user_id(contributor_list[last_position]) ==
former_second_to_last_contributor._id)
def test_move_second_to_last_contributor_down_past_bottom(
self, app, user, contribs, project,
contrib_user_id, last_position,
url_contrib_base):
contributor_to_move = contribs[last_position - 1]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_last_contributor = contribs[last_position]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': last_position + 10
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(
contributor_list[last_position]) == contributor_to_move
assert (
contrib_user_id(contributor_list[last_position - 1]) ==
former_last_contributor._id)
def test_move_top_contributor_down_to_second_to_last_position_with_negative_numbers(
self, app, user, contribs, project, contrib_user_id, last_position, url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': -1
}
}
}
res_patch = app.patch_json_api(url, data, auth=user.auth)
assert res_patch.status_code == 200
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(
contributor_list[last_position - 1]) == contributor_to_move
assert contrib_user_id(
contributor_list[0]) == former_second_contributor._id
def test_write_contributor_fails_to_move_top_contributor_down_one(
self, app, user, contribs, project, contrib_user_id, url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = app.patch_json_api(
url, data,
auth=former_second_contributor.auth,
expect_errors=True)
assert res_patch.status_code == 403
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(API_BASE,
project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert contrib_user_id(
contributor_list[1]) == former_second_contributor._id
def test_non_authenticated_fails_to_move_top_contributor_down_one(
self, app, user, contribs, project, contrib_user_id, url_contrib_base):
contributor_to_move = contribs[0]._id
contributor_id = '{}-{}'.format(project._id, contributor_to_move)
former_second_contributor = contribs[1]
url = '{}{}/'.format(url_contrib_base, contributor_to_move)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'index': 1
}
}
}
res_patch = app.patch_json_api(url, data, expect_errors=True)
assert res_patch.status_code == 401
project.reload()
res = app.get('/{}nodes/{}/contributors/'.format(
API_BASE, project._id), auth=user.auth)
assert res.status_code == 200
contributor_list = res.json['data']
assert contrib_user_id(contributor_list[0]) == contributor_to_move
assert contrib_user_id(
contributor_list[1]) == former_second_contributor._id
@pytest.mark.django_db
class TestNodeContributorUpdate:
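    """Tests for updating a contributor's permission level and bibliographic status on a node via PUT."""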
@pytest.fixture()
def contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, user, contrib):
project = ProjectFactory(creator=user)
project.add_contributor(
contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
return project
@pytest.fixture()
def url_creator(self, user, project):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
def url_contrib(self, project, contrib):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, contrib._id)
def test_change_contrib_errors(
self, app, user, contrib, project, url_contrib):
# test_change_contributor_no_id
data = {
'data': {
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib,
data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
# test_change_contributor_incorrect_id
data = {
'data': {
'id': '12345',
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib,
data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
# test_change_contributor_no_type
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
# test_change_contributor_incorrect_type
data = {
'data': {
'id': contrib._id,
'type': 'Wrong type.',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 409
# test_invalid_change_inputs_contributor
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': 'invalid',
'bibliographic': 'invalid'
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
# test_change_contributor_not_logged_in
data = {
'data': {
'id': contrib._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, expect_errors=True)
assert res.status_code == 401
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
# test_change_contributor_non_admin_auth
data = {
'data': {
'id': contrib._id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=contrib.auth,
expect_errors=True)
assert res.status_code == 403
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
def test_change_admin_self_without_other_admin(
self, app, user, project, url_creator):
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_creator, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
def test_node_update_invalid_data(self, app, user, url_creator):
res = app.put_json_api(
url_creator,
'Incorrect data',
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
res = app.put_json_api(
url_creator,
['Incorrect data'],
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
def test_change_contributor_correct_id(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(
url_contrib, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 200
def test_remove_all_bibliographic_statuses_contributors(
self, app, user, contrib, project, url_creator):
project.set_visible(contrib, False, save=True)
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = app.put_json_api(
url_creator, data,
auth=user.auth,
expect_errors=True)
assert res.status_code == 400
project.reload()
assert project.get_visible(user)
def test_change_contributor_permissions(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.ADMIN,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.ADMIN
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.READ
project.reload()
assert project.get_permissions(contrib) == [permissions.READ]
def test_change_contributor_bibliographic(
self, app, user, contrib, project, url_contrib):
contrib_id = '{}-{}'.format(project._id, contrib._id)
with assert_latest_log(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert not attributes['bibliographic']
project.reload()
assert not project.get_visible(contrib)
with assert_latest_log(NodeLog.MADE_CONTRIBUTOR_VISIBLE, project):
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['bibliographic']
project.reload()
assert project.get_visible(contrib)
def test_change_contributor_permission_and_bibliographic(
self, app, user, contrib, project, url_contrib):
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project, 1), assert_latest_log(NodeLog.MADE_CONTRIBUTOR_INVISIBLE, project):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
'bibliographic': False
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.READ
assert not attributes['bibliographic']
project.reload()
assert project.get_permissions(contrib) == [permissions.READ]
assert not project.get_visible(contrib)
# @assert_not_logs(NodeLog.PERMISSIONS_UPDATED, 'project')
def test_not_change_contributor(
self, app, user, contrib, project, url_contrib):
with assert_latest_log_not(NodeLog.PERMISSIONS_UPDATED, project):
contrib_id = '{}-{}'.format(project._id, contrib._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': None,
'bibliographic': True
}
}
}
res = app.put_json_api(url_contrib, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
assert attributes['bibliographic']
project.reload()
assert project.get_permissions(contrib) == [
permissions.READ, permissions.WRITE]
assert project.get_visible(contrib)
def test_change_admin_self_with_other_admin(
self, app, user, contrib, project, url_creator):
with assert_latest_log(NodeLog.PERMISSIONS_UPDATED, project):
project.add_permission(contrib, permissions.ADMIN, save=True)
contrib_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': contrib_id,
'type': 'contributors',
'attributes': {
'permission': permissions.WRITE,
'bibliographic': True
}
}
}
res = app.put_json_api(url_creator, data, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
assert attributes['permission'] == permissions.WRITE
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE]
@pytest.mark.django_db
class TestNodeContributorPartialUpdate:
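    """Tests for PATCHing a single contributor attribute (bibliographic or permission) without sending the other."""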
@pytest.fixture()
def contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, user, contrib):
project = ProjectFactory(creator=user)
project.add_contributor(
contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
return project
@pytest.fixture()
def url_creator(self, user, project):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
def url_contrib(self, contrib, project):
return '/{}nodes/{}/contributors/{}/'.format(
            API_BASE, project._id, contrib._id)
def test_patch_bibliographic_only(self, app, user, project, url_creator):
creator_id = '{}-{}'.format(project._id, user._id)
data = {
'data': {
'id': creator_id,
'type': 'contributors',
'attributes': {
'bibliographic': False,
}
}
}
res = app.patch_json_api(url_creator, data, auth=user.auth)
assert res.status_code == 200
project.reload()
assert project.get_permissions(user) == [
permissions.READ, permissions.WRITE, permissions.ADMIN]
assert not project.get_visible(user)
def test_patch_permission_only(self, app, user, project):
user_read_contrib = AuthUserFactory()
project.add_contributor(
user_read_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=False,
save=True)
url_read_contrib = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user_read_contrib._id)
contributor_id = '{}-{}'.format(project._id, user_read_contrib._id)
data = {
'data': {
'id': contributor_id,
'type': 'contributors',
'attributes': {
'permission': permissions.READ,
}
}
}
res = app.patch_json_api(url_read_contrib, data, auth=user.auth)
assert res.status_code == 200
project.reload()
assert project.get_permissions(user_read_contrib) == [permissions.READ]
assert not project.get_visible(user_read_contrib)
@pytest.mark.django_db
class TestNodeContributorDelete:
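    """Tests for removing contributors from a node via DELETE, covering auth checks and last-admin/last-bibliographic edge cases."""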
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project(self, user, user_write_contrib):
project = ProjectFactory(creator=user)
project.add_contributor(
user_write_contrib,
permissions=[permissions.READ, permissions.WRITE],
visible=True, save=True)
return project
@pytest.fixture()
def url_user(self, project, user):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user._id)
@pytest.fixture()
def url_user_write_contrib(self, project, user_write_contrib):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user_write_contrib._id)
@pytest.fixture()
def url_user_non_contrib(self, project, user_non_contrib):
return '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, user_non_contrib._id)
def test_remove_errors(
self, app, user, user_write_contrib,
user_non_contrib, project, url_user,
url_user_write_contrib, url_user_non_contrib):
# test_remove_contributor_non_contributor
res = app.delete(
url_user_write_contrib,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
project.reload()
assert user_write_contrib in project.contributors
# test_remove_contributor_not_logged_in
res = app.delete(url_user_write_contrib, expect_errors=True)
assert res.status_code == 401
project.reload()
assert user_write_contrib in project.contributors
# test_remove_non_contributor_admin
assert user_non_contrib not in project.contributors
res = app.delete(
url_user_non_contrib,
auth=user.auth,
expect_errors=True)
assert res.status_code == 404
project.reload()
assert user_non_contrib not in project.contributors
# test_remove_non_existing_user_admin
url_user_fake = '/{}nodes/{}/contributors/{}/'.format(
API_BASE, project._id, 'fake')
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user_fake, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_remove_self_contributor_unique_admin
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user, auth=user.auth, expect_errors=True)
assert res.status_code == 400
project.reload()
assert user in project.contributors
def test_can_not_remove_only_bibliographic_contributor(
self, app, user, project, user_write_contrib, url_user):
project.add_permission(
user_write_contrib,
permissions.ADMIN,
save=True)
project.set_visible(user_write_contrib, False, save=True)
res = app.delete(url_user, auth=user.auth, expect_errors=True)
assert res.status_code == 400
project.reload()
assert user in project.contributors
def test_remove_contributor_non_admin_is_forbidden(
self, app, user_write_contrib,
user_non_contrib, project,
url_user_non_contrib):
project.add_contributor(
user_non_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
res = app.delete(
url_user_non_contrib,
auth=user_write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
project.reload()
assert user_non_contrib in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_contributor_admin(
self, app, user, user_write_contrib,
project, url_user_write_contrib):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user_write_contrib, auth=user.auth)
assert res.status_code == 204
project.reload()
assert user_write_contrib not in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_self_non_admin(
self, app, user_non_contrib,
project, url_user_non_contrib):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
project.add_contributor(
user_non_contrib,
permissions=[
permissions.READ,
permissions.WRITE],
visible=True,
save=True)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(
url_user_non_contrib,
auth=user_non_contrib.auth)
assert res.status_code == 204
project.reload()
assert user_non_contrib not in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_remove_self_contributor_not_unique_admin(
self, app, user, user_write_contrib, project, url_user):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
project.add_permission(
user_write_contrib,
permissions.ADMIN,
save=True)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(url_user, auth=user.auth)
assert res.status_code == 204
project.reload()
assert user not in project.contributors
# @assert_logs(NodeLog.CONTRIB_REMOVED, 'project')
def test_can_remove_self_as_contributor_not_unique_admin(
self, app, user_write_contrib, project, url_user_write_contrib):
with assert_latest_log(NodeLog.CONTRIB_REMOVED, project):
project.add_permission(
user_write_contrib,
permissions.ADMIN,
save=True)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in
# osf-models
with disconnected_from_listeners(contributor_removed):
res = app.delete(
url_user_write_contrib,
auth=user_write_contrib.auth)
assert res.status_code == 204
project.reload()
assert user_write_contrib not in project.contributors
| en | 0.458067 | # test_get_public_contributor_detail # regression test # test_get_public_contributor_detail_is_viewable_through_browsable_api # test_get_private_node_contributor_detail_contributor_auth # test_get_private_node_contributor_detail_non_contributor # test_get_private_node_contributor_detail_not_logged_in # test_get_private_node_non_contributor_detail_contributor_auth # test_get_private_node_invalid_user_detail_contributor_auth # test_change_contributor_no_id # test_change_contributor_incorrect_id # test_change_contributor_no_type # test_change_contributor_incorrect_type # test_invalid_change_inputs_contributor # test_change_contributor_not_logged_in # test_change_contributor_non_admin_auth # @assert_not_logs(NodeLog.PERMISSIONS_UPDATED, 'project') # test_remove_contributor_non_contributor # test_remove_contributor_not_logged_in # test_remove_non_contributor_admin # test_remove_non_existing_user_admin # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in osf-models # test_remove_self_contributor_unique_admin # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in osf-models # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project') # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in # osf-models # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project') # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in # osf-models # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project') # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in # osf-models # @assert_logs(NodeLog.CONTRIB_REMOVED, 'project') # Disconnect contributor_removed so that we don't check in files # We can remove this when StoredFileNode is implemented in # osf-models | 1.858322 | 2 |
template_creator/tests/test_go_strategy.py | VanOvermeire/sam-template-creator | 3 | 6631355 | import unittest
from template_creator.reader.strategies.GoStrategy import GoStrategy
class TestGoStrategy(unittest.TestCase):
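    """Unit tests for GoStrategy: handler detection plus event, API route, env var, permission and import extraction from Go sources."""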
def setUp(self):
self.lines = ['package main\n', '\n', 'import (\n', '\t"context"\n', '\t"fmt"\n', '\t"github.com/aws/aws-lambda-go/events"\n', '\t"github.com/aws/aws-lambda-go/lambda"\n', '\t"os"\n', ')\n',
'\n', 'var dbClient *db.Client\n', '\n', 'func init() {\n', '\tdbClient = db.SetupDynamoDBClient(os.Getenv("REGION"), os.Getenv("TABLE_NAME"))\n', '}\n', '\n',
'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n', '// some message\n', '\treturn handleAdd(dbClient, event)\n', '}\n', '\n',
'func main() {\n', '\tlambda.Start(HandleRequest)\n', '}\n']
self.strategy = GoStrategy()
self.hander_line = 'func HandleRequest(_ context.Context, s3event events.APIGatewayProxyRequest) (Response, error) {'
def test_is_handler_tabs(self):
is_handler, line = self.strategy.is_handler_file(self.lines)
self.assertTrue(is_handler)
self.assertEqual(line, 'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n')
def test_is_handler_spaces(self):
lines = ['package main\n', '\n', 'import (\n', '\t"context"\n', '\t"fmt"\n', '\t"github.com/aws/aws-lambda-go/events"\n', '\t"github.com/aws/aws-lambda-go/lambda"\n', '\t"os"\n', ')\n',
'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n', '// some message\n', '\treturn handleAdd(dbClient, event)\n', '}\n', '\n',
'func main() {\n', ' lambda.Start(HandleRequest)\n', '}\n']
is_handler, line = self.strategy.is_handler_file(lines)
self.assertTrue(is_handler)
self.assertEqual(line, 'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n')
def test_is_not_handler(self):
lines = ['package main\n', '\n', 'import (\n', '\t"context"\n', '\t"fmt"\n', '\t"github.com/aws/aws-lambda-go/events"\n', '\t"github.com/aws/aws-lambda-go/lambda"\n', '\t"os"\n', ')\n',
'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n', '// some message\n', '\treturn handleAdd(dbClient, event)\n', '}\n', '\n',
'func main() {\n', ' fmt.Println("Stuff")\n', '}\n']
is_handler, line = self.strategy.is_handler_file(lines)
self.assertFalse(is_handler)
def test_build_handler(self):
result = self.strategy.build_handler('/some/location/dir_of_lambda', '/some/location/dir_of_lambda/file.py', self.hander_line, None)
self.assertEqual(result, 'handler')
def test_build_handler_for_executable(self):
result = self.strategy.build_handler('/some/location/dir_of_lambda', '/some/location/dir_of_lambda/file.py', self.hander_line, '/some/location/dir_of_lambda/main')
self.assertEqual(result, 'main')
def test_find_events(self):
result = self.strategy.find_events(self.hander_line)
self.assertEqual(result, ['S3'])
def test_find_events_with_underscore_in_name_event(self):
handler_line = 'func HandleRequest(_ context.Context, s3event events.APIGatewayProxyRequest) (Response, error) {\n'
result = self.strategy.find_events(handler_line)
self.assertEqual(result, ['S3'])
def test_find_events_no_event(self):
handler_line = 'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {'
result = self.strategy.find_events(handler_line)
self.assertIsNone(result)
def test_find_events_no_arguments(self):
handler_line = 'func HandleRequest() error {'
result = self.strategy.find_events(handler_line)
self.assertIsNone(result)
def test_find_api_no_api(self):
result = self.strategy.find_api(self.hander_line)
self.assertEqual(result, [])
def test_find_api_simple_with_method_first(self):
handler_line = 'func PutAddRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {'
result = self.strategy.find_api(handler_line)
self.assertEqual(result, ['put', '/add'])
def test_find_api_simple_with_method_second(self):
handler_line = 'func AddPostRequest() (Response, error) {'
result = self.strategy.find_api(handler_line)
self.assertEqual(result, ['post', '/add'])
def test_find_api_multiple_levels_with_method_first(self):
handler_line = 'func PutAddHelloRequest(_ context.Context, event events.APIGatewayProxyRequest) error {'
result = self.strategy.find_api(handler_line)
self.assertEqual(result, ['put', '/add/hello'])
def test_find_env_variables(self):
result = self.strategy.find_env_variables(self.lines)
self.assertCountEqual(result, ['TABLE_NAME', 'REGION'])
def test_find_roles_no_roles(self):
result = self.strategy.find_permissions(self.lines)
self.assertCountEqual(result, [])
def test_find_roles(self):
lines = ['package main\n', '\n', 'import (\n', '\t"context"\n', '\t"fmt"\n', '\t"github.com/aws/aws-lambda-go/events"\n', '\t"github.com/aws/aws-lambda-go/lambda"\n',
'\t"github.com/aws/aws-sdk-go/service/s3"\n', '"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"', '"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"',
'\t"go-reservations/db"\n', '\t"os"\n', ')\n', '\n', 'var dbClient *db.Client\n', '\n', 'func init() {\n',
'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n', '\tfmt.Println("Received ", event) // remove, temporary logging\n',
'\treturn handleAdd(dbClient, event)\n', '}\n', '\n', 'func main() {\n', '\tlambda.Start(HandleRequest)\n', '}\n']
result = self.strategy.find_permissions(lines)
self.assertCountEqual(result, ['s3:*', 'dynamodb:*'])
def test_find_roles_from_exception_list(self):
lines = ['package main\n', '\n', 'import (\n', '\t"context"\n', '\t"fmt"\n', '\t"github.com/aws/aws-lambda-go/events"\n', '\t"github.com/aws/aws-lambda-go/lambda"\n',
'\t"github.com/aws/aws-sdk-go/service/efs"\n', '\t"go-reservations/db"\n', '\t"os"\n', ')\n', '\n', 'var dbClient *db.Client\n', '\n', 'func init() {\n',
'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n', '\tfmt.Println("Received ", event) // remove, temporary logging\n',
'\treturn handleAdd(dbClient, event)\n', '}\n', '\n', 'func main() {\n', '\tlambda.Start(HandleRequest)\n', '}\n']
result = self.strategy.find_permissions(lines)
self.assertCountEqual(result, ['elasticfilesystem:*'])
def test_find_invoked_files(self):
handler_lines = ['package main\n', '\n', 'import (\n', '\t"fmt"\n', '\t"github.com/aws/aws-lambda-go/events"\n', '\t"myproject/mylib"\n', '\t"myproject/secondlib"\n',
'\t// "myproject/commented"\n', ')\n', 'import "anotherthing"', 'import "myproject/thirdlibrary"' 'var dbClient *db.Client\n', '\n',
'func HandleRequest(_ context.Context, event events.APIGatewayProxyRequest) (Response, error) {\n',
'\tfmt.Println("Received ", event)\n', '\treturn {}\n', '}\n', 'func main() {\n', '\tlambda.Start(HandleRequest)\n', '}\n']
results = self.strategy.find_invoked_files(handler_lines)
self.assertEqual(results['mylib'], '*')
self.assertEqual(results['secondlib'], '*')
self.assertEqual(results['thirdlibrary'], '*')
def test_remove_commented_lines(self):
lines = ['not commented', '\t// "myproject/commented"\n', '// import "anotherthing"', 'not']
results = GoStrategy.remove_commented_lines(lines)
self.assertEqual(len(results), 2)
self.assertEqual(results[0], 'not commented')
self.assertEqual(results[1], 'not')
| none | 1 | 2.428502 | 2 |
|
vkontakte_api/factories.py | mcfoton/django-vkontakte-api | 0 | 6631356 | import factory
class DjangoModelNoCommitFactory(factory.DjangoModelFactory):
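    """Base factory that forces commit_remote=False so objects created in tests are never committed to the remote API."""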
ABSTRACT_FACTORY = True
@classmethod
def _create(cls, *args, **kwargs):
kwargs['commit_remote'] = False
return super(DjangoModelNoCommitFactory, cls)._create(*args, **kwargs)
| none | 1 | 1.964536 | 2 |
|
cogs/error_handlers/l10n.py | thatoneolib/senko | 0 | 6631357 | # This file contains the localization markers for all permissions.
# It is never imported anywhere and not exposed.
_ = lambda m: m
permissions = [
# NOTE: The "add reactions" permission.
# DEFAULT: add reactions
_("#permission_add_reactions"),
# NOTE: The "administrator" permission.
# DEFAULT: administrator
_("#permission_administrator"),
# NOTE: The "attach files" permission.
# DEFAULT: attach files
_("#permission_attach_files"),
# NOTE: The "ban members" permission.
# DEFAULT: ban members
_("#permission_ban_members"),
# NOTE: The "change nickname" permission.
# DEFAULT: change nickname
_("#permission_change_nickname"),
# NOTE: The "connect" permission.
# DEFAULT: connect
_("#permission_connect"),
# NOTE: The "create instant invite" permission.
# DEFAULT: create instant invite
_("#permission_create_instant_invite"),
# NOTE: The "deafen members" permission.
# DEFAULT: deafen members
_("#permission_deafen_members"),
# NOTE: The "embed links" permission.
# DEFAULT: embed links
_("#permission_embed_links"),
# NOTE: The "external emojis" permission.
# DEFAULT: external emojis
_("#permission_external_emojis"),
# NOTE: The "kick members" permission.
# DEFAULT: kick members
_("#permission_kick_members"),
# NOTE: The "manage channels" permission.
# DEFAULT: manage channels
_("#permission_manage_channels"),
# NOTE: The "manage emojis" permission.
# DEFAULT: manage emojis
_("#permission_manage_emojis"),
# NOTE: The "manage guild" permission.
# DEFAULT: manage guild
_("#permission_manage_guild"),
# NOTE: The "manage messages" permission.
# DEFAULT: manage messages
_("#permission_manage_messages"),
# NOTE: The "manage nicknames" permission.
# DEFAULT: manage nicknames
_("#permission_manage_nicknames"),
# NOTE: The "manage permissions" permission.
# DEFAULT: manage permissions
_("#permission_manage_permissions"),
# NOTE: The "manage roles" permission.
# DEFAULT: manage roles
_("#permission_manage_roles"),
# NOTE: The "manage webhooks" permission.
# DEFAULT: manage webhooks
_("#permission_manage_webhooks"),
# NOTE: The "mention everyone" permission.
# DEFAULT: mention everyone
_("#permission_mention_everyone"),
# NOTE: The "move members" permission.
# DEFAULT: move members
_("#permission_move_members"),
# NOTE: The "mute members" permission.
# DEFAULT: mute members
_("#permission_mute_members"),
# NOTE: The "priority speaker" permission.
# DEFAULT: priority speaker
_("#permission_priority_speaker"),
# NOTE: The "read message history" permission.
# DEFAULT: read message history
_("#permission_read_message_history"),
# NOTE: The "read messages" permission.
# DEFAULT: read messages
_("#permission_read_messages"),
# NOTE: The "send messages" permission.
# DEFAULT: send messages
_("#permission_send_messages"),
# NOTE: The "send tts messages" permission.
# DEFAULT: send tts messages
_("#permission_send_tts_messages"),
# NOTE: The "speak" permission.
# DEFAULT: speak
_("#permission_speak"),
# NOTE: The "stream" permission.
# DEFAULT: stream
_("#permission_stream"),
# NOTE: The "use external emojis" permission.
# DEFAULT: use external emojis
_("#permission_use_external_emojis"),
# NOTE: The "use voice activation" permission.
# DEFAULT: use voice activation
_("#permission_use_voice_activation"),
# NOTE: The "view audit log" permission.
# DEFAULT: view audit log
_("#permission_view_audit_log"),
# NOTE: The "view channel" permission.
# DEFAULT: view channel
_("#permission_view_channel"),
# NOTE: The "view guild insights" permission.
# DEFAULT: view guild insights
_("#permission_view_guild_insights"),
]
del permissions
| en | 0.699166 | # This file contains the localization markers for all permissions. # It is never imported anywhere and not exposed. # NOTE: The "add reactions" permission. # DEFAULT: add reactions # NOTE: The "administrator" permission. # DEFAULT: administrator # NOTE: The "attach files" permission. # DEFAULT: attach files # NOTE: The "ban members" permission. # DEFAULT: ban members # NOTE: The "change nickname" permission. # DEFAULT: change nickname # NOTE: The "connect" permission. # DEFAULT: connect # NOTE: The "create instant invite" permission. # DEFAULT: create instant invite # NOTE: The "deafen members" permission. # DEFAULT: deafen members # NOTE: The "embed links" permission. # DEFAULT: embed links # NOTE: The "external emojis" permission. # DEFAULT: external emojis # NOTE: The "kick members" permission. # DEFAULT: kick members # NOTE: The "manage channels" permission. # DEFAULT: manage channels # NOTE: The "manage emojis" permission. # DEFAULT: manage emojis # NOTE: The "manage guild" permission. # DEFAULT: manage guild # NOTE: The "manage messages" permission. # DEFAULT: manage messages # NOTE: The "manage nicknames" permission. # DEFAULT: manage nicknames # NOTE: The "manage permissions" permission. # DEFAULT: manage permissions # NOTE: The "manage roles" permission. # DEFAULT: manage roles # NOTE: The "manage webhooks" permission. # DEFAULT: manage webhooks # NOTE: The "mention everyone" permission. # DEFAULT: mention everyone # NOTE: The "move members" permission. # DEFAULT: move members # NOTE: The "mute members" permission. # DEFAULT: mute members # NOTE: The "priority speaker" permission. # DEFAULT: priority speaker # NOTE: The "read message history" permission. # DEFAULT: read message history # NOTE: The "read messages" permission. # DEFAULT: read messages # NOTE: The "send messages" permission. # DEFAULT: send messages # NOTE: The "send tts messages" permission. # DEFAULT: send tts messages # NOTE: The "speak" permission. # DEFAULT: speak # NOTE: The "stream" permission. # DEFAULT: stream # NOTE: The "use external emojis" permission. # DEFAULT: use external emojis # NOTE: The "use voice activation" permission. # DEFAULT: use voice activation # NOTE: The "view audit log" permission. # DEFAULT: view audit log # NOTE: The "view channel" permission. # DEFAULT: view channel # NOTE: The "view guild insights" permission. # DEFAULT: view guild insights | 1.551798 | 2 |
utils.py | zrimseku/Reproducibility-Challenge | 1 | 6631358 | import os
import numpy as np
import random
def print_file(str_, save_file_path=None):
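    """Print str_ to stdout and, when save_file_path is given, append the same text to that file."""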
print(str_)
    if save_file_path is not None:
        # use a context manager so the log file handle is always closed
        with open(save_file_path, 'a') as f:
            print(str_, file=f)
class Metrictor_PPI:
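    """Accumulate TP/FP/TN/FN counts for binary or multi-label PPI predictions; show_result() derives accuracy, precision, recall and F1."""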
def __init__(self, pre_y, truth_y, is_binary=False):
self.TP = 0
self.FP = 0
self.TN = 0
self.FN = 0
if is_binary:
length = pre_y.shape[0]
for i in range(length):
if pre_y[i] == truth_y[i]:
if truth_y[i] == 1:
self.TP += 1
else:
self.TN += 1
elif truth_y[i] == 1:
self.FN += 1
elif pre_y[i] == 1:
self.FP += 1
self.num = length
else:
N, C = pre_y.shape
for i in range(N):
for j in range(C):
if pre_y[i][j] == truth_y[i][j]:
if truth_y[i][j] == 1:
self.TP += 1
else:
self.TN += 1
elif truth_y[i][j] == 1:
self.FN += 1
elif truth_y[i][j] == 0:
self.FP += 1
self.num = N * C
def show_result(self, is_print=False, file=None):
self.Accuracy = (self.TP + self.TN) / (self.num + 1e-10)
self.Precision = self.TP / (self.TP + self.FP + 1e-10)
self.Recall = self.TP / (self.TP + self.FN + 1e-10)
self.F1 = 2 * self.Precision * self.Recall / (self.Precision + self.Recall + 1e-10)
if is_print:
print_file("Accuracy: {}".format(self.Accuracy), file)
print_file("Precision: {}".format(self.Precision), file)
print_file("Recall: {}".format(self.Recall), file)
print_file("F1-Score: {}".format(self.F1), file)
class UnionFindSet(object):
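    """Disjoint-set (union-find) structure over m elements; count tracks the number of remaining components."""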
def __init__(self, m):
# m, n = len(grid), len(grid[0])
self.roots = [i for i in range(m)]
self.rank = [0 for i in range(m)]
self.count = m
def find(self, member):
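        # iterative find with path compression: reattach every visited node directly to the root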
tmp = []
while member != self.roots[member]:
tmp.append(member)
member = self.roots[member]
for root in tmp:
self.roots[root] = member
return member
def union(self, p, q):
parentP = self.find(p)
parentQ = self.find(q)
if parentP != parentQ:
if self.rank[parentP] > self.rank[parentQ]:
self.roots[parentQ] = parentP
elif self.rank[parentP] < self.rank[parentQ]:
self.roots[parentP] = parentQ
else:
self.roots[parentQ] = parentP
                # ranks are equal: parentP becomes the new root, so its rank increases by one
                self.rank[parentP] += 1
self.count -= 1
def get_bfs_sub_graph(ppi_list, node_num, node_to_edge_index, sub_graph_size):
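    """Grow a connected edge subset of about sub_graph_size edges by BFS from a random low-degree start node; returns the selected edge indices."""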
candidate_node = []
selected_edge_index = []
selected_node = []
random_node = random.randint(0, node_num - 1)
while len(node_to_edge_index[random_node]) > 5:
random_node = random.randint(0, node_num - 1)
candidate_node.append(random_node)
print(f'First node is {candidate_node[0]}')
while len(selected_edge_index) < sub_graph_size:
cur_node = candidate_node.pop(0)
selected_node.append(cur_node)
for edge_index in node_to_edge_index[cur_node]:
if edge_index not in selected_edge_index:
selected_edge_index.append(edge_index)
end_node = -1
if ppi_list[edge_index][0] == cur_node:
end_node = ppi_list[edge_index][1]
else:
end_node = ppi_list[edge_index][0]
if end_node not in selected_node and end_node not in candidate_node:
candidate_node.append(end_node)
else:
continue
# print(len(selected_edge_index), len(candidate_node))
node_list = candidate_node + selected_node
# print(len(node_list), len(selected_edge_index))
return selected_edge_index
def get_dfs_sub_graph(ppi_list, node_num, node_to_edge_index, sub_graph_size):
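    """Grow a connected edge subset of about sub_graph_size edges by DFS from a random low-degree start node; returns the selected edge indices."""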
stack = []
selected_edge_index = []
selected_node = []
random_node = random.randint(0, node_num - 1)
while len(node_to_edge_index[random_node]) > 5:
random_node = random.randint(0, node_num - 1)
stack.append(random_node)
print(f'First node is {stack[0]}')
while len(selected_edge_index) < sub_graph_size:
# print(len(selected_edge_index), len(stack), len(selected_node))
cur_node = stack[-1]
if cur_node in selected_node:
flag = True
for edge_index in node_to_edge_index[cur_node]:
if flag:
end_node = -1
if ppi_list[edge_index][0] == cur_node:
end_node = ppi_list[edge_index][1]
else:
end_node = ppi_list[edge_index][0]
if end_node in selected_node:
continue
else:
stack.append(end_node)
flag = False
else:
break
if flag:
stack.pop()
continue
else:
selected_node.append(cur_node)
for edge_index in node_to_edge_index[cur_node]:
if edge_index not in selected_edge_index:
selected_edge_index.append(edge_index)
    return selected_edge_index
 | en | 0.126866 | # m, n = len(grid), len(grid[0]) # print(len(selected_edge_index), len(candidate_node)) # print(len(node_list), len(selected_edge_index)) # print(len(selected_edge_index), len(stack), len(selected_node)) | 2.68957 | 3 |
stacks/XIAOMATECH/1.0/services/HIVE/package/scripts/post_upgrade.py | tvorogme/dataops | 3 | 6631359 | <filename>stacks/XIAOMATECH/1.0/services/HIVE/package/scripts/post_upgrade.py
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import os
import shutil
# Local Imports
from hive import create_hive_hdfs_dirs
# Ambari Commons & Resource Management Imports
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.format import format
from resource_management.libraries.script import Script
class HivePostUpgrade(Script):
def move_tables(self, env):
import params
env.set_params(params)
create_hive_hdfs_dirs()
target_version = upgrade_summary.get_target_version(
service_name="HIVE")
hive_script = format("{install_dir}/bin/hive")
cmd = format(
"{hive_script} --config /etc/hive --service strictmanagedmigration --hiveconf hive.strict.managed.tables=true -m automatic --modifyManagedTables --oldWarehouseRoot /apps/hive/warehouse"
)
Execute(
cmd,
environment={'JAVA_HOME': params.java64_home},
user=params.hdfs_user)
if __name__ == "__main__":
HivePostUpgrade().execute()
| <filename>stacks/XIAOMATECH/1.0/services/HIVE/package/scripts/post_upgrade.py
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import os
import shutil
# Local Imports
from hive import create_hive_hdfs_dirs
# Ambari Commons & Resource Management Imports
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.format import format
from resource_management.libraries.script import Script
class HivePostUpgrade(Script):
def move_tables(self, env):
import params
env.set_params(params)
create_hive_hdfs_dirs()
target_version = upgrade_summary.get_target_version(
service_name="HIVE")
hive_script = format("{install_dir}/bin/hive")
cmd = format(
"{hive_script} --config /etc/hive --service strictmanagedmigration --hiveconf hive.strict.managed.tables=true -m automatic --modifyManagedTables --oldWarehouseRoot /apps/hive/warehouse"
)
Execute(
cmd,
environment={'JAVA_HOME': params.java64_home},
user=params.hdfs_user)
if __name__ == "__main__":
HivePostUpgrade().execute()
| en | 0.82985 | #!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Python Imports # Local Imports # Ambari Commons & Resource Management Imports | 1.609355 | 2 |
ms_deisotope/test/common.py | WEHI-Proteomics/ms_deisotope | 1 | 6631360 | <filename>ms_deisotope/test/common.py
import os
import gzip
import pickle
import sys
try:
import faulthandler
faulthandler.enable()
except ImportError:
pass
data_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data"))
def datafile(name):
return os.path.join(data_path, name)
def gzload(path):
with gzip.open(path, 'rb') as fh:
if sys.version_info.major > 2:
return pickle.load(fh, encoding='latin1')
else:
return pickle.load(fh)
def example_scan_bunch():
import ms_deisotope
reader = ms_deisotope.MSFileLoader(
datafile("20150710_3um_AGP_001_29_30.mzML.gz"))
return reader.next()
| <filename>ms_deisotope/test/common.py
import os
import gzip
import pickle
import sys
try:
import faulthandler
faulthandler.enable()
except ImportError:
pass
data_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data"))
def datafile(name):
return os.path.join(data_path, name)
def gzload(path):
with gzip.open(path, 'rb') as fh:
if sys.version_info.major > 2:
return pickle.load(fh, encoding='latin1')
else:
return pickle.load(fh)
def example_scan_bunch():
import ms_deisotope
reader = ms_deisotope.MSFileLoader(
datafile("20150710_3um_AGP_001_29_30.mzML.gz"))
return reader.next()
| none | 1 | 2.103162 | 2 |
|
simple_french/guide/urls.py | ericgroom/simplefrench | 0 | 6631361 | from django.urls import path
from . import views
app_name = 'guide'
urlpatterns = [
path('', views.article_table_of_contents, name='list'),
path('<slug>', views.ArticleDetailView.as_view(), name='detail'),
] | from django.urls import path
from . import views
app_name = 'guide'
urlpatterns = [
path('', views.article_table_of_contents, name='list'),
path('<slug>', views.ArticleDetailView.as_view(), name='detail'),
] | none | 1 | 1.825041 | 2 |
|
src/infrastructure/errors/unable_to_equalize_exception.py | OzielFilho/ProjetoFinalPdi | 0 | 6631362 | <filename>src/infrastructure/errors/unable_to_equalize_exception.py<gh_stars>0
from infrastructure.errors.image_exception import ImageException
class UnableToEqualizeImageException(ImageException):
pass
| <filename>src/infrastructure/errors/unable_to_equalize_exception.py<gh_stars>0
from infrastructure.errors.image_exception import ImageException
class UnableToEqualizeImageException(ImageException):
pass
| none | 1 | 1.475112 | 1 |
|
stix_shifter_utils/stix_transmission/utils/RestApiClient.py | remkohdev/stix-shifter | 1 | 6631363 | import requests
from requests_toolbelt.adapters import host_header_ssl
import sys
import collections
import urllib.parse
import os
import errno
import uuid
# This is a simple HTTP client that can be used to access the REST API
class RestApiClient:
#cert_verify can be True -- do proper signed cert check, False -- skip all cert checks, or a Cert -- use the proper cleint side cert
#mutual_auth is in the case the gateway is being used
def __init__(self, host, port=None, cert=None, headers={}, url_modifier_function=None, cert_verify=True,
mutual_auth=False, sni=None):
uniqueFileHandle = uuid.uuid4()
self.client_cert_name = "/tmp/{0}-client_cert.pem".format(uniqueFileHandle)
self.server_cert_name = "/tmp/{0}-server_cert.pem".format(uniqueFileHandle)
server_ip = host
if port is not None:
server_ip += ":" + str(port)
self.server_ip = server_ip
#sni is none unless we are using a server cert
self.sni = None
#Gateway Case -- use client cert cert_verify is None
if mutual_auth:
self.server_cert_content = None
self.server_cert_file_content_exists = False
self.client_cert_content = self.client_cert_name
self.client_cert_file_content_exists = True
self.client_cert_file_content = cert
#verify is true or false
elif isinstance(cert_verify, bool):
if cert_verify:
self.server_cert_content = True
self.server_cert_file_content_exists = False
self.client_cert_content = None
self.client_cert_file_content_exists = False
else:
self.server_cert_content = False
self.server_cert_file_content_exists = False
self.client_cert_content = None
self.client_cert_file_content_exists = False
#server cert provided
elif isinstance(cert_verify, str):
self.server_cert_content = self.server_cert_name
self.server_cert_file_content_exists = True
self.server_cert_file_content = cert_verify
self.client_cert_content = None
self.client_cert_file_content_exists = False
if sni is not None:
self.sni = sni
self.headers = headers
self.url_modifier_function = url_modifier_function
# This method is used to set up an HTTP request and send it to the server
def call_api(self, endpoint, method, headers=None, params=[], data=None, urldata=None, timeout=None):
try:
# convert client cert to file
if self.client_cert_file_content_exists is True:
with open(self.client_cert_name, 'w') as f:
try:
f.write(self.client_cert_file_content)
except IOError:
print('Failed to setup certificate')
# covnert server cert to file
if self.server_cert_file_content_exists is True:
with open(self.server_cert_name, 'w') as f:
try:
f.write(self.server_cert_file_content)
except IOError:
print('Failed to setup certificate')
url = None
actual_headers = self.headers.copy()
if headers is not None:
for header_key in headers:
actual_headers[header_key] = headers[header_key]
if urldata:
urldata = urllib.parse.urlencode(urldata)
if '?' in endpoint:
endpoint += '&'
else:
endpoint += '?'
endpoint += urldata
if self.url_modifier_function is not None:
url = self.url_modifier_function(
self.server_ip, endpoint, actual_headers)
else:
url = 'https://' + self.server_ip + '/' + endpoint
try:
call = getattr(requests, method.lower())
# only use the tool belt session in case of SNI for safety
if self.sni is not None:
session = requests.Session()
call = getattr(session, method.lower())
session.mount('https://', host_header_ssl.HostHeaderSSLAdapter())
actual_headers["Host"] = self.sni
response = call(url, headers=actual_headers,
cert=self.client_cert_content, data=data, verify=self.server_cert_content, timeout=timeout)
if 'headers' in dir(response) and isinstance(response.headers, collections.Mapping) and 'Content-Type' in response.headers \
and "Deprecated" in response.headers['Content-Type']:
print("WARNING: " +
response.headers['Content-Type'], file=sys.stderr)
return ResponseWrapper(response)
except Exception as e:
print('exception occured during requesting url: ' + str(e))
raise e
finally:
if self.server_cert_file_content_exists is True:
try:
os.remove(self.server_cert_name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if self.client_cert_file_content_exists is True:
try:
os.remove(self.client_cert_name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
# Simple getters that can be used to inspect the state of this client.
def get_headers(self):
return self.headers.copy()
def get_server_ip(self):
return self.server_ip
class ResponseWrapper:
def __init__(self, response):
self.response = response
def read(self):
return self.response.content
@property
def bytes(self):
return self.response.content
@property
def code(self):
return self.response.status_code
| import requests
from requests_toolbelt.adapters import host_header_ssl
import sys
import collections
import urllib.parse
import os
import errno
import uuid
# This is a simple HTTP client that can be used to access the REST API
class RestApiClient:
#cert_verify can be True -- do proper signed cert check, False -- skip all cert checks, or a Cert -- use the proper cleint side cert
#mutual_auth is in the case the gateway is being used
def __init__(self, host, port=None, cert=None, headers={}, url_modifier_function=None, cert_verify=True,
mutual_auth=False, sni=None):
uniqueFileHandle = uuid.uuid4()
self.client_cert_name = "/tmp/{0}-client_cert.pem".format(uniqueFileHandle)
self.server_cert_name = "/tmp/{0}-server_cert.pem".format(uniqueFileHandle)
server_ip = host
if port is not None:
server_ip += ":" + str(port)
self.server_ip = server_ip
#sni is none unless we are using a server cert
self.sni = None
#Gateway Case -- use client cert cert_verify is None
if mutual_auth:
self.server_cert_content = None
self.server_cert_file_content_exists = False
self.client_cert_content = self.client_cert_name
self.client_cert_file_content_exists = True
self.client_cert_file_content = cert
#verify is true or false
elif isinstance(cert_verify, bool):
if cert_verify:
self.server_cert_content = True
self.server_cert_file_content_exists = False
self.client_cert_content = None
self.client_cert_file_content_exists = False
else:
self.server_cert_content = False
self.server_cert_file_content_exists = False
self.client_cert_content = None
self.client_cert_file_content_exists = False
#server cert provided
elif isinstance(cert_verify, str):
self.server_cert_content = self.server_cert_name
self.server_cert_file_content_exists = True
self.server_cert_file_content = cert_verify
self.client_cert_content = None
self.client_cert_file_content_exists = False
if sni is not None:
self.sni = sni
self.headers = headers
self.url_modifier_function = url_modifier_function
# This method is used to set up an HTTP request and send it to the server
def call_api(self, endpoint, method, headers=None, params=[], data=None, urldata=None, timeout=None):
try:
# convert client cert to file
if self.client_cert_file_content_exists is True:
with open(self.client_cert_name, 'w') as f:
try:
f.write(self.client_cert_file_content)
except IOError:
print('Failed to setup certificate')
# covnert server cert to file
if self.server_cert_file_content_exists is True:
with open(self.server_cert_name, 'w') as f:
try:
f.write(self.server_cert_file_content)
except IOError:
print('Failed to setup certificate')
url = None
actual_headers = self.headers.copy()
if headers is not None:
for header_key in headers:
actual_headers[header_key] = headers[header_key]
if urldata:
urldata = urllib.parse.urlencode(urldata)
if '?' in endpoint:
endpoint += '&'
else:
endpoint += '?'
endpoint += urldata
if self.url_modifier_function is not None:
url = self.url_modifier_function(
self.server_ip, endpoint, actual_headers)
else:
url = 'https://' + self.server_ip + '/' + endpoint
try:
call = getattr(requests, method.lower())
# only use the tool belt session in case of SNI for safety
if self.sni is not None:
session = requests.Session()
call = getattr(session, method.lower())
session.mount('https://', host_header_ssl.HostHeaderSSLAdapter())
actual_headers["Host"] = self.sni
response = call(url, headers=actual_headers,
cert=self.client_cert_content, data=data, verify=self.server_cert_content, timeout=timeout)
if 'headers' in dir(response) and isinstance(response.headers, collections.Mapping) and 'Content-Type' in response.headers \
and "Deprecated" in response.headers['Content-Type']:
print("WARNING: " +
response.headers['Content-Type'], file=sys.stderr)
return ResponseWrapper(response)
except Exception as e:
print('exception occured during requesting url: ' + str(e))
raise e
finally:
if self.server_cert_file_content_exists is True:
try:
os.remove(self.server_cert_name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if self.client_cert_file_content_exists is True:
try:
os.remove(self.client_cert_name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
# Simple getters that can be used to inspect the state of this client.
def get_headers(self):
return self.headers.copy()
def get_server_ip(self):
return self.server_ip
class ResponseWrapper:
def __init__(self, response):
self.response = response
def read(self):
return self.response.content
@property
def bytes(self):
return self.response.content
@property
def code(self):
return self.response.status_code
| en | 0.828274 | # This is a simple HTTP client that can be used to access the REST API #cert_verify can be True -- do proper signed cert check, False -- skip all cert checks, or a Cert -- use the proper cleint side cert #mutual_auth is in the case the gateway is being used #sni is none unless we are using a server cert #Gateway Case -- use client cert cert_verify is None #verify is true or false #server cert provided # This method is used to set up an HTTP request and send it to the server # convert client cert to file # covnert server cert to file # only use the tool belt session in case of SNI for safety # Simple getters that can be used to inspect the state of this client. | 3.053075 | 3 |
action.py | XiaoPigYao/Aoto--CloudMusic-LevelUp | 0 | 6631364 | # -*- encoding: utf-8 -*-
"""
@FILE : action.py
@DSEC : 网易云音乐签到刷歌脚本
@AUTHOR : Secriy
@DATE : 2020/08/25
@VERSION : 2.4
"""
import os
import requests
import base64
import sys
import binascii
import argparse
import random
import hashlib
from Crypto.Cipher import AES
import json
# Get the arguments input.
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("phone", help="Your Phone Number.")
parser.add_argument("password", help="The plaint text or MD5 value of the password.")
parser.add_argument("-s", dest="sc_key", nargs=1, help="The SCKEY of the Server Chan.")
parser.add_argument("-t", dest="tg_bot_key", nargs=2, help="The Token and Chat ID of your telegram bot.")
parser.add_argument("-b", dest="bark_key", nargs=1, help="The key of your bark app.")
parser.add_argument("-w", dest="wecom_key", nargs=3, help="Your Wecom ID, App-AgentID and App-Secrets.")
parser.add_argument("-p", dest="push_plus_key", nargs=1, help="The token of your pushplus account.")
args = parser.parse_args()
return {
"phone": args.phone,
"password": args.password,
"sc_key": args.sc_key,
"tg_bot_key": args.tg_bot_key,
"bark_key": args.bark_key,
"wecom_key": args.wecom_key,
"push_plus_key": args.push_plus_key,
}
# Get custom playlist.txt
def get_playlist():
path = sys.path[0] + "/playlist.txt"
file = open(path)
lines = file.readlines()
return lines
# Error
def handle_error(func, err, *args, **kwargs):
try:
func(*args)
except Exception as err:
print("{0}推送失败:".format(err) + str(err))
# Calculate the MD5 value of text
def calc_md5(text):
md5_text = hashlib.md5(text.encode(encoding="utf-8")).hexdigest()
return md5_text
# Random String Generator
def create_secret_key(size):
return str(binascii.hexlify(os.urandom(size))[:16], encoding="utf-8")
# AES Encrypt
def aes_encrypt(text, sec_key):
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
encryptor = AES.new(sec_key.encode("utf8"), 2, b"0102030405060708")
ciphertext = encryptor.encrypt(text.encode("utf8"))
ciphertext = str(base64.b64encode(ciphertext), encoding="utf-8")
return ciphertext
# RSA Encrypt
def rsa_encrypt(text, pub_key, modulus):
text = text[::-1]
rs = int(text.encode("utf-8").hex(), 16) ** int(pub_key, 16) % int(modulus, 16)
return format(rs, "x").zfill(256)
# Server Chan Turbo Push
def server_chan_push(sendkey, text):
url = "https://sctapi.ftqq.com/%s.send" % sendkey
headers = {"Content-type": "application/x-www-form-urlencoded"}
content = {"title": "网易云打卡", "desp": text}
ret = requests.post(url, headers=headers, data=content)
print("ServerChan: " + ret.text)
# Telegram Bot Push
def telegram_push(token, chat_id, text):
url = "https://api.telegram.org/bot{0}/sendMessage".format(token)
data = {
"chat_id": chat_id,
"text": text,
}
ret = requests.post(url, data=data)
print("Telegram: " + ret.text)
# Bark Push
def bark_push(bark_key, bark_save, text):
data = {"title": "网易云打卡", "body": text}
headers = {"Content-Type": "application/json;charset=utf-8"}
url = "https://api.day.app/{0}/?isArchive={1}".format(bark_key, bark_save)
ret = requests.post(url, json=data, headers=headers)
print("Bark: " + ret.text)
# PushPlus Push
def push_plus_push(token, text):
url = "http://www.pushplus.plus/send?token={0}&title={1}&content={2}&template={3}".format(
token, "网易云打卡", text, "html"
)
ret = requests.get(url)
print("pushplus: " + ret.text)
# Wecom Push
def wecom_id_push(ww_id, agent_id, app_secrets, msg):
body = {
"touser": "@all",
"msgtype": "text",
"agentid": agent_id,
"text": {"content": msg},
"safe": 0,
"enable_id_trans": 0,
"enable_duplicate_check": 0,
"duplicate_check_interval": 1800,
}
access_token = requests.get(
"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={0}&corpsecret={1}".format(str(ww_id), app_secrets)
).json()["access_token"]
res = requests.post(
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={0}".format(access_token),
data=json.dumps(body),
)
ret = res.json()
if ret["errcode"] != 0:
print("微信推送配置错误")
else:
print("Wecom: " + ret)
class Encrypt:
def __init__(self):
self.modulus = (
"00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629"
"ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d"
"813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7 "
)
self.nonce = "0CoJUm6Qyw8W8jud"
self.pubKey = "010001"
def encrypt(self, text):
sec_key = create_secret_key(16)
enc_text = aes_encrypt(aes_encrypt(text, self.nonce), sec_key)
enc_sec_key = rsa_encrypt(sec_key, self.pubKey, self.modulus)
return {"params": enc_text, "encSecKey": enc_sec_key}
class CloudMusic:
def __init__(self, phone, password):
self.session = requests.Session()
self.enc = Encrypt()
self.phone = phone
self.csrf = ""
self.nickname = ""
self.login_data = self.enc.encrypt(
json.dumps({"phone": phone, "countrycode": "86", "password": password, "rememberLogin": "true"})
)
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/84.0.4147.89 "
"Safari/537.36",
"Referer": "http://music.163.com/",
"Accept-Encoding": "gzip, deflate",
}
def login(self):
login_url = "https://music.163.com/weapi/login/cellphone"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/84.0.4147.89 Safari/537.36",
"Referer": "http://music.163.com/",
"Accept-Encoding": "gzip, deflate",
"Cookie": "os=pc; osver=Microsoft-Windows-10-Professional-build-10586-64bit; appver=2.0.3.131777; "
"channel=netease; __remember_me=true;",
}
res = self.session.post(url=login_url, data=self.login_data, headers=headers)
ret = json.loads(res.text)
if ret["code"] == 200:
self.csrf = requests.utils.dict_from_cookiejar(res.cookies)["__csrf"]
self.nickname = ret["profile"]["nickname"]
retext = '"{nickname}" 登录成功,当前等级:{level}\n\n'.format(
nickname=self.nickname, level=self.get_level()["level"]
) + "距离升级还需听{before_count}首歌".format(
before_count=self.get_level()["nextPlayCount"] - self.get_level()["nowPlayCount"]
)
return retext
else:
return "账号 {0} 登录失败: ".format(self.phone) + str(ret["code"])
# Get the level of account.
def get_level(self):
url = "https://music.163.com/weapi/user/level?csrf_token=" + self.csrf
res = self.session.post(url=url, data=self.login_data, headers=self.headers)
ret = json.loads(res.text)
return ret["data"]
# def refresh(self):
# url = "https://music.163.com/weapi/login/token/refresh?csrf_token=" + self.csrf
# res = self.session.post(url=url,
# data=self.loginData,
# headers=self.headers)
# ret = json.loads(res.text)
# print(ret)
# return ret["code"]
def sign(self):
sign_url = "https://music.163.com/weapi/point/dailyTask?{csrf}".format(csrf=self.csrf)
res = self.session.post(url=sign_url, data=self.enc.encrypt('{"type":0}'), headers=self.headers)
ret = json.loads(res.text)
if ret["code"] == 200:
return "签到成功,经验+" + str(ret["point"])
elif ret["code"] == -2:
return "今天已经签到过了"
else:
return "签到失败 " + str(ret["code"]) + ":" + ret["message"]
def task(self, playlist):
url = "https://music.163.com/weapi/v6/playlist/detail?csrf_token=" + self.csrf
recommend_url = "https://music.163.com/weapi/v1/discovery/recommend/resource"
music_lists = []
if not playlist:
res = self.session.post(
url=recommend_url, data=self.enc.encrypt('{"csrf_token":"' + self.csrf + '"}'), headers=self.headers
)
ret = json.loads(res.text)
if ret["code"] != 200:
print("获取推荐歌曲失败 " + str(ret["code"]) + ":" + ret["message"])
else:
lists = ret["recommend"]
music_lists = [(d["id"]) for d in lists]
else:
music_lists = playlist
music_id = []
for m in music_lists:
res = self.session.post(
url=url,
data=self.enc.encrypt(json.dumps({"id": m, "n": 1000, "csrf_token": self.csrf})),
headers=self.headers,
)
ret = json.loads(res.text)
for i in ret["playlist"]["trackIds"]:
music_id.append(i["id"])
music_amount = 420 if len(music_id) > 420 else len(music_id) # 歌单大小
post_data = json.dumps(
{
"logs": json.dumps(
list(
map(
lambda x: {
"action": "play",
"json": {
"download": 0,
"end": "playend",
"id": x,
"sourceId": "",
"time": 240,
"type": "song",
"wifi": 0,
},
},
random.sample(music_id, music_amount),
)
)
)
}
)
res = self.session.post(url="http://music.163.com/weapi/feedback/weblog", data=self.enc.encrypt(post_data))
ret = json.loads(res.text)
if ret["code"] == 200:
return "刷听歌量成功,共{0}首".format(music_amount)
else:
return "刷听歌量失败 " + str(ret["code"]) + ":" + ret["message"]
def run_task(info, phone, password):
# Start
app = CloudMusic(phone, password)
# Login
res_login = app.login()
if "400" not in res_login:
# Sign In
res_sign = app.sign()
# Music Task
res_task = app.task(get_playlist())
# Print Response
res_print = res_login + "\n\n" + res_sign + "\n\n" + res_task
print(res_print)
print(30 * "=")
# Server 酱推送
if info["sc_key"]:
handle_error(server_chan_push, "Server酱", info["sc_key"][0], res_print)
# Bark 推送
if info["bark_key"]:
handle_error(bark_push, "Bark", info["bark_key"][0], 1, res_print)
# Telegram 推送
if info["tg_bot_key"]:
handle_error(telegram_push, "Telegram", info["tg_bot_key"][0], info["tg_bot_key"][1], res_print)
# pushplus 推送
if info["push_plus_key"]:
handle_error(push_plus_push, "pushplus", info["push_plus_key"][0], res_print)
# 企业微信推送
if info["wecom_key"]:
handle_error(
wecom_id_push, "Wecom", info["wecom_key"][0], info["wecom_key"][1], info["wecom_key"][2], res_print
)
else:
print(res_login)
print(30 * "=")
if __name__ == "__main__":
# Get arguments
infos = get_args()
phone_list = infos["phone"].split(",")
passwd_list = infos["password"].split(",")
# Run tasks
for k, v in enumerate(phone_list):
print(30 * "=")
if not passwd_list[k]:
break
if len(passwd_list[k]) == 32:
run_task(infos, phone_list[k], passwd_list[k])
else:
run_task(infos, phone_list[k], calc_md5(passwd_list[k]))
| # -*- encoding: utf-8 -*-
"""
@FILE : action.py
@DSEC : 网易云音乐签到刷歌脚本
@AUTHOR : Secriy
@DATE : 2020/08/25
@VERSION : 2.4
"""
import os
import requests
import base64
import sys
import binascii
import argparse
import random
import hashlib
from Crypto.Cipher import AES
import json
# Get the arguments input.
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("phone", help="Your Phone Number.")
parser.add_argument("password", help="The plaint text or MD5 value of the password.")
parser.add_argument("-s", dest="sc_key", nargs=1, help="The SCKEY of the Server Chan.")
parser.add_argument("-t", dest="tg_bot_key", nargs=2, help="The Token and Chat ID of your telegram bot.")
parser.add_argument("-b", dest="bark_key", nargs=1, help="The key of your bark app.")
parser.add_argument("-w", dest="wecom_key", nargs=3, help="Your Wecom ID, App-AgentID and App-Secrets.")
parser.add_argument("-p", dest="push_plus_key", nargs=1, help="The token of your pushplus account.")
args = parser.parse_args()
return {
"phone": args.phone,
"password": args.password,
"sc_key": args.sc_key,
"tg_bot_key": args.tg_bot_key,
"bark_key": args.bark_key,
"wecom_key": args.wecom_key,
"push_plus_key": args.push_plus_key,
}
# Get custom playlist.txt
def get_playlist():
path = sys.path[0] + "/playlist.txt"
file = open(path)
lines = file.readlines()
return lines
# Error
def handle_error(func, err, *args, **kwargs):
try:
func(*args)
except Exception as err:
print("{0}推送失败:".format(err) + str(err))
# Calculate the MD5 value of text
def calc_md5(text):
md5_text = hashlib.md5(text.encode(encoding="utf-8")).hexdigest()
return md5_text
# Random String Generator
def create_secret_key(size):
return str(binascii.hexlify(os.urandom(size))[:16], encoding="utf-8")
# AES Encrypt
def aes_encrypt(text, sec_key):
pad = 16 - len(text) % 16
text = text + pad * chr(pad)
encryptor = AES.new(sec_key.encode("utf8"), 2, b"0102030405060708")
ciphertext = encryptor.encrypt(text.encode("utf8"))
ciphertext = str(base64.b64encode(ciphertext), encoding="utf-8")
return ciphertext
# RSA Encrypt
def rsa_encrypt(text, pub_key, modulus):
text = text[::-1]
rs = int(text.encode("utf-8").hex(), 16) ** int(pub_key, 16) % int(modulus, 16)
return format(rs, "x").zfill(256)
# Server Chan Turbo Push
def server_chan_push(sendkey, text):
url = "https://sctapi.ftqq.com/%s.send" % sendkey
headers = {"Content-type": "application/x-www-form-urlencoded"}
content = {"title": "网易云打卡", "desp": text}
ret = requests.post(url, headers=headers, data=content)
print("ServerChan: " + ret.text)
# Telegram Bot Push
def telegram_push(token, chat_id, text):
url = "https://api.telegram.org/bot{0}/sendMessage".format(token)
data = {
"chat_id": chat_id,
"text": text,
}
ret = requests.post(url, data=data)
print("Telegram: " + ret.text)
# Bark Push
def bark_push(bark_key, bark_save, text):
data = {"title": "网易云打卡", "body": text}
headers = {"Content-Type": "application/json;charset=utf-8"}
url = "https://api.day.app/{0}/?isArchive={1}".format(bark_key, bark_save)
ret = requests.post(url, json=data, headers=headers)
print("Bark: " + ret.text)
# PushPlus Push
def push_plus_push(token, text):
url = "http://www.pushplus.plus/send?token={0}&title={1}&content={2}&template={3}".format(
token, "网易云打卡", text, "html"
)
ret = requests.get(url)
print("pushplus: " + ret.text)
# Wecom Push
def wecom_id_push(ww_id, agent_id, app_secrets, msg):
body = {
"touser": "@all",
"msgtype": "text",
"agentid": agent_id,
"text": {"content": msg},
"safe": 0,
"enable_id_trans": 0,
"enable_duplicate_check": 0,
"duplicate_check_interval": 1800,
}
access_token = requests.get(
"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={0}&corpsecret={1}".format(str(ww_id), app_secrets)
).json()["access_token"]
res = requests.post(
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={0}".format(access_token),
data=json.dumps(body),
)
ret = res.json()
if ret["errcode"] != 0:
print("微信推送配置错误")
else:
print("Wecom: " + ret)
class Encrypt:
def __init__(self):
self.modulus = (
"00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629"
"ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d"
"813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7 "
)
self.nonce = "0CoJUm6Qyw8W8jud"
self.pubKey = "010001"
def encrypt(self, text):
sec_key = create_secret_key(16)
enc_text = aes_encrypt(aes_encrypt(text, self.nonce), sec_key)
enc_sec_key = rsa_encrypt(sec_key, self.pubKey, self.modulus)
return {"params": enc_text, "encSecKey": enc_sec_key}
class CloudMusic:
def __init__(self, phone, password):
self.session = requests.Session()
self.enc = Encrypt()
self.phone = phone
self.csrf = ""
self.nickname = ""
self.login_data = self.enc.encrypt(
json.dumps({"phone": phone, "countrycode": "86", "password": password, "rememberLogin": "true"})
)
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/84.0.4147.89 "
"Safari/537.36",
"Referer": "http://music.163.com/",
"Accept-Encoding": "gzip, deflate",
}
def login(self):
login_url = "https://music.163.com/weapi/login/cellphone"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/84.0.4147.89 Safari/537.36",
"Referer": "http://music.163.com/",
"Accept-Encoding": "gzip, deflate",
"Cookie": "os=pc; osver=Microsoft-Windows-10-Professional-build-10586-64bit; appver=2.0.3.131777; "
"channel=netease; __remember_me=true;",
}
res = self.session.post(url=login_url, data=self.login_data, headers=headers)
ret = json.loads(res.text)
if ret["code"] == 200:
self.csrf = requests.utils.dict_from_cookiejar(res.cookies)["__csrf"]
self.nickname = ret["profile"]["nickname"]
retext = '"{nickname}" 登录成功,当前等级:{level}\n\n'.format(
nickname=self.nickname, level=self.get_level()["level"]
) + "距离升级还需听{before_count}首歌".format(
before_count=self.get_level()["nextPlayCount"] - self.get_level()["nowPlayCount"]
)
return retext
else:
return "账号 {0} 登录失败: ".format(self.phone) + str(ret["code"])
# Get the level of account.
def get_level(self):
url = "https://music.163.com/weapi/user/level?csrf_token=" + self.csrf
res = self.session.post(url=url, data=self.login_data, headers=self.headers)
ret = json.loads(res.text)
return ret["data"]
# def refresh(self):
# url = "https://music.163.com/weapi/login/token/refresh?csrf_token=" + self.csrf
# res = self.session.post(url=url,
# data=self.loginData,
# headers=self.headers)
# ret = json.loads(res.text)
# print(ret)
# return ret["code"]
def sign(self):
sign_url = "https://music.163.com/weapi/point/dailyTask?{csrf}".format(csrf=self.csrf)
res = self.session.post(url=sign_url, data=self.enc.encrypt('{"type":0}'), headers=self.headers)
ret = json.loads(res.text)
if ret["code"] == 200:
return "签到成功,经验+" + str(ret["point"])
elif ret["code"] == -2:
return "今天已经签到过了"
else:
return "签到失败 " + str(ret["code"]) + ":" + ret["message"]
def task(self, playlist):
url = "https://music.163.com/weapi/v6/playlist/detail?csrf_token=" + self.csrf
recommend_url = "https://music.163.com/weapi/v1/discovery/recommend/resource"
music_lists = []
if not playlist:
res = self.session.post(
url=recommend_url, data=self.enc.encrypt('{"csrf_token":"' + self.csrf + '"}'), headers=self.headers
)
ret = json.loads(res.text)
if ret["code"] != 200:
print("获取推荐歌曲失败 " + str(ret["code"]) + ":" + ret["message"])
else:
lists = ret["recommend"]
music_lists = [(d["id"]) for d in lists]
else:
music_lists = playlist
music_id = []
for m in music_lists:
res = self.session.post(
url=url,
data=self.enc.encrypt(json.dumps({"id": m, "n": 1000, "csrf_token": self.csrf})),
headers=self.headers,
)
ret = json.loads(res.text)
for i in ret["playlist"]["trackIds"]:
music_id.append(i["id"])
music_amount = 420 if len(music_id) > 420 else len(music_id) # 歌单大小
post_data = json.dumps(
{
"logs": json.dumps(
list(
map(
lambda x: {
"action": "play",
"json": {
"download": 0,
"end": "playend",
"id": x,
"sourceId": "",
"time": 240,
"type": "song",
"wifi": 0,
},
},
random.sample(music_id, music_amount),
)
)
)
}
)
res = self.session.post(url="http://music.163.com/weapi/feedback/weblog", data=self.enc.encrypt(post_data))
ret = json.loads(res.text)
if ret["code"] == 200:
return "刷听歌量成功,共{0}首".format(music_amount)
else:
return "刷听歌量失败 " + str(ret["code"]) + ":" + ret["message"]
def run_task(info, phone, password):
# Start
app = CloudMusic(phone, password)
# Login
res_login = app.login()
if "400" not in res_login:
# Sign In
res_sign = app.sign()
# Music Task
res_task = app.task(get_playlist())
# Print Response
res_print = res_login + "\n\n" + res_sign + "\n\n" + res_task
print(res_print)
print(30 * "=")
# Server 酱推送
if info["sc_key"]:
handle_error(server_chan_push, "Server酱", info["sc_key"][0], res_print)
# Bark 推送
if info["bark_key"]:
handle_error(bark_push, "Bark", info["bark_key"][0], 1, res_print)
# Telegram 推送
if info["tg_bot_key"]:
handle_error(telegram_push, "Telegram", info["tg_bot_key"][0], info["tg_bot_key"][1], res_print)
# pushplus 推送
if info["push_plus_key"]:
handle_error(push_plus_push, "pushplus", info["push_plus_key"][0], res_print)
# 企业微信推送
if info["wecom_key"]:
handle_error(
wecom_id_push, "Wecom", info["wecom_key"][0], info["wecom_key"][1], info["wecom_key"][2], res_print
)
else:
print(res_login)
print(30 * "=")
if __name__ == "__main__":
# Get arguments
infos = get_args()
phone_list = infos["phone"].split(",")
passwd_list = infos["password"].split(",")
# Run tasks
for k, v in enumerate(phone_list):
print(30 * "=")
if not passwd_list[k]:
break
if len(passwd_list[k]) == 32:
run_task(infos, phone_list[k], passwd_list[k])
else:
run_task(infos, phone_list[k], calc_md5(passwd_list[k]))
| en | 0.233219 | # -*- encoding: utf-8 -*- @FILE : action.py @DSEC : 网易云音乐签到刷歌脚本 @AUTHOR : Secriy @DATE : 2020/08/25 @VERSION : 2.4 # Get the arguments input. # Get custom playlist.txt # Error # Calculate the MD5 value of text # Random String Generator # AES Encrypt # RSA Encrypt # Server Chan Turbo Push # Telegram Bot Push # Bark Push # PushPlus Push # Wecom Push # Get the level of account. # def refresh(self): # url = "https://music.163.com/weapi/login/token/refresh?csrf_token=" + self.csrf # res = self.session.post(url=url, # data=self.loginData, # headers=self.headers) # ret = json.loads(res.text) # print(ret) # return ret["code"] # 歌单大小 # Start # Login # Sign In # Music Task # Print Response # Server 酱推送 # Bark 推送 # Telegram 推送 # pushplus 推送 # 企业微信推送 # Get arguments # Run tasks | 2.359507 | 2 |
src/utils/dataset.py | MZSHAN/pytorch_yolov3 | 0 | 6631365 | from pathlib import Path
import warnings
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from skimage.transform import resize
from errors import ImageReadError, LabelFileReadError
#Basic Implementation - read image, convert to numpy array, exchange axes,
# make the image a square by padding with zeros, shift labels according
# to make congruent with padded image
#TODO: Add augmentations using Albumentations, imgaug and pytorch transforms
class CocoImagePathFileDataset(Dataset):
"""
Map style dataset to load COCO dataset from a file having image paths
"""
def __init__(self, image_path_file):
"""
Args:
image_path_file: file has paths of all the images that are part of
the dataset
"""
self.image_paths = self._load_image_paths(image_path_file)
# Assume avg file string length = 100, utf8 for alphabets takes 1 byte
# So each image_file path string is 100bytes
# Max size of Coco Train2014 is ~81k
# So max size of the image_paths list is 8100k = 8.1Mb
# Dataset object creation will take some time
# It is only done once per dataloader so it's fine
def __len__(self):
return len(self.image_paths)
def _load_image_paths(self, image_path_file):
if not isinstance(image_path_file, str):
raise ValueError(f"The image_path_file should be a string but got a {type(image_path_file)}")
if not Path(image_path_file).is_file():
raise FileNotFoundError(f"The image path file does not exist at {image_path_file}")
image_paths = []
with open(image_path_file, "r") as image_locations:
for image_path in image_locations:
image_path = image_path.strip()
try:
self._check_label_present(image_path)
image_paths.append(image_path)
except FileNotFoundError as e:
#If just label absent, ignore. If dir incorrect, alert
if not Path(image_path).parent.is_dir():
raise FileNotFoundError(f"The image does not exist"
f"at {image_path}")
return image_paths
@staticmethod
def _check_label_present(image_loc):
if "/images/" not in image_loc:
raise ValueError("Image path must have the folder \"images\"")
label_file = CocoImagePathFileDataset._get_labelfile(image_loc)
if not Path(label_file).is_file():
raise FileNotFoundError(f"The label file for {image_loc}"
f" is not present at {label_file}")
@staticmethod
def _get_labelfile(image_loc):
"""
Generates label file locations for the images on the go
"""
#label file exists, checked in constructor
parent_dir, training_image = image_loc.split("images/")
label_file = parent_dir + "labels/" +training_image.split(".")[0] + ".txt"
return label_file
def __getitem__(self, idx):
image_path = self.image_paths[idx]
image_tensor, label_tensor = self._get_square_tensor_from_image(
image_path)
return image_tensor, label_tensor
#TODO: Make this a transform
@staticmethod
def _get_square_tensor_from_image(image_path, target_height=416):
"""
Function takes an image path as input, reads the image, pads it with
zeros to make it a square of target_height and returns a tensor
representation
It also generates a generates a transformed labels
Args:
image_path(str): path of the image file. File should exist
target_height(int): height and width of resized image to be returned
returns:
torch tensor of transfored image, tensor of transformed labels
"""
try:
image_np = np.array(Image.open(image_path))
except Exception:
raise ImageReadError(f"Could not be read image: {image_path}")
height, width, _ = image_np.shape
total_pad_len = abs(height - width)
pad_before, pad_after = (total_pad_len // 2, total_pad_len -
total_pad_len//2)
pad_sequence = (((pad_before, pad_after), (0, 0), (0, 0))
if height <= width else ((0, 0, (pad_before, pad_after), (0, 0))))
pad_image_np = np.pad(image_np, pad_sequence, mode="constant", constant_values=128)
pad_image_np = pad_image_np/255. #normalize
target_shape = (target_height, target_height, 3)
square_image_np = resize(pad_image_np, target_shape, mode="reflect")
#torch tensor representation needs channels as first axis
image_tensor = torch.from_numpy(np.transpose(square_image_np, (2, 0, 1)))
#find the left and top padding to move center of labels
pad_top, pad_left = pad_sequence[0][0], pad_sequence[1][0]
label_path = CocoImagePathFileDataset._get_labelfile(image_path)
label_tensor = CocoImagePathFileDataset._label_tensor_for_square_img(
label_path, pad_top, pad_left,
image_np.shape[0:2], pad_image_np.shape[0:2])
return image_tensor.float(), label_tensor.float()
@staticmethod
def _label_tensor_for_square_img(label_path, pad_top, pad_left,
prev_size, pad_size):
"""
Function takes a label_file with labels for an image
It returns a tensor with lables that are adjusted for the square image
Labels are in terms of fraction of the padded image
Since the labels are in fractions, the padded image can be resized
and scaled, and teh labels will remain the same
label file contains class, center_x , center_y, width, height
The last 4 coordinates in terms of fraction of original image
Args:
label_file(str) : The location of the label file
pad_top (float) : The number of pixels padded to the top of image
pad_left(float) : The number of pixels padded to the left of image
prev_size(iterable) : Size of the unpadded image (height, width)
new_size(iterable) : Size of the resized image (height, width)
returns:
torch tensor with the label modified for padding and resizing
"""
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
box_labels = np.loadtxt(label_path).reshape(-1, 5)
except Exception:
raise LabelFileReadError(f"Error in reading {label_path}")
prev_height, prev_width = prev_size
pad_height, pad_width = pad_size
#Convert xywh to xyxy - get unnormalized top left and
# bottom right corner
x1 = (box_labels[:,1] - box_labels[:,3]/2) * prev_width
x2 = (box_labels[:,1] + box_labels[:,3]/2) * prev_width
y1 = (box_labels[:,2] - box_labels[:,4]/2) * prev_height
y2 = (box_labels[:,2] + box_labels[:,4]/2) * prev_height
#Get padding shifted corners
x1 = x1 + pad_left
x2 = x2 + pad_left
y1 = y1 + pad_top
y2 = y2 + pad_top
#calcualte padding shifted center from corners, normalize
# by padded width
box_labels[:,1] = ((x1 + x2) / 2) / pad_width
box_labels[:,2] = ((y1 + y2) / 2) / pad_height
#get fractional width and height : from unpadded to padded
box_labels[:,3] *= prev_width / pad_width
box_labels[:,4] *= prev_height/ pad_height
tensor_box_labels = torch.from_numpy(box_labels)
return tensor_box_labels | from pathlib import Path
import warnings
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from skimage.transform import resize
from errors import ImageReadError, LabelFileReadError
#Basic Implementation - read image, convert to numpy array, exchange axes,
# make the image a square by padding with zeros, shift labels according
# to make congruent with padded image
#TODO: Add augmentations using Albumentations, imgaug and pytorch transforms
class CocoImagePathFileDataset(Dataset):
"""
Map style dataset to load COCO dataset from a file having image paths
"""
def __init__(self, image_path_file):
"""
Args:
image_path_file: file has paths of all the images that are part of
the dataset
"""
self.image_paths = self._load_image_paths(image_path_file)
# Assume avg file string length = 100, utf8 for alphabets takes 1 byte
# So each image_file path string is 100bytes
# Max size of Coco Train2014 is ~81k
# So max size of the image_paths list is 8100k = 8.1Mb
# Dataset object creation will take some time
# It is only done once per dataloader so it's fine
def __len__(self):
return len(self.image_paths)
def _load_image_paths(self, image_path_file):
if not isinstance(image_path_file, str):
raise ValueError(f"The image_path_file should be a string but got a {type(image_path_file)}")
if not Path(image_path_file).is_file():
raise FileNotFoundError(f"The image path file does not exist at {image_path_file}")
image_paths = []
with open(image_path_file, "r") as image_locations:
for image_path in image_locations:
image_path = image_path.strip()
try:
self._check_label_present(image_path)
image_paths.append(image_path)
except FileNotFoundError as e:
#If just label absent, ignore. If dir incorrect, alert
if not Path(image_path).parent.is_dir():
raise FileNotFoundError(f"The image does not exist"
f"at {image_path}")
return image_paths
@staticmethod
def _check_label_present(image_loc):
if "/images/" not in image_loc:
raise ValueError("Image path must have the folder \"images\"")
label_file = CocoImagePathFileDataset._get_labelfile(image_loc)
if not Path(label_file).is_file():
raise FileNotFoundError(f"The label file for {image_loc}"
f" is not present at {label_file}")
@staticmethod
def _get_labelfile(image_loc):
"""
Generates label file locations for the images on the go
"""
#label file exists, checked in constructor
parent_dir, training_image = image_loc.split("images/")
label_file = parent_dir + "labels/" +training_image.split(".")[0] + ".txt"
return label_file
def __getitem__(self, idx):
image_path = self.image_paths[idx]
image_tensor, label_tensor = self._get_square_tensor_from_image(
image_path)
return image_tensor, label_tensor
#TODO: Make this a transform
@staticmethod
def _get_square_tensor_from_image(image_path, target_height=416):
"""
Function takes an image path as input, reads the image, pads it with
zeros to make it a square of target_height and returns a tensor
representation
It also generates a generates a transformed labels
Args:
image_path(str): path of the image file. File should exist
target_height(int): height and width of resized image to be returned
returns:
torch tensor of transfored image, tensor of transformed labels
"""
try:
image_np = np.array(Image.open(image_path))
except Exception:
raise ImageReadError(f"Could not be read image: {image_path}")
height, width, _ = image_np.shape
total_pad_len = abs(height - width)
pad_before, pad_after = (total_pad_len // 2, total_pad_len -
total_pad_len//2)
pad_sequence = (((pad_before, pad_after), (0, 0), (0, 0))
if height <= width else ((0, 0, (pad_before, pad_after), (0, 0))))
pad_image_np = np.pad(image_np, pad_sequence, mode="constant", constant_values=128)
pad_image_np = pad_image_np/255. #normalize
target_shape = (target_height, target_height, 3)
square_image_np = resize(pad_image_np, target_shape, mode="reflect")
#torch tensor representation needs channels as first axis
image_tensor = torch.from_numpy(np.transpose(square_image_np, (2, 0, 1)))
#find the left and top padding to move center of labels
pad_top, pad_left = pad_sequence[0][0], pad_sequence[1][0]
label_path = CocoImagePathFileDataset._get_labelfile(image_path)
label_tensor = CocoImagePathFileDataset._label_tensor_for_square_img(
label_path, pad_top, pad_left,
image_np.shape[0:2], pad_image_np.shape[0:2])
return image_tensor.float(), label_tensor.float()
@staticmethod
def _label_tensor_for_square_img(label_path, pad_top, pad_left,
prev_size, pad_size):
"""
Function takes a label_file with labels for an image
It returns a tensor with lables that are adjusted for the square image
Labels are in terms of fraction of the padded image
Since the labels are in fractions, the padded image can be resized
and scaled, and teh labels will remain the same
label file contains class, center_x , center_y, width, height
The last 4 coordinates in terms of fraction of original image
Args:
label_file(str) : The location of the label file
pad_top (float) : The number of pixels padded to the top of image
pad_left(float) : The number of pixels padded to the left of image
prev_size(iterable) : Size of the unpadded image (height, width)
new_size(iterable) : Size of the resized image (height, width)
returns:
torch tensor with the label modified for padding and resizing
"""
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
box_labels = np.loadtxt(label_path).reshape(-1, 5)
except Exception:
raise LabelFileReadError(f"Error in reading {label_path}")
prev_height, prev_width = prev_size
pad_height, pad_width = pad_size
#Convert xywh to xyxy - get unnormalized top left and
# bottom right corner
x1 = (box_labels[:,1] - box_labels[:,3]/2) * prev_width
x2 = (box_labels[:,1] + box_labels[:,3]/2) * prev_width
y1 = (box_labels[:,2] - box_labels[:,4]/2) * prev_height
y2 = (box_labels[:,2] + box_labels[:,4]/2) * prev_height
#Get padding shifted corners
x1 = x1 + pad_left
x2 = x2 + pad_left
y1 = y1 + pad_top
y2 = y2 + pad_top
#calcualte padding shifted center from corners, normalize
# by padded width
box_labels[:,1] = ((x1 + x2) / 2) / pad_width
box_labels[:,2] = ((y1 + y2) / 2) / pad_height
#get fractional width and height : from unpadded to padded
box_labels[:,3] *= prev_width / pad_width
box_labels[:,4] *= prev_height/ pad_height
tensor_box_labels = torch.from_numpy(box_labels)
return tensor_box_labels | en | 0.780672 | #Basic Implementation - read image, convert to numpy array, exchange axes, # make the image a square by padding with zeros, shift labels according # to make congruent with padded image #TODO: Add augmentations using Albumentations, imgaug and pytorch transforms Map style dataset to load COCO dataset from a file having image paths Args: image_path_file: file has paths of all the images that are part of the dataset # Assume avg file string length = 100, utf8 for alphabets takes 1 byte # So each image_file path string is 100bytes # Max size of Coco Train2014 is ~81k # So max size of the image_paths list is 8100k = 8.1Mb # Dataset object creation will take some time # It is only done once per dataloader so it's fine #If just label absent, ignore. If dir incorrect, alert Generates label file locations for the images on the go #label file exists, checked in constructor #TODO: Make this a transform Function takes an image path as input, reads the image, pads it with zeros to make it a square of target_height and returns a tensor representation It also generates a generates a transformed labels Args: image_path(str): path of the image file. File should exist target_height(int): height and width of resized image to be returned returns: torch tensor of transfored image, tensor of transformed labels #normalize #torch tensor representation needs channels as first axis #find the left and top padding to move center of labels Function takes a label_file with labels for an image It returns a tensor with lables that are adjusted for the square image Labels are in terms of fraction of the padded image Since the labels are in fractions, the padded image can be resized and scaled, and teh labels will remain the same label file contains class, center_x , center_y, width, height The last 4 coordinates in terms of fraction of original image Args: label_file(str) : The location of the label file pad_top (float) : The number of pixels padded to the top of image pad_left(float) : The number of pixels padded to the left of image prev_size(iterable) : Size of the unpadded image (height, width) new_size(iterable) : Size of the resized image (height, width) returns: torch tensor with the label modified for padding and resizing #Convert xywh to xyxy - get unnormalized top left and # bottom right corner #Get padding shifted corners #calcualte padding shifted center from corners, normalize # by padded width #get fractional width and height : from unpadded to padded | 2.608342 | 3 |
play/tests/test_handsorter.py | edelgm6/montecarlo-holdem | 0 | 6631366 | from django.test import TestCase
from play.models import Game, Deck, Card, Stage, Suit, Hand
from play.handsorter import HandSorter
class HandSorterTestCase(TestCase):
"""
TODO
    Test that the correct Hand enum is returned in each test
"""
def test_sort_cards_sorts_high_to_low(self):
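        # seven cards added in mixed order; the sorter should return them highest rank first (ace down to deuce)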
hand = []
        hand.append(Card(suit=Suit.CLUB, number=2))
        hand.append(Card(suit=Suit.CLUB, number=14))
        hand.append(Card(suit=Suit.DIAMOND, number=5))
        hand.append(Card(suit=Suit.SPADE, number=3))
        hand.append(Card(suit=Suit.DIAMOND, number=11))
        hand.append(Card(suit=Suit.SPADE, number=10))
        hand.append(Card(suit=Suit.DIAMOND, number=11))
ordered_hand = HandSorter.sort_cards(hand)
self.assertEqual(ordered_hand[0].number, 14)
self.assertEqual(ordered_hand[6].number, 2)
def test_returns_best_hand(self):
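        # four deuces plus trip threes: four of a kind should win out over the full house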
hand = []
        hand.append(Card(suit=Suit.CLUB, number=2))
        hand.append(Card(suit=Suit.CLUB, number=2))
        hand.append(Card(suit=Suit.DIAMOND, number=2))
        hand.append(Card(suit=Suit.SPADE, number=2))
        hand.append(Card(suit=Suit.DIAMOND, number=3))
        hand.append(Card(suit=Suit.SPADE, number=3))
        hand.append(Card(suit=Suit.DIAMOND, number=3))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.FOUR_OF_A_KIND)
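        # 2 through 6 of clubs: the straight flush should outrank both the plain straight and the flush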
hand = []
        hand.append(Card(suit=Suit.CLUB, number=2))
        hand.append(Card(suit=Suit.CLUB, number=3))
        hand.append(Card(suit=Suit.CLUB, number=4))
        hand.append(Card(suit=Suit.CLUB, number=5))
        hand.append(Card(suit=Suit.CLUB, number=6))
        hand.append(Card(suit=Suit.SPADE, number=3))
        hand.append(Card(suit=Suit.DIAMOND, number=3))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.STRAIGHT_FLUSH)
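        # trip sevens with pairs of twos and threes (and five clubs): the full house should beat the flush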
hand = []
        hand.append(Card(suit=Suit.CLUB, number=2))
        hand.append(Card(suit=Suit.CLUB, number=2))
        hand.append(Card(suit=Suit.CLUB, number=3))
        hand.append(Card(suit=Suit.CLUB, number=3))
        hand.append(Card(suit=Suit.CLUB, number=7))
        hand.append(Card(suit=Suit.SPADE, number=7))
        hand.append(Card(suit=Suit.DIAMOND, number=7))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.FULL_HOUSE)
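        # 5 through 9 across mixed suits: a plain straight is the best hand here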
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=11))
hand.append(Card(suit=Suit.SPADE, number=5))
hand.append(Card(suit=Suit.HEART, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.CLUB, number=8))
hand.append(Card(suit=Suit.CLUB, number=9))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.STRAIGHT)
def test_is_flush_ids_a_flush(self):
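        # six diamonds (2 through 7): the flush should keep only the five highest diamonds, dropping the 2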
hand = []
for number in range(2, 8):
card = Card(suit=Suit.DIAMOND, number=number)
hand.append(card)
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
is_flush = HandSorter.is_flush(hand)
self.assertEqual(is_flush['score'], Hand.FLUSH)
self.assertEqual(is_flush['hand'][0].number, 7)
self.assertEqual(is_flush['hand'][4].number, 3)
self.assertEqual(len(is_flush['hand']), 5)
def test_is_flush_returns_false_if_no_flush(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
is_flush = HandSorter.is_flush(hand)
self.assertFalse(is_flush)
def test_is_straight_returns_straight_hand(self):
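        # 5 through 9 form the straight; it should be returned ordered from the 9 down to the 5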
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=11))
hand.append(Card(suit=Suit.SPADE, number=5))
hand.append(Card(suit=Suit.HEART, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.CLUB, number=8))
hand.append(Card(suit=Suit.CLUB, number=9))
is_straight = HandSorter.is_straight(hand)
self.assertTrue(is_straight)
self.assertEqual(len(is_straight['hand']), 5)
returned_hand = is_straight['hand']
self.assertEqual(returned_hand[0].number, 9)
self.assertEqual(returned_hand[4].number, 5)
self.assertEqual(returned_hand[3].number, 6)
self.assertEqual(returned_hand[2].number, 7)
self.assertEqual(returned_hand[1].number, 8)
self.assertEqual(is_straight['score'], Hand.STRAIGHT)
def test_isnt_straight_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=5))
hand.append(Card(suit=Suit.SPADE, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_straight = HandSorter.is_straight(hand)
self.assertFalse(is_straight)
def test_is_four_of_a_kind_returns_hand(self):
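        # four deuces; the ace should be kept as the fifth card (kicker)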
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=2))
hand.append(Card(suit=Suit.HEART, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_four_of_a_kind = HandSorter.is_four_of_a_kind(hand)
self.assertEqual(is_four_of_a_kind['score'], Hand.FOUR_OF_A_KIND)
self.assertEqual(is_four_of_a_kind['hand'][0].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][1].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][2].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][3].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][4].number, 14)
def test_isnt_four_of_a_kind_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=6))
hand.append(Card(suit=Suit.SPADE, number=2))
hand.append(Card(suit=Suit.HEART, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_four_of_a_kind = HandSorter.is_four_of_a_kind(hand)
self.assertFalse(is_four_of_a_kind)
def test_is_three_of_a_kind_returns_high_card(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=2))
hand.append(Card(suit=Suit.HEART, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_three_of_a_kind = HandSorter.is_three_of_a_kind(hand)
self.assertEqual(is_three_of_a_kind['score'], Hand.THREE_OF_A_KIND)
self.assertEqual(is_three_of_a_kind['hand'][0].number, 3)
self.assertEqual(is_three_of_a_kind['hand'][1].number, 3)
self.assertEqual(is_three_of_a_kind['hand'][2].number, 3)
self.assertEqual(is_three_of_a_kind['hand'][3].number, 14)
self.assertEqual(is_three_of_a_kind['hand'][4].number, 2)
def test_isnt_three_of_a_kind_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=4))
hand.append(Card(suit=Suit.HEART, number=5))
hand.append(Card(suit=Suit.DIAMOND, number=6))
hand.append(Card(suit=Suit.SPADE, number=7))
hand.append(Card(suit=Suit.DIAMOND, number=8))
is_three_of_a_kind = HandSorter.is_three_of_a_kind(hand)
self.assertFalse(is_three_of_a_kind)
def test_is_pair_returns_value(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=5))
hand.append(Card(suit=Suit.SPADE, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
is_pair = HandSorter.is_pair(hand)
self.assertEqual(is_pair['score'], Hand.PAIR)
self.assertEqual(is_pair['hand'][0].number, 2)
self.assertEqual(is_pair['hand'][1].number, 2)
self.assertEqual(is_pair['hand'][2].number, 7)
self.assertEqual(is_pair['hand'][3].number, 6)
self.assertEqual(is_pair['hand'][4].number, 5)
def test_isnt_pair_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=14))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=5))
hand.append(Card(suit=Suit.SPADE, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
is_pair = HandSorter.is_pair(hand)
self.assertFalse(is_pair)
def test_is_full_house_returns_three_and_pair(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=6))
is_full_house = HandSorter.is_full_house(hand)
self.assertEqual(is_full_house['score'], Hand.FULL_HOUSE)
self.assertEqual(is_full_house['hand'][0].number, 3)
self.assertEqual(is_full_house['hand'][1].number, 3)
self.assertEqual(is_full_house['hand'][2].number, 3)
self.assertEqual(is_full_house['hand'][3].number, 6)
self.assertEqual(is_full_house['hand'][4].number, 6)
def test_is_two_pair_returns_two_pairs_and_kicker(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.SPADE, number=9))
hand.append(Card(suit=Suit.DIAMOND, number=9))
is_two_pair = HandSorter.is_two_pair(hand)
self.assertEqual(is_two_pair['score'], Hand.TWO_PAIR)
self.assertEqual(is_two_pair['hand'][0].number, 9)
self.assertEqual(is_two_pair['hand'][1].number, 9)
self.assertEqual(is_two_pair['hand'][2].number, 3)
self.assertEqual(is_two_pair['hand'][3].number, 3)
self.assertEqual(is_two_pair['hand'][4].number, 7)
def test_isnt_two_pair_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=11))
is_two_pair = HandSorter.is_two_pair(hand)
self.assertFalse(is_two_pair)
def test_get_high_card_returns_ordered_cards(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=13))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.SPADE, number=11))
hand.append(Card(suit=Suit.DIAMOND, number=11))
high_card = HandSorter.get_high_card(hand)
hand = high_card['hand']
self.assertEqual(len(hand), 5)
self.assertEqual(hand[0].number, 13)
self.assertEqual(hand[1].number, 11)
self.assertEqual(hand[2].number, 11)
self.assertEqual(hand[3].number, 7)
self.assertEqual(hand[4].number, 4)
self.assertEqual(high_card['score'], Hand.HIGH_CARD)
def test_is_straight_flush_returns_high_card(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=3))
hand.append(Card(suit=Suit.CLUB, number=4))
hand.append(Card(suit=Suit.CLUB, number=5))
hand.append(Card(suit=Suit.CLUB, number=6))
hand.append(Card(suit=Suit.CLUB, number=7))
hand.append(Card(suit=Suit.CLUB, number=8))
hand.append(Card(suit=Suit.CLUB, number=10))
is_straight_flush = HandSorter.is_straight_flush(hand)
hand = is_straight_flush['hand']
self.assertEqual(len(hand), 5)
self.assertEqual(hand[0].number, 8)
self.assertEqual(hand[1].number, 7)
self.assertEqual(hand[2].number, 6)
self.assertEqual(hand[3].number, 5)
self.assertEqual(hand[4].number, 4)
self.assertEqual(is_straight_flush['score'], Hand.STRAIGHT_FLUSH)
def test_isnt_straight_flush_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=4))
hand.append(Card(suit=Suit.CLUB, number=5))
hand.append(Card(suit=Suit.CLUB, number=6))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=11))
is_straight_flush = HandSorter.is_straight_flush(hand)
self.assertFalse(is_straight_flush) | from django.test import TestCase
from play.models import Game, Deck, Card, Stage, Suit, Hand
from play.handsorter import HandSorter
class HandSorterTestCase(TestCase):
"""
TODO
Test that the correct Hand enum is returned in each test
"""
def test_sort_cards_sorts_high_to_low(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number = 2))
hand.append(Card(suit=Suit.CLUB, number = 14))
hand.append(Card(suit=Suit.DIAMOND, number = 5))
hand.append(Card(suit=Suit.SPADE, number = 3))
hand.append(Card(suit=Suit.DIAMOND, number = 11))
hand.append(Card(suit=Suit.SPADE, number = 10))
hand.append(Card(suit=Suit.DIAMOND, number = 11))
ordered_hand = HandSorter.sort_cards(hand)
self.assertEqual(ordered_hand[0].number, 14)
self.assertEqual(ordered_hand[6].number, 2)
def test_returns_best_hand(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number = 2))
hand.append(Card(suit=Suit.CLUB, number = 2))
hand.append(Card(suit=Suit.DIAMOND, number = 2))
hand.append(Card(suit=Suit.SPADE, number = 2))
hand.append(Card(suit=Suit.DIAMOND, number = 3))
hand.append(Card(suit=Suit.SPADE, number = 3))
hand.append(Card(suit=Suit.DIAMOND, number = 3))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.FOUR_OF_A_KIND)
hand = []
hand.append(Card(suit=Suit.CLUB, number = 2))
hand.append(Card(suit=Suit.CLUB, number = 3))
hand.append(Card(suit=Suit.CLUB, number = 4))
hand.append(Card(suit=Suit.CLUB, number = 5))
hand.append(Card(suit=Suit.CLUB, number = 6))
hand.append(Card(suit=Suit.SPADE, number = 3))
hand.append(Card(suit=Suit.DIAMOND, number = 3))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.STRAIGHT_FLUSH)
hand = []
hand.append(Card(suit=Suit.CLUB, number = 2))
hand.append(Card(suit=Suit.CLUB, number = 2))
hand.append(Card(suit=Suit.CLUB, number = 3))
hand.append(Card(suit=Suit.CLUB, number = 3))
hand.append(Card(suit=Suit.CLUB, number = 7))
hand.append(Card(suit=Suit.SPADE, number = 7))
hand.append(Card(suit=Suit.DIAMOND, number = 7))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.FULL_HOUSE)
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=11))
hand.append(Card(suit=Suit.SPADE, number=5))
hand.append(Card(suit=Suit.HEART, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.CLUB, number=8))
hand.append(Card(suit=Suit.CLUB, number=9))
hand = HandSorter.get_best_hand(hand)
self.assertEqual(hand['score'], Hand.STRAIGHT)
def test_is_flush_ids_a_flush(self):
hand = []
for number in range(2, 8):
card = Card(suit=Suit.DIAMOND, number=number)
hand.append(card)
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
is_flush = HandSorter.is_flush(hand)
self.assertEqual(is_flush['score'], Hand.FLUSH)
self.assertEqual(is_flush['hand'][0].number, 7)
self.assertEqual(is_flush['hand'][4].number, 3)
self.assertEqual(len(is_flush['hand']), 5)
def test_is_flush_returns_false_if_no_flush(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
is_flush = HandSorter.is_flush(hand)
self.assertFalse(is_flush)
def test_is_straight_returns_straight_hand(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=11))
hand.append(Card(suit=Suit.SPADE, number=5))
hand.append(Card(suit=Suit.HEART, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.CLUB, number=8))
hand.append(Card(suit=Suit.CLUB, number=9))
is_straight = HandSorter.is_straight(hand)
self.assertTrue(is_straight)
self.assertEqual(len(is_straight['hand']), 5)
returned_hand = is_straight['hand']
self.assertEqual(returned_hand[0].number, 9)
self.assertEqual(returned_hand[4].number, 5)
self.assertEqual(returned_hand[3].number, 6)
self.assertEqual(returned_hand[2].number, 7)
self.assertEqual(returned_hand[1].number, 8)
self.assertEqual(is_straight['score'], Hand.STRAIGHT)
def test_isnt_straight_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=5))
hand.append(Card(suit=Suit.SPADE, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_straight = HandSorter.is_straight(hand)
self.assertFalse(is_straight)
def test_is_four_of_a_kind_returns_hand(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=2))
hand.append(Card(suit=Suit.HEART, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_four_of_a_kind = HandSorter.is_four_of_a_kind(hand)
self.assertEqual(is_four_of_a_kind['score'], Hand.FOUR_OF_A_KIND)
self.assertEqual(is_four_of_a_kind['hand'][0].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][1].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][2].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][3].number, 2)
self.assertEqual(is_four_of_a_kind['hand'][4].number, 14)
def test_isnt_four_of_a_kind_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=6))
hand.append(Card(suit=Suit.SPADE, number=2))
hand.append(Card(suit=Suit.HEART, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_four_of_a_kind = HandSorter.is_four_of_a_kind(hand)
self.assertFalse(is_four_of_a_kind)
def test_is_three_of_a_kind_returns_high_card(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=2))
hand.append(Card(suit=Suit.HEART, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=14))
is_three_of_a_kind = HandSorter.is_three_of_a_kind(hand)
self.assertEqual(is_three_of_a_kind['score'], Hand.THREE_OF_A_KIND)
self.assertEqual(is_three_of_a_kind['hand'][0].number, 3)
self.assertEqual(is_three_of_a_kind['hand'][1].number, 3)
self.assertEqual(is_three_of_a_kind['hand'][2].number, 3)
self.assertEqual(is_three_of_a_kind['hand'][3].number, 14)
self.assertEqual(is_three_of_a_kind['hand'][4].number, 2)
def test_isnt_three_of_a_kind_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=4))
hand.append(Card(suit=Suit.HEART, number=5))
hand.append(Card(suit=Suit.DIAMOND, number=6))
hand.append(Card(suit=Suit.SPADE, number=7))
hand.append(Card(suit=Suit.DIAMOND, number=8))
is_three_of_a_kind = HandSorter.is_three_of_a_kind(hand)
self.assertFalse(is_three_of_a_kind)
def test_is_pair_returns_value(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=5))
hand.append(Card(suit=Suit.SPADE, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
is_pair = HandSorter.is_pair(hand)
self.assertEqual(is_pair['score'], Hand.PAIR)
self.assertEqual(is_pair['hand'][0].number, 2)
self.assertEqual(is_pair['hand'][1].number, 2)
self.assertEqual(is_pair['hand'][2].number, 7)
self.assertEqual(is_pair['hand'][3].number, 6)
self.assertEqual(is_pair['hand'][4].number, 5)
def test_isnt_pair_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=14))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=5))
hand.append(Card(suit=Suit.SPADE, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=7))
is_pair = HandSorter.is_pair(hand)
self.assertFalse(is_pair)
def test_is_full_house_returns_three_and_pair(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=3))
hand.append(Card(suit=Suit.SPADE, number=6))
hand.append(Card(suit=Suit.DIAMOND, number=6))
is_full_house = HandSorter.is_full_house(hand)
self.assertEqual(is_full_house['score'], Hand.FULL_HOUSE)
self.assertEqual(is_full_house['hand'][0].number, 3)
self.assertEqual(is_full_house['hand'][1].number, 3)
self.assertEqual(is_full_house['hand'][2].number, 3)
self.assertEqual(is_full_house['hand'][3].number, 6)
self.assertEqual(is_full_house['hand'][4].number, 6)
def test_is_two_pair_returns_two_pairs_and_kicker(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.SPADE, number=9))
hand.append(Card(suit=Suit.DIAMOND, number=9))
is_two_pair = HandSorter.is_two_pair(hand)
self.assertEqual(is_two_pair['score'], Hand.TWO_PAIR)
self.assertEqual(is_two_pair['hand'][0].number, 9)
self.assertEqual(is_two_pair['hand'][1].number, 9)
self.assertEqual(is_two_pair['hand'][2].number, 3)
self.assertEqual(is_two_pair['hand'][3].number, 3)
self.assertEqual(is_two_pair['hand'][4].number, 7)
def test_isnt_two_pair_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=11))
is_two_pair = HandSorter.is_two_pair(hand)
self.assertFalse(is_two_pair)
def test_get_high_card_returns_ordered_cards(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=13))
hand.append(Card(suit=Suit.DIAMOND, number=2))
hand.append(Card(suit=Suit.SPADE, number=3))
hand.append(Card(suit=Suit.HEART, number=4))
hand.append(Card(suit=Suit.DIAMOND, number=7))
hand.append(Card(suit=Suit.SPADE, number=11))
hand.append(Card(suit=Suit.DIAMOND, number=11))
high_card = HandSorter.get_high_card(hand)
hand = high_card['hand']
self.assertEqual(len(hand), 5)
self.assertEqual(hand[0].number, 13)
self.assertEqual(hand[1].number, 11)
self.assertEqual(hand[2].number, 11)
self.assertEqual(hand[3].number, 7)
self.assertEqual(hand[4].number, 4)
self.assertEqual(high_card['score'], Hand.HIGH_CARD)
def test_is_straight_flush_returns_high_card(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=3))
hand.append(Card(suit=Suit.CLUB, number=4))
hand.append(Card(suit=Suit.CLUB, number=5))
hand.append(Card(suit=Suit.CLUB, number=6))
hand.append(Card(suit=Suit.CLUB, number=7))
hand.append(Card(suit=Suit.CLUB, number=8))
hand.append(Card(suit=Suit.CLUB, number=10))
is_straight_flush = HandSorter.is_straight_flush(hand)
hand = is_straight_flush['hand']
self.assertEqual(len(hand), 5)
self.assertEqual(hand[0].number, 8)
self.assertEqual(hand[1].number, 7)
self.assertEqual(hand[2].number, 6)
self.assertEqual(hand[3].number, 5)
self.assertEqual(hand[4].number, 4)
self.assertEqual(is_straight_flush['score'], Hand.STRAIGHT_FLUSH)
def test_isnt_straight_flush_returns_false(self):
hand = []
hand.append(Card(suit=Suit.CLUB, number=2))
hand.append(Card(suit=Suit.CLUB, number=3))
hand.append(Card(suit=Suit.DIAMOND, number=4))
hand.append(Card(suit=Suit.CLUB, number=5))
hand.append(Card(suit=Suit.CLUB, number=6))
hand.append(Card(suit=Suit.SPADE, number=10))
hand.append(Card(suit=Suit.DIAMOND, number=11))
is_straight_flush = HandSorter.is_straight_flush(hand)
self.assertFalse(is_straight_flush) | en | 0.915709 | TODO Test that the corect Hand enum is returned in each test | 2.752774 | 3 |
Lib/site-packages/django_mysql/models/__init__.py | pavanmaganti9/djangoapp | 0 | 6631367 | <reponame>pavanmaganti9/djangoapp
"""
isort:skip_file
"""
from django_mysql.models.base import Model # noqa
from django_mysql.models.aggregates import ( # noqa
BitAnd, BitOr, BitXor, GroupConcat,
)
from django_mysql.models.expressions import ListF, SetF # noqa
from django_mysql.models.query import ( # noqa
add_QuerySetMixin, ApproximateInt, SmartChunkedIterator, SmartIterator,
pt_visual_explain, QuerySet, QuerySetMixin,
)
from django_mysql.models.fields import ( # noqa
Bit1BooleanField, DynamicField, EnumField, JSONField, ListCharField,
ListTextField, NullBit1BooleanField, SetCharField, SetTextField,
SizedBinaryField, SizedTextField,
)
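# These re-exports are the package's public entry points. A typical model using
# them might look roughly like the sketch below; the ListCharField arguments are
# assumptions based on django_mysql's documented API, not taken from this module:
#
#   from django.db import models
#   from django_mysql.models import ListCharField, Model
#
#   class Person(Model):
#       post_nominals = ListCharField(
#           base_field=models.CharField(max_length=10),
#           size=4,
#           max_length=(4 * 11),  # four 10-char values plus separating commas
#       )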
| """
isort:skip_file
"""
from django_mysql.models.base import Model # noqa
from django_mysql.models.aggregates import ( # noqa
BitAnd, BitOr, BitXor, GroupConcat,
)
from django_mysql.models.expressions import ListF, SetF # noqa
from django_mysql.models.query import ( # noqa
add_QuerySetMixin, ApproximateInt, SmartChunkedIterator, SmartIterator,
pt_visual_explain, QuerySet, QuerySetMixin,
)
from django_mysql.models.fields import ( # noqa
Bit1BooleanField, DynamicField, EnumField, JSONField, ListCharField,
ListTextField, NullBit1BooleanField, SetCharField, SetTextField,
SizedBinaryField, SizedTextField,
) | uz | 0.299772 | isort:skip_file # noqa # noqa # noqa # noqa # noqa | 1.829551 | 2 |
tools/data/window_file_select_vid_classes.py | myfavouritekk/TPN | 74 | 6631368 | <reponame>myfavouritekk/TPN
#!/usr/bin/env python
import argparse
import scipy.io as sio
import os
import os.path as osp
import numpy as np
from vdetlib.vdet.dataset import index_det_to_vdet
if __name__ == '__main__':
parser = argparse.ArgumentParser('Convert a window file for DET for VID.')
parser.add_argument('window_file')
parser.add_argument('save_window_file')
args = parser.parse_args()
f = open(args.window_file, 'r')
save_file = open(args.save_window_file, 'w')
boxes = []
image_ind = 0
count = 0
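# Each record in the window file parsed below appears to be laid out as:
#   '# <index>' header line
#   image path
#   4 image-spec lines (copied through unchanged)
#   <num> -- the number of boxes that follow
#   <num> box-target lines: a class label followed by 9 floats
# The same layout is written back out, keeping only boxes whose DET class maps
# to a VID class (records left with only background boxes are dropped).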
while 1:
image_ind += 1
if image_ind % 1000 == 0:
print "Processed {} files.".format(image_ind)
# read number line
number_line = f.readline().strip()
if len(number_line) == 0: break # end of the file
assert number_line[0] == '#'
# read image line
img_path = f.readline().strip()
image_specs = []
for i in xrange(4): image_specs.append(f.readline().strip())
num = int(f.readline().strip())
cur_boxes = []
only_bg = True
for i in xrange(num):
box_target = map(float, f.readline().strip().split())
# skip background or other non-vid classes
if int(box_target[0]) not in index_det_to_vdet: continue
# map DET index to VID
box_target[0] = index_det_to_vdet[box_target[0]]
cur_boxes.append(box_target)
if box_target[0] != 0:
only_bg = False
if len(cur_boxes) == 0 or only_bg: continue
save_file.write('# {}\n'.format(count))
count += 1
save_file.write('{}\n'.format(img_path))
for i in xrange(4): save_file.write('{}\n'.format(image_specs[i]))
selected_num = len(cur_boxes)
save_file.write('{}\n'.format(selected_num))
for box_target in cur_boxes:
save_file.write('{:.0f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:f} {:f} {:f} {:f}\n'.format(*box_target))
if image_ind % 1000 != 0:
print "Processed {} files.".format(image_ind)
f.close()
save_file.close()
| #!/usr/bin/env python
import argparse
import scipy.io as sio
import os
import os.path as osp
import numpy as np
from vdetlib.vdet.dataset import index_det_to_vdet
if __name__ == '__main__':
parser = argparse.ArgumentParser('Convert a window file for DET for VID.')
parser.add_argument('window_file')
parser.add_argument('save_window_file')
args = parser.parse_args()
f = open(args.window_file, 'r')
save_file = open(args.save_window_file, 'w')
boxes = []
image_ind = 0
count = 0
while 1:
image_ind += 1
if image_ind % 1000 == 0:
print "Processed {} files.".format(image_ind)
# read number line
number_line = f.readline().strip()
if len(number_line) == 0: break # end of the file
assert number_line[0] == '#'
# read image line
img_path = f.readline().strip()
image_specs = []
for i in xrange(4): image_specs.append(f.readline().strip())
num = int(f.readline().strip())
cur_boxes = []
only_bg = True
for i in xrange(num):
box_target = map(float, f.readline().strip().split())
# skip background or other non-vid classes
if int(box_target[0]) not in index_det_to_vdet: continue
# map DET index to VID
box_target[0] = index_det_to_vdet[box_target[0]]
cur_boxes.append(box_target)
if box_target[0] != 0:
only_bg = False
if len(cur_boxes) == 0 or only_bg: continue
save_file.write('# {}\n'.format(count))
count += 1
save_file.write('{}\n'.format(img_path))
for i in xrange(4): save_file.write('{}\n'.format(image_specs[i]))
selected_num = len(cur_boxes)
save_file.write('{}\n'.format(selected_num))
for box_target in cur_boxes:
save_file.write('{:.0f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:f} {:f} {:f} {:f}\n'.format(*box_target))
if image_ind % 1000 != 0:
print "Processed {} files.".format(image_ind)
f.close()
save_file.close() | en | 0.648691 | #!/usr/bin/env python # read number line # end of the file # read image line # skip background or other non-vid classes # map DET index to VID | 2.300232 | 2 |
pyleecan/GUI/Dialog/DMachineSetup/SWSlot/PWSlot12/Ui_PWSlot12.py | EmileDvs/pyleecan | 5 | 6631369 | # -*- coding: utf-8 -*-
# File generated according to PWSlot12.ui
# WARNING! All changes made in this file will be lost!
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from ......GUI.Tools.FloatEdit import FloatEdit
from ......GUI.Dialog.DMachineSetup.SWSlot.WWSlotOut.WWSlotOut import WWSlotOut
from pyleecan.GUI.Resources import pyleecan_rc
class Ui_PWSlot12(object):
def setupUi(self, PWSlot12):
if not PWSlot12.objectName():
PWSlot12.setObjectName(u"PWSlot12")
PWSlot12.resize(964, 503)
PWSlot12.setMinimumSize(QSize(630, 470))
PWSlot12.setMaximumSize(QSize(16777215, 16777215))
self.horizontalLayout = QHBoxLayout(PWSlot12)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.verticalLayout_2 = QVBoxLayout()
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.img_slot = QLabel(PWSlot12)
self.img_slot.setObjectName(u"img_slot")
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.img_slot.sizePolicy().hasHeightForWidth())
self.img_slot.setSizePolicy(sizePolicy)
self.img_slot.setMaximumSize(QSize(16777215, 16777215))
self.img_slot.setPixmap(
QPixmap(u":/images/images/MachineSetup/WSlot/SlotW12.png")
)
self.img_slot.setScaledContents(False)
self.img_slot.setAlignment(Qt.AlignCenter)
self.verticalLayout_2.addWidget(self.img_slot)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.scrollArea = QScrollArea(PWSlot12)
self.scrollArea.setObjectName(u"scrollArea")
self.scrollArea.setMinimumSize(QSize(270, 0))
self.scrollArea.setMaximumSize(QSize(270, 16777215))
self.scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.scrollAreaWidgetContents.setObjectName(u"scrollAreaWidgetContents")
self.scrollAreaWidgetContents.setGeometry(QRect(0, 0, 268, 479))
self.verticalLayout_3 = QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.gridLayout = QGridLayout()
self.gridLayout.setObjectName(u"gridLayout")
self.in_R1 = QLabel(self.scrollAreaWidgetContents)
self.in_R1.setObjectName(u"in_R1")
self.gridLayout.addWidget(self.in_R1, 0, 0, 1, 1)
self.lf_R1 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_R1.setObjectName(u"lf_R1")
self.gridLayout.addWidget(self.lf_R1, 0, 1, 1, 1)
self.unit_R1 = QLabel(self.scrollAreaWidgetContents)
self.unit_R1.setObjectName(u"unit_R1")
self.gridLayout.addWidget(self.unit_R1, 0, 2, 1, 1)
self.in_R2 = QLabel(self.scrollAreaWidgetContents)
self.in_R2.setObjectName(u"in_R2")
self.gridLayout.addWidget(self.in_R2, 1, 0, 1, 1)
self.lf_R2 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_R2.setObjectName(u"lf_R2")
self.gridLayout.addWidget(self.lf_R2, 1, 1, 1, 1)
self.unit_R2 = QLabel(self.scrollAreaWidgetContents)
self.unit_R2.setObjectName(u"unit_R2")
self.gridLayout.addWidget(self.unit_R2, 1, 2, 1, 1)
self.in_H0 = QLabel(self.scrollAreaWidgetContents)
self.in_H0.setObjectName(u"in_H0")
self.gridLayout.addWidget(self.in_H0, 2, 0, 1, 1)
self.lf_H0 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_H0.setObjectName(u"lf_H0")
self.gridLayout.addWidget(self.lf_H0, 2, 1, 1, 1)
self.unit_H0 = QLabel(self.scrollAreaWidgetContents)
self.unit_H0.setObjectName(u"unit_H0")
self.gridLayout.addWidget(self.unit_H0, 2, 2, 1, 1)
self.in_H1 = QLabel(self.scrollAreaWidgetContents)
self.in_H1.setObjectName(u"in_H1")
self.gridLayout.addWidget(self.in_H1, 3, 0, 1, 1)
self.lf_H1 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_H1.setObjectName(u"lf_H1")
self.gridLayout.addWidget(self.lf_H1, 3, 1, 1, 1)
self.unit_H1 = QLabel(self.scrollAreaWidgetContents)
self.unit_H1.setObjectName(u"unit_H1")
self.gridLayout.addWidget(self.unit_H1, 3, 2, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout)
self.verticalSpacer = QSpacerItem(
20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding
)
self.verticalLayout_3.addItem(self.verticalSpacer)
self.w_out = WWSlotOut(self.scrollAreaWidgetContents)
self.w_out.setObjectName(u"w_out")
self.verticalLayout_3.addWidget(self.w_out)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout.addWidget(self.scrollArea)
QWidget.setTabOrder(self.lf_R1, self.lf_R2)
QWidget.setTabOrder(self.lf_R2, self.lf_H0)
QWidget.setTabOrder(self.lf_H0, self.lf_H1)
self.retranslateUi(PWSlot12)
QMetaObject.connectSlotsByName(PWSlot12)
# setupUi
def retranslateUi(self, PWSlot12):
PWSlot12.setWindowTitle(QCoreApplication.translate("PWSlot12", u"Form", None))
self.img_slot.setText("")
self.in_R1.setText(QCoreApplication.translate("PWSlot12", u"R1", None))
self.unit_R1.setText(QCoreApplication.translate("PWSlot12", u"m", None))
self.in_R2.setText(QCoreApplication.translate("PWSlot12", u"R2", None))
self.unit_R2.setText(QCoreApplication.translate("PWSlot12", u"m", None))
self.in_H0.setText(QCoreApplication.translate("PWSlot12", u"H0", None))
self.unit_H0.setText(QCoreApplication.translate("PWSlot12", u"m", None))
self.in_H1.setText(QCoreApplication.translate("PWSlot12", u"H1", None))
self.unit_H1.setText(QCoreApplication.translate("PWSlot12", u"m", None))
# retranslateUi
| # -*- coding: utf-8 -*-
# File generated according to PWSlot12.ui
# WARNING! All changes made in this file will be lost!
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from ......GUI.Tools.FloatEdit import FloatEdit
from ......GUI.Dialog.DMachineSetup.SWSlot.WWSlotOut.WWSlotOut import WWSlotOut
from pyleecan.GUI.Resources import pyleecan_rc
class Ui_PWSlot12(object):
def setupUi(self, PWSlot12):
if not PWSlot12.objectName():
PWSlot12.setObjectName(u"PWSlot12")
PWSlot12.resize(964, 503)
PWSlot12.setMinimumSize(QSize(630, 470))
PWSlot12.setMaximumSize(QSize(16777215, 16777215))
self.horizontalLayout = QHBoxLayout(PWSlot12)
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.verticalLayout_2 = QVBoxLayout()
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.img_slot = QLabel(PWSlot12)
self.img_slot.setObjectName(u"img_slot")
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.img_slot.sizePolicy().hasHeightForWidth())
self.img_slot.setSizePolicy(sizePolicy)
self.img_slot.setMaximumSize(QSize(16777215, 16777215))
self.img_slot.setPixmap(
QPixmap(u":/images/images/MachineSetup/WSlot/SlotW12.png")
)
self.img_slot.setScaledContents(False)
self.img_slot.setAlignment(Qt.AlignCenter)
self.verticalLayout_2.addWidget(self.img_slot)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.scrollArea = QScrollArea(PWSlot12)
self.scrollArea.setObjectName(u"scrollArea")
self.scrollArea.setMinimumSize(QSize(270, 0))
self.scrollArea.setMaximumSize(QSize(270, 16777215))
self.scrollArea.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.scrollAreaWidgetContents.setObjectName(u"scrollAreaWidgetContents")
self.scrollAreaWidgetContents.setGeometry(QRect(0, 0, 268, 479))
self.verticalLayout_3 = QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_3.setObjectName(u"verticalLayout_3")
self.gridLayout = QGridLayout()
self.gridLayout.setObjectName(u"gridLayout")
self.in_R1 = QLabel(self.scrollAreaWidgetContents)
self.in_R1.setObjectName(u"in_R1")
self.gridLayout.addWidget(self.in_R1, 0, 0, 1, 1)
self.lf_R1 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_R1.setObjectName(u"lf_R1")
self.gridLayout.addWidget(self.lf_R1, 0, 1, 1, 1)
self.unit_R1 = QLabel(self.scrollAreaWidgetContents)
self.unit_R1.setObjectName(u"unit_R1")
self.gridLayout.addWidget(self.unit_R1, 0, 2, 1, 1)
self.in_R2 = QLabel(self.scrollAreaWidgetContents)
self.in_R2.setObjectName(u"in_R2")
self.gridLayout.addWidget(self.in_R2, 1, 0, 1, 1)
self.lf_R2 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_R2.setObjectName(u"lf_R2")
self.gridLayout.addWidget(self.lf_R2, 1, 1, 1, 1)
self.unit_R2 = QLabel(self.scrollAreaWidgetContents)
self.unit_R2.setObjectName(u"unit_R2")
self.gridLayout.addWidget(self.unit_R2, 1, 2, 1, 1)
self.in_H0 = QLabel(self.scrollAreaWidgetContents)
self.in_H0.setObjectName(u"in_H0")
self.gridLayout.addWidget(self.in_H0, 2, 0, 1, 1)
self.lf_H0 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_H0.setObjectName(u"lf_H0")
self.gridLayout.addWidget(self.lf_H0, 2, 1, 1, 1)
self.unit_H0 = QLabel(self.scrollAreaWidgetContents)
self.unit_H0.setObjectName(u"unit_H0")
self.gridLayout.addWidget(self.unit_H0, 2, 2, 1, 1)
self.in_H1 = QLabel(self.scrollAreaWidgetContents)
self.in_H1.setObjectName(u"in_H1")
self.gridLayout.addWidget(self.in_H1, 3, 0, 1, 1)
self.lf_H1 = FloatEdit(self.scrollAreaWidgetContents)
self.lf_H1.setObjectName(u"lf_H1")
self.gridLayout.addWidget(self.lf_H1, 3, 1, 1, 1)
self.unit_H1 = QLabel(self.scrollAreaWidgetContents)
self.unit_H1.setObjectName(u"unit_H1")
self.gridLayout.addWidget(self.unit_H1, 3, 2, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout)
self.verticalSpacer = QSpacerItem(
20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding
)
self.verticalLayout_3.addItem(self.verticalSpacer)
self.w_out = WWSlotOut(self.scrollAreaWidgetContents)
self.w_out.setObjectName(u"w_out")
self.verticalLayout_3.addWidget(self.w_out)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout.addWidget(self.scrollArea)
QWidget.setTabOrder(self.lf_R1, self.lf_R2)
QWidget.setTabOrder(self.lf_R2, self.lf_H0)
QWidget.setTabOrder(self.lf_H0, self.lf_H1)
self.retranslateUi(PWSlot12)
QMetaObject.connectSlotsByName(PWSlot12)
# setupUi
def retranslateUi(self, PWSlot12):
PWSlot12.setWindowTitle(QCoreApplication.translate("PWSlot12", u"Form", None))
self.img_slot.setText("")
self.in_R1.setText(QCoreApplication.translate("PWSlot12", u"R1", None))
self.unit_R1.setText(QCoreApplication.translate("PWSlot12", u"m", None))
self.in_R2.setText(QCoreApplication.translate("PWSlot12", u"R2", None))
self.unit_R2.setText(QCoreApplication.translate("PWSlot12", u"m", None))
self.in_H0.setText(QCoreApplication.translate("PWSlot12", u"H0", None))
self.unit_H0.setText(QCoreApplication.translate("PWSlot12", u"m", None))
self.in_H1.setText(QCoreApplication.translate("PWSlot12", u"H1", None))
self.unit_H1.setText(QCoreApplication.translate("PWSlot12", u"m", None))
# retranslateUi
| en | 0.418602 | # -*- coding: utf-8 -*- # File generated according to PWSlot12.ui # WARNING! All changes made in this file will be lost! ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ # setupUi # retranslateUi | 1.944157 | 2 |
conda/common/disk.py | jack-pappas/conda | 4,825 | 6631370 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
from os import unlink
from .._vendor.auxlib.compat import Utf8NamedTemporaryFile
@contextmanager
def temporary_content_in_file(content, suffix=""):
# content returns temporary file path with contents
fh = None
path = None
try:
with Utf8NamedTemporaryFile(mode="w", delete=False, suffix=suffix) as fh:
path = fh.name
fh.write(content)
fh.flush()
fh.close()
yield path
finally:
if fh is not None:
fh.close()
if path is not None:
unlink(path)
| # -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
from os import unlink
from .._vendor.auxlib.compat import Utf8NamedTemporaryFile
@contextmanager
def temporary_content_in_file(content, suffix=""):
# content returns temporary file path with contents
fh = None
path = None
try:
with Utf8NamedTemporaryFile(mode="w", delete=False, suffix=suffix) as fh:
path = fh.name
fh.write(content)
fh.flush()
fh.close()
yield path
finally:
if fh is not None:
fh.close()
if path is not None:
unlink(path)
| en | 0.544649 | # -*- coding: utf-8 -*- # Copyright (C) 2012 Anaconda, Inc # SPDX-License-Identifier: BSD-3-Clause # content returns temporary file path with contents | 2.37829 | 2 |
classes/users.py | ravermeister/xmpp-chatbot | 1 | 6631371 | <filename>classes/users.py
# coding=utf-8
import asyncio
import logging
from common.strings import StaticAnswers
class UserInfo:
"""
Queries user info on the server,
such as online users and registered users.
"""
def __init__(self, static_answers: StaticAnswers):
# init all necessary variables
self.static_answers = static_answers
self.response_func = None
self.response_file_func = None
self.original_msg = None
self.response_data = list()
self.response_file_lists = list()
self.xep_0030 = None
self.xep_0050 = None
self.xep_0096 = None
self.max_list_entries = 10
self.fallback_session = {}
self.target, self.opt_arg = None, None
# noinspection PyUnusedLocal
def process(self, queries, target, opt_arg):
self.xep_0050 = queries['xep_0133'].xmpp['xep_0050']
self.xep_0030 = queries['xep_0030']
self.xep_0096 = queries['xep_0096']
self.response_func = queries['response_func']
self.original_msg = queries['original_msg']
self.response_data = list()
self.max_list_entries = queries['max_list_entries']
queries['xep_0133'].get_registered_users_num(jid=target, session={
'next': self.command_start,
'error': self.command_error,
'command': 'get-registered-users-num',
'send_response': False
})
queries['xep_0133'].get_online_users_num(jid=target, session={
'next': self.command_start,
'error': self.command_error,
'command': 'get-online-users-num',
'send_response': False
})
# doesn't work with my ejabberd 21.12
# 'get-online-users-list', 'get-online-users', 'get-active-users', 'get-registered-users-list'
queries['xep_0133'].get_online_users(jid=target, session={
'next': self.command_start,
'error': self.command_error,
'target': target,
'command': 'get-online-users',
'send_response': True
})
def command_start(self, iq, session):
"""
Process the initial command result.
Arguments:
iq -- The iq stanza containing the command result.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
if not self.response_data:
self.response_data.append(" ")
logging.debug("Command handler for: '%s'" % session['command'])
messages = self.static_answers.lang.command_messages
if session['command'] == 'get-registered-users-num':
# noinspection SpellCheckingInspection
registered_users_elems = iq.xml.findall(".//{jabber:x:data}field[@var='registeredusersnum']/{jabber:x:data}value")
if registered_users_elems:
registered_users_num = registered_users_elems[0].text
self.response_data.append(messages['users.registered'] % registered_users_num)
else:
logging.warning("received invalid data in response for xep_0133 - get-registered-users-num")
self.response_data.append(messages['users.invalid-data'])
elif session['command'] == 'get-online-users-num':
# noinspection SpellCheckingInspection
online_users_elems = iq.xml.findall(".//{jabber:x:data}field[@var='onlineusersnum']/{jabber:x:data}value")
if online_users_elems:
online_users_num = online_users_elems[0].text
self.response_data.append(messages['users.online'] % online_users_num)
else:
logging.warning("received invalid data in response for xep_0133 - get-online-users-num")
self.response_data.append(messages['users.invalid-data'])
elif session['command'] == 'get-online-users':
logging.debug("online user list response: %s" % iq.xml)
if session['send_response']:
self.response_func(self.response_data, self.original_msg)
# Other options include using:
# continue_command() -- Continue to the next step in the workflow
# cancel_command() -- Stop command execution.
self.xep_0050.complete_command(session)
# noinspection PyMethodMayBeStatic
def command_error(self, iq, session):
"""
Process an error that occurs during command execution.
Arguments:
iq -- The iq stanza containing the error.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
error_text = "%s: %s %s" % (session['command'], iq['error']['condition'], iq['error']['text'])
logging.error("%s" % error_text)
if not self.response_data:
self.response_data.append(" ")
if session['command'] == 'get-online-users':
# fallback for get-online-users in ejabberd
logging.debug("fallback method for ejabberd for get online user list")
self.fallback_session = {
'command': 'get-online-users',
'send_response': session['send_response']
}
async def fallback():
# noinspection PyBroadException
try:
await self.xep_0030.get_items(
jid=session['target'],
node='online users',
callback=self.fallback_onlineusers_ejabberd_callback_handler
)
except Exception:
pass
asyncio.create_task(fallback())
session['send_response'] = False
else:
self.response_data.append("%s" % error_text)
if session['send_response']:
self.response_func(self.response_data, self.original_msg)
# Terminate the command's execution and clear its session.
# The session will automatically be cleared if no error
# handler is provided.
self.xep_0050.terminate_command(session)
def fallback_onlineusers_ejabberd_callback_handler(self, iq):
session = self.fallback_session
self.fallback_session = {}
# error check
response_type = iq.xml.get('type')
messages = self.static_answers.lang.command_messages
if response_type == 'result':
# noinspection HttpUrlsUsage
response = iq.xml.findall(".//{http://jabber.org/protocol/disco#items}item")
user_list = list()
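# Each disco#items entry carries a full JID; the split below turns, for example,
# a hypothetical 'alice@example.org/Gajim.a1b2' into the bare JID
# 'alice@example.org' and the client name 'Gajim'.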
for user in response:
user_jid = user.get("jid")
user_split = user_jid.split("/")
user_name = user_split[0]
user_app = user_split[1].split(".")[0]
user_entry = messages['users.using'] % (user_name, user_app)
user_list.append(user_entry)
send_list = list(user_list)
if len(send_list) > self.max_list_entries:
del send_list[self.max_list_entries:]
file = "\n".join(user_list)
logging.error("File Content:\n%s" % file)
for user in send_list:
self.response_data.append(user)
else:
response = iq.xml.findall(".//{jabber:client}error")
for error in response:
if len(error) > 0:
error_type = error[0].tag.partition('}')[2]
error_text = error.find(".//{urn:ietf:params:xml:ns:xmpp-stanzas}text").text
self.response_data.append("%s: %s %s" % (session['command'], error_type, error_text))
if session['send_response']:
async def send_response_task():
# noinspection PyBroadException
try:
await self.response_func(self.response_data, self.original_msg)
except Exception:
pass
asyncio.create_task(send_response_task())
# self.response_file_func(self.response_file_lists, self.original_msg)
| <filename>classes/users.py
# coding=utf-8
import asyncio
import logging
from common.strings import StaticAnswers
class UserInfo:
"""
Queries user info on the server,
such as online users and registered users.
"""
def __init__(self, static_answers: StaticAnswers):
# init all necessary variables
self.static_answers = static_answers
self.response_func = None
self.response_file_func = None
self.original_msg = None
self.response_data = list()
self.response_file_lists = list()
self.xep_0030 = None
self.xep_0050 = None
self.xep_0096 = None
self.max_list_entries = 10
self.fallback_session = {}
self.target, self.opt_arg = None, None
# noinspection PyUnusedLocal
def process(self, queries, target, opt_arg):
self.xep_0050 = queries['xep_0133'].xmpp['xep_0050']
self.xep_0030 = queries['xep_0030']
self.xep_0096 = queries['xep_0096']
self.response_func = queries['response_func']
self.original_msg = queries['original_msg']
self.response_data = list()
self.max_list_entries = queries['max_list_entries']
queries['xep_0133'].get_registered_users_num(jid=target, session={
'next': self.command_start,
'error': self.command_error,
'command': 'get-registered-users-num',
'send_response': False
})
queries['xep_0133'].get_online_users_num(jid=target, session={
'next': self.command_start,
'error': self.command_error,
'command': 'get-online-users-num',
'send_response': False
})
# doesn't work with my ejabberd 21.12
# 'get-online-users-list', 'get-online-users', 'get-active-users', 'get-registered-users-list'
queries['xep_0133'].get_online_users(jid=target, session={
'next': self.command_start,
'error': self.command_error,
'target': target,
'command': 'get-online-users',
'send_response': True
})
def command_start(self, iq, session):
"""
Process the initial command result.
Arguments:
iq -- The iq stanza containing the command result.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
if not self.response_data:
self.response_data.append(" ")
logging.debug("Command handler for: '%s'" % session['command'])
messages = self.static_answers.lang.command_messages
if session['command'] == 'get-registered-users-num':
# noinspection SpellCheckingInspection
registered_users_elems = iq.xml.findall(".//{jabber:x:data}field[@var='registeredusersnum']/{jabber:x:data}value")
if registered_users_elems:
registered_users_num = registered_users_elems[0].text
self.response_data.append(messages['users.registered'] % registered_users_num)
else:
logging.warning("received invalid data in response for xep_0133 - get-registered-users-num")
self.response_data.append(messages['users.invalid-data'])
elif session['command'] == 'get-online-users-num':
# noinspection SpellCheckingInspection
online_users_elems = iq.xml.findall(".//{jabber:x:data}field[@var='onlineusersnum']/{jabber:x:data}value")
if online_users_elems:
online_users_num = online_users_elems[0].text
self.response_data.append(messages['users.online'] % online_users_num)
else:
logging.warning("received invalid data in response for xep_0133 - get-online-users-num")
self.response_data.append(messages['users.invalid-data'])
elif session['command'] == 'get-online-users':
logging.debug("online user list response: %s" % iq.xml)
if session['send_response']:
self.response_func(self.response_data, self.original_msg)
# Other options include using:
# continue_command() -- Continue to the next step in the workflow
# cancel_command() -- Stop command execution.
self.xep_0050.complete_command(session)
# noinspection PyMethodMayBeStatic
def command_error(self, iq, session):
"""
Process an error that occurs during command execution.
Arguments:
iq -- The iq stanza containing the error.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
error_text = "%s: %s %s" % (session['command'], iq['error']['condition'], iq['error']['text'])
logging.error("%s" % error_text)
if not self.response_data:
self.response_data.append(" ")
if session['command'] == 'get-online-users':
# fallback for get-online-users in ejabberd
logging.debug("fallback method for ejabberd for get online user list")
self.fallback_session = {
'command': 'get-online-users',
'send_response': session['send_response']
}
async def fallback():
# noinspection PyBroadException
try:
await self.xep_0030.get_items(
jid=session['target'],
node='online users',
callback=self.fallback_onlineusers_ejabberd_callback_handler
)
except Exception:
pass
asyncio.create_task(fallback())
session['send_response'] = False
else:
self.response_data.append("%s" % error_text)
if session['send_response']:
self.response_func(self.response_data, self.original_msg)
# Terminate the command's execution and clear its session.
# The session will automatically be cleared if no error
# handler is provided.
self.xep_0050.terminate_command(session)
def fallback_onlineusers_ejabberd_callback_handler(self, iq):
session = self.fallback_session
self.fallback_session = {}
# error check
response_type = iq.xml.get('type')
messages = self.static_answers.lang.command_messages
if response_type == 'result':
# noinspection HttpUrlsUsage
response = iq.xml.findall(".//{http://jabber.org/protocol/disco#items}item")
user_list = list()
for user in response:
user_jid = user.get("jid")
user_split = user_jid.split("/")
user_name = user_split[0]
user_app = user_split[1].split(".")[0]
user_entry = messages['users.using'] % (user_name, user_app)
user_list.append(user_entry)
send_list = list(user_list)
if len(send_list) > self.max_list_entries:
del send_list[self.max_list_entries:]
file = "\n".join(user_list)
logging.error("File Content:\n%s" % file)
for user in send_list:
self.response_data.append(user)
else:
response = iq.xml.findall(".//{jabber:client}error")
for error in response:
if len(error) > 0:
error_type = error[0].tag.partition('}')[2]
error_text = error.find(".//{urn:ietf:params:xml:ns:xmpp-stanzas}text").text
self.response_data.append("%s: %s %s" % (session['command'], error_type, error_text))
if session['send_response']:
async def send_response_task():
# noinspection PyBroadException
try:
await self.response_func(self.response_data, self.original_msg)
except Exception:
pass
asyncio.create_task(send_response_task())
# self.response_file_func(self.response_file_lists, self.original_msg)
| en | 0.618752 | # coding=utf-8 queries, user info on the Server such as online users and registered users # init all necessary variables # noinspection PyUnusedLocal # doesn't work with my ejabberd 21.12 # 'get-online-users-list', 'get-online-users', 'get-active-users', 'get-registered-users-list' Process the initial command result. Arguments: iq -- The iq stanza containing the command result. session -- A dictionary of data relevant to the command session. Additional, custom data may be saved here to persist across handler callbacks. # noinspection SpellCheckingInspection # noinspection SpellCheckingInspection # Other options include using: # continue_command() -- Continue to the next step in the workflow # cancel_command() -- Stop command execution. # noinspection PyMethodMayBeStatic Process an error that occurs during command execution. Arguments: iq -- The iq stanza containing the error. session -- A dictionary of data relevant to the command session. Additional, custom data may be saved here to persist across handler callbacks. # fallback for get-online-users in ejabberd # noinspection PyBroadException # Terminate the command's execution and clear its session. # The session will automatically be cleared if no error # handler is provided. # error check # noinspection HttpUrlsUsage #items}item") # noinspection PyBroadException # self.response_file_func(self.response_file_lists, self.original_msg) | 2.541959 | 3 |
integration-tests/integration_tests/integration_tests/end_to_end_tests/int_asynchronous_express_messaging_pattern_tests.py | tomzo/integration-adaptors | 0 | 6631372 | <filename>integration-tests/integration_tests/integration_tests/end_to_end_tests/int_asynchronous_express_messaging_pattern_tests.py
"""
Provides tests around the Asynchronous Express workflow, including sync-async wrapping
"""
from unittest import TestCase
from integration_tests.amq.amq import MHS_INBOUND_QUEUE
from integration_tests.amq.amq_message_assertor import AMQMessageAssertor
from integration_tests.assertors.assert_with_retries import AssertWithRetries
from integration_tests.dynamo.dynamo import MHS_STATE_TABLE_DYNAMO_WRAPPER, MHS_SYNC_ASYNC_TABLE_DYNAMO_WRAPPER
from integration_tests.dynamo.dynamo_sync_async_mhs_table import DynamoSyncAsyncMhsTableStateAssertor
from integration_tests.dynamo.dynamo_mhs_table import DynamoMhsTableStateAssertor
from integration_tests.helpers.build_message import build_message
from integration_tests.http.mhs_http_request_builder import MhsHttpRequestBuilder
from integration_tests.xml.hl7_xml_assertor import Hl7XmlResponseAssertor
class AsynchronousExpressMessagingPatternTests(TestCase):
"""
These tests show an asynchronous express response from Spine via the MHS for the example message interaction of PSIS
(Personal Spine Information Service).
Asynchronous message interaction:
- Message sent: PSIS Document List Data Request (QUPC_IN160101UK05)
- Expected response: PSIS Document List Data Retrieval (QUPC_IN160102UK05)
Flow documented at:
- https://data.developer.nhs.uk/dms/mim/6.3.01/Index.htm
-> Domains - Health and Clinical Management
-> PSIS Query
-> 6.1 (Request)
-> 6.2 (Response)
"""
def setUp(self):
MHS_STATE_TABLE_DYNAMO_WRAPPER.clear_all_records_in_table()
MHS_SYNC_ASYNC_TABLE_DYNAMO_WRAPPER.clear_all_records_in_table()
def test_should_return_successful_response_from_spine_to_message_queue(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05',
message_id=message_id,
sync_async=False,
correlation_id='1') \
.with_body(message) \
.execute_post_expecting_success()
# Assert
AMQMessageAssertor(MHS_INBOUND_QUEUE.get_next_message_on_queue()) \
.assert_property('message-id', message_id) \
.assert_property('correlation-id', '1') \
.assert_json_content_type() \
.assertor_for_hl7_xml_message() \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK') \
.assert_element_attribute('.//patient//id', 'extension', '9689177923')
def test_should_record_asynchronous_express_message_status_as_successful(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05',
message_id=message_id,
sync_async=False,
correlation_id='1') \
.with_body(message) \
.execute_post_expecting_success()
# Assert
AMQMessageAssertor(MHS_INBOUND_QUEUE.get_next_message_on_queue()) \
.assertor_for_hl7_xml_message() \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK')
AssertWithRetries(retry_count=10) \
.assert_condition_met(lambda: DynamoMhsTableStateAssertor.wait_for_inbound_response_processed(message_id))
DynamoMhsTableStateAssertor(MHS_STATE_TABLE_DYNAMO_WRAPPER.get_all_records_in_table()) \
.assert_single_item_exists_with_key(message_id) \
.assert_item_contains_values({
'INBOUND_STATUS': 'INBOUND_RESPONSE_SUCCESSFULLY_PROCESSED',
'OUTBOUND_STATUS': 'OUTBOUND_MESSAGE_ACKD',
'WORKFLOW': 'async-express'
})
def test_should_return_successful_response_from_spine_in_original_post_request_body_if_sync_async_requested(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
response = MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05', message_id=message_id, sync_async=True) \
.with_body(message) \
.execute_post_expecting_success()
# Assert
Hl7XmlResponseAssertor(response.text) \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK') \
.assert_element_attribute('.//patient//id', 'extension', '9689177923')
def test_should_record_the_correct_response_between_the_inbound_and_outbound_components_if_sync_async_requested(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05',
message_id=message_id,
sync_async=True,
correlation_id='1') \
.with_body(message) \
.execute_post_expecting_success()
# Assert
DynamoSyncAsyncMhsTableStateAssertor(MHS_SYNC_ASYNC_TABLE_DYNAMO_WRAPPER.get_all_records_in_table()) \
.assert_single_item_exists_with_key(message_id) \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK') \
.assert_element_attribute('.//patient//id', 'extension', '9689177923')
| <filename>integration-tests/integration_tests/integration_tests/end_to_end_tests/int_asynchronous_express_messaging_pattern_tests.py
"""
Provides tests around the Asynchronous Express workflow, including sync-async wrapping
"""
from unittest import TestCase
from integration_tests.amq.amq import MHS_INBOUND_QUEUE
from integration_tests.amq.amq_message_assertor import AMQMessageAssertor
from integration_tests.assertors.assert_with_retries import AssertWithRetries
from integration_tests.dynamo.dynamo import MHS_STATE_TABLE_DYNAMO_WRAPPER, MHS_SYNC_ASYNC_TABLE_DYNAMO_WRAPPER
from integration_tests.dynamo.dynamo_sync_async_mhs_table import DynamoSyncAsyncMhsTableStateAssertor
from integration_tests.dynamo.dynamo_mhs_table import DynamoMhsTableStateAssertor
from integration_tests.helpers.build_message import build_message
from integration_tests.http.mhs_http_request_builder import MhsHttpRequestBuilder
from integration_tests.xml.hl7_xml_assertor import Hl7XmlResponseAssertor
class AsynchronousExpressMessagingPatternTests(TestCase):
"""
These tests show an asynchronous express response from Spine via the MHS for the example message interaction of PSIS
(Personal Spine Information Service).
Asynchronous message interaction:
- Message sent: PSIS Document List Data Request (QUPC_IN160101UK05)
- Expected response: PSIS Document List Data Retrieval (QUPC_IN160102UK05)
Flow documented at:
- https://data.developer.nhs.uk/dms/mim/6.3.01/Index.htm
-> Domains - Health and Clinical Management
-> PSIS Query
-> 6.1 (Request)
-> 6.2 (Response)
"""
def setUp(self):
MHS_STATE_TABLE_DYNAMO_WRAPPER.clear_all_records_in_table()
MHS_SYNC_ASYNC_TABLE_DYNAMO_WRAPPER.clear_all_records_in_table()
def test_should_return_successful_response_from_spine_to_message_queue(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05',
message_id=message_id,
sync_async=False,
correlation_id='1') \
.with_body(message) \
.execute_post_expecting_success()
# Assert
AMQMessageAssertor(MHS_INBOUND_QUEUE.get_next_message_on_queue()) \
.assert_property('message-id', message_id) \
.assert_property('correlation-id', '1') \
.assert_json_content_type() \
.assertor_for_hl7_xml_message() \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK') \
.assert_element_attribute('.//patient//id', 'extension', '9689177923')
def test_should_record_asynchronous_express_message_status_as_successful(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05',
message_id=message_id,
sync_async=False,
correlation_id='1') \
.with_body(message) \
.execute_post_expecting_success()
# Assert
AMQMessageAssertor(MHS_INBOUND_QUEUE.get_next_message_on_queue()) \
.assertor_for_hl7_xml_message() \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK')
AssertWithRetries(retry_count=10) \
.assert_condition_met(lambda: DynamoMhsTableStateAssertor.wait_for_inbound_response_processed(message_id))
DynamoMhsTableStateAssertor(MHS_STATE_TABLE_DYNAMO_WRAPPER.get_all_records_in_table()) \
.assert_single_item_exists_with_key(message_id) \
.assert_item_contains_values({
'INBOUND_STATUS': 'INBOUND_RESPONSE_SUCCESSFULLY_PROCESSED',
'OUTBOUND_STATUS': 'OUTBOUND_MESSAGE_ACKD',
'WORKFLOW': 'async-express'
})
def test_should_return_successful_response_from_spine_in_original_post_request_body_if_sync_async_requested(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
response = MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05', message_id=message_id, sync_async=True) \
.with_body(message) \
.execute_post_expecting_success()
# Assert
Hl7XmlResponseAssertor(response.text) \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK') \
.assert_element_attribute('.//patient//id', 'extension', '9689177923')
def test_should_record_the_correct_response_between_the_inbound_and_outbound_components_if_sync_async_requested(self):
# Arrange
message, message_id = build_message('QUPC_IN160101UK05', '9689177923')
# Act
MhsHttpRequestBuilder() \
.with_headers(interaction_id='QUPC_IN160101UK05',
message_id=message_id,
sync_async=True,
correlation_id='1') \
.with_body(message) \
.execute_post_expecting_success()
# Assert
DynamoSyncAsyncMhsTableStateAssertor(MHS_SYNC_ASYNC_TABLE_DYNAMO_WRAPPER.get_all_records_in_table()) \
.assert_single_item_exists_with_key(message_id) \
.assert_element_attribute('.//queryAck//queryResponseCode', 'code', 'OK') \
.assert_element_attribute('.//patient//id', 'extension', '9689177923')
| en | 0.729136 | Provides tests around the Asynchronous Express workflow, including sync-async wrapping These tests show an asynchronous express response from Spine via the MHS for the example message interaction of PSIS (Personal Spine Information Service). Asynchronous message interaction: - Message sent: PSIS Document List Data Request (QUPC_IN160101UK05) - Expected response: PSIS Document List Data Retrieval (QUPC_IN160102UK05) Flow documented at: - https://data.developer.nhs.uk/dms/mim/6.3.01/Index.htm -> Domains - Health and Clinical Management -> PSIS Query -> 6.1 (Request) -> 6.2 (Response) # Arrange # Act # Assert # Arrange # Act # Assert # Arrange # Act # Assert # Arrange # Act # Assert | 2.059957 | 2 |
detectron2/src/classification/model.py | roaldi/ImageStore | 590 | 6631373 | <gh_stars>100-1000
import numpy as np
from PIL import Image
import sys
import os
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
sys.path.append("bottom-up-attention.pytorch")
from models.bua import add_bottom_up_attention_config
from models.bua.box_regression import BUABoxes
from utils.extract_utils import get_image_blob
from models.bua.layers.nms import nms
class ImageClassifier:
def __init__(self, min_boxes=3, max_boxes=10, threshold=0.5):
config_file = 'bottom-up-attention.pytorch/configs/bua-caffe/extract-bua-caffe-r101.yaml'
self._cfg = get_cfg()
add_bottom_up_attention_config(self._cfg, True)
self._cfg.merge_from_file(config_file)
self._cfg.MODEL.DEVICE = 'cpu'
self._model = DefaultTrainer.build_model(self._cfg)
DetectionCheckpointer(self._model, save_dir=self._cfg.OUTPUT_DIR).resume_or_load(self._cfg.MODEL.WEIGHTS)
self._model.eval()
self._min_boxes = min_boxes
self._max_boxes = max_boxes
self._threshold = threshold
self._classes = ['__background__']
with open(os.path.join('bottom-up-attention.pytorch', 'evaluation', 'objects_vocab.txt')) as f:
for object in f.readlines():
self._classes.append(object.split(',')[0].lower().strip())
def predict(self, image):
# convert image to opencv format
x = np.array(image)
x = x[:, :, ::-1].copy()
dataset_dict = get_image_blob(x, self._cfg.MODEL.PIXEL_MEAN)
with torch.set_grad_enabled(False):
boxes, scores, features_pooled, attr_scores = self._model([dataset_dict])
dets = boxes[0].tensor.cpu() / dataset_dict['im_scale']
scores = scores[0].cpu()
feats = features_pooled[0].cpu()
attr_scores = attr_scores[0].cpu()
max_conf = torch.zeros((scores.shape[0])).to(scores.device)
for cls_ind in range(1, scores.shape[1]):
cls_scores = scores[:, cls_ind]
keep = nms(dets, cls_scores, 0.3)
max_conf[keep] = torch.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = torch.nonzero(max_conf >= self._threshold).flatten()
if len(keep_boxes) < self._min_boxes:
keep_boxes = torch.argsort(max_conf, descending=True)[:self._min_boxes]
elif len(keep_boxes) > self._max_boxes:
keep_boxes = torch.argsort(max_conf, descending=True)[:self._max_boxes]
boxes = dets[keep_boxes].numpy()
objects = np.argmax(scores[keep_boxes].numpy()[:,1:], axis=1)
attr = np.argmax(attr_scores[keep_boxes].numpy()[:,1:], axis=1)
attr_conf = np.max(attr_scores[keep_boxes].numpy()[:,1:], axis=1)
outputs = []
for i in range(len(keep_boxes)):
# if attr_conf[i] > attr_thresh:
# cls = attributes[attr[i]+1] + " " + cls
outputs.append(self._classes[objects[i]+1])
return outputs
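# Illustrative usage sketch (not part of the original file; the image path is
# hypothetical and assumes the bottom-up-attention config and weights are present):
#
#     from PIL import Image
#     classifier = ImageClassifier(min_boxes=3, max_boxes=10, threshold=0.5)
#     labels = classifier.predict(Image.open("example.jpg"))  # list of object class names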
| import numpy as np
from PIL import Image
import sys
import os
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_setup, launch
from detectron2.evaluation import COCOEvaluator, verify_results
sys.path.append("bottom-up-attention.pytorch")
from models.bua import add_bottom_up_attention_config
from models.bua.box_regression import BUABoxes
from utils.extract_utils import get_image_blob
from models.bua.layers.nms import nms
class ImageClassifier:
def __init__(self, min_boxes=3, max_boxes=10, threshold=0.5):
config_file = 'bottom-up-attention.pytorch/configs/bua-caffe/extract-bua-caffe-r101.yaml'
self._cfg = get_cfg()
add_bottom_up_attention_config(self._cfg, True)
self._cfg.merge_from_file(config_file)
self._cfg.MODEL.DEVICE = 'cpu'
self._model = DefaultTrainer.build_model(self._cfg)
DetectionCheckpointer(self._model, save_dir=self._cfg.OUTPUT_DIR).resume_or_load(self._cfg.MODEL.WEIGHTS)
self._model.eval()
self._min_boxes = min_boxes
self._max_boxes = max_boxes
self._threshold = threshold
self._classes = ['__background__']
with open(os.path.join('bottom-up-attention.pytorch', 'evaluation', 'objects_vocab.txt')) as f:
for object in f.readlines():
self._classes.append(object.split(',')[0].lower().strip())
def predict(self, image):
# convert image to opencv format
x = np.array(image)
x = x[:, :, ::-1].copy()
dataset_dict = get_image_blob(x, self._cfg.MODEL.PIXEL_MEAN)
with torch.set_grad_enabled(False):
boxes, scores, features_pooled, attr_scores = self._model([dataset_dict])
dets = boxes[0].tensor.cpu() / dataset_dict['im_scale']
scores = scores[0].cpu()
feats = features_pooled[0].cpu()
attr_scores = attr_scores[0].cpu()
max_conf = torch.zeros((scores.shape[0])).to(scores.device)
for cls_ind in range(1, scores.shape[1]):
cls_scores = scores[:, cls_ind]
keep = nms(dets, cls_scores, 0.3)
max_conf[keep] = torch.where(cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep])
keep_boxes = torch.nonzero(max_conf >= self._threshold).flatten()
if len(keep_boxes) < self._min_boxes:
keep_boxes = torch.argsort(max_conf, descending=True)[:self._min_boxes]
elif len(keep_boxes) > self._max_boxes:
keep_boxes = torch.argsort(max_conf, descending=True)[:self._max_boxes]
boxes = dets[keep_boxes].numpy()
objects = np.argmax(scores[keep_boxes].numpy()[:,1:], axis=1)
attr = np.argmax(attr_scores[keep_boxes].numpy()[:,1:], axis=1)
attr_conf = np.max(attr_scores[keep_boxes].numpy()[:,1:], axis=1)
outputs = []
for i in range(len(keep_boxes)):
# if attr_conf[i] > attr_thresh:
# cls = attributes[attr[i]+1] + " " + cls
outputs.append(self._classes[objects[i]+1])
return outputs | en | 0.428997 | # convert image to opencv format # if attr_conf[i] > attr_thresh: # cls = attributes[attr[i]+1] + " " + cls | 2.133401 | 2 |
spotfinder/servers/adsc_client.py | dperl-sol/cctbx_project | 155 | 6631374 | from __future__ import absolute_import, division, print_function
from six.moves import range
import os
from spotfinder.diffraction.imagefiles import quick_image
from spotfinder.servers.multipart_encoder import post_multipart
def get_spotfinder_url(file_object,host,port):
testurl = "%s:%d"%(host,port)
selector = "/spotfinder"
start_index=0
stop_index = file_object.linearintdata.size()
raw_string=file_object.linearintdata.slice_to_byte_str(start_index,stop_index)
query_object = [
("moduleindex",file_object.__dict__.get("moduleindex",-1)),
("filename",file_object.filename),
("bin",1),
("vendortype",file_object.vendortype),
("beam_center_reference_frame",file_object.beam_center_reference_frame),
("beam_center_convention",file_object.beam_center_convention),
("header",file_object.header),
("headerlines",""),
]
for item in ['DISTANCE', 'PHI', 'WAVELENGTH',
'TWOTHETA', 'OSC_RANGE',
'CCD_IMAGE_SATURATION', 'OSC_START', 'DETECTOR_SN', 'PIXEL_SIZE',
'SIZE1','SIZE2','BEAM_CENTER_X','BEAM_CENTER_Y'
]:
if type(file_object.parameters[item])==type(1.0):
query_object.append((item,"%.6f"%file_object.parameters[item]))
else:
query_object.append((item,file_object.parameters[item]))
files = [
("adsc_data",file_object.filename,raw_string)
]
print("length of data in ints",stop_index)
print("length of data in bytes",len(raw_string))
assert len(raw_string)/4==stop_index
Response = post_multipart(host=testurl, selector=selector,
fields = query_object, files = files)
print(Response.getresponse().read())
def get_labelit_image_object(file,convention):
Q = quick_image(file)
Q.set_beam_center_convention(convention)
Q.read()
return Q
def do_main(filepath, force_binning, convention, host, port):
absfile = os.path.abspath(filepath)
Q = get_labelit_image_object(absfile, convention)
if force_binning:
Q.setBin(2)
Q.show_header()
get_spotfinder_url(Q,host,port)
from iotbx.detectors import image_divider
number_of_modules = image_divider(
Q.linearintdata,
Q.vendor_specific_null_value
).module_count()
for x in range(number_of_modules):
file = "file://%s?slice=%d"%(absfile,x)
Q = get_labelit_image_object(file, convention)
if force_binning:
Q.setBin(2)
Q.show_header()
get_spotfinder_url(Q,host,port)
if __name__=="__main__":
import sys
try:
filepath, force_binning, convention, host, port = sys.argv[1:6]
force_binning = bool(force_binning)
port = int(port)
convention = int(convention)
except Exception:
print("""
Usage:
libtbx.python adsc_client.py <filepath> <force_binning> <convention> <host> <port>
Five mandatory arguments:
filepath: absolute or relative path name of the ADSC test image to be analyzed
force_binning: True (client-side 2x2-pixel software binning; sometimes the best
choice if raw data is not hardware-binned) or False
convention: beam_center_convention as defined on the spotfinder servers wiki
host: usually "localhost"; in any case, must be machine with same endianness
port: port number of image analyzer http service
""")
do_main(filepath, force_binning, convention, host, port)
| from __future__ import absolute_import, division, print_function
from six.moves import range
import os
from spotfinder.diffraction.imagefiles import quick_image
from spotfinder.servers.multipart_encoder import post_multipart
def get_spotfinder_url(file_object,host,port):
testurl = "%s:%d"%(host,port)
selector = "/spotfinder"
start_index=0
stop_index = file_object.linearintdata.size()
raw_string=file_object.linearintdata.slice_to_byte_str(start_index,stop_index)
query_object = [
("moduleindex",file_object.__dict__.get("moduleindex",-1)),
("filename",file_object.filename),
("bin",1),
("vendortype",file_object.vendortype),
("beam_center_reference_frame",file_object.beam_center_reference_frame),
("beam_center_convention",file_object.beam_center_convention),
("header",file_object.header),
("headerlines",""),
]
for item in ['DISTANCE', 'PHI', 'WAVELENGTH',
'TWOTHETA', 'OSC_RANGE',
'CCD_IMAGE_SATURATION', 'OSC_START', 'DETECTOR_SN', 'PIXEL_SIZE',
'SIZE1','SIZE2','BEAM_CENTER_X','BEAM_CENTER_Y'
]:
if type(file_object.parameters[item])==type(1.0):
query_object.append((item,"%.6f"%file_object.parameters[item]))
else:
query_object.append((item,file_object.parameters[item]))
files = [
("adsc_data",file_object.filename,raw_string)
]
print("length of data in ints",stop_index)
print("length of data in bytes",len(raw_string))
assert len(raw_string)/4==stop_index
Response = post_multipart(host=testurl, selector=selector,
fields = query_object, files = files)
print(Response.getresponse().read())
def get_labelit_image_object(file,convention):
Q = quick_image(file)
Q.set_beam_center_convention(convention)
Q.read()
return Q
def do_main(filepath, force_binning, convention, host, port):
absfile = os.path.abspath(filepath)
Q = get_labelit_image_object(absfile, convention)
if force_binning:
Q.setBin(2)
Q.show_header()
get_spotfinder_url(Q,host,port)
from iotbx.detectors import image_divider
number_of_modules = image_divider(
Q.linearintdata,
Q.vendor_specific_null_value
).module_count()
for x in range(number_of_modules):
file = "file://%s?slice=%d"%(absfile,x)
Q = get_labelit_image_object(file, convention)
if force_binning:
Q.setBin(2)
Q.show_header()
get_spotfinder_url(Q,host,port)
if __name__=="__main__":
import sys
try:
filepath, force_binning, convention, host, port = sys.argv[1:6]
force_binning = bool(force_binning)
port = int(port)
convention = int(convention)
except Exception:
print("""
Usage:
libtbx.python adsc_client.py <filepath> <force_binning> <convention> <host> <port>
Five mandatory arguments:
filepath: absolute or relative path name of the ADSC test image to be analyzed
force_binning: True (client-side 2x2-pixel software binning; sometimes the best
choice if raw data is not hardware-binned) or False
convention: beam_center_convention as defined on the spotfinder servers wiki
host: usually "localhost"; in any case, must be machine with same endianness
port: port number of image analyzer http service
""")
do_main(filepath, force_binning, convention, host, port)
| en | 0.683086 | Usage: libtbx.python adsc_client.py <filepath> <force_binning> <convention> <host> <port> Four mandatory arguments: filepath: absolute or relative path name of the ADSC test image to be analyzed force_binning: True (client-side 2x2-pixel software binning; sometimes the best choice if raw data is not hardware-binned) or False convention: beam_center_convention as defined on the spotfinder servers wiki host: usually "localhost"; in any case, must be machine with same endianness port: port number of image analyzer http service | 1.96144 | 2 |
flask/hello.py | Sunsetboy/learning_python | 0 | 6631375 | <filename>flask/hello.py
from flask import Flask
from markupsafe import escape
app = Flask(__name__)
@app.route("/")
def hello_world():
return "<p>Hello world 2!!"
@app.route("/user/<username>")
def show_profile(username):
return f"Hello {escape(username)}"
| <filename>flask/hello.py
from flask import Flask
from markupsafe import escape
app = Flask(__name__)
@app.route("/")
def hello_world():
return "<p>Hello world 2!!"
@app.route("/user/<username>")
def show_profile(username):
return f"Hello {escape(username)}"
| none | 1 | 2.750374 | 3 |
|
oarepo_model_builder/invenio/invenio_sample_app_poetry.py | Alzpeta/oarepo-model-builder | 0 | 6631376 | <reponame>Alzpeta/oarepo-model-builder<filename>oarepo_model_builder/invenio/invenio_sample_app_poetry.py
from ..builders import OutputBuilder
from ..outputs.toml import TOMLOutput
from ..utils.verbose import log
class InvenioSampleAppPoetryBuilder(OutputBuilder):
TYPE = 'invenio_sample_app_poetry'
def finish(self):
super().finish()
output: TOMLOutput = self.builder.get_output(
'toml',
'pyproject.toml'
)
output.setdefault("tool.poetry",
"name", self.settings.package_base.replace('_', '-'),
"version", "0.0.1",
"description", f"A sample application for {self.settings.package}",
"authors", [])
output.setdefault("build-system",
"requires", ['poetry-core>=1.0.0'],
"build-backend", "poetry.core.masonry.api")
output.setdefault("tool.poetry.dependencies", "python", "^3.9")
output.setdefault("tool.poetry.dependencies", "python", "^3.9")
output.setdefault("tool.poetry.dependencies.invenio",
'version', '^3.5.0a1',
'extras', ["base", "auth", "metadata", "files", "postgresql", "elasticsearch7"],
'optional', True,
'allow-prereleases', True
)
output.setdefault("tool.poetry.dependencies.invenio-records-resources",
'version', '^0.17.3',
'optional', True,
'allow-prereleases', True
)
output.setdefault("tool.poetry.extras", 'sample-app', ['invenio', 'invenio-records-resources'])
if output.created:
log(log.INFO, f"""To install the sample app, run
poetry install -E sample-app
""")
| from ..builders import OutputBuilder
from ..outputs.toml import TOMLOutput
from ..utils.verbose import log
class InvenioSampleAppPoetryBuilder(OutputBuilder):
TYPE = 'invenio_sample_app_poetry'
def finish(self):
super().finish()
output: TOMLOutput = self.builder.get_output(
'toml',
'pyproject.toml'
)
output.setdefault("tool.poetry",
"name", self.settings.package_base.replace('_', '-'),
"version", "0.0.1",
"description", f"A sample application for {self.settings.package}",
"authors", [])
output.setdefault("build-system",
"requires", ['poetry-core>=1.0.0'],
"build-backend", "poetry.core.masonry.api")
output.setdefault("tool.poetry.dependencies", "python", "^3.9")
output.setdefault("tool.poetry.dependencies", "python", "^3.9")
output.setdefault("tool.poetry.dependencies.invenio",
'version', '^3.5.0a1',
'extras', ["base", "auth", "metadata", "files", "postgresql", "elasticsearch7"],
'optional', True,
'allow-prereleases', True
)
output.setdefault("tool.poetry.dependencies.invenio-records-resources",
'version', '^0.17.3',
'optional', True,
'allow-prereleases', True
)
output.setdefault("tool.poetry.extras", 'sample-app', ['invenio', 'invenio-records-resources'])
if output.created:
log(log.INFO, f"""To install the sample app, run
poetry install -E sample-app
""") | en | 0.800973 | To install the sample app, run poetry install -E sample-app | 2.083683 | 2 |
test/common.py | jcrd/python-pkgbuilder | 0 | 6631377 | <gh_stars>0
from pathlib import Path
test1_pkg = 'test1-1-1-any.pkg.tar.xz'
test1_dep1_pkg = 'test1-dep1-1-1-any.pkg.tar.xz'
test1_makedep1_pkg = 'test1-makedep1-1-1-any.pkg.tar.xz'
localdir = str(Path(__file__).parent) + '/pkgbuilds'
chrootdir = '/var/lib/pkgbuilder'
def pkgnames(pkgs):
return [str(Path(p).name) for p in pkgs]
| from pathlib import Path
test1_pkg = 'test1-1-1-any.pkg.tar.xz'
test1_dep1_pkg = 'test1-dep1-1-1-any.pkg.tar.xz'
test1_makedep1_pkg = 'test1-makedep1-1-1-any.pkg.tar.xz'
localdir = str(Path(__file__).parent) + '/pkgbuilds'
chrootdir = '/var/lib/pkgbuilder'
def pkgnames(pkgs):
return [str(Path(p).name) for p in pkgs] | none | 1 | 2.339042 | 2 |
|
issues/migrations/0013_auto_20201012_1440.py | Floyd-Droid/jf-issue-tracker | 0 | 6631378 | <reponame>Floyd-Droid/jf-issue-tracker
# Generated by Django 3.1.1 on 2020-10-12 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('issues', '0012_auto_20201012_1437'),
]
operations = [
migrations.AlterField(
model_name='project',
name='slug',
field=models.SlugField(default='default', max_length=100, unique=True),
preserve_default=False,
),
]
| # Generated by Django 3.1.1 on 2020-10-12 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('issues', '0012_auto_20201012_1437'),
]
operations = [
migrations.AlterField(
model_name='project',
name='slug',
field=models.SlugField(default='default', max_length=100, unique=True),
preserve_default=False,
),
] | en | 0.784515 | # Generated by Django 3.1.1 on 2020-10-12 14:40 | 1.444915 | 1 |
answers/Khushi/Day 24/Question 2.py | vishaljha2121/30-DaysOfCode-March-2021 | 22 | 6631379 | <gh_stars>10-100
def countMinOperations(k,n):
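# Counts the minimum number of operations needed to reduce every element of k to
# zero, where one operation is either subtracting 1 from a single (odd) element
# or halving the whole array at once (only done when all elements are even).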
r=0
while (True):
countZero=0
i=0
while(i<n):
if((k[i] & 1)>0):
break
elif(k[i]==0):
countZero+=1
i+=1
if(countZero==n):
return r
if(i==n):
for j in range(n):
k[j]=k[j]//2
r+=1
for j in range(i,n):
if(k[j] & 1):
k[j]-=1
r+=1
n=int(input("Enter no. of elements in array: "))
a=[]
print("Enter elements of array:")
for x in range(n):
elem=int(input())
a.append(elem)
print("Minimum number of steps required to get the zero array is:",end=" ")
print(countMinOperations(a,n))
| def countMinOperations(k,n):
r=0
while (True):
countZero=0
i=0
while(i<n):
if((k[i] & 1)>0):
break
elif(k[i]==0):
countZero+=1
i+=1
if(countZero==n):
return r
if(i==n):
for j in range(n):
k[j]=k[j]//2
r+=1
for j in range(i,n):
if(k[j] & 1):
k[j]-=1
r+=1
n=int(input("Enter no. of elements in array: "))
a=[]
print("Enter elements of array:")
for x in range(n):
elem=int(input())
a.append(elem)
print("Minimum number of steps required to get the zero array is:",end=" ")
print(countMinOperations(a,n)) | none | 1 | 3.485106 | 3 |
|
vizdoomaze/envs/vizdoomazeone11.py | fanyuzeng/Vizdoomaze | 3 | 6631380 | <reponame>fanyuzeng/Vizdoomaze
from vizdoomaze.envs.vizdoomenv import VizdoomEnv
class vizdoomazeOne11(VizdoomEnv):
def __init__(self):
super(vizdoomazeOne11, self).__init__(24) | from vizdoomaze.envs.vizdoomenv import VizdoomEnv
class vizdoomazeOne11(VizdoomEnv):
def __init__(self):
super(vizdoomazeOne11, self).__init__(24) | none | 1 | 1.584204 | 2 |
|
src/pyglui/pyfontstash/setup.py | pupil-labs/pyglui | 24 | 6631381 | import platform
from Cython.Build import cythonize
from setuptools import Extension, setup
if platform.system() == "Darwin":
includes = ["/System/Library/Frameworks/OpenGL.framework/Versions/Current/Headers/"]
f = "-framework"
link_args = [f, "OpenGL"]
libs = []
compile_args = ["-D FONTSTASH_IMPLEMENTATION", "-D GLFONTSTASH_IMPLEMENTATION"]
elif platform.system() == "Windows":
includes = []
libs = ["OpenGL32"]
link_args = []
compile_args = [
"/DFONTSTASH_IMPLEMENTATION",
"/DGLFONTSTASH_IMPLEMENTATION",
] # http://msdn.microsoft.com/de-de/library/hhzbb5c8.aspx
else:
includes = [
"/usr/include/GL",
]
libs = ["GL"]
link_args = []
compile_args = ["-D FONTSTASH_IMPLEMENTATION", "-D GLFONTSTASH_IMPLEMENTATION"]
extensions = [
Extension(
name="fontstash",
sources=["fontstash.pyx"],
include_dirs=includes + ["fontstash/src"],
libraries=libs,
extra_link_args=link_args,
extra_compile_args=compile_args,
)
]
# this package will be compiled into a single .so file.
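# (Illustrative note, not from the original file: a typical in-place build for a
# Cython extension set up like this is `python setup.py build_ext --inplace`.)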
setup(
name="pyfontstash",
version="0.2",
author="<NAME>",
license="MIT",
description="OpenGL font rendering. This module can also be used as a submodule for other cython projects that want to use OpenGL.",
ext_modules=cythonize(extensions),
)
| import platform
from Cython.Build import cythonize
from setuptools import Extension, setup
if platform.system() == "Darwin":
includes = ["/System/Library/Frameworks/OpenGL.framework/Versions/Current/Headers/"]
f = "-framework"
link_args = [f, "OpenGL"]
libs = []
compile_args = ["-D FONTSTASH_IMPLEMENTATION", "-D GLFONTSTASH_IMPLEMENTATION"]
elif platform.system() == "Windows":
includes = []
libs = ["OpenGL32"]
link_args = []
compile_args = [
"/DFONTSTASH_IMPLEMENTATION",
"/DGLFONTSTASH_IMPLEMENTATION",
] # http://msdn.microsoft.com/de-de/library/hhzbb5c8.aspx
else:
includes = [
"/usr/include/GL",
]
libs = ["GL"]
link_args = []
compile_args = ["-D FONTSTASH_IMPLEMENTATION", "-D GLFONTSTASH_IMPLEMENTATION"]
extensions = [
Extension(
name="fontstash",
sources=["fontstash.pyx"],
include_dirs=includes + ["fontstash/src"],
libraries=libs,
extra_link_args=link_args,
extra_compile_args=compile_args,
)
]
# this package will be compiled into a single .so file.
setup(
name="pyfontstash",
version="0.2",
author="<NAME>",
license="MIT",
description="OpenGL font rendering. This module can also be used as a submodule for other cython projects that want to use OpenGL.",
ext_modules=cythonize(extensions),
)
| en | 0.536564 | # http://msdn.microsoft.com/de-de/library/hhzbb5c8.aspx # this package will be compiled into a single.so file. | 2.034631 | 2 |
see/__init__.py | ljcooke/see | 42 | 6631382 | <reponame>ljcooke/see
"""
see: dir for humans.
Documentation is available at https://ljcooke.github.io/see/
"""
from .inspector import see
from .output import SeeResult
__all__ = ['see', 'SeeResult']
__author__ = '<NAME>'
__contributors__ = 'See AUTHORS.rst'
__version__ = '1.4.1'
__copyright__ = 'Copyright (c) 2009-2018 <NAME>'
__license__ = 'BSD License'
| """
see: dir for humans.
Documentation is available at https://ljcooke.github.io/see/
"""
from .inspector import see
from .output import SeeResult
__all__ = ['see', 'SeeResult']
__author__ = '<NAME>'
__contributors__ = 'See AUTHORS.rst'
__version__ = '1.4.1'
__copyright__ = 'Copyright (c) 2009-2018 <NAME>'
__license__ = 'BSD License' | en | 0.800202 | see: dir for humans. Documentation is available at https://ljcooke.github.io/see/ | 1.25967 | 1 |
neo/io/nwbio.py | yger/python-neo | 199 | 6631383 | <gh_stars>100-1000
"""
NWBIO
=====
IO class for reading data from a Neurodata Without Borders (NWB) dataset
Documentation: https://www.nwb.org/
Depends on: h5py, nwb, dateutil
Supported: Read, Write
Python API - https://pynwb.readthedocs.io
Sample datasets from CRCNS - https://crcns.org/NWB
Sample datasets from Allen Institute
- http://alleninstitute.github.io/AllenSDK/cell_types.html#neurodata-without-borders
"""
from __future__ import absolute_import, division
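# Illustrative usage sketch (not part of the original module; the file names and
# the `start` variable below are hypothetical):
#
#     from neo.io import NWBIO
#     reader = NWBIO("data.nwb", mode="r")
#     blocks = reader.read_all_blocks(lazy=False)
#
#     # Writing requires a 'session_start_time' annotation (or keyword argument):
#     NWBIO("copy.nwb", mode="w").write_all_blocks(blocks, session_start_time=start)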
import json
import logging
import os
from collections import defaultdict
from itertools import chain
from json.decoder import JSONDecodeError
import numpy as np
import quantities as pq
from neo.core import (Segment, SpikeTrain, Epoch, Event, AnalogSignal,
IrregularlySampledSignal, Block, ImageSequence)
from neo.io.baseio import BaseIO
from neo.io.proxyobjects import (
AnalogSignalProxy as BaseAnalogSignalProxy,
EventProxy as BaseEventProxy,
EpochProxy as BaseEpochProxy,
SpikeTrainProxy as BaseSpikeTrainProxy
)
# PyNWB imports
try:
import pynwb
from pynwb import NWBFile, TimeSeries
from pynwb.base import ProcessingModule
from pynwb.ecephys import ElectricalSeries, Device, EventDetection
from pynwb.behavior import SpatialSeries
from pynwb.misc import AnnotationSeries
from pynwb import image
from pynwb.image import ImageSeries
from pynwb.spec import NWBAttributeSpec, NWBDatasetSpec, NWBGroupSpec, NWBNamespace, \
NWBNamespaceBuilder
from pynwb.device import Device
# For calcium imaging data
from pynwb.ophys import TwoPhotonSeries, OpticalChannel, ImageSegmentation, Fluorescence
have_pynwb = True
except ImportError:
have_pynwb = False
# hdmf imports
try:
from hdmf.spec import (LinkSpec, GroupSpec, DatasetSpec, SpecNamespace,
NamespaceBuilder, AttributeSpec, DtypeSpec, RefSpec)
have_hdmf = True
except ImportError:
have_hdmf = False
except SyntaxError:
have_hdmf = False
logger = logging.getLogger("Neo")
GLOBAL_ANNOTATIONS = (
"session_start_time", "identifier", "timestamps_reference_time", "experimenter",
"experiment_description", "session_id", "institution", "keywords", "notes",
"pharmacology", "protocol", "related_publications", "slices", "source_script",
"source_script_file_name", "data_collection", "surgery", "virus", "stimulus_notes",
"lab", "session_description"
)
POSSIBLE_JSON_FIELDS = (
"source_script", "description"
)
prefix_map = {
1e9: 'giga',
1e6: 'mega',
1e3: 'kilo',
1: '',
1e-3: 'milli',
1e-6: 'micro',
1e-9: 'nano',
1e-12: 'pico'
}
def try_json_field(content):
"""
Try to interpret a string as JSON data.
If successful, return the JSON data (dict or list)
If unsuccessful, return the original string
"""
try:
return json.loads(content)
except JSONDecodeError:
return content
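# For example (illustrative): try_json_field('{"a": 1}') returns {'a': 1}, while
# try_json_field('not json') returns the original string unchanged.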
def get_class(module, name):
"""
Given a module path and a class name, return the class object
"""
module_path = module.split(".")
assert len(module_path) == 2 # todo: handle the general case where this isn't 2
return getattr(getattr(pynwb, module_path[1]), name)
def statistics(block): # todo: move this to be a property of Block
"""
Return simple statistics about a Neo Block.
"""
stats = {
"SpikeTrain": {"count": 0},
"AnalogSignal": {"count": 0},
"IrregularlySampledSignal": {"count": 0},
"Epoch": {"count": 0},
"Event": {"count": 0},
}
for segment in block.segments:
stats["SpikeTrain"]["count"] += len(segment.spiketrains)
stats["AnalogSignal"]["count"] += len(segment.analogsignals)
stats["IrregularlySampledSignal"]["count"] += len(segment.irregularlysampledsignals)
stats["Epoch"]["count"] += len(segment.epochs)
stats["Event"]["count"] += len(segment.events)
return stats
def get_units_conversion(signal, timeseries_class):
"""
Given a quantity array and a TimeSeries subclass, return
the conversion factor and the expected units
"""
# it would be nice if the expected units was an attribute of the PyNWB class
if "CurrentClamp" in timeseries_class.__name__:
expected_units = pq.volt
elif "VoltageClamp" in timeseries_class.__name__:
expected_units = pq.ampere
else:
# todo: warn that we don't handle this subclass yet
expected_units = signal.units
return float((signal.units / expected_units).simplified.magnitude), expected_units
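# For example (illustrative): a signal in pq.mV paired with a CurrentClampSeries
# subclass yields (0.001, pq.volt), since current-clamp data is expected in volts.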
def time_in_seconds(t):
return float(t.rescale("second"))
def _decompose_unit(unit):
"""
Given a quantities unit object, return a base unit name and a conversion factor.
Example:
>>> _decompose_unit(pq.mV)
('volt', 0.001)
"""
assert isinstance(unit, pq.quantity.Quantity)
assert unit.magnitude == 1
conversion = 1.0
def _decompose(unit):
dim = unit.dimensionality
if len(dim) != 1:
raise NotImplementedError("Compound units not yet supported") # e.g. volt-metre
uq, n = list(dim.items())[0]
if n != 1:
raise NotImplementedError("Compound units not yet supported") # e.g. volt^2
uq_def = uq.definition
return float(uq_def.magnitude), uq_def
conv, unit2 = _decompose(unit)
while conv != 1:
conversion *= conv
unit = unit2
conv, unit2 = _decompose(unit)
return list(unit.dimensionality.keys())[0].name, conversion
def _recompose_unit(base_unit_name, conversion):
"""
Given a base unit name and a conversion factor, return a quantities unit object
Example:
>>> _recompose_unit("ampere", 1e-9)
UnitCurrent('nanoampere', 0.001 * uA, 'nA')
"""
unit_name = None
for cf in prefix_map:
# conversion may have a different float precision to the keys in
# prefix_map, so we can't just use `prefix_map[conversion]`
if abs(conversion - cf) / cf < 1e-6:
unit_name = prefix_map[cf] + base_unit_name
if unit_name is None:
raise ValueError(f"Can't handle this conversion factor: {conversion}")
if unit_name[-1] == "s": # strip trailing 's', e.g. "volts" --> "volt"
unit_name = unit_name[:-1]
try:
return getattr(pq, unit_name)
except AttributeError:
logger.warning(f"Can't handle unit '{unit_name}'. Returning dimensionless")
return pq.dimensionless
class NWBIO(BaseIO):
"""
Class for "reading" experimental data from a .nwb file, and "writing" a .nwb file from Neo
"""
supported_objects = [Block, Segment, AnalogSignal, IrregularlySampledSignal,
SpikeTrain, Epoch, Event, ImageSequence]
readable_objects = supported_objects
writeable_objects = supported_objects
has_header = False
support_lazy = True
name = 'NeoNWB IO'
description = 'This IO reads/writes experimental data from/to an .nwb dataset'
extensions = ['nwb']
mode = 'one-file'
is_readable = True
is_writable = True
is_streameable = False
def __init__(self, filename, mode='r'):
"""
Arguments:
filename : the filename
"""
if not have_pynwb:
raise Exception("Please install the pynwb package to use NWBIO")
if not have_hdmf:
raise Exception("Please install the hdmf package to use NWBIO")
BaseIO.__init__(self, filename=filename)
self.filename = filename
self.blocks_written = 0
self.nwb_file_mode = mode
def read_all_blocks(self, lazy=False, **kwargs):
"""
Load all blocks in the file.
"""
assert self.nwb_file_mode in ('r',)
io = pynwb.NWBHDF5IO(self.filename, mode=self.nwb_file_mode,
load_namespaces=True) # Open a file with NWBHDF5IO
self._file = io.read()
self.global_block_metadata = {}
for annotation_name in GLOBAL_ANNOTATIONS:
value = getattr(self._file, annotation_name, None)
if value is not None:
if annotation_name in POSSIBLE_JSON_FIELDS:
value = try_json_field(value)
self.global_block_metadata[annotation_name] = value
if "session_description" in self.global_block_metadata:
self.global_block_metadata["description"] = self.global_block_metadata[
"session_description"]
self.global_block_metadata["file_origin"] = self.filename
if "session_start_time" in self.global_block_metadata:
self.global_block_metadata["rec_datetime"] = self.global_block_metadata[
"session_start_time"]
if "file_create_date" in self.global_block_metadata:
self.global_block_metadata["file_datetime"] = self.global_block_metadata[
"file_create_date"]
self._blocks = {}
self._read_acquisition_group(lazy=lazy)
self._read_stimulus_group(lazy)
self._read_units(lazy=lazy)
self._read_epochs_group(lazy)
return list(self._blocks.values())
def read_block(self, lazy=False, block_index=0, **kargs):
"""
Load the first block in the file.
"""
return self.read_all_blocks(lazy=lazy)[block_index]
def _get_segment(self, block_name, segment_name):
# If we've already created a Block with the given name return it,
# otherwise create it now and store it in self._blocks.
# If we've already created a Segment in the given block, return it,
# otherwise create it now and return it.
if block_name in self._blocks:
block = self._blocks[block_name]
else:
block = Block(name=block_name, **self.global_block_metadata)
self._blocks[block_name] = block
segment = None
for seg in block.segments:
if segment_name == seg.name:
segment = seg
break
if segment is None:
segment = Segment(name=segment_name)
segment.block = block
block.segments.append(segment)
return segment
def _read_epochs_group(self, lazy):
if self._file.epochs is not None:
try:
# NWB files created by Neo store the segment, block and epoch names as extra
# columns
segment_names = self._file.epochs.segment[:]
block_names = self._file.epochs.block[:]
epoch_names = self._file.epochs._name[:]
except AttributeError:
epoch_names = None
if epoch_names is not None:
unique_epoch_names = np.unique(epoch_names)
for epoch_name in unique_epoch_names:
index, = np.where((epoch_names == epoch_name))
epoch = EpochProxy(self._file.epochs, epoch_name, index)
if not lazy:
epoch = epoch.load()
segment_name = np.unique(segment_names[index])
block_name = np.unique(block_names[index])
assert segment_name.size == block_name.size == 1
segment = self._get_segment(block_name[0], segment_name[0])
segment.epochs.append(epoch)
epoch.segment = segment
else:
epoch = EpochProxy(self._file.epochs)
if not lazy:
epoch = epoch.load()
segment = self._get_segment("default", "default")
segment.epochs.append(epoch)
epoch.segment = segment
def _read_timeseries_group(self, group_name, lazy):
group = getattr(self._file, group_name)
for timeseries in group.values():
try:
# NWB files created by Neo store the segment and block names in the comments field
hierarchy = json.loads(timeseries.comments)
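# e.g. (illustrative): comments == '{"block": "block0", "segment": "segment0"}'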
except JSONDecodeError:
# For NWB files created with other applications, we put everything in a single
# segment in a single block
# todo: investigate whether there is a reliable way to create multiple segments,
# e.g. using Trial information
block_name = "default"
segment_name = "default"
else:
block_name = hierarchy["block"]
segment_name = hierarchy["segment"]
segment = self._get_segment(block_name, segment_name)
if isinstance(timeseries, AnnotationSeries):
event = EventProxy(timeseries, group_name)
if not lazy:
event = event.load()
segment.events.append(event)
event.segment = segment
elif timeseries.rate: # AnalogSignal
signal = AnalogSignalProxy(timeseries, group_name)
if not lazy:
signal = signal.load()
segment.analogsignals.append(signal)
signal.segment = segment
else: # IrregularlySampledSignal
signal = AnalogSignalProxy(timeseries, group_name)
if not lazy:
signal = signal.load()
segment.irregularlysampledsignals.append(signal)
signal.segment = segment
def _read_units(self, lazy):
if self._file.units:
for id in range(len(self._file.units)):
try:
# NWB files created by Neo store the segment and block names as extra columns
segment_name = self._file.units.segment[id]
block_name = self._file.units.block[id]
except AttributeError:
# For NWB files created with other applications, we put everything in a single
# segment in a single block
segment_name = "default"
block_name = "default"
segment = self._get_segment(block_name, segment_name)
spiketrain = SpikeTrainProxy(self._file.units, id)
if not lazy:
spiketrain = spiketrain.load()
segment.spiketrains.append(spiketrain)
spiketrain.segment = segment
def _read_acquisition_group(self, lazy):
self._read_timeseries_group("acquisition", lazy)
def _read_stimulus_group(self, lazy):
self._read_timeseries_group("stimulus", lazy)
def write_all_blocks(self, blocks, **kwargs):
"""
Write list of blocks to the file
"""
# todo: allow metadata in NWBFile constructor to be taken from kwargs
annotations = defaultdict(set)
for annotation_name in GLOBAL_ANNOTATIONS:
if annotation_name in kwargs:
annotations[annotation_name] = kwargs[annotation_name]
else:
for block in blocks:
if annotation_name in block.annotations:
try:
annotations[annotation_name].add(block.annotations[annotation_name])
except TypeError:
if annotation_name in POSSIBLE_JSON_FIELDS:
encoded = json.dumps(block.annotations[annotation_name])
annotations[annotation_name].add(encoded)
else:
raise
if annotation_name in annotations:
if len(annotations[annotation_name]) > 1:
raise NotImplementedError(
"We don't yet support multiple values for {}".format(annotation_name))
# take single value from set
annotations[annotation_name], = annotations[annotation_name]
if "identifier" not in annotations:
annotations["identifier"] = self.filename
if "session_description" not in annotations:
annotations["session_description"] = blocks[0].description or self.filename
# todo: concatenate descriptions of multiple blocks if different
if "session_start_time" not in annotations:
raise Exception("Writing to NWB requires an annotation 'session_start_time'")
# todo: handle subject
# todo: store additional Neo annotations somewhere in NWB file
nwbfile = NWBFile(**annotations)
assert self.nwb_file_mode in ('w',) # possibly expand to 'a'ppend later
if self.nwb_file_mode == "w" and os.path.exists(self.filename):
os.remove(self.filename)
io_nwb = pynwb.NWBHDF5IO(self.filename, mode=self.nwb_file_mode)
if sum(statistics(block)["SpikeTrain"]["count"] for block in blocks) > 0:
nwbfile.add_unit_column('_name', 'the name attribute of the SpikeTrain')
# nwbfile.add_unit_column('_description',
# 'the description attribute of the SpikeTrain')
nwbfile.add_unit_column(
'segment', 'the name of the Neo Segment to which the SpikeTrain belongs')
nwbfile.add_unit_column(
'block', 'the name of the Neo Block to which the SpikeTrain belongs')
if sum(statistics(block)["Epoch"]["count"] for block in blocks) > 0:
nwbfile.add_epoch_column('_name', 'the name attribute of the Epoch')
# nwbfile.add_epoch_column('_description', 'the description attribute of the Epoch')
nwbfile.add_epoch_column(
'segment', 'the name of the Neo Segment to which the Epoch belongs')
nwbfile.add_epoch_column('block',
'the name of the Neo Block to which the Epoch belongs')
for i, block in enumerate(blocks):
self.write_block(nwbfile, block)
io_nwb.write(nwbfile)
io_nwb.close()
with pynwb.NWBHDF5IO(self.filename, "r") as io_validate:
errors = pynwb.validate(io_validate, namespace="core")
if errors:
raise Exception(f"Errors found when validating {self.filename}")
def write_block(self, nwbfile, block, **kwargs):
"""
Write a Block to the file
:param block: Block to be written
:param nwbfile: Representation of an NWB file
"""
electrodes = self._write_electrodes(nwbfile, block)
if not block.name:
block.name = "block%d" % self.blocks_written
for i, segment in enumerate(block.segments):
assert segment.block is block
if not segment.name:
segment.name = "%s : segment%d" % (block.name, i)
self._write_segment(nwbfile, segment, electrodes)
self.blocks_written += 1
def _write_electrodes(self, nwbfile, block):
# this handles only icephys_electrode for now
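# The annotation is expected to look roughly like this (illustrative values,
# based on how it is unpacked below):
#     signal.annotations["nwb_electrode"] = {
#         "name": "elec0", "description": "...",
#         "device": {"name": "amplifier"}
#     }
# Extra keys are passed straight through to create_icephys_electrode().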
electrodes = {}
devices = {}
for segment in block.segments:
for signal in chain(segment.analogsignals, segment.irregularlysampledsignals):
if "nwb_electrode" in signal.annotations:
elec_meta = signal.annotations["nwb_electrode"].copy()
if elec_meta["name"] not in electrodes:
# todo: check for consistency if the name is already there
if elec_meta["device"]["name"] in devices:
device = devices[elec_meta["device"]["name"]]
else:
device = nwbfile.create_device(**elec_meta["device"])
devices[elec_meta["device"]["name"]] = device
elec_meta.pop("device")
electrodes[elec_meta["name"]] = nwbfile.create_icephys_electrode(
device=device, **elec_meta
)
return electrodes
def _write_segment(self, nwbfile, segment, electrodes):
# maybe use NWB trials to store Segment metadata?
for i, signal in enumerate(
chain(segment.analogsignals, segment.irregularlysampledsignals)):
assert signal.segment is segment
if not signal.name:
signal.name = "%s : analogsignal%d" % (segment.name, i)
self._write_signal(nwbfile, signal, electrodes)
for i, train in enumerate(segment.spiketrains):
assert train.segment is segment
if not train.name:
train.name = "%s : spiketrain%d" % (segment.name, i)
self._write_spiketrain(nwbfile, train)
for i, event in enumerate(segment.events):
assert event.segment is segment
if not event.name:
event.name = "%s : event%d" % (segment.name, i)
self._write_event(nwbfile, event)
for i, epoch in enumerate(segment.epochs):
if not epoch.name:
epoch.name = "%s : epoch%d" % (segment.name, i)
self._write_epoch(nwbfile, epoch)
def _write_signal(self, nwbfile, signal, electrodes):
hierarchy = {'block': signal.segment.block.name, 'segment': signal.segment.name}
if "nwb_neurodata_type" in signal.annotations:
timeseries_class = get_class(*signal.annotations["nwb_neurodata_type"])
else:
timeseries_class = TimeSeries # default
additional_metadata = {name[4:]: value
for name, value in signal.annotations.items()
if name.startswith("nwb:")}
if "nwb_electrode" in signal.annotations:
electrode_name = signal.annotations["nwb_electrode"]["name"]
additional_metadata["electrode"] = electrodes[electrode_name]
if timeseries_class != TimeSeries:
conversion, units = get_units_conversion(signal, timeseries_class)
additional_metadata["conversion"] = conversion
else:
units = signal.units
if isinstance(signal, AnalogSignal):
sampling_rate = signal.sampling_rate.rescale("Hz")
tS = timeseries_class(
name=signal.name,
starting_time=time_in_seconds(signal.t_start),
data=signal,
unit=units.dimensionality.string,
rate=float(sampling_rate),
comments=json.dumps(hierarchy),
**additional_metadata)
# todo: try to add array_annotations via "control" attribute
elif isinstance(signal, IrregularlySampledSignal):
tS = timeseries_class(
name=signal.name,
data=signal,
unit=units.dimensionality.string,
timestamps=signal.times.rescale('second').magnitude,
comments=json.dumps(hierarchy),
**additional_metadata)
else:
raise TypeError(
"signal has type {0}, should be AnalogSignal or IrregularlySampledSignal".format(
signal.__class__.__name__))
nwb_group = signal.annotations.get("nwb_group", "acquisition")
add_method_map = {
"acquisition": nwbfile.add_acquisition,
"stimulus": nwbfile.add_stimulus
}
if nwb_group in add_method_map:
add_time_series = add_method_map[nwb_group]
else:
raise NotImplementedError("NWB group '{}' not yet supported".format(nwb_group))
add_time_series(tS)
return tS
def _write_spiketrain(self, nwbfile, spiketrain):
nwbfile.add_unit(spike_times=spiketrain.rescale('s').magnitude,
obs_intervals=[[float(spiketrain.t_start.rescale('s')),
float(spiketrain.t_stop.rescale('s'))]],
_name=spiketrain.name,
# _description=spiketrain.description,
segment=spiketrain.segment.name,
block=spiketrain.segment.block.name)
# todo: handle annotations (using add_unit_column()?)
# todo: handle Neo Units
# todo: handle spike waveforms, if any (see SpikeEventSeries)
return nwbfile.units
def _write_event(self, nwbfile, event):
hierarchy = {'block': event.segment.block.name, 'segment': event.segment.name}
tS_evt = AnnotationSeries(
name=event.name,
data=event.labels,
timestamps=event.times.rescale('second').magnitude,
description=event.description or "",
comments=json.dumps(hierarchy))
nwbfile.add_acquisition(tS_evt)
return tS_evt
def _write_epoch(self, nwbfile, epoch):
for t_start, duration, label in zip(epoch.rescale('s').magnitude,
epoch.durations.rescale('s').magnitude,
epoch.labels):
nwbfile.add_epoch(t_start, t_start + duration, [label], [],
_name=epoch.name,
segment=epoch.segment.name,
block=epoch.segment.block.name)
return nwbfile.epochs
class AnalogSignalProxy(BaseAnalogSignalProxy):
common_metadata_fields = (
# fields that are the same for all TimeSeries subclasses
"comments", "description", "unit", "starting_time", "timestamps", "rate",
"data", "starting_time_unit", "timestamps_unit", "electrode"
)
def __init__(self, timeseries, nwb_group):
self._timeseries = timeseries
self.units = timeseries.unit
if timeseries.conversion:
self.units = _recompose_unit(timeseries.unit, timeseries.conversion)
if timeseries.starting_time is not None:
self.t_start = timeseries.starting_time * pq.s
else:
self.t_start = timeseries.timestamps[0] * pq.s
if timeseries.rate:
self.sampling_rate = timeseries.rate * pq.Hz
else:
self.sampling_rate = None
self.name = timeseries.name
self.annotations = {"nwb_group": nwb_group}
self.description = try_json_field(timeseries.description)
if isinstance(self.description, dict):
self.annotations["notes"] = self.description
if "name" in self.annotations:
self.annotations.pop("name")
self.description = None
self.shape = self._timeseries.data.shape
if len(self.shape) == 1:
self.shape = (self.shape[0], 1)
metadata_fields = list(timeseries.__nwbfields__)
for field_name in self.__class__.common_metadata_fields: # already handled
try:
metadata_fields.remove(field_name)
except ValueError:
pass
for field_name in metadata_fields:
value = getattr(timeseries, field_name)
if value is not None:
self.annotations[f"nwb:{field_name}"] = value
self.annotations["nwb_neurodata_type"] = (
timeseries.__class__.__module__,
timeseries.__class__.__name__
)
if hasattr(timeseries, "electrode"):
# todo: once the Group class is available, we could add electrode metadata
# to a Group containing all signals that share that electrode
# This would reduce the amount of redundancy (repeated metadata in every signal)
electrode_metadata = {"device": {}}
metadata_fields = list(timeseries.electrode.__class__.__nwbfields__) + ["name"]
metadata_fields.remove("device") # needs special handling
for field_name in metadata_fields:
value = getattr(timeseries.electrode, field_name)
if value is not None:
electrode_metadata[field_name] = value
for field_name in timeseries.electrode.device.__class__.__nwbfields__:
value = getattr(timeseries.electrode.device, field_name)
if value is not None:
electrode_metadata["device"][field_name] = value
self.annotations["nwb_electrode"] = electrode_metadata
def load(self, time_slice=None, strict_slicing=True):
"""
Load AnalogSignalProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
i_start, i_stop, sig_t_start = None, None, self.t_start
if time_slice:
if self.sampling_rate is None:
i_start, i_stop = np.searchsorted(self._timeseries.timestamps, time_slice)
else:
i_start, i_stop, sig_t_start = self._time_slice_indices(
time_slice, strict_slicing=strict_slicing)
signal = self._timeseries.data[i_start: i_stop]
if self.sampling_rate is None:
return IrregularlySampledSignal(
self._timeseries.timestamps[i_start:i_stop] * pq.s,
signal,
units=self.units,
t_start=sig_t_start,
sampling_rate=self.sampling_rate,
name=self.name,
description=self.description,
array_annotations=None,
**self.annotations) # todo: timeseries.control / control_description
else:
return AnalogSignal(
signal,
units=self.units,
t_start=sig_t_start,
sampling_rate=self.sampling_rate,
name=self.name,
description=self.description,
array_annotations=None,
**self.annotations) # todo: timeseries.control / control_description
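# Illustrative (hypothetical `proxy` variable): proxy.load(time_slice=(0 * pq.s, 1 * pq.s))
# materialises just the first second of data as an AnalogSignal, or as an
# IrregularlySampledSignal when the underlying TimeSeries has no sampling rate.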
class EventProxy(BaseEventProxy):
def __init__(self, timeseries, nwb_group):
self._timeseries = timeseries
self.name = timeseries.name
self.annotations = {"nwb_group": nwb_group}
self.description = try_json_field(timeseries.description)
if isinstance(self.description, dict):
self.annotations.update(self.description)
self.description = None
self.shape = self._timeseries.data.shape
def load(self, time_slice=None, strict_slicing=True):
"""
Load EventProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
if time_slice:
raise NotImplementedError("todo")
else:
times = self._timeseries.timestamps[:]
labels = self._timeseries.data[:]
return Event(times * pq.s,
labels=labels,
name=self.name,
description=self.description,
**self.annotations)
class EpochProxy(BaseEpochProxy):
def __init__(self, time_intervals, epoch_name=None, index=None):
"""
:param time_intervals: An epochs table,
which is a specific TimeIntervals table that stores info about long periods
:param epoch_name: (str)
Name of the epoch object
:param index: (np.array, slice)
Slice object or array of bool values masking time_intervals to be used. In case of
an array it has to have the same shape as `time_intervals`.
"""
self._time_intervals = time_intervals
if index is not None:
self._index = index
self.shape = (index.sum(),)
else:
self._index = slice(None)
self.shape = (len(time_intervals),)
self.name = epoch_name
def load(self, time_slice=None, strict_slicing=True):
"""
Load EpochProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is all of the intervals.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
if time_slice:
raise NotImplementedError("todo")
else:
start_times = self._time_intervals.start_time[self._index]
stop_times = self._time_intervals.stop_time[self._index]
durations = stop_times - start_times
labels = self._time_intervals.tags[self._index]
return Epoch(times=start_times * pq.s,
durations=durations * pq.s,
labels=labels,
name=self.name)
class SpikeTrainProxy(BaseSpikeTrainProxy):
def __init__(self, units_table, id):
"""
:param units_table: A Units table
(see https://pynwb.readthedocs.io/en/stable/pynwb.misc.html#pynwb.misc.Units)
:param id: the cell/unit ID (integer)
"""
self._units_table = units_table
self.id = id
self.units = pq.s
obs_intervals = units_table.get_unit_obs_intervals(id)
if len(obs_intervals) == 0:
t_start, t_stop = None, None
elif len(obs_intervals) == 1:
t_start, t_stop = obs_intervals[0]
else:
raise NotImplementedError("Can't yet handle multiple observation intervals")
self.t_start = t_start * pq.s
self.t_stop = t_stop * pq.s
self.annotations = {"nwb_group": "acquisition"}
try:
# NWB files created by Neo store the name as an extra column
self.name = units_table._name[id]
except AttributeError:
self.name = None
self.shape = None # no way to get this without reading the data
def load(self, time_slice=None, strict_slicing=True):
"""
Load SpikeTrainProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire spike train.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
interval = None
if time_slice:
interval = (float(t) for t in time_slice) # convert from quantities
spike_times = self._units_table.get_unit_spike_times(self.id, in_interval=interval)
return SpikeTrain(
spike_times * self.units,
self.t_stop,
units=self.units,
# sampling_rate=array(1.) * Hz,
t_start=self.t_start,
# waveforms=None,
# left_sweep=None,
name=self.name,
# file_origin=None,
# description=None,
# array_annotations=None,
**self.annotations)
| """
NWBIO
=====
IO class for reading data from a Neurodata Without Borders (NWB) dataset
Documentation : https://www.nwb.org/
Depends on: h5py, nwb, dateutil
Supported: Read, Write
Python API - https://pynwb.readthedocs.io
Sample datasets from CRCNS - https://crcns.org/NWB
Sample datasets from Allen Institute
- http://alleninstitute.github.io/AllenSDK/cell_types.html#neurodata-without-borders
"""
from __future__ import absolute_import, division
import json
import logging
import os
from collections import defaultdict
from itertools import chain
from json.decoder import JSONDecodeError
import numpy as np
import quantities as pq
from neo.core import (Segment, SpikeTrain, Epoch, Event, AnalogSignal,
IrregularlySampledSignal, Block, ImageSequence)
from neo.io.baseio import BaseIO
from neo.io.proxyobjects import (
AnalogSignalProxy as BaseAnalogSignalProxy,
EventProxy as BaseEventProxy,
EpochProxy as BaseEpochProxy,
SpikeTrainProxy as BaseSpikeTrainProxy
)
# PyNWB imports
try:
import pynwb
from pynwb import NWBFile, TimeSeries
from pynwb.base import ProcessingModule
from pynwb.ecephys import ElectricalSeries, Device, EventDetection
from pynwb.behavior import SpatialSeries
from pynwb.misc import AnnotationSeries
from pynwb import image
from pynwb.image import ImageSeries
from pynwb.spec import NWBAttributeSpec, NWBDatasetSpec, NWBGroupSpec, NWBNamespace, \
NWBNamespaceBuilder
from pynwb.device import Device
# For calcium imaging data
from pynwb.ophys import TwoPhotonSeries, OpticalChannel, ImageSegmentation, Fluorescence
have_pynwb = True
except ImportError:
have_pynwb = False
# hdmf imports
try:
from hdmf.spec import (LinkSpec, GroupSpec, DatasetSpec, SpecNamespace,
NamespaceBuilder, AttributeSpec, DtypeSpec, RefSpec)
have_hdmf = True
except ImportError:
have_hdmf = False
except SyntaxError:
have_hdmf = False
logger = logging.getLogger("Neo")
GLOBAL_ANNOTATIONS = (
"session_start_time", "identifier", "timestamps_reference_time", "experimenter",
"experiment_description", "session_id", "institution", "keywords", "notes",
"pharmacology", "protocol", "related_publications", "slices", "source_script",
"source_script_file_name", "data_collection", "surgery", "virus", "stimulus_notes",
"lab", "session_description"
)
POSSIBLE_JSON_FIELDS = (
"source_script", "description"
)
prefix_map = {
1e9: 'giga',
1e6: 'mega',
1e3: 'kilo',
1: '',
1e-3: 'milli',
1e-6: 'micro',
1e-9: 'nano',
1e-12: 'pico'
}
def try_json_field(content):
"""
Try to interpret a string as JSON data.
If successful, return the JSON data (dict or list)
If unsuccessful, return the original string
"""
try:
return json.loads(content)
except JSONDecodeError:
return content
def get_class(module, name):
"""
Given a module path and a class name, return the class object
"""
module_path = module.split(".")
assert len(module_path) == 2 # todo: handle the general case where this isn't 2
return getattr(getattr(pynwb, module_path[1]), name)
def statistics(block): # todo: move this to be a property of Block
"""
Return simple statistics about a Neo Block.
"""
stats = {
"SpikeTrain": {"count": 0},
"AnalogSignal": {"count": 0},
"IrregularlySampledSignal": {"count": 0},
"Epoch": {"count": 0},
"Event": {"count": 0},
}
for segment in block.segments:
stats["SpikeTrain"]["count"] += len(segment.spiketrains)
stats["AnalogSignal"]["count"] += len(segment.analogsignals)
stats["IrregularlySampledSignal"]["count"] += len(segment.irregularlysampledsignals)
stats["Epoch"]["count"] += len(segment.epochs)
stats["Event"]["count"] += len(segment.events)
return stats
def get_units_conversion(signal, timeseries_class):
"""
Given a quantity array and a TimeSeries subclass, return
the conversion factor and the expected units
"""
# it would be nice if the expected units was an attribute of the PyNWB class
if "CurrentClamp" in timeseries_class.__name__:
expected_units = pq.volt
elif "VoltageClamp" in timeseries_class.__name__:
expected_units = pq.ampere
else:
# todo: warn that we don't handle this subclass yet
expected_units = signal.units
return float((signal.units / expected_units).simplified.magnitude), expected_units
def time_in_seconds(t):
return float(t.rescale("second"))
def _decompose_unit(unit):
"""
Given a quantities unit object, return a base unit name and a conversion factor.
Example:
>>> _decompose_unit(pq.mV)
('volt', 0.001)
"""
assert isinstance(unit, pq.quantity.Quantity)
assert unit.magnitude == 1
conversion = 1.0
def _decompose(unit):
dim = unit.dimensionality
if len(dim) != 1:
raise NotImplementedError("Compound units not yet supported") # e.g. volt-metre
uq, n = list(dim.items())[0]
if n != 1:
raise NotImplementedError("Compound units not yet supported") # e.g. volt^2
uq_def = uq.definition
return float(uq_def.magnitude), uq_def
conv, unit2 = _decompose(unit)
while conv != 1:
conversion *= conv
unit = unit2
conv, unit2 = _decompose(unit)
return list(unit.dimensionality.keys())[0].name, conversion
def _recompose_unit(base_unit_name, conversion):
"""
Given a base unit name and a conversion factor, return a quantities unit object
Example:
>>> _recompose_unit("ampere", 1e-9)
UnitCurrent('nanoampere', 0.001 * uA, 'nA')
"""
unit_name = None
for cf in prefix_map:
# conversion may have a different float precision to the keys in
# prefix_map, so we can't just use `prefix_map[conversion]`
if abs(conversion - cf) / cf < 1e-6:
unit_name = prefix_map[cf] + base_unit_name
if unit_name is None:
raise ValueError(f"Can't handle this conversion factor: {conversion}")
if unit_name[-1] == "s": # strip trailing 's', e.g. "volts" --> "volt"
unit_name = unit_name[:-1]
try:
return getattr(pq, unit_name)
except AttributeError:
logger.warning(f"Can't handle unit '{unit_name}'. Returning dimensionless")
return pq.dimensionless
class NWBIO(BaseIO):
"""
Class for "reading" experimental data from a .nwb file, and "writing" a .nwb file from Neo
"""
supported_objects = [Block, Segment, AnalogSignal, IrregularlySampledSignal,
SpikeTrain, Epoch, Event, ImageSequence]
readable_objects = supported_objects
writeable_objects = supported_objects
has_header = False
support_lazy = True
name = 'NeoNWB IO'
description = 'This IO reads/writes experimental data from/to an .nwb dataset'
extensions = ['nwb']
mode = 'one-file'
is_readable = True
is_writable = True
is_streameable = False
def __init__(self, filename, mode='r'):
"""
Arguments:
filename : the filename
"""
if not have_pynwb:
raise Exception("Please install the pynwb package to use NWBIO")
if not have_hdmf:
raise Exception("Please install the hdmf package to use NWBIO")
BaseIO.__init__(self, filename=filename)
self.filename = filename
self.blocks_written = 0
self.nwb_file_mode = mode
def read_all_blocks(self, lazy=False, **kwargs):
"""
Load all blocks in the file.
"""
assert self.nwb_file_mode in ('r',)
io = pynwb.NWBHDF5IO(self.filename, mode=self.nwb_file_mode,
load_namespaces=True) # Open a file with NWBHDF5IO
self._file = io.read()
self.global_block_metadata = {}
for annotation_name in GLOBAL_ANNOTATIONS:
value = getattr(self._file, annotation_name, None)
if value is not None:
if annotation_name in POSSIBLE_JSON_FIELDS:
value = try_json_field(value)
self.global_block_metadata[annotation_name] = value
if "session_description" in self.global_block_metadata:
self.global_block_metadata["description"] = self.global_block_metadata[
"session_description"]
self.global_block_metadata["file_origin"] = self.filename
if "session_start_time" in self.global_block_metadata:
self.global_block_metadata["rec_datetime"] = self.global_block_metadata[
"session_start_time"]
if "file_create_date" in self.global_block_metadata:
self.global_block_metadata["file_datetime"] = self.global_block_metadata[
"file_create_date"]
self._blocks = {}
self._read_acquisition_group(lazy=lazy)
self._read_stimulus_group(lazy)
self._read_units(lazy=lazy)
self._read_epochs_group(lazy)
return list(self._blocks.values())
def read_block(self, lazy=False, block_index=0, **kargs):
"""
Load the first block in the file.
"""
return self.read_all_blocks(lazy=lazy)[block_index]
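    # Sketch of lazy reading (file name illustrative): proxy objects are returned
    # until their data are explicitly loaded.
    #
    #     io = NWBIO("recording.nwb", mode="r")
    #     block = io.read_block(lazy=True)
    #     proxy = block.segments[0].analogsignals[0]
    #     signal = proxy.load()   # materialise the AnalogSignal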
def _get_segment(self, block_name, segment_name):
# If we've already created a Block with the given name return it,
# otherwise create it now and store it in self._blocks.
# If we've already created a Segment in the given block, return it,
# otherwise create it now and return it.
if block_name in self._blocks:
block = self._blocks[block_name]
else:
block = Block(name=block_name, **self.global_block_metadata)
self._blocks[block_name] = block
segment = None
for seg in block.segments:
if segment_name == seg.name:
segment = seg
break
if segment is None:
segment = Segment(name=segment_name)
segment.block = block
block.segments.append(segment)
return segment
def _read_epochs_group(self, lazy):
if self._file.epochs is not None:
try:
# NWB files created by Neo store the segment, block and epoch names as extra
# columns
segment_names = self._file.epochs.segment[:]
block_names = self._file.epochs.block[:]
epoch_names = self._file.epochs._name[:]
except AttributeError:
epoch_names = None
if epoch_names is not None:
unique_epoch_names = np.unique(epoch_names)
for epoch_name in unique_epoch_names:
index, = np.where((epoch_names == epoch_name))
epoch = EpochProxy(self._file.epochs, epoch_name, index)
if not lazy:
epoch = epoch.load()
segment_name = np.unique(segment_names[index])
block_name = np.unique(block_names[index])
assert segment_name.size == block_name.size == 1
segment = self._get_segment(block_name[0], segment_name[0])
segment.epochs.append(epoch)
epoch.segment = segment
else:
epoch = EpochProxy(self._file.epochs)
if not lazy:
epoch = epoch.load()
segment = self._get_segment("default", "default")
segment.epochs.append(epoch)
epoch.segment = segment
def _read_timeseries_group(self, group_name, lazy):
group = getattr(self._file, group_name)
for timeseries in group.values():
try:
# NWB files created by Neo store the segment and block names in the comments field
hierarchy = json.loads(timeseries.comments)
except JSONDecodeError:
# For NWB files created with other applications, we put everything in a single
# segment in a single block
# todo: investigate whether there is a reliable way to create multiple segments,
# e.g. using Trial information
block_name = "default"
segment_name = "default"
else:
block_name = hierarchy["block"]
segment_name = hierarchy["segment"]
segment = self._get_segment(block_name, segment_name)
if isinstance(timeseries, AnnotationSeries):
event = EventProxy(timeseries, group_name)
if not lazy:
event = event.load()
segment.events.append(event)
event.segment = segment
elif timeseries.rate: # AnalogSignal
signal = AnalogSignalProxy(timeseries, group_name)
if not lazy:
signal = signal.load()
segment.analogsignals.append(signal)
signal.segment = segment
else: # IrregularlySampledSignal
signal = AnalogSignalProxy(timeseries, group_name)
if not lazy:
signal = signal.load()
segment.irregularlysampledsignals.append(signal)
signal.segment = segment
def _read_units(self, lazy):
if self._file.units:
for id in range(len(self._file.units)):
try:
# NWB files created by Neo store the segment and block names as extra columns
segment_name = self._file.units.segment[id]
block_name = self._file.units.block[id]
except AttributeError:
# For NWB files created with other applications, we put everything in a single
# segment in a single block
segment_name = "default"
block_name = "default"
segment = self._get_segment(block_name, segment_name)
spiketrain = SpikeTrainProxy(self._file.units, id)
if not lazy:
spiketrain = spiketrain.load()
segment.spiketrains.append(spiketrain)
spiketrain.segment = segment
def _read_acquisition_group(self, lazy):
self._read_timeseries_group("acquisition", lazy)
def _read_stimulus_group(self, lazy):
self._read_timeseries_group("stimulus", lazy)
def write_all_blocks(self, blocks, **kwargs):
"""
Write list of blocks to the file
"""
# todo: allow metadata in NWBFile constructor to be taken from kwargs
annotations = defaultdict(set)
for annotation_name in GLOBAL_ANNOTATIONS:
if annotation_name in kwargs:
annotations[annotation_name] = kwargs[annotation_name]
else:
for block in blocks:
if annotation_name in block.annotations:
try:
annotations[annotation_name].add(block.annotations[annotation_name])
except TypeError:
if annotation_name in POSSIBLE_JSON_FIELDS:
encoded = json.dumps(block.annotations[annotation_name])
annotations[annotation_name].add(encoded)
else:
raise
if annotation_name in annotations:
if len(annotations[annotation_name]) > 1:
raise NotImplementedError(
"We don't yet support multiple values for {}".format(annotation_name))
# take single value from set
annotations[annotation_name], = annotations[annotation_name]
if "identifier" not in annotations:
annotations["identifier"] = self.filename
if "session_description" not in annotations:
annotations["session_description"] = blocks[0].description or self.filename
# todo: concatenate descriptions of multiple blocks if different
if "session_start_time" not in annotations:
raise Exception("Writing to NWB requires an annotation 'session_start_time'")
# todo: handle subject
# todo: store additional Neo annotations somewhere in NWB file
nwbfile = NWBFile(**annotations)
assert self.nwb_file_mode in ('w',) # possibly expand to 'a'ppend later
if self.nwb_file_mode == "w" and os.path.exists(self.filename):
os.remove(self.filename)
io_nwb = pynwb.NWBHDF5IO(self.filename, mode=self.nwb_file_mode)
if sum(statistics(block)["SpikeTrain"]["count"] for block in blocks) > 0:
nwbfile.add_unit_column('_name', 'the name attribute of the SpikeTrain')
# nwbfile.add_unit_column('_description',
# 'the description attribute of the SpikeTrain')
nwbfile.add_unit_column(
'segment', 'the name of the Neo Segment to which the SpikeTrain belongs')
nwbfile.add_unit_column(
'block', 'the name of the Neo Block to which the SpikeTrain belongs')
if sum(statistics(block)["Epoch"]["count"] for block in blocks) > 0:
nwbfile.add_epoch_column('_name', 'the name attribute of the Epoch')
# nwbfile.add_epoch_column('_description', 'the description attribute of the Epoch')
nwbfile.add_epoch_column(
'segment', 'the name of the Neo Segment to which the Epoch belongs')
nwbfile.add_epoch_column('block',
'the name of the Neo Block to which the Epoch belongs')
for i, block in enumerate(blocks):
self.write_block(nwbfile, block)
io_nwb.write(nwbfile)
io_nwb.close()
with pynwb.NWBHDF5IO(self.filename, "r") as io_validate:
errors = pynwb.validate(io_validate, namespace="core")
if errors:
raise Exception(f"Errors found when validating {self.filename}")
def write_block(self, nwbfile, block, **kwargs):
"""
Write a Block to the file
:param block: Block to be written
:param nwbfile: Representation of an NWB file
"""
electrodes = self._write_electrodes(nwbfile, block)
if not block.name:
block.name = "block%d" % self.blocks_written
for i, segment in enumerate(block.segments):
assert segment.block is block
if not segment.name:
segment.name = "%s : segment%d" % (block.name, i)
self._write_segment(nwbfile, segment, electrodes)
self.blocks_written += 1
def _write_electrodes(self, nwbfile, block):
# this handles only icephys_electrode for now
electrodes = {}
devices = {}
for segment in block.segments:
for signal in chain(segment.analogsignals, segment.irregularlysampledsignals):
if "nwb_electrode" in signal.annotations:
elec_meta = signal.annotations["nwb_electrode"].copy()
if elec_meta["name"] not in electrodes:
# todo: check for consistency if the name is already there
if elec_meta["device"]["name"] in devices:
device = devices[elec_meta["device"]["name"]]
else:
device = nwbfile.create_device(**elec_meta["device"])
devices[elec_meta["device"]["name"]] = device
elec_meta.pop("device")
electrodes[elec_meta["name"]] = nwbfile.create_icephys_electrode(
device=device, **elec_meta
)
return electrodes
def _write_segment(self, nwbfile, segment, electrodes):
# maybe use NWB trials to store Segment metadata?
for i, signal in enumerate(
chain(segment.analogsignals, segment.irregularlysampledsignals)):
assert signal.segment is segment
if not signal.name:
signal.name = "%s : analogsignal%d" % (segment.name, i)
self._write_signal(nwbfile, signal, electrodes)
for i, train in enumerate(segment.spiketrains):
assert train.segment is segment
if not train.name:
train.name = "%s : spiketrain%d" % (segment.name, i)
self._write_spiketrain(nwbfile, train)
for i, event in enumerate(segment.events):
assert event.segment is segment
if not event.name:
event.name = "%s : event%d" % (segment.name, i)
self._write_event(nwbfile, event)
for i, epoch in enumerate(segment.epochs):
if not epoch.name:
epoch.name = "%s : epoch%d" % (segment.name, i)
self._write_epoch(nwbfile, epoch)
def _write_signal(self, nwbfile, signal, electrodes):
hierarchy = {'block': signal.segment.block.name, 'segment': signal.segment.name}
if "nwb_neurodata_type" in signal.annotations:
timeseries_class = get_class(*signal.annotations["nwb_neurodata_type"])
else:
timeseries_class = TimeSeries # default
additional_metadata = {name[4:]: value
for name, value in signal.annotations.items()
if name.startswith("nwb:")}
if "nwb_electrode" in signal.annotations:
electrode_name = signal.annotations["nwb_electrode"]["name"]
additional_metadata["electrode"] = electrodes[electrode_name]
if timeseries_class != TimeSeries:
conversion, units = get_units_conversion(signal, timeseries_class)
additional_metadata["conversion"] = conversion
else:
units = signal.units
if isinstance(signal, AnalogSignal):
sampling_rate = signal.sampling_rate.rescale("Hz")
tS = timeseries_class(
name=signal.name,
starting_time=time_in_seconds(signal.t_start),
data=signal,
unit=units.dimensionality.string,
rate=float(sampling_rate),
comments=json.dumps(hierarchy),
**additional_metadata)
# todo: try to add array_annotations via "control" attribute
elif isinstance(signal, IrregularlySampledSignal):
tS = timeseries_class(
name=signal.name,
data=signal,
unit=units.dimensionality.string,
timestamps=signal.times.rescale('second').magnitude,
comments=json.dumps(hierarchy),
**additional_metadata)
else:
raise TypeError(
"signal has type {0}, should be AnalogSignal or IrregularlySampledSignal".format(
signal.__class__.__name__))
nwb_group = signal.annotations.get("nwb_group", "acquisition")
add_method_map = {
"acquisition": nwbfile.add_acquisition,
"stimulus": nwbfile.add_stimulus
}
if nwb_group in add_method_map:
add_time_series = add_method_map[nwb_group]
else:
raise NotImplementedError("NWB group '{}' not yet supported".format(nwb_group))
add_time_series(tS)
return tS
def _write_spiketrain(self, nwbfile, spiketrain):
nwbfile.add_unit(spike_times=spiketrain.rescale('s').magnitude,
obs_intervals=[[float(spiketrain.t_start.rescale('s')),
float(spiketrain.t_stop.rescale('s'))]],
_name=spiketrain.name,
# _description=spiketrain.description,
segment=spiketrain.segment.name,
block=spiketrain.segment.block.name)
# todo: handle annotations (using add_unit_column()?)
# todo: handle Neo Units
# todo: handle spike waveforms, if any (see SpikeEventSeries)
return nwbfile.units
def _write_event(self, nwbfile, event):
hierarchy = {'block': event.segment.block.name, 'segment': event.segment.name}
tS_evt = AnnotationSeries(
name=event.name,
data=event.labels,
timestamps=event.times.rescale('second').magnitude,
description=event.description or "",
comments=json.dumps(hierarchy))
nwbfile.add_acquisition(tS_evt)
return tS_evt
def _write_epoch(self, nwbfile, epoch):
for t_start, duration, label in zip(epoch.rescale('s').magnitude,
epoch.durations.rescale('s').magnitude,
epoch.labels):
nwbfile.add_epoch(t_start, t_start + duration, [label], [],
_name=epoch.name,
segment=epoch.segment.name,
block=epoch.segment.block.name)
return nwbfile.epochs
class AnalogSignalProxy(BaseAnalogSignalProxy):
common_metadata_fields = (
# fields that are the same for all TimeSeries subclasses
"comments", "description", "unit", "starting_time", "timestamps", "rate",
"data", "starting_time_unit", "timestamps_unit", "electrode"
)
def __init__(self, timeseries, nwb_group):
self._timeseries = timeseries
self.units = timeseries.unit
if timeseries.conversion:
self.units = _recompose_unit(timeseries.unit, timeseries.conversion)
if timeseries.starting_time is not None:
self.t_start = timeseries.starting_time * pq.s
else:
self.t_start = timeseries.timestamps[0] * pq.s
if timeseries.rate:
self.sampling_rate = timeseries.rate * pq.Hz
else:
self.sampling_rate = None
self.name = timeseries.name
self.annotations = {"nwb_group": nwb_group}
self.description = try_json_field(timeseries.description)
if isinstance(self.description, dict):
self.annotations["notes"] = self.description
if "name" in self.annotations:
self.annotations.pop("name")
self.description = None
self.shape = self._timeseries.data.shape
if len(self.shape) == 1:
self.shape = (self.shape[0], 1)
metadata_fields = list(timeseries.__nwbfields__)
for field_name in self.__class__.common_metadata_fields: # already handled
try:
metadata_fields.remove(field_name)
except ValueError:
pass
for field_name in metadata_fields:
value = getattr(timeseries, field_name)
if value is not None:
self.annotations[f"nwb:{field_name}"] = value
self.annotations["nwb_neurodata_type"] = (
timeseries.__class__.__module__,
timeseries.__class__.__name__
)
if hasattr(timeseries, "electrode"):
# todo: once the Group class is available, we could add electrode metadata
# to a Group containing all signals that share that electrode
# This would reduce the amount of redundancy (repeated metadata in every signal)
electrode_metadata = {"device": {}}
metadata_fields = list(timeseries.electrode.__class__.__nwbfields__) + ["name"]
metadata_fields.remove("device") # needs special handling
for field_name in metadata_fields:
value = getattr(timeseries.electrode, field_name)
if value is not None:
electrode_metadata[field_name] = value
for field_name in timeseries.electrode.device.__class__.__nwbfields__:
value = getattr(timeseries.electrode.device, field_name)
if value is not None:
electrode_metadata["device"][field_name] = value
self.annotations["nwb_electrode"] = electrode_metadata
def load(self, time_slice=None, strict_slicing=True):
"""
Load AnalogSignalProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
i_start, i_stop, sig_t_start = None, None, self.t_start
if time_slice:
if self.sampling_rate is None:
i_start, i_stop = np.searchsorted(self._timeseries.timestamps, time_slice)
else:
i_start, i_stop, sig_t_start = self._time_slice_indices(
time_slice, strict_slicing=strict_slicing)
signal = self._timeseries.data[i_start: i_stop]
if self.sampling_rate is None:
return IrregularlySampledSignal(
self._timeseries.timestamps[i_start:i_stop] * pq.s,
signal,
units=self.units,
t_start=sig_t_start,
sampling_rate=self.sampling_rate,
name=self.name,
description=self.description,
array_annotations=None,
**self.annotations) # todo: timeseries.control / control_description
else:
return AnalogSignal(
signal,
units=self.units,
t_start=sig_t_start,
sampling_rate=self.sampling_rate,
name=self.name,
description=self.description,
array_annotations=None,
**self.annotations) # todo: timeseries.control / control_description
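# Example (sketch) of partial loading through AnalogSignalProxy.load; `proxy` stands
# for any AnalogSignalProxy obtained from a lazily-read block:
#
#     sig = proxy.load(time_slice=(0 * pq.s, 1 * pq.s))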
class EventProxy(BaseEventProxy):
def __init__(self, timeseries, nwb_group):
self._timeseries = timeseries
self.name = timeseries.name
self.annotations = {"nwb_group": nwb_group}
self.description = try_json_field(timeseries.description)
if isinstance(self.description, dict):
self.annotations.update(self.description)
self.description = None
self.shape = self._timeseries.data.shape
def load(self, time_slice=None, strict_slicing=True):
"""
Load EventProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire signal.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
if time_slice:
raise NotImplementedError("todo")
else:
times = self._timeseries.timestamps[:]
labels = self._timeseries.data[:]
return Event(times * pq.s,
labels=labels,
name=self.name,
description=self.description,
**self.annotations)
class EpochProxy(BaseEpochProxy):
def __init__(self, time_intervals, epoch_name=None, index=None):
"""
:param time_intervals: An epochs table,
which is a specific TimeIntervals table that stores info about long periods
:param epoch_name: (str)
Name of the epoch object
:param index: (np.array, slice)
Slice object or array of bool values masking time_intervals to be used. In case of
an array it has to have the same shape as `time_intervals`.
"""
self._time_intervals = time_intervals
if index is not None:
self._index = index
self.shape = (index.sum(),)
else:
self._index = slice(None)
self.shape = (len(time_intervals),)
self.name = epoch_name
def load(self, time_slice=None, strict_slicing=True):
"""
Load EpochProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is all of the intervals.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
if time_slice:
raise NotImplementedError("todo")
else:
start_times = self._time_intervals.start_time[self._index]
stop_times = self._time_intervals.stop_time[self._index]
durations = stop_times - start_times
labels = self._time_intervals.tags[self._index]
return Epoch(times=start_times * pq.s,
durations=durations * pq.s,
labels=labels,
name=self.name)
class SpikeTrainProxy(BaseSpikeTrainProxy):
def __init__(self, units_table, id):
"""
:param units_table: A Units table
(see https://pynwb.readthedocs.io/en/stable/pynwb.misc.html#pynwb.misc.Units)
:param id: the cell/unit ID (integer)
"""
self._units_table = units_table
self.id = id
self.units = pq.s
obs_intervals = units_table.get_unit_obs_intervals(id)
if len(obs_intervals) == 0:
t_start, t_stop = None, None
elif len(obs_intervals) == 1:
t_start, t_stop = obs_intervals[0]
else:
raise NotImplementedError("Can't yet handle multiple observation intervals")
self.t_start = t_start * pq.s
self.t_stop = t_stop * pq.s
self.annotations = {"nwb_group": "acquisition"}
try:
# NWB files created by Neo store the name as an extra column
self.name = units_table._name[id]
except AttributeError:
self.name = None
self.shape = None # no way to get this without reading the data
def load(self, time_slice=None, strict_slicing=True):
"""
Load SpikeTrainProxy args:
:param time_slice: None or tuple of the time slice expressed with quantities.
None is the entire spike train.
:param strict_slicing: True by default.
Control if an error is raised or not when one of the time_slice members
(t_start or t_stop) is outside the real time range of the segment.
"""
interval = None
if time_slice:
            interval = tuple(float(t) for t in time_slice)  # convert from quantities to a plain tuple
spike_times = self._units_table.get_unit_spike_times(self.id, in_interval=interval)
return SpikeTrain(
spike_times * self.units,
self.t_stop,
units=self.units,
# sampling_rate=array(1.) * Hz,
t_start=self.t_start,
# waveforms=None,
# left_sweep=None,
name=self.name,
# file_origin=None,
# description=None,
# array_annotations=None,
**self.annotations) | en | 0.766314 | NWBIO ===== IO class for reading data from a Neurodata Without Borders (NWB) dataset Documentation : https://www.nwb.org/ Depends on: h5py, nwb, dateutil Supported: Read, Write Python API - https://pynwb.readthedocs.io Sample datasets from CRCNS - https://crcns.org/NWB Sample datasets from Allen Institute - http://alleninstitute.github.io/AllenSDK/cell_types.html#neurodata-without-borders # PyNWB imports # For calcium imaging data # hdmf imports Try to interpret a string as JSON data. If successful, return the JSON data (dict or list) If unsuccessful, return the original string Given a module path and a class name, return the class object # todo: handle the general case where this isn't 2 # todo: move this to be a property of Block Return simple statistics about a Neo Block. Given a quantity array and a TimeSeries subclass, return the conversion factor and the expected units # it would be nice if the expected units was an attribute of the PyNWB class # todo: warn that we don't handle this subclass yet Given a quantities unit object, return a base unit name and a conversion factor. Example: >>> _decompose_unit(pq.mV) ('volt', 0.001) # e.g. volt-metre # e.g. volt^2 Given a base unit name and a conversion factor, return a quantities unit object Example: >>> _recompose_unit("ampere", 1e-9) UnitCurrent('nanoampere', 0.001 * uA, 'nA') # conversion may have a different float precision to the keys in # prefix_map, so we can't just use `prefix_map[conversion]` # strip trailing 's', e.g. "volts" --> "volt" Class for "reading" experimental data from a .nwb file, and "writing" a .nwb file from Neo Arguments: filename : the filename Load all blocks in the file. # Open a file with NWBHDF5IO Load the first block in the file. # If we've already created a Block with the given name return it, # otherwise create it now and store it in self._blocks. # If we've already created a Segment in the given block, return it, # otherwise create it now and return it. # NWB files created by Neo store the segment, block and epoch names as extra # columns # NWB files created by Neo store the segment and block names in the comments field # For NWB files created with other applications, we put everything in a single # segment in a single block # todo: investigate whether there is a reliable way to create multiple segments, # e.g. using Trial information # AnalogSignal # IrregularlySampledSignal # NWB files created by Neo store the segment and block names as extra columns # For NWB files created with other applications, we put everything in a single # segment in a single block Write list of blocks to the file # todo: allow metadata in NWBFile constructor to be taken from kwargs # take single value from set # todo: concatenate descriptions of multiple blocks if different # todo: handle subject # todo: store additional Neo annotations somewhere in NWB file # possibly expand to 'a'ppend later # nwbfile.add_unit_column('_description', # 'the description attribute of the SpikeTrain') # nwbfile.add_epoch_column('_description', 'the description attribute of the Epoch') Write a Block to the file :param block: Block to be written :param nwbfile: Representation of an NWB file # this handles only icephys_electrode for now # todo: check for consistency if the name is already there # maybe use NWB trials to store Segment metadata? # default # todo: try to add array_annotations via "control" attribute # _description=spiketrain.description, # todo: handle annotations (using add_unit_column()?) 
# todo: handle Neo Units # todo: handle spike waveforms, if any (see SpikeEventSeries) # fields that are the same for all TimeSeries subclasses # already handled # todo: once the Group class is available, we could add electrode metadata # to a Group containing all signals that share that electrode # This would reduce the amount of redundancy (repeated metadata in every signal) # needs special handling Load AnalogSignalProxy args: :param time_slice: None or tuple of the time slice expressed with quantities. None is the entire signal. :param strict_slicing: True by default. Control if an error is raised or not when one of the time_slice members (t_start or t_stop) is outside the real time range of the segment. # todo: timeseries.control / control_description # todo: timeseries.control / control_description Load EventProxy args: :param time_slice: None or tuple of the time slice expressed with quantities. None is the entire signal. :param strict_slicing: True by default. Control if an error is raised or not when one of the time_slice members (t_start or t_stop) is outside the real time range of the segment. :param time_intervals: An epochs table, which is a specific TimeIntervals table that stores info about long periods :param epoch_name: (str) Name of the epoch object :param index: (np.array, slice) Slice object or array of bool values masking time_intervals to be used. In case of an array it has to have the same shape as `time_intervals`. Load EpochProxy args: :param time_slice: None or tuple of the time slice expressed with quantities. None is all of the intervals. :param strict_slicing: True by default. Control if an error is raised or not when one of the time_slice members (t_start or t_stop) is outside the real time range of the segment. :param units_table: A Units table (see https://pynwb.readthedocs.io/en/stable/pynwb.misc.html#pynwb.misc.Units) :param id: the cell/unit ID (integer) # NWB files created by Neo store the name as an extra column # no way to get this without reading the data Load SpikeTrainProxy args: :param time_slice: None or tuple of the time slice expressed with quantities. None is the entire spike train. :param strict_slicing: True by default. Control if an error is raised or not when one of the time_slice members (t_start or t_stop) is outside the real time range of the segment. # convert from quantities # sampling_rate=array(1.) * Hz, # waveforms=None, # left_sweep=None, # file_origin=None, # description=None, # array_annotations=None, | 2.136032 | 2 |
alpyro_msgs/actionlib_tutorials/averagingresult.py | rho2/alpyro_msgs | 1 | 6631384 | <filename>alpyro_msgs/actionlib_tutorials/averagingresult.py<gh_stars>1-10
from alpyro_msgs import RosMessage, float32
class AveragingResult(RosMessage):
__msg_typ__ = "actionlib_tutorials/AveragingResult"
__msg_def__ = "ZmxvYXQzMiBtZWFuCmZsb2F0MzIgc3RkX2RldgoK"
__md5_sum__ = "d5c7decf6df75ffb4367a05c1bcc7612"
mean: float32
std_dev: float32
| <filename>alpyro_msgs/actionlib_tutorials/averagingresult.py<gh_stars>1-10
from alpyro_msgs import RosMessage, float32
class AveragingResult(RosMessage):
__msg_typ__ = "actionlib_tutorials/AveragingResult"
__msg_def__ = "ZmxvYXQzMiBtZWFuCmZsb2F0MzIgc3RkX2RldgoK"
__md5_sum__ = "d5c7decf6df75ffb4367a05c1bcc7612"
mean: float32
std_dev: float32
| none | 1 | 2.166134 | 2 |
|
hexun/hexun/spiders/pvcSpider.py | judypol/pytonStudy | 0 | 6631385 | <filename>hexun/hexun/spiders/pvcSpider.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from scrapy.spiders import Spider
from scrapy.spiders import Request
import json
from items import HexunItem
from utils.urlUtils import UrlUtils
from utils.dateTimeUtils import DateTimeUtils
class PVCSpider(Spider):
name = 'pvc'
urlTemplate='http://webftcn.hermes.hexun.com/shf/minute?code=DCEv{0}&start={1}&number=225&t=1513834850784'
start_urls = [
]
    allowed_domains = ['hexun.com']  # a bare domain also matches subdomains; wildcard patterns are not supported
def start_requests(self):
contractList = DateTimeUtils.getContractList()
for contract in contractList:
url = self.urlTemplate.format(contract, DateTimeUtils.getStartTime())
yield Request(url=url, callback=self.parseItem)
def parseItem(self, response):
jsonData = json.loads(response.body_as_unicode().strip(';').strip('(').strip(')'))
datas = jsonData['Data'][0]
contractCode = self.getContractName(response)
for dataItem in datas:
lldpeItem = HexunItem()
lldpeItem['product'] = contractCode
lldpeItem['dateTime'] = dataItem[0]
lldpeItem['price'] = dataItem[1]
lldpeItem['amount'] = dataItem[2]
lldpeItem['volumn'] = dataItem[3]
lldpeItem['avePrice'] = dataItem[4]
lldpeItem['openInterest'] = dataItem[5]
yield lldpeItem
def getContractName(self, response):
code = UrlUtils.getQueryValue(response.url, 'code')[-4:]
return self.name + code
| <filename>hexun/hexun/spiders/pvcSpider.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from scrapy.spiders import Spider
from scrapy.spiders import Request
import json
from items import HexunItem
from utils.urlUtils import UrlUtils
from utils.dateTimeUtils import DateTimeUtils
class PVCSpider(Spider):
name = 'pvc'
urlTemplate='http://webftcn.hermes.hexun.com/shf/minute?code=DCEv{0}&start={1}&number=225&t=1513834850784'
start_urls = [
]
    allowed_domains = ['hexun.com']  # a bare domain also matches subdomains; wildcard patterns are not supported
def start_requests(self):
contractList = DateTimeUtils.getContractList()
for contract in contractList:
url = self.urlTemplate.format(contract, DateTimeUtils.getStartTime())
yield Request(url=url, callback=self.parseItem)
def parseItem(self, response):
jsonData = json.loads(response.body_as_unicode().strip(';').strip('(').strip(')'))
datas = jsonData['Data'][0]
contractCode = self.getContractName(response)
for dataItem in datas:
lldpeItem = HexunItem()
lldpeItem['product'] = contractCode
lldpeItem['dateTime'] = dataItem[0]
lldpeItem['price'] = dataItem[1]
lldpeItem['amount'] = dataItem[2]
lldpeItem['volumn'] = dataItem[3]
lldpeItem['avePrice'] = dataItem[4]
lldpeItem['openInterest'] = dataItem[5]
yield lldpeItem
def getContractName(self, response):
code = UrlUtils.getQueryValue(response.url, 'code')[-4:]
return self.name + code
| fr | 0.208008 | #!/usr/bin/python # -*- coding: UTF-8 -*- | 2.398872 | 2 |
tests/util/kaldi-io-test.py | mxmpl/pykaldi | 916 | 6631386 | <reponame>mxmpl/pykaldi
from __future__ import print_function
import os
import unittest
from kaldi.util.io import *
class TestKaldiIO(unittest.TestCase):
def testClassifyRxfilename(self):
self.assertEqual(InputType.STANDARD_INPUT, classify_rxfilename(""))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename(" "))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename(" a "))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename("a "))
self.assertEqual(InputType.FILE_INPUT, classify_rxfilename("a"))
self.assertEqual(InputType.STANDARD_INPUT, classify_rxfilename("-"))
self.assertEqual(InputType.PIPE_INPUT, classify_rxfilename("b|"))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename("|b"))
self.assertEqual(InputType.PIPE_INPUT, classify_rxfilename("b c|"))
self.assertEqual(InputType.OFFSET_FILE_INPUT, classify_rxfilename("a b c:123"))
self.assertEqual(InputType.OFFSET_FILE_INPUT, classify_rxfilename("a b c:3"))
self.assertEqual(InputType.FILE_INPUT, classify_rxfilename("a b c:"))
self.assertEqual(InputType.FILE_INPUT, classify_rxfilename("a b c/3"))
def testClassifyWxfilename(self):
self.assertEqual(OutputType.STANDARD_OUTPUT, classify_wxfilename(""))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename(" "))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename(" a "))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("a "))
self.assertEqual(OutputType.FILE_OUTPUT, classify_wxfilename("a"))
self.assertEqual(OutputType.STANDARD_OUTPUT, classify_wxfilename("-"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("b|"))
self.assertEqual(OutputType.PIPE_OUTPUT, classify_wxfilename("|b"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("b c|"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("a b c:123"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("a b c:3"))
self.assertEqual(OutputType.FILE_OUTPUT, classify_wxfilename("a b c:"))
self.assertEqual(OutputType.FILE_OUTPUT, classify_wxfilename("a b c/3"))
def test_text_io(self):
filename = "tmpf"
lines = ["400\t500\t600", "700\td"]
with Output(filename, False) as ko:
for line in lines:
print(line, file=ko)
with Input(filename, False) as ki:
for i, line in enumerate(ki):
self.assertEqual(line.strip(), lines[i])
os.remove(filename)
def test_binary_io(self):
filename = "tmpf"
lines = [b"\t500\t600\n", b"700\td\n"]
with Output(filename) as ko:
for line in lines:
ko.write(line)
with Input(filename) as ki:
self.assertTrue(ki.binary)
for i, line in enumerate(ki):
self.assertEqual(line, lines[i])
os.remove(filename)
def test_xopen(self):
filename = "tmpf"
lines = [b"\t500\t600\n", b"700\td\n"]
with xopen(filename, "w") as ko:
ko.writelines(lines)
with xopen(filename) as ki:
self.assertTrue(ki.binary)
for i, line in enumerate(ki):
self.assertEqual(line, lines[i])
os.remove(filename)
if __name__ == '__main__':
unittest.main()
| from __future__ import print_function
import os
import unittest
from kaldi.util.io import *
class TestKaldiIO(unittest.TestCase):
def testClassifyRxfilename(self):
self.assertEqual(InputType.STANDARD_INPUT, classify_rxfilename(""))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename(" "))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename(" a "))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename("a "))
self.assertEqual(InputType.FILE_INPUT, classify_rxfilename("a"))
self.assertEqual(InputType.STANDARD_INPUT, classify_rxfilename("-"))
self.assertEqual(InputType.PIPE_INPUT, classify_rxfilename("b|"))
self.assertEqual(InputType.NO_INPUT, classify_rxfilename("|b"))
self.assertEqual(InputType.PIPE_INPUT, classify_rxfilename("b c|"))
self.assertEqual(InputType.OFFSET_FILE_INPUT, classify_rxfilename("a b c:123"))
self.assertEqual(InputType.OFFSET_FILE_INPUT, classify_rxfilename("a b c:3"))
self.assertEqual(InputType.FILE_INPUT, classify_rxfilename("a b c:"))
self.assertEqual(InputType.FILE_INPUT, classify_rxfilename("a b c/3"))
def testClassifyWxfilename(self):
self.assertEqual(OutputType.STANDARD_OUTPUT, classify_wxfilename(""))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename(" "))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename(" a "))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("a "))
self.assertEqual(OutputType.FILE_OUTPUT, classify_wxfilename("a"))
self.assertEqual(OutputType.STANDARD_OUTPUT, classify_wxfilename("-"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("b|"))
self.assertEqual(OutputType.PIPE_OUTPUT, classify_wxfilename("|b"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("b c|"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("a b c:123"))
self.assertEqual(OutputType.NO_OUTPUT, classify_wxfilename("a b c:3"))
self.assertEqual(OutputType.FILE_OUTPUT, classify_wxfilename("a b c:"))
self.assertEqual(OutputType.FILE_OUTPUT, classify_wxfilename("a b c/3"))
def test_text_io(self):
filename = "tmpf"
lines = ["400\t500\t600", "700\td"]
with Output(filename, False) as ko:
for line in lines:
print(line, file=ko)
with Input(filename, False) as ki:
for i, line in enumerate(ki):
self.assertEqual(line.strip(), lines[i])
os.remove(filename)
def test_binary_io(self):
filename = "tmpf"
lines = [b"\t500\t600\n", b"700\td\n"]
with Output(filename) as ko:
for line in lines:
ko.write(line)
with Input(filename) as ki:
self.assertTrue(ki.binary)
for i, line in enumerate(ki):
self.assertEqual(line, lines[i])
os.remove(filename)
def test_xopen(self):
filename = "tmpf"
lines = [b"\t500\t600\n", b"700\td\n"]
with xopen(filename, "w") as ko:
ko.writelines(lines)
with xopen(filename) as ki:
self.assertTrue(ki.binary)
for i, line in enumerate(ki):
self.assertEqual(line, lines[i])
os.remove(filename)
if __name__ == '__main__':
unittest.main() | none | 1 | 2.367018 | 2 |
|
orders/permissions.py | City-of-Turku/munpalvelut_backend | 0 | 6631387 | #!/usr/bin/env python
# coding=utf-8
from rest_framework import permissions
# Owner
class IsOwner(permissions.BasePermission):
def has_permission(self, request, view):
try:
return request.user and \
str(request.user.pk) == str(request.parser_context['kwargs']['user_pk'])
except (AttributeError, KeyError):
return False
def has_object_permission(self, request, view, obj):
return request.user == obj.user
class IsOwnerOrStaff(IsOwner):
def has_permission(self, request, view):
return request.user.is_staff or \
super(IsOwnerOrStaff, self).has_permission(request, view)
def has_object_permission(self, request, view, obj):
return request.user.is_staff or \
super(IsOwnerOrStaff, self).has_object_permission(request, view, obj)
# Company User
class IsCompanyUser(permissions.BasePermission):
def has_permission(self, request, view):
try:
return request.user.company and \
str(request.user.company.pk) == str(request.parser_context['kwargs']['company_pk'])
except (AttributeError, KeyError):
return False
def has_object_permission(self, request, view, obj):
return request.user.company == obj.company
class IsCompanyUserOrStaff(IsCompanyUser):
def has_permission(self, request, view):
return request.user.is_staff or \
super(IsCompanyUserOrStaff, self).has_permission(request, view)
def has_object_permission(self, request, view, obj):
return request.user.is_staff or \
super(IsCompanyUserOrStaff, self).has_object_permission(request, view, obj)
# Rating
class CanRate(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return obj.can_be_rated()
| #!/usr/bin/env python
# coding=utf-8
from rest_framework import permissions
# Owner
class IsOwner(permissions.BasePermission):
def has_permission(self, request, view):
try:
return request.user and \
str(request.user.pk) == str(request.parser_context['kwargs']['user_pk'])
except (AttributeError, KeyError):
return False
def has_object_permission(self, request, view, obj):
return request.user == obj.user
class IsOwnerOrStaff(IsOwner):
def has_permission(self, request, view):
return request.user.is_staff or \
super(IsOwnerOrStaff, self).has_permission(request, view)
def has_object_permission(self, request, view, obj):
return request.user.is_staff or \
super(IsOwnerOrStaff, self).has_object_permission(request, view, obj)
# Company User
class IsCompanyUser(permissions.BasePermission):
def has_permission(self, request, view):
try:
return request.user.company and \
str(request.user.company.pk) == str(request.parser_context['kwargs']['company_pk'])
except (AttributeError, KeyError):
return False
def has_object_permission(self, request, view, obj):
return request.user.company == obj.company
class IsCompanyUserOrStaff(IsCompanyUser):
def has_permission(self, request, view):
return request.user.is_staff or \
super(IsCompanyUserOrStaff, self).has_permission(request, view)
def has_object_permission(self, request, view, obj):
return request.user.is_staff or \
super(IsCompanyUserOrStaff, self).has_object_permission(request, view, obj)
# Rating
class CanRate(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return obj.can_be_rated()
| en | 0.549133 | #!/usr/bin/env python # coding=utf-8 # Owner # Company User # Rating | 2.281704 | 2 |
doc/integrating.py | The-Compiler/crashbin | 0 | 6631388 | import sys
import requests
import traceback
CRASHBIN_URL = 'http://crashbin.example.org/api/report/new/'
def handle_exception(exc_type, exc_value, exc_traceback):
title = traceback.format_exception_only(exc_type, exc_value)[0]
text = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
requests.post(CRASHBIN_URL, {'title': title, 'log': text})
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
def main():
raise Exception("Unhandled exception")
main()
| import sys
import requests
import traceback
CRASHBIN_URL = 'http://crashbin.example.org/api/report/new/'
def handle_exception(exc_type, exc_value, exc_traceback):
title = traceback.format_exception_only(exc_type, exc_value)[0]
text = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
requests.post(CRASHBIN_URL, {'title': title, 'log': text})
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
def main():
raise Exception("Unhandled exception")
main()
| none | 1 | 2.559735 | 3 |
|
pythonFiles/printEnvVariablesToFile.py | ihnorton/vscode-python | 0 | 6631389 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import json
import sys
# Last argument is the target file into which we'll write the env variables as json.
json_file = sys.argv[-1]
with open(json_file, 'w') as outfile:
json.dump(dict(os.environ), outfile)
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import json
import sys
# Last argument is the target file into which we'll write the env variables as json.
json_file = sys.argv[-1]
with open(json_file, 'w') as outfile:
json.dump(dict(os.environ), outfile)
| en | 0.91062 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # Last argument is the target file into which we'll write the env variables as json. | 2.584117 | 3 |
datumaro/plugins/openvino_plugin/samples/ssd_vehicle_detection_interp.py | IRDonch/datumaro | 237 | 6631390 | <reponame>IRDonch/datumaro
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from datumaro.components.annotation import AnnotationType, Bbox, LabelCategories
conf_thresh = 0.02
def _match_confs(confs, detections):
matches = [-1] * len(detections)
queries = {}
for i, det in enumerate(detections):
queries.setdefault(int(det[1]), []).append((det[2], i))
found_count = 0
for i, v in enumerate(confs):
if found_count == len(detections):
break
for cls_id, query in queries.items():
if found_count == len(detections):
break
for q_id, (conf, det_idx) in enumerate(query):
if v[cls_id] == conf:
matches[det_idx] = i
query.pop(q_id)
found_count += 1
break
return matches
def process_outputs(inputs, outputs):
# inputs = model input; array or images; shape = (B, H, W, C)
# outputs = model output; shape = (1, 1, N, 7); N is the number of detected bounding boxes.
# det = [image_id, label(class id), conf, x_min, y_min, x_max, y_max]
# results = conversion result; [[ Annotation, ... ], ... ]
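    # For illustration only, a made-up detection row
    #     [0., 1., 0.87, 0.10, 0.20, 0.50, 0.60]
    # would describe image 0, class id 1, confidence 0.87, and a box spanning
    # 10-50% of the input width and 20-60% of its height.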
results = []
for input_, detections in zip(inputs, outputs["detection_out"]):
input_height, input_width = input_.shape[:2]
confs = outputs["Softmax_189/Softmax_"]
detections = detections[0]
conf_ids = _match_confs(confs, detections)
image_results = []
for i, det in enumerate(detections):
image_id = int(det[0]) # pylint: disable=unused-variable
label = int(det[1])
conf = float(det[2])
det_confs = confs[conf_ids[i]]
if conf <= conf_thresh:
continue
x = max(int(det[3] * input_width), 0)
y = max(int(det[4] * input_height), 0)
w = min(int(det[5] * input_width - x), input_width)
h = min(int(det[6] * input_height - y), input_height)
image_results.append(Bbox(x, y, w, h, label=label,
attributes={ 'score': conf, 'scores': list(map(float, det_confs)) }
))
results.append(image_results)
return results
def get_categories():
# output categories - label map etc.
label_categories = LabelCategories()
label_categories.add("vehicle")
return {AnnotationType.label: label_categories}
| # Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from datumaro.components.annotation import AnnotationType, Bbox, LabelCategories
conf_thresh = 0.02
def _match_confs(confs, detections):
matches = [-1] * len(detections)
queries = {}
for i, det in enumerate(detections):
queries.setdefault(int(det[1]), []).append((det[2], i))
found_count = 0
for i, v in enumerate(confs):
if found_count == len(detections):
break
for cls_id, query in queries.items():
if found_count == len(detections):
break
for q_id, (conf, det_idx) in enumerate(query):
if v[cls_id] == conf:
matches[det_idx] = i
query.pop(q_id)
found_count += 1
break
return matches
def process_outputs(inputs, outputs):
# inputs = model input; array or images; shape = (B, H, W, C)
# outputs = model output; shape = (1, 1, N, 7); N is the number of detected bounding boxes.
# det = [image_id, label(class id), conf, x_min, y_min, x_max, y_max]
# results = conversion result; [[ Annotation, ... ], ... ]
results = []
for input_, detections in zip(inputs, outputs["detection_out"]):
input_height, input_width = input_.shape[:2]
confs = outputs["Softmax_189/Softmax_"]
detections = detections[0]
conf_ids = _match_confs(confs, detections)
image_results = []
for i, det in enumerate(detections):
image_id = int(det[0]) # pylint: disable=unused-variable
label = int(det[1])
conf = float(det[2])
det_confs = confs[conf_ids[i]]
if conf <= conf_thresh:
continue
x = max(int(det[3] * input_width), 0)
y = max(int(det[4] * input_height), 0)
w = min(int(det[5] * input_width - x), input_width)
h = min(int(det[6] * input_height - y), input_height)
image_results.append(Bbox(x, y, w, h, label=label,
attributes={ 'score': conf, 'scores': list(map(float, det_confs)) }
))
results.append(image_results)
return results
def get_categories():
# output categories - label map etc.
label_categories = LabelCategories()
label_categories.add("vehicle")
return {AnnotationType.label: label_categories} | en | 0.360342 | # Copyright (C) 2021 Intel Corporation # # SPDX-License-Identifier: MIT # inputs = model input; array or images; shape = (B, H, W, C) # outputs = model output; shape = (1, 1, N, 7); N is the number of detected bounding boxes. # det = [image_id, label(class id), conf, x_min, y_min, x_max, y_max] # results = conversion result; [[ Annotation, ... ], ... ] # pylint: disable=unused-variable # output categories - label map etc. | 1.866618 | 2 |
cloudnet-package/trainer/task.py | Windact/cloud_detection | 0 | 6631391 | <filename>cloudnet-package/trainer/task.py
from pathlib import Path
import argparse
import sys
import logging
from datetime import datetime
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping,TensorBoard
from trainer.utils import ADAMLearningRateTracker, jacc_coef, load_dataset  # load_dataset is assumed to live in trainer.utils
from trainer.model import model_arch
# logger
model_logger = logging.getLogger(__name__)
model_logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
model_logger_file_handler = logging.FileHandler('model.log')
model_logger_file_handler.setFormatter(formatter)
model_logger.addHandler(model_logger_file_handler)
def _parse_arguments(argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_data_path',
help='train data path',
type=str, default="/home/jupyter/cloud_detection/data/train_data.csv")
parser.add_argument(
'--val_data_path',
help='validation data path',
type=str, default="/home/jupyter/cloud_detection/data/val_data.csv")
parser.add_argument(
'--batch_size',
help='model batch size',
type=int, default=12)
parser.add_argument(
'--epochs',
help='The number of epochs to train',
type=int, default=10)
parser.add_argument(
'--random_state',
help='random state',
type=int, default=42)
parser.add_argument(
'--starting_learning_rate',
help='starting learning rate',
type=float, default=1e-4)
parser.add_argument(
'--end_learning_rate',
help='end learning rate',
type=float, default=1e-8)
parser.add_argument(
'--input_rows',
help='input image input_rows',
type=int, default=192)
parser.add_argument(
'--input_cols',
        help='input image input_cols',
type=int, default=192)
parser.add_argument(
'--patience',
        help='patience for tensorflow.keras.callbacks.ReduceLROnPlateau',
type=int, default=15)
parser.add_argument(
'--decay_factor',
help='decay_factor for tensorflow.keras.callbacks.ReduceLROnPlateau',
type=float, default=0.7)
parser.add_argument(
'--experiment_name',
help='experiment_name',
type=str, default="cloudnet")
parser.add_argument(
'--early_s_patience',
help='tensorflow.keras.callbacks.EarlyStopping patience',
type=int, default=20)
parser.add_argument(
'--num_of_channels',
help='num_of_channels',
type=int, default=16)
parser.add_argument(
'--num_of_classes',
help='num_of_classes',
type=int, default=4)
parser.add_argument(
'--reshape',
        help='reshape image and mask to the same shape',
type=bool, default=True)
parser.add_argument(
'--quick_test',
        help='run the model on a smaller sample',
type=bool, default=False)
parser.add_argument(
'--train_resume',
help='resume train or not',
type=bool, default=False)
parser.add_argument(
'--job-dir',
help='Directory where to save the given model',
type=str, default='cloud_detection_models/')
return parser.parse_known_args(argv)
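# Example invocation (sketch; the CSV paths are placeholders):
#
#   python -m trainer.task \
#       --train_data_path /data/train_data.csv \
#       --val_data_path /data/val_data.csv \
#       --batch_size 12 --epochs 10 --experiment_name cloudnet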
def main():
# Get the arguments
args = _parse_arguments(sys.argv[1:])[0]
#BATCH_SIZE = args.batch_size
# SHUFFLE_BUFFER = 10 * BATCH_SIZE
# RANDOM_STATE = args.random_state
# AUTOTUNE = tf.data.experimental.AUTOTUNE
TRAIN_DATA_PATH = args.train_data_path
VAL_DATA_PATH = args.val_data_path
#quick_test = args.quick_test
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
experiment_name = f"{args.experiment_name}_{current_time}"
ROOT_DIR = Path.cwd().resolve()
MODEL_DIR = ROOT_DIR / "models"
TRAIN_DIR = MODEL_DIR / "train"
TEST_DIR = MODEL_DIR / "test"
EXP_DIR = TRAIN_DIR / experiment_name
ORIGINAL_MODEL_WEIGHT_PATH = (MODEL_DIR / "original_weights") / "Cloud-Net_trained_on_38-Cloud_training_patches.h5" # not implemented
folders = [MODEL_DIR,TRAIN_DIR,TEST_DIR,EXP_DIR]
for folder in folders:
if not folder.exists():
folder.mkdir(parents = False,exist_ok= True)
MODEL_WEIGHTS_PATH = ROOT_DIR/"model_weights"
if not MODEL_WEIGHTS_PATH.exists():
MODEL_WEIGHTS_PATH.mkdir()
weights_path = MODEL_WEIGHTS_PATH / "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
random_state = args.random_state
# hparams
# starting_learning_rate = args.starting_learning_rate
# end_learning_rate = args.end_learning_rate
# epochs = args.epochs # just a huge number. The actual training should not be limited by this value
# #val_ratio = 0.2
# patience = args.patience
# decay_factor = args.decay_factor
# experiment_name = args.experiment_name
# early_s_patience = args.early_s_patience
# params
input_rows = args.input_rows
input_cols = args.input_cols
# img_shape = (input_rows,input_cols)
num_of_channels = args.num_of_channels
num_of_classes = args.num_of_classes
reshape = args.reshape
# hparams
batch_size = args.batch_size
starting_learning_rate = args.starting_learning_rate
end_learning_rate = args.end_learning_rate
max_num_epochs = args.epochs # just a huge number. The actual training should not be limited by this value
patience = args.patience
decay_factor = args.decay_factor
early_s_patience = args.early_s_patience
train_resume = args.train_resume
# log
    model_logger.info("All parameters have been parsed")
# datasets
train_dataset = load_dataset(file_paths= TRAIN_DATA_PATH, training = True,reshape= reshape, num_epochs=max_num_epochs)
val_dataset = load_dataset(file_paths= VAL_DATA_PATH, training = False,reshape= reshape)
# Model
strategy = tf.distribute.MirroredStrategy()
model_logger.info('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
model = model_arch(input_rows=input_rows,
input_cols=input_cols,
num_of_channels=num_of_channels,
num_of_classes=num_of_classes)
model.compile(optimizer=Adam(learning_rate=starting_learning_rate), loss=jacc_coef, metrics=[jacc_coef])
# model.summary()
model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss', save_best_only=True)
lr_reducer = ReduceLROnPlateau(factor=decay_factor, cooldown=0, patience=patience, min_lr=end_learning_rate, verbose=1)
csv_logger = CSVLogger(EXP_DIR / '_log_1.log')
    tensorboard = TensorBoard(log_dir=EXP_DIR / 'logs', histogram_freq=0, write_graph=True, write_images=False,
                              write_steps_per_second=False, update_freq='epoch', profile_batch=0,
                              embeddings_freq=0, embeddings_metadata=None)
if train_resume:
model.load_weights(ORIGINAL_MODEL_WEIGHT_PATH)
model_logger.info("\nTraining resumed...")
else:
model_logger.info("\nTraining started from scratch... ")
    model_logger.info("Experiment name: %s", experiment_name)
    model_logger.info("Input image size: %s", (input_rows, input_cols))
    model_logger.info("Number of input spectral bands: %s", num_of_channels)
    model_logger.info("Learning rate: %s", starting_learning_rate)
    model_logger.info("# Epochs: %s", max_num_epochs)
    model_logger.info("Batch size: %s", batch_size)
model.fit(train_dataset,validation_data = val_dataset,epochs = max_num_epochs,verbose = 1,
callbacks=[model_checkpoint, lr_reducer, ADAMLearningRateTracker(end_learning_rate), csv_logger,tensorboard])
if __name__ == '__main__':
main()
| <filename>cloudnet-package/trainer/task.py
from pathlib import Path
import argparse
import sys
import logging
from datetime import datetime
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping,TensorBoard
from trainer.utils import ADAMLearningRateTracker, jacc_coef
from trainer.model import model_arch
# logger
model_logger = logging.getLogger(__name__)
model_logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
model_logger_file_handler = logging.FileHandler('model.log')
model_logger_file_handler.setFormatter(formatter)
model_logger.addHandler(model_logger_file_handler)
def _parse_arguments(argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--train_data_path',
help='train data path',
type=str, default="/home/jupyter/cloud_detection/data/train_data.csv")
parser.add_argument(
'--val_data_path',
help='validation data path',
type=str, default="/home/jupyter/cloud_detection/data/val_data.csv")
parser.add_argument(
'--batch_size',
help='model batch size',
type=int, default=12)
parser.add_argument(
'--epochs',
help='The number of epochs to train',
type=int, default=10)
parser.add_argument(
'--random_state',
help='random state',
type=int, default=42)
parser.add_argument(
'--starting_learning_rate',
help='starting learning rate',
type=float, default=1e-4)
parser.add_argument(
'--end_learning_rate',
help='end learning rate',
type=float, default=1e-8)
parser.add_argument(
'--input_rows',
help='input image input_rows',
type=int, default=192)
parser.add_argument(
'--input_cols',
help='input image input_rows',
type=int, default=192)
parser.add_argument(
'--patience',
help='patience for early_s_patience.ReduceLROnPlateau',
type=int, default=15)
parser.add_argument(
'--decay_factor',
help='decay_factor for tensorflow.keras.callbacks.ReduceLROnPlateau',
type=float, default=0.7)
parser.add_argument(
'--experiment_name',
help='experiment_name',
type=str, default="cloudnet")
parser.add_argument(
'--early_s_patience',
help='tensorflow.keras.callbacks.EarlyStopping patience',
type=int, default=20)
parser.add_argument(
'--num_of_channels',
help='num_of_channels',
type=int, default=16)
parser.add_argument(
'--num_of_classes',
help='num_of_classes',
type=int, default=4)
parser.add_argument(
'--reshape',
help='reshape image and mask to the sampe shape',
type=bool, default=True)
parser.add_argument(
'--quick_test',
help='run the model on a smaler sample',
type=bool, default=False)
parser.add_argument(
'--train_resume',
help='resume train or not',
type=bool, default=False)
parser.add_argument(
'--job-dir',
help='Directory where to save the given model',
type=str, default='cloud_detection_models/')
return parser.parse_known_args(argv)
def main():
# Get the arguments
args = _parse_arguments(sys.argv[1:])[0]
#BATCH_SIZE = args.batch_size
# SHUFFLE_BUFFER = 10 * BATCH_SIZE
# RANDOM_STATE = args.random_state
# AUTOTUNE = tf.data.experimental.AUTOTUNE
TRAIN_DATA_PATH = args.train_data_path
VAL_DATA_PATH = args.val_data_path
#quick_test = args.quick_test
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
experiment_name = f"{args.experiment_name}_{current_time}"
ROOT_DIR = Path.cwd().resolve()
MODEL_DIR = ROOT_DIR / "models"
TRAIN_DIR = MODEL_DIR / "train"
TEST_DIR = MODEL_DIR / "test"
EXP_DIR = TRAIN_DIR / experiment_name
ORIGINAL_MODEL_WEIGHT_PATH = (MODEL_DIR / "original_weights") / "Cloud-Net_trained_on_38-Cloud_training_patches.h5" # not implemented
folders = [MODEL_DIR,TRAIN_DIR,TEST_DIR,EXP_DIR]
for folder in folders:
if not folder.exists():
folder.mkdir(parents = False,exist_ok= True)
MODEL_WEIGHTS_PATH = ROOT_DIR/"model_weights"
if not MODEL_WEIGHTS_PATH.exists():
MODEL_WEIGHTS_PATH.mkdir()
weights_path = MODEL_WEIGHTS_PATH / "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
random_state = args.random_state
# hparams
# starting_learning_rate = args.starting_learning_rate
# end_learning_rate = args.end_learning_rate
# epochs = args.epochs # just a huge number. The actual training should not be limited by this value
# #val_ratio = 0.2
# patience = args.patience
# decay_factor = args.decay_factor
# experiment_name = args.experiment_name
# early_s_patience = args.early_s_patience
# params
input_rows = args.input_rows
input_cols = args.input_cols
# img_shape = (input_rows,input_cols)
num_of_channels = args.num_of_channels
num_of_classes = args.num_of_classes
reshape = args.reshape
# hparams
batch_size = args.batch_size
starting_learning_rate = args.starting_learning_rate
end_learning_rate = args.end_learning_rate
max_num_epochs = args.epochs # just a huge number. The actual training should not be limited by this value
patience = args.patience
decay_factor = args.decay_factor
early_s_patience = args.early_s_patience
train_resume = args.train_resume
# log
model_logger.info("All parameters have been paresed")
# datasets
train_dataset = load_dataset(file_paths= TRAIN_DATA_PATH, training = True,reshape= reshape, num_epochs=max_num_epochs)
val_dataset = load_dataset(file_paths= VAL_DATA_PATH, training = False,reshape= reshape)
# Model
strategy = tf.distribute.MirroredStrategy()
model_logger.info('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
model = model_arch(input_rows=input_rows,
input_cols=input_cols,
num_of_channels=num_of_channels,
num_of_classes=num_of_classes)
model.compile(optimizer=Adam(learning_rate=starting_learning_rate), loss=jacc_coef, metrics=[jacc_coef])
# model.summary()
model_checkpoint = ModelCheckpoint(weights_path, monitor='val_loss', save_best_only=True)
lr_reducer = ReduceLROnPlateau(factor=decay_factor, cooldown=0, patience=patience, min_lr=end_learning_rate, verbose=1)
csv_logger = CSVLogger(EXP_DIR / '_log_1.log')
tensorboard = TensorBoard(log_dir= EXP_DIR / 'logs', histogram_freq=0, write_graph=True,write_images=False, write_steps_per_second=False,
update_freq='epoch',profile_batch=0, embeddings_freq=0, embeddings_metadata=None, **kwargs)
if train_resume:
model.load_weights(ORIGINAL_MODEL_WEIGHT_PATH)
model_logger.info("\nTraining resumed...")
else:
model_logger.info("\nTraining started from scratch... ")
model_logger("Experiment name: ", experiment_name)
model_logger("Input image size: ", (input_rows, input_cols))
model_logger("Number of input spectral bands: ", num_of_channels)
model_logger("Learning rate: ", starting_learning_rate)
model_logger("# Epochs: ", max_num_epochs)
model_logger("Batch size: ", batch_size, "\n")
model.fit(train_dataset,validation_data = val_dataset,epochs = max_num_epochs,verbose = 1,
callbacks=[model_checkpoint, lr_reducer, ADAMLearningRateTracker(end_learning_rate), csv_logger,tensorboard])
if __name__ == '__main__':
main()
| en | 0.500677 | # logger Parses command-line arguments. # Get the arguments #BATCH_SIZE = args.batch_size # SHUFFLE_BUFFER = 10 * BATCH_SIZE # RANDOM_STATE = args.random_state # AUTOTUNE = tf.data.experimental.AUTOTUNE #quick_test = args.quick_test # not implemented # hparams # starting_learning_rate = args.starting_learning_rate # end_learning_rate = args.end_learning_rate # epochs = args.epochs # just a huge number. The actual training should not be limited by this value # #val_ratio = 0.2 # patience = args.patience # decay_factor = args.decay_factor # experiment_name = args.experiment_name # early_s_patience = args.early_s_patience # params # img_shape = (input_rows,input_cols) # hparams # just a huge number. The actual training should not be limited by this value # log # datasets # Model # model.summary() | 2.056655 | 2 |
james-2.3.1/bin/sendmail.py | ViktorKovalenko/java_pft | 1 | 6631392 | <gh_stars>1-10
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# --------------------------------------------------------------------------
#
# This is a simple mail client intended to suffice as the required
# "sendmail" client on typical UNIX-style systems. It requires an
# SMTP SMTP server for handling the e-mail that users and system
# utilities may send via "sendmail".
#
# To install, symlink from /usr/{[s]bin,lib[exec]}/sendmail or similar
# for the particular deployment.
#
# --------------------------------------------------------------------------
import smtplib
import socket
import os
import sys
import getopt
def Usage():
print "sendmail [-f <from_addr>][-F <full name>][-t][-h]"
sys.exit(0)
def ProcessHeaders(headers, to_addrs, extract, fullname, from_addr):
hasFrom = False
for header in headers:
if header.startswith("To:"):
if extract:
#to = header[3:]
#to_addrs.append(to[("<" + to).rfind("<"):(to + ">").find(">")])
allRecipientsString = header[3:]
allRecipientsArray = allRecipientsString.split(',')
for recipient in allRecipientsArray:
to_addrs.append(recipient[("<" + recipient).rfind("<"):(recipient + ">").find(">")])
elif header.startswith("From:"):
hasFrom = True
if hasFrom:
header = "Sender"
else:
header = "From"
if fullname:
headers.insert(0, "%s: %s <%s>" % (header,fullname, from_addr))
else:
headers.insert(0, "%s: %s" % (header, from_addr))
return headers, to_addrs
def main(argv):
try:
optlist, list = getopt.getopt(sys.argv[1:], 'f:F:hti')
except getopt.GetoptError:
Usage()
print >> sys.stderr, "called exception"
sys.exit(2)
to_addrs = list
try:
from_addr = os.environ['USER'] + '@' + socket.getfqdn()
except KeyError:
from_addr = "nobody@" + socket.getfqdn()
fullname = ""
extract = False
for opt, value in optlist:
if opt == '-h':
Usage()
elif opt == '-t':
extract = True
elif opt == '-F':
fullname = value
elif opt == '-f':
from_addr = value
print "Enter message, end with ^D (Unix) or ^Z (Windows):"
processedHeaders = False
msg = []
while 1:
try:
line = raw_input()
except EOFError:
break
if not line and not processedHeaders:
msg, to_addrs = ProcessHeaders(msg, to_addrs, extract, fullname, from_addr)
processedHeaders = True
msg.append(line)
msg = "\r\n".join(msg)
if not to_addrs:
print >> sys.stderr, "Must specify recipients on command line, or use -t with To: headers in message"
sys.exit(0)
server = smtplib.SMTP('127.0.0.1')
server.set_debuglevel(0)
server.sendmail(from_addr, to_addrs, msg)
server.quit()
if __name__ == '__main__':
main(sys.argv)
| #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# --------------------------------------------------------------------------
#
# This is a simple mail client intended to suffice as the required
# "sendmail" client on typical UNIX-style systems. It requires an
# SMTP SMTP server for handling the e-mail that users and system
# utilities may send via "sendmail".
#
# To install, symlink from /usr/{[s]bin,lib[exec]}/sendmail or similar
# for the particular deployment.
#
# --------------------------------------------------------------------------
import smtplib
import socket
import os
import sys
import getopt
def Usage():
print "sendmail [-f <from_addr>][-F <full name>][-t][-h]"
sys.exit(0)
def ProcessHeaders(headers, to_addrs, extract, fullname, from_addr):
hasFrom = False
for header in headers:
if header.startswith("To:"):
if extract:
#to = header[3:]
#to_addrs.append(to[("<" + to).rfind("<"):(to + ">").find(">")])
allRecipientsString = header[3:]
allRecipientsArray = allRecipientsString.split(',')
for recipient in allRecipientsArray:
to_addrs.append(recipient[("<" + recipient).rfind("<"):(recipient + ">").find(">")])
elif header.startswith("From:"):
hasFrom = True
if hasFrom:
header = "Sender"
else:
header = "From"
if fullname:
headers.insert(0, "%s: %s <%s>" % (header,fullname, from_addr))
else:
headers.insert(0, "%s: %s" % (header, from_addr))
return headers, to_addrs
def main(argv):
try:
optlist, list = getopt.getopt(sys.argv[1:], 'f:F:hti')
except getopt.GetoptError:
Usage()
print >> sys.stderr, "called exception"
sys.exit(2)
to_addrs = list
try:
from_addr = os.environ['USER'] + '@' + socket.getfqdn()
except KeyError:
from_addr = "nobody@" + socket.getfqdn()
fullname = ""
extract = False
for opt, value in optlist:
if opt == '-h':
Usage()
elif opt == '-t':
extract = True
elif opt == '-F':
fullname = value
elif opt == '-f':
from_addr = value
print "Enter message, end with ^D (Unix) or ^Z (Windows):"
processedHeaders = False
msg = []
while 1:
try:
line = raw_input()
except EOFError:
break
if not line and not processedHeaders:
msg, to_addrs = ProcessHeaders(msg, to_addrs, extract, fullname, from_addr)
processedHeaders = True
msg.append(line)
msg = "\r\n".join(msg)
if not to_addrs:
print >> sys.stderr, "Must specify recipients on command line, or use -t with To: headers in message"
sys.exit(0)
server = smtplib.SMTP('127.0.0.1')
server.set_debuglevel(0)
server.sendmail(from_addr, to_addrs, msg)
server.quit()
if __name__ == '__main__':
main(sys.argv) | en | 0.770958 | #!/usr/bin/python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # -------------------------------------------------------------------------- # # This is a simple mail client intended to suffice as the required # "sendmail" client on typical UNIX-style systems. It requires an # SMTP SMTP server for handling the e-mail that users and system # utilities may send via "sendmail". # # To install, symlink from /usr/{[s]bin,lib[exec]}/sendmail or similar # for the particular deployment. # # -------------------------------------------------------------------------- #to = header[3:] #to_addrs.append(to[("<" + to).rfind("<"):(to + ">").find(">")]) | 2.101563 | 2 |
openregistry/lots/core/includeme.py | oleksiyVeretiuk/openregistry.lots.core | 0 | 6631393 | # -*- coding: utf-8 -*-
import logging
from pyramid.interfaces import IRequest
from openregistry.lots.core.utils import (
extract_lot, isLot, register_lotType,
lot_from_data, SubscribersPicker
)
from openprocurement.api.app import get_evenly_plugins
from openprocurement.api.interfaces import IContentConfigurator
from openregistry.lots.core.adapters import LotConfigurator
from openregistry.lots.core.models import ILot
LOGGER = logging.getLogger(__name__)
def includeme(config, plugin_map):
from openregistry.lots.core.design import add_design
add_design()
config.add_request_method(extract_lot, 'lot', reify=True)
# lotType plugins support
config.registry.lotTypes = {}
config.add_route_predicate('_internal_type', isLot)
config.add_subscriber_predicate('_internal_type', SubscribersPicker)
config.add_request_method(lot_from_data)
config.add_directive('add_lotType',
register_lotType)
config.scan("openregistry.lots.core.views")
config.scan("openregistry.lots.core.subscribers")
config.registry.registerAdapter(LotConfigurator, (ILot, IRequest),
IContentConfigurator)
config.registry.lot_type_configurator = {}
LOGGER.info("Included openprocurement.lots.core plugin", extra={'MESSAGE_ID': 'included_plugin'})
# search for plugins
get_evenly_plugins(config, plugin_map['plugins'], 'openregistry.lots.core.plugins')
| # -*- coding: utf-8 -*-
import logging
from pyramid.interfaces import IRequest
from openregistry.lots.core.utils import (
extract_lot, isLot, register_lotType,
lot_from_data, SubscribersPicker
)
from openprocurement.api.app import get_evenly_plugins
from openprocurement.api.interfaces import IContentConfigurator
from openregistry.lots.core.adapters import LotConfigurator
from openregistry.lots.core.models import ILot
LOGGER = logging.getLogger(__name__)
def includeme(config, plugin_map):
from openregistry.lots.core.design import add_design
add_design()
config.add_request_method(extract_lot, 'lot', reify=True)
# lotType plugins support
config.registry.lotTypes = {}
config.add_route_predicate('_internal_type', isLot)
config.add_subscriber_predicate('_internal_type', SubscribersPicker)
config.add_request_method(lot_from_data)
config.add_directive('add_lotType',
register_lotType)
config.scan("openregistry.lots.core.views")
config.scan("openregistry.lots.core.subscribers")
config.registry.registerAdapter(LotConfigurator, (ILot, IRequest),
IContentConfigurator)
config.registry.lot_type_configurator = {}
LOGGER.info("Included openprocurement.lots.core plugin", extra={'MESSAGE_ID': 'included_plugin'})
# search for plugins
get_evenly_plugins(config, plugin_map['plugins'], 'openregistry.lots.core.plugins')
| en | 0.735043 | # -*- coding: utf-8 -*- # lotType plugins support # search for plugins | 1.919345 | 2 |
conf/__init__.py | detorr/brook-web | 253 | 6631394 | #coding=utf-8
#。——————————————————————————————————————————
#。
#。 __init__.py.py
#。
#。 @Time : 2019-03-31 08:02
#。 @Author : capton
#。 @Software: PyCharm
#。 @Blog : http://ccapton.cn
#。 @Github : https://github.com/ccapton
#。 @Email : <EMAIL>
#。__________________________________________ | #coding=utf-8
#。——————————————————————————————————————————
#。
#。 __init__.py.py
#。
#。 @Time : 2019-03-31 08:02
#。 @Author : capton
#。 @Software: PyCharm
#。 @Blog : http://ccapton.cn
#。 @Github : https://github.com/ccapton
#。 @Email : <EMAIL>
#。__________________________________________ | zh | 0.58204 | #coding=utf-8 #。—————————————————————————————————————————— #。 #。 __init__.py.py #。 #。 @Time : 2019-03-31 08:02 #。 @Author : capton #。 @Software: PyCharm #。 @Blog : http://ccapton.cn #。 @Github : https://github.com/ccapton #。 @Email : <EMAIL> #。__________________________________________ | 1.398106 | 1 |
chatbot/utils3.py | innaiivanova/chatbot | 0 | 6631395 | # Codecademy Looping Coffee Chatbot
# <NAME>
# utils3.py works with chatbot3.py
def print_message():
print('I\'m sorry, I did not understand your selection. Please enter the corresponding letter for your response.')
def get_size():
res = input('What size drink can I get for you? \n[a] Small \n[b] Medium \n[c] Large \n> ')
if res == 'a':
return 'small'
elif res == 'b':
return 'medium'
elif res == 'c':
return 'large'
else:
print_message()
return get_size()
def order_latte():
res = input('And what kind of milk for your latte? \n[a] 2% milk \n[b] Non-fat milk \n[c] Soy milk \n> ')
if res == 'a':
return 'latte'
elif res == 'b':
return 'non-fat latte'
elif res == 'c':
return 'soy latte'
else:
print_message()
return order_latte()
def order_mocha():
while True:
res = input('Would you like to try our limited-edition peppermint mocha? \n[a] Sure! \n[b] Maybe next time! \n> ')
if res == 'a':
return 'peppermint mocha'
elif res == 'b':
return 'mocha'
else:
print_message()
def brewed_coffee():
while True:
res = input('Would you like to try our limited-edition brewed coffee? \n[a] Yes! \n[b] No, thanks! \n> ')
if res == 'a':
return 'limited-edition brewed coffee'
elif res == 'b':
return 'brewed coffee'
else:
print_message()
| # Codecademy Looping Coffee Chatbot
# <NAME>
# utils3.py works with chatbot3.py
def print_message():
print('I\'m sorry, I did not understand your selection. Please enter the corresponding letter for your response.')
def get_size():
res = input('What size drink can I get for you? \n[a] Small \n[b] Medium \n[c] Large \n> ')
if res == 'a':
return 'small'
elif res == 'b':
return 'medium'
elif res == 'c':
return 'large'
else:
print_message()
return get_size()
def order_latte():
res = input('And what kind of milk for your latte? \n[a] 2% milk \n[b] Non-fat milk \n[c] Soy milk \n> ')
if res == 'a':
return 'latte'
elif res == 'b':
return 'non-fat latte'
elif res == 'c':
return 'soy latte'
else:
print_message()
return order_latte()
def order_mocha():
while True:
res = input('Would you like to try our limited-edition peppermint mocha? \n[a] Sure! \n[b] Maybe next time! \n> ')
if res == 'a':
return 'peppermint mocha'
elif res == 'b':
return 'mocha'
else:
print_message()
def brewed_coffee():
while True:
res = input('Would you like to try our limited-edition brewed coffee? \n[a] Yes! \n[b] No, thanks! \n> ')
if res == 'a':
return 'limited-edition brewed coffee'
elif res == 'b':
return 'brewed coffee'
else:
print_message()
| en | 0.57119 | # Codecademy Looping Coffee Chatbot # <NAME> # utils3.py works with chatbot3.py | 4.063318 | 4 |
python/runtime/pai/tensorflow/evaluate.py | lhw362950217/sqlflow | 0 | 6631396 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys
import tensorflow as tf
from runtime.model import oss
from runtime.pai.pai_distributed import define_tf_flags
from runtime.tensorflow import is_tf_estimator
from runtime.tensorflow.evaluate import (estimator_evaluate, keras_evaluate,
write_result_metrics)
from runtime.tensorflow.import_model import import_model
from runtime.tensorflow.input_fn import get_dataset_fn
from runtime.tensorflow.keras_with_feature_column_input import \
init_model_with_feature_column
from runtime.tensorflow.set_log_level import set_log_level
try:
tf.enable_eager_execution()
except Exception as e:
sys.stderr.write("warning: failed to enable_eager_execution: %s" % e)
pass
FLAGS = define_tf_flags()
def evaluate(datasource, select, data_table, result_table, oss_model_path,
metrics):
"""PAI TensorFlow evaluate wrapper
This function do some preparation for the local evaluation, say,
download the model from OSS, extract metadata and so on.
Args:
datasource: the datasource from which to get data
select: data selection SQL statement
data_table: tmp table which holds the data from select
result_table: table to save prediction result
oss_model_path: the model path on OSS
metrics: metrics to evaluate
"""
(estimator, feature_column_names, feature_column_names_map, feature_metas,
label_meta, model_params,
feature_columns_code) = oss.load_metas(oss_model_path,
"tensorflow_model_desc")
feature_columns = eval(feature_columns_code)
# NOTE(typhoonzero): No need to eval model_params["optimizer"] and
# model_params["loss"] because predicting do not need these parameters.
is_estimator = is_tf_estimator(import_model(estimator))
# Keras single node is using h5 format to save the model, no need to deal
# with export model format. Keras distributed mode will use estimator, so
# this is also needed.
if is_estimator:
oss.load_file(oss_model_path, "exported_path")
# NOTE(typhoonzero): directory "model_save" is hardcoded in
# codegen/tensorflow/codegen.go
oss.load_dir("%s/model_save" % oss_model_path)
else:
oss.load_file(oss_model_path, "model_save")
_evaluate(datasource=datasource,
estimator_string=estimator,
select=select,
result_table=result_table,
feature_columns=feature_columns,
feature_column_names=feature_column_names,
feature_metas=feature_metas,
label_meta=label_meta,
model_params=model_params,
validation_metrics=metrics,
save="model_save",
batch_size=1,
validation_steps=None,
verbose=0,
is_pai=True,
pai_table=data_table)
def _evaluate(datasource,
estimator_string,
select,
result_table,
feature_columns,
feature_column_names,
feature_metas={},
label_meta={},
model_params={},
validation_metrics=["Accuracy"],
save="",
batch_size=1,
validation_steps=None,
verbose=0,
pai_table=""):
estimator_cls = import_model(estimator_string)
is_estimator = is_tf_estimator(estimator_cls)
set_log_level(verbose, is_estimator)
eval_dataset = get_dataset_fn(select,
datasource,
feature_column_names,
feature_metas,
label_meta,
is_pai=True,
pai_table=pai_table,
batch_size=batch_size)
model_params.update(feature_columns)
if is_estimator:
FLAGS = tf.app.flags.FLAGS
model_params["model_dir"] = FLAGS.checkpointDir
estimator = estimator_cls(**model_params)
result_metrics = estimator_evaluate(estimator, eval_dataset,
validation_metrics)
else:
keras_model = init_model_with_feature_column(estimator, model_params)
keras_model_pkg = sys.modules[estimator_cls.__module__]
result_metrics = keras_evaluate(keras_model, eval_dataset, save,
keras_model_pkg, validation_metrics)
if result_table:
metric_name_list = ["loss"] + validation_metrics
write_result_metrics(result_metrics,
metric_name_list,
result_table,
"paiio",
None,
hdfs_namenode_addr="",
hive_location="",
hdfs_user="",
hdfs_pass="")
| # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys
import tensorflow as tf
from runtime.model import oss
from runtime.pai.pai_distributed import define_tf_flags
from runtime.tensorflow import is_tf_estimator
from runtime.tensorflow.evaluate import (estimator_evaluate, keras_evaluate,
write_result_metrics)
from runtime.tensorflow.import_model import import_model
from runtime.tensorflow.input_fn import get_dataset_fn
from runtime.tensorflow.keras_with_feature_column_input import \
init_model_with_feature_column
from runtime.tensorflow.set_log_level import set_log_level
try:
tf.enable_eager_execution()
except Exception as e:
sys.stderr.write("warning: failed to enable_eager_execution: %s" % e)
pass
FLAGS = define_tf_flags()
def evaluate(datasource, select, data_table, result_table, oss_model_path,
metrics):
"""PAI TensorFlow evaluate wrapper
This function do some preparation for the local evaluation, say,
download the model from OSS, extract metadata and so on.
Args:
datasource: the datasource from which to get data
select: data selection SQL statement
data_table: tmp table which holds the data from select
result_table: table to save prediction result
oss_model_path: the model path on OSS
metrics: metrics to evaluate
"""
(estimator, feature_column_names, feature_column_names_map, feature_metas,
label_meta, model_params,
feature_columns_code) = oss.load_metas(oss_model_path,
"tensorflow_model_desc")
feature_columns = eval(feature_columns_code)
# NOTE(typhoonzero): No need to eval model_params["optimizer"] and
# model_params["loss"] because predicting do not need these parameters.
is_estimator = is_tf_estimator(import_model(estimator))
# Keras single node is using h5 format to save the model, no need to deal
# with export model format. Keras distributed mode will use estimator, so
# this is also needed.
if is_estimator:
oss.load_file(oss_model_path, "exported_path")
# NOTE(typhoonzero): directory "model_save" is hardcoded in
# codegen/tensorflow/codegen.go
oss.load_dir("%s/model_save" % oss_model_path)
else:
oss.load_file(oss_model_path, "model_save")
_evaluate(datasource=datasource,
estimator_string=estimator,
select=select,
result_table=result_table,
feature_columns=feature_columns,
feature_column_names=feature_column_names,
feature_metas=feature_metas,
label_meta=label_meta,
model_params=model_params,
validation_metrics=metrics,
save="model_save",
batch_size=1,
validation_steps=None,
verbose=0,
is_pai=True,
pai_table=data_table)
def _evaluate(datasource,
estimator_string,
select,
result_table,
feature_columns,
feature_column_names,
feature_metas={},
label_meta={},
model_params={},
validation_metrics=["Accuracy"],
save="",
batch_size=1,
validation_steps=None,
verbose=0,
pai_table=""):
estimator_cls = import_model(estimator_string)
is_estimator = is_tf_estimator(estimator_cls)
set_log_level(verbose, is_estimator)
eval_dataset = get_dataset_fn(select,
datasource,
feature_column_names,
feature_metas,
label_meta,
is_pai=True,
pai_table=pai_table,
batch_size=batch_size)
model_params.update(feature_columns)
if is_estimator:
FLAGS = tf.app.flags.FLAGS
model_params["model_dir"] = FLAGS.checkpointDir
estimator = estimator_cls(**model_params)
result_metrics = estimator_evaluate(estimator, eval_dataset,
validation_metrics)
else:
keras_model = init_model_with_feature_column(estimator, model_params)
keras_model_pkg = sys.modules[estimator_cls.__module__]
result_metrics = keras_evaluate(keras_model, eval_dataset, save,
keras_model_pkg, validation_metrics)
if result_table:
metric_name_list = ["loss"] + validation_metrics
write_result_metrics(result_metrics,
metric_name_list,
result_table,
"paiio",
None,
hdfs_namenode_addr="",
hive_location="",
hdfs_user="",
hdfs_pass="")
| en | 0.808378 | # Copyright 2020 The SQLFlow Authors. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License PAI TensorFlow evaluate wrapper This function do some preparation for the local evaluation, say, download the model from OSS, extract metadata and so on. Args: datasource: the datasource from which to get data select: data selection SQL statement data_table: tmp table which holds the data from select result_table: table to save prediction result oss_model_path: the model path on OSS metrics: metrics to evaluate # NOTE(typhoonzero): No need to eval model_params["optimizer"] and # model_params["loss"] because predicting do not need these parameters. # Keras single node is using h5 format to save the model, no need to deal # with export model format. Keras distributed mode will use estimator, so # this is also needed. # NOTE(typhoonzero): directory "model_save" is hardcoded in # codegen/tensorflow/codegen.go | 1.986266 | 2 |
Codewars/7kyu/friendOrFoe.py | Ry4nW/python-wars | 1 | 6631397 | def friend(x):
friendList = []
for i in x:
friendList.append(i) if len(i) == 4 else None
return friendList
# "Tenary" without {else} in list declaration needs to come
# after loop, e.g.
def friend2(x):
return [f for f in x if len(f) == 4] | def friend(x):
friendList = []
for i in x:
friendList.append(i) if len(i) == 4 else None
return friendList
# "Tenary" without {else} in list declaration needs to come
# after loop, e.g.
def friend2(x):
return [f for f in x if len(f) == 4] | en | 0.88231 | # "Tenary" without {else} in list declaration needs to come # after loop, e.g. | 3.568487 | 4 |
examples/testes_tcc/teste_velocidade_de_movimento.py | filereno/dronekit-python | 0 | 6631398 | <reponame>filereno/dronekit-python
##########DEPENDENCIES#############
from dronekit import connect, VehicleMode,LocationGlobalRelative,APIException
import time
import socket
#import exceptions
import math
import argparse
from pymavlink import mavutil
#########FUNCTIONS#################
def connectMyCopter():
parser = argparse.ArgumentParser(description='commands')
parser.add_argument('--connect')
args = parser.parse_args()
connection_string = args.connect
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
vehicle = connect(connection_string,wait_ready=True)
return vehicle
def arm_and_takeoff(targetHeight):
while vehicle.is_armable!=True:
print("Esperando o veiculo se armar")
time.sleep(1)
print("Veiculo armado")
vehicle.mode = VehicleMode("GUIDED")
while vehicle.mode!='GUIDED':
print("Aguardando entrar em modo GUIDED")
time.sleep(1)
print("Veiculo em modo GUIDED")
vehicle.armed = True
while vehicle.armed==False:
print("Esperando o veiculo se armar")
time.sleep(1)
print("Cuidado as helices virtuais estao em funcionamento")
vehicle.simple_takeoff(targetHeight) ##meters
while True:
print("Current Altitude: %d"%vehicle.location.global_relative_frame.alt, targetHeight)
if vehicle.location.global_relative_frame.alt>=.92*targetHeight:
break
time.sleep(1)
print("Target altitude reached!!")
return None
def send_local_ned_velocity(vx, vy, vz):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
vx, vy, vz, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
vehicle.send_mavlink(msg)
vehicle.flush()
def send_global_ned_velocity(vx, vy, vz):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
vx, vy, vz, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
vehicle.send_mavlink(msg)
vehicle.flush()
##########MAIN EXECUTABLE###########
if __name__ == "__main__":
# altitude = 10
vehicle = connectMyCopter()
# print("\nGet all vehicle attribute values:")
# print(" Autopilot Firmware version: %s" % vehicle.version)
# print(" Major version number: %s" % vehicle.version.major)
# print(" Minor version number: %s" % vehicle.version.minor)
# print(" Patch version number: %s" % vehicle.version.patch)
# print(" Release type: %s" % vehicle.version.release_type())
# print(" Release version: %s" % vehicle.version.release_version())
# print(" Stable release?: %s" % vehicle.version.is_stable())
# print(" Autopilot capabilities")
# print(" Supports MISSION_FLOAT message type: %s" % vehicle.capabilities.mission_float)
# print(" Supports PARAM_FLOAT message type: %s" % vehicle.capabilities.param_float)
# print(" Supports MISSION_INT message type: %s" % vehicle.capabilities.mission_int)
# print(" Supports COMMAND_INT message type: %s" % vehicle.capabilities.command_int)
# print(" Supports PARAM_UNION message type: %s" % vehicle.capabilities.param_union)
# print(" Supports ftp for file transfers: %s" % vehicle.capabilities.ftp)
# print(" Supports commanding attitude offboard: %s" % vehicle.capabilities.set_attitude_target)
# print(" Supports commanding position and velocity targets in local NED frame: %s" % vehicle.capabilities.set_attitude_target_local_ned)
# print(" Supports set position + velocity targets in global scaled integers: %s" % vehicle.capabilities.set_altitude_target_global_int)
# print(" Supports terrain protocol / data handling: %s" % vehicle.capabilities.terrain)
# print(" Supports direct actuator control: %s" % vehicle.capabilities.set_actuator_target)
# print(" Supports the flight termination command: %s" % vehicle.capabilities.flight_termination)
# print(" Supports mission_float message type: %s" % vehicle.capabilities.mission_float)
# print(" Supports onboard compass calibration: %s" % vehicle.capabilities.compass_calibration)
# print(" Global Location: %s" % vehicle.location.global_frame)
# print(" Global Location (relative altitude): %s" % vehicle.location.global_relative_frame)
# print(" Local Location: %s" % vehicle.location.local_frame)
# print(" Attitude: %s" % vehicle.attitude)
# print(" Velocity: %s" % vehicle.velocity)
# print(" GPS: %s" % vehicle.gps_0)
# print(" Gimbal status: %s" % vehicle.gimbal)
# print(" Battery: %s" % vehicle.battery)
# print(" EKF OK?: %s" % vehicle.ekf_ok)
# print(" Last Heartbeat: %s" % vehicle.last_heartbeat)
# print(" Rangefinder: %s" % vehicle.rangefinder)
# print(" Rangefinder distance: %s" % vehicle.rangefinder.distance)
# print(" Rangefinder voltage: %s" % vehicle.rangefinder.voltage)
# print(" Heading: %s" % vehicle.heading)
# print(" Is Armable?: %s" % vehicle.is_armable)
# print(" System status: %s" % vehicle.system_status.state)
# print(" Groundspeed: %s" % vehicle.groundspeed) # settable
# print(" Airspeed: %s" % vehicle.airspeed) # settable
# print(" Mode: %s" % vehicle.mode.name) # settable
# print(" Armed: %s" % vehicle.armed) # settable
# arm_and_takeoff(altitude)
# time.sleep(5)
# while counter<2:
# send_global_ned_velocity(1,0,0)
# time.sleep(1)
# print("Moving NORTH relative to front of drone")
# counter=counter+1
# time.sleep(2)
counter=0
vel1=0
vel2=0
vel3=0
while counter <= 2:
counter=counter+1
vel1= vel1+1 # 0x 0y
send_global_ned_velocity(vel1,vel2,vel3)
print("NORTE")
time.sleep(1)
if counter == 2: # NORTE
while counter >= 0: # +x 0y
send_global_ned_velocity(vel1,vel2,vel3)
print("OESTE")
counter=counter-1
vel1=vel1-1# x
vel2=vel2+1# y
time.sleep(1)
if counter == 0: # OESTE
while counter <= 2: # 0x +y
send_global_ned_velocity(vel1,vel2,vel3)
print("SUL")
counter=counter+1
vel1=vel1-1# x
vel2=vel2-1# y
time.sleep(1)
if counter == 2: # SUL
while counter >= 0: # -x 0y
send_global_ned_velocity(vel1,vel2,vel3)
print("LESTE")
counter = counter-1
vel1 = vel1+1# x
vel2 = vel2-1# y
time.sleep(1)
if counter == 0: # LESTE
while counter <= 2: # 0x -y
send_global_ned_velocity(vel1,vel2,vel3)
print("NORTE")
counter = counter+1
vel1 = vel1+1# x
vel2 = vel2+1# y
time.sleep(1)
if counter == 2:
print("TESTE")
send_global_ned_velocity(0,0,0)
else:
break
else:
break
else:
break
else:
break
else:
pass
# i = 0
# while i < 100:
# print("teste")
# if i <= 5:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 5 and i <= 10:
# print(i)
# send_local_ned_velocity(-0.2,0,0)
# time.sleep(0.2)
# elif i > 10 and i <= 15:
# print(i)
# send_local_ned_velocity(-0.7,0,0)
# time.sleep(0.2)
# elif i > 15 and i <= 20:
# print(i)
# send_local_ned_velocity(0.1,0,0)
# time.sleep(0.2)
# elif i > 20 and i <= 25:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 25 and i <= 30:
# print(i)
# send_local_ned_velocity(0,-1,0)
# time.sleep(0.2)
# elif i > 30 and i <= 35:
# print(i)
# send_local_ned_velocity(0,0.5,0)
# time.sleep(0.2)
# elif i > 35 and i <= 40:
# print(i)
# send_local_ned_velocity(0,0.9,0)
# time.sleep(0.2)
# elif i > 40 and i <= 45:
# print(i)
# send_local_ned_velocity(0,1,0)
# time.sleep(0.2)
# elif i > 45 and i <= 50:
# print(i)
# send_local_ned_velocity(0,-0.6,0)
# time.sleep(0.2)
# elif i > 50 and i <= 55:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 55 and i <= 60:
# print(i)
# send_local_ned_velocity(0,-0.4,0)
# time.sleep(0.2)
# elif i > 60 and i <= 65:
# print(i)
# send_local_ned_velocity(-0.9,0,0)
# time.sleep(0.2)
# elif i > 65 and i <= 70:
# print(i)
# send_local_ned_velocity(0,0,0)
# time.sleep(0.2)
# elif i > 70 and i <= 75:
# print(i)
# send_local_ned_velocity(0,1,0)
# time.sleep(0.2)
# elif i > 75 and i <= 80:
# print(i)
# send_local_ned_velocity(0,-1,0)
# time.sleep(0.2)
# elif i > 80 and i <= 85:
# print(i)
# send_local_ned_velocity(0.7,0,0)
# time.sleep(0.2)
# elif i > 85 and i <= 90:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 90 and i <= 95:
# print(i)
# send_local_ned_velocity(0,-0.1,0)
# time.sleep(0.2)
# elif i <=100:
# print(i)
# send_local_ned_velocity(0,0,0)
# time.sleep(0.2)
# i += 1
# #time.sleep(1)
print("Done!") | ##########DEPENDENCIES#############
from dronekit import connect, VehicleMode,LocationGlobalRelative,APIException
import time
import socket
#import exceptions
import math
import argparse
from pymavlink import mavutil
#########FUNCTIONS#################
def connectMyCopter():
parser = argparse.ArgumentParser(description='commands')
parser.add_argument('--connect')
args = parser.parse_args()
connection_string = args.connect
if not connection_string:
import dronekit_sitl
sitl = dronekit_sitl.start_default()
connection_string = sitl.connection_string()
vehicle = connect(connection_string,wait_ready=True)
return vehicle
def arm_and_takeoff(targetHeight):
while vehicle.is_armable!=True:
print("Esperando o veiculo se armar")
time.sleep(1)
print("Veiculo armado")
vehicle.mode = VehicleMode("GUIDED")
while vehicle.mode!='GUIDED':
print("Aguardando entrar em modo GUIDED")
time.sleep(1)
print("Veiculo em modo GUIDED")
vehicle.armed = True
while vehicle.armed==False:
print("Esperando o veiculo se armar")
time.sleep(1)
print("Cuidado as helices virtuais estao em funcionamento")
vehicle.simple_takeoff(targetHeight) ##meters
while True:
print("Current Altitude: %d"%vehicle.location.global_relative_frame.alt, targetHeight)
if vehicle.location.global_relative_frame.alt>=.92*targetHeight:
break
time.sleep(1)
print("Target altitude reached!!")
return None
def send_local_ned_velocity(vx, vy, vz):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_BODY_OFFSET_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
vx, vy, vz, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
vehicle.send_mavlink(msg)
vehicle.flush()
def send_global_ned_velocity(vx, vy, vz):
"""
Move vehicle in direction based on specified velocity vectors.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
vx, vy, vz, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
vehicle.send_mavlink(msg)
vehicle.flush()
##########MAIN EXECUTABLE###########
if __name__ == "__main__":
# altitude = 10
vehicle = connectMyCopter()
# print("\nGet all vehicle attribute values:")
# print(" Autopilot Firmware version: %s" % vehicle.version)
# print(" Major version number: %s" % vehicle.version.major)
# print(" Minor version number: %s" % vehicle.version.minor)
# print(" Patch version number: %s" % vehicle.version.patch)
# print(" Release type: %s" % vehicle.version.release_type())
# print(" Release version: %s" % vehicle.version.release_version())
# print(" Stable release?: %s" % vehicle.version.is_stable())
# print(" Autopilot capabilities")
# print(" Supports MISSION_FLOAT message type: %s" % vehicle.capabilities.mission_float)
# print(" Supports PARAM_FLOAT message type: %s" % vehicle.capabilities.param_float)
# print(" Supports MISSION_INT message type: %s" % vehicle.capabilities.mission_int)
# print(" Supports COMMAND_INT message type: %s" % vehicle.capabilities.command_int)
# print(" Supports PARAM_UNION message type: %s" % vehicle.capabilities.param_union)
# print(" Supports ftp for file transfers: %s" % vehicle.capabilities.ftp)
# print(" Supports commanding attitude offboard: %s" % vehicle.capabilities.set_attitude_target)
# print(" Supports commanding position and velocity targets in local NED frame: %s" % vehicle.capabilities.set_attitude_target_local_ned)
# print(" Supports set position + velocity targets in global scaled integers: %s" % vehicle.capabilities.set_altitude_target_global_int)
# print(" Supports terrain protocol / data handling: %s" % vehicle.capabilities.terrain)
# print(" Supports direct actuator control: %s" % vehicle.capabilities.set_actuator_target)
# print(" Supports the flight termination command: %s" % vehicle.capabilities.flight_termination)
# print(" Supports mission_float message type: %s" % vehicle.capabilities.mission_float)
# print(" Supports onboard compass calibration: %s" % vehicle.capabilities.compass_calibration)
# print(" Global Location: %s" % vehicle.location.global_frame)
# print(" Global Location (relative altitude): %s" % vehicle.location.global_relative_frame)
# print(" Local Location: %s" % vehicle.location.local_frame)
# print(" Attitude: %s" % vehicle.attitude)
# print(" Velocity: %s" % vehicle.velocity)
# print(" GPS: %s" % vehicle.gps_0)
# print(" Gimbal status: %s" % vehicle.gimbal)
# print(" Battery: %s" % vehicle.battery)
# print(" EKF OK?: %s" % vehicle.ekf_ok)
# print(" Last Heartbeat: %s" % vehicle.last_heartbeat)
# print(" Rangefinder: %s" % vehicle.rangefinder)
# print(" Rangefinder distance: %s" % vehicle.rangefinder.distance)
# print(" Rangefinder voltage: %s" % vehicle.rangefinder.voltage)
# print(" Heading: %s" % vehicle.heading)
# print(" Is Armable?: %s" % vehicle.is_armable)
# print(" System status: %s" % vehicle.system_status.state)
# print(" Groundspeed: %s" % vehicle.groundspeed) # settable
# print(" Airspeed: %s" % vehicle.airspeed) # settable
# print(" Mode: %s" % vehicle.mode.name) # settable
# print(" Armed: %s" % vehicle.armed) # settable
# arm_and_takeoff(altitude)
# time.sleep(5)
# while counter<2:
# send_global_ned_velocity(1,0,0)
# time.sleep(1)
# print("Moving NORTH relative to front of drone")
# counter=counter+1
# time.sleep(2)
counter=0
vel1=0
vel2=0
vel3=0
while counter <= 2:
counter=counter+1
vel1= vel1+1 # 0x 0y
send_global_ned_velocity(vel1,vel2,vel3)
print("NORTE")
time.sleep(1)
if counter == 2: # NORTE
while counter >= 0: # +x 0y
send_global_ned_velocity(vel1,vel2,vel3)
print("OESTE")
counter=counter-1
vel1=vel1-1# x
vel2=vel2+1# y
time.sleep(1)
if counter == 0: # OESTE
while counter <= 2: # 0x +y
send_global_ned_velocity(vel1,vel2,vel3)
print("SUL")
counter=counter+1
vel1=vel1-1# x
vel2=vel2-1# y
time.sleep(1)
if counter == 2: # SUL
while counter >= 0: # -x 0y
send_global_ned_velocity(vel1,vel2,vel3)
print("LESTE")
counter = counter-1
vel1 = vel1+1# x
vel2 = vel2-1# y
time.sleep(1)
if counter == 0: # LESTE
while counter <= 2: # 0x -y
send_global_ned_velocity(vel1,vel2,vel3)
print("NORTE")
counter = counter+1
vel1 = vel1+1# x
vel2 = vel2+1# y
time.sleep(1)
if counter == 2:
print("TESTE")
send_global_ned_velocity(0,0,0)
else:
break
else:
break
else:
break
else:
break
else:
pass
# i = 0
# while i < 100:
# print("teste")
# if i <= 5:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 5 and i <= 10:
# print(i)
# send_local_ned_velocity(-0.2,0,0)
# time.sleep(0.2)
# elif i > 10 and i <= 15:
# print(i)
# send_local_ned_velocity(-0.7,0,0)
# time.sleep(0.2)
# elif i > 15 and i <= 20:
# print(i)
# send_local_ned_velocity(0.1,0,0)
# time.sleep(0.2)
# elif i > 20 and i <= 25:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 25 and i <= 30:
# print(i)
# send_local_ned_velocity(0,-1,0)
# time.sleep(0.2)
# elif i > 30 and i <= 35:
# print(i)
# send_local_ned_velocity(0,0.5,0)
# time.sleep(0.2)
# elif i > 35 and i <= 40:
# print(i)
# send_local_ned_velocity(0,0.9,0)
# time.sleep(0.2)
# elif i > 40 and i <= 45:
# print(i)
# send_local_ned_velocity(0,1,0)
# time.sleep(0.2)
# elif i > 45 and i <= 50:
# print(i)
# send_local_ned_velocity(0,-0.6,0)
# time.sleep(0.2)
# elif i > 50 and i <= 55:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 55 and i <= 60:
# print(i)
# send_local_ned_velocity(0,-0.4,0)
# time.sleep(0.2)
# elif i > 60 and i <= 65:
# print(i)
# send_local_ned_velocity(-0.9,0,0)
# time.sleep(0.2)
# elif i > 65 and i <= 70:
# print(i)
# send_local_ned_velocity(0,0,0)
# time.sleep(0.2)
# elif i > 70 and i <= 75:
# print(i)
# send_local_ned_velocity(0,1,0)
# time.sleep(0.2)
# elif i > 75 and i <= 80:
# print(i)
# send_local_ned_velocity(0,-1,0)
# time.sleep(0.2)
# elif i > 80 and i <= 85:
# print(i)
# send_local_ned_velocity(0.7,0,0)
# time.sleep(0.2)
# elif i > 85 and i <= 90:
# print(i)
# send_local_ned_velocity(1,0,0)
# time.sleep(0.2)
# elif i > 90 and i <= 95:
# print(i)
# send_local_ned_velocity(0,-0.1,0)
# time.sleep(0.2)
# elif i <=100:
# print(i)
# send_local_ned_velocity(0,0,0)
# time.sleep(0.2)
# i += 1
# #time.sleep(1)
print("Done!") | en | 0.594413 | ##########DEPENDENCIES############# #import exceptions #########FUNCTIONS################# ##meters Move vehicle in direction based on specified velocity vectors. # time_boot_ms (not used) # target system, target component # frame # type_mask (only speeds enabled) # x, y, z positions (not used) # x, y, z velocity in m/s # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink) # send command to vehicle on 1 Hz cycle Move vehicle in direction based on specified velocity vectors. # time_boot_ms (not used) # target system, target component # frame # type_mask (only speeds enabled) # x, y, z positions (not used) # x, y, z velocity in m/s # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink) # send command to vehicle on 1 Hz cycle ##########MAIN EXECUTABLE########### # altitude = 10 # print("\nGet all vehicle attribute values:") # print(" Autopilot Firmware version: %s" % vehicle.version) # print(" Major version number: %s" % vehicle.version.major) # print(" Minor version number: %s" % vehicle.version.minor) # print(" Patch version number: %s" % vehicle.version.patch) # print(" Release type: %s" % vehicle.version.release_type()) # print(" Release version: %s" % vehicle.version.release_version()) # print(" Stable release?: %s" % vehicle.version.is_stable()) # print(" Autopilot capabilities") # print(" Supports MISSION_FLOAT message type: %s" % vehicle.capabilities.mission_float) # print(" Supports PARAM_FLOAT message type: %s" % vehicle.capabilities.param_float) # print(" Supports MISSION_INT message type: %s" % vehicle.capabilities.mission_int) # print(" Supports COMMAND_INT message type: %s" % vehicle.capabilities.command_int) # print(" Supports PARAM_UNION message type: %s" % vehicle.capabilities.param_union) # print(" Supports ftp for file transfers: %s" % vehicle.capabilities.ftp) # print(" Supports commanding attitude offboard: %s" % vehicle.capabilities.set_attitude_target) # print(" Supports commanding position and velocity targets in local NED frame: %s" % vehicle.capabilities.set_attitude_target_local_ned) # print(" Supports set position + velocity targets in global scaled integers: %s" % vehicle.capabilities.set_altitude_target_global_int) # print(" Supports terrain protocol / data handling: %s" % vehicle.capabilities.terrain) # print(" Supports direct actuator control: %s" % vehicle.capabilities.set_actuator_target) # print(" Supports the flight termination command: %s" % vehicle.capabilities.flight_termination) # print(" Supports mission_float message type: %s" % vehicle.capabilities.mission_float) # print(" Supports onboard compass calibration: %s" % vehicle.capabilities.compass_calibration) # print(" Global Location: %s" % vehicle.location.global_frame) # print(" Global Location (relative altitude): %s" % vehicle.location.global_relative_frame) # print(" Local Location: %s" % vehicle.location.local_frame) # print(" Attitude: %s" % vehicle.attitude) # print(" Velocity: %s" % vehicle.velocity) # print(" GPS: %s" % vehicle.gps_0) # print(" Gimbal status: %s" % vehicle.gimbal) # print(" Battery: %s" % vehicle.battery) # print(" EKF OK?: %s" % vehicle.ekf_ok) # print(" Last Heartbeat: %s" % vehicle.last_heartbeat) # print(" Rangefinder: %s" % vehicle.rangefinder) # print(" Rangefinder distance: %s" % vehicle.rangefinder.distance) # print(" Rangefinder voltage: %s" % vehicle.rangefinder.voltage) # print(" Heading: 
%s" % vehicle.heading) # print(" Is Armable?: %s" % vehicle.is_armable) # print(" System status: %s" % vehicle.system_status.state) # print(" Groundspeed: %s" % vehicle.groundspeed) # settable # print(" Airspeed: %s" % vehicle.airspeed) # settable # print(" Mode: %s" % vehicle.mode.name) # settable # print(" Armed: %s" % vehicle.armed) # settable # arm_and_takeoff(altitude) # time.sleep(5) # while counter<2: # send_global_ned_velocity(1,0,0) # time.sleep(1) # print("Moving NORTH relative to front of drone") # counter=counter+1 # time.sleep(2) # 0x 0y # NORTE # +x 0y # x # y # OESTE # 0x +y # x # y # SUL # -x 0y # x # y # LESTE # 0x -y # x # y # i = 0 # while i < 100: # print("teste") # if i <= 5: # print(i) # send_local_ned_velocity(1,0,0) # time.sleep(0.2) # elif i > 5 and i <= 10: # print(i) # send_local_ned_velocity(-0.2,0,0) # time.sleep(0.2) # elif i > 10 and i <= 15: # print(i) # send_local_ned_velocity(-0.7,0,0) # time.sleep(0.2) # elif i > 15 and i <= 20: # print(i) # send_local_ned_velocity(0.1,0,0) # time.sleep(0.2) # elif i > 20 and i <= 25: # print(i) # send_local_ned_velocity(1,0,0) # time.sleep(0.2) # elif i > 25 and i <= 30: # print(i) # send_local_ned_velocity(0,-1,0) # time.sleep(0.2) # elif i > 30 and i <= 35: # print(i) # send_local_ned_velocity(0,0.5,0) # time.sleep(0.2) # elif i > 35 and i <= 40: # print(i) # send_local_ned_velocity(0,0.9,0) # time.sleep(0.2) # elif i > 40 and i <= 45: # print(i) # send_local_ned_velocity(0,1,0) # time.sleep(0.2) # elif i > 45 and i <= 50: # print(i) # send_local_ned_velocity(0,-0.6,0) # time.sleep(0.2) # elif i > 50 and i <= 55: # print(i) # send_local_ned_velocity(1,0,0) # time.sleep(0.2) # elif i > 55 and i <= 60: # print(i) # send_local_ned_velocity(0,-0.4,0) # time.sleep(0.2) # elif i > 60 and i <= 65: # print(i) # send_local_ned_velocity(-0.9,0,0) # time.sleep(0.2) # elif i > 65 and i <= 70: # print(i) # send_local_ned_velocity(0,0,0) # time.sleep(0.2) # elif i > 70 and i <= 75: # print(i) # send_local_ned_velocity(0,1,0) # time.sleep(0.2) # elif i > 75 and i <= 80: # print(i) # send_local_ned_velocity(0,-1,0) # time.sleep(0.2) # elif i > 80 and i <= 85: # print(i) # send_local_ned_velocity(0.7,0,0) # time.sleep(0.2) # elif i > 85 and i <= 90: # print(i) # send_local_ned_velocity(1,0,0) # time.sleep(0.2) # elif i > 90 and i <= 95: # print(i) # send_local_ned_velocity(0,-0.1,0) # time.sleep(0.2) # elif i <=100: # print(i) # send_local_ned_velocity(0,0,0) # time.sleep(0.2) # i += 1 # #time.sleep(1) | 2.832681 | 3 |
examples/docs_snippets/docs_snippets/legacy/dagster_pandas_guide/shape_constrained_trip.py | makotonium/dagster | 1 | 6631399 | <reponame>makotonium/dagster
from datetime import datetime
from dagster import Out, job, op
from dagster.utils import script_relative_path
from dagster_pandas import RowCountConstraint, create_dagster_pandas_dataframe_type
from pandas import DataFrame, read_csv
# start_create_type
ShapeConstrainedTripDataFrame = create_dagster_pandas_dataframe_type(
name="ShapeConstrainedTripDataFrame", dataframe_constraints=[RowCountConstraint(4)]
)
# end_create_type
@op(out=Out(ShapeConstrainedTripDataFrame))
def load_shape_constrained_trip_dataframe() -> DataFrame:
return read_csv(
script_relative_path("./ebike_trips.csv"),
parse_dates=["start_time", "end_time"],
date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
)
@job
def shape_constrained_trip():
load_shape_constrained_trip_dataframe()
| from datetime import datetime
from dagster import Out, job, op
from dagster.utils import script_relative_path
from dagster_pandas import RowCountConstraint, create_dagster_pandas_dataframe_type
from pandas import DataFrame, read_csv
# start_create_type
ShapeConstrainedTripDataFrame = create_dagster_pandas_dataframe_type(
name="ShapeConstrainedTripDataFrame", dataframe_constraints=[RowCountConstraint(4)]
)
# end_create_type
@op(out=Out(ShapeConstrainedTripDataFrame))
def load_shape_constrained_trip_dataframe() -> DataFrame:
return read_csv(
script_relative_path("./ebike_trips.csv"),
parse_dates=["start_time", "end_time"],
date_parser=lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"),
)
@job
def shape_constrained_trip():
load_shape_constrained_trip_dataframe() | en | 0.108884 | # start_create_type # end_create_type | 2.470645 | 2 |
sample02_always_alive.py | CMA2401PT/Phoenix-Transfer | 3 | 6631400 | <filename>sample02_always_alive.py
from proxy import forward
from proxy import utils
from threading import Thread
from queue import Queue
import time
# 有两个子线程,一个负责停的解析数据,并通过 Queue 将解析结果在线程之间传递
# 另一个子线程的目仅仅负责发送指令
# 当任意子线程死亡时,主线程尝试重连
class Config(object):
def __init__(self) -> None:
self.recv_thread_alive=True
self.working_threads_alive={}
self.receiver=None
self.sender=None
config=Config()
def recv_thread_func(recv_queue:Queue):
while True:
while config.receiver is None:
time.sleep(1)
config.recv_thread_alive=True
print('recv thread activated!')
try:
while True:
bytes_msg,(packet_id,decoded_msg)=config.receiver()
if decoded_msg is None:
# 还未实现该类型数据的解析(会有很多很多的数据包!)
# print(f'unkown decode packet ({packet_id}): ',bytes_msg)
continue
else:
# 已经实现类型数据的解析
msg,sender_subclient,target_subclient=decoded_msg
print(msg)
recv_queue.put(msg)
except Exception as e:
print('Recv thread terminated!',e)
config.recv_thread_alive=False
config.receiver=None
config.sender=None
print('Recv thread waiting for restarting...')
time.sleep(3)
def working_thread_func(thread_name):
msg=None
while True:
while (config.sender is None) or (not config.recv_thread_alive):
time.sleep(1)
config.working_threads_alive[thread_name]=True
print(f'working thread [{thread_name}] activated!')
try:
while True:
if msg is None:
command=input('cmd:')
msg,uuid_bytes=utils.pack_ws_command(command,uuid=None)
print(uuid_bytes)
config.sender(msg)
msg=None
time.sleep(0.1)
except Exception as e:
print(f'Working thread [{thread_name}] terminated!',e)
config.working_threads_alive[thread_name]=False
config.receiver=None
config.sender=None
print('Working thread waiting for restarting...')
time.sleep(3)
conn=forward.connect_to_fb_transfer(host="localhost",port=8000)
config.sender=forward.Sender(connection=conn)
config.receiver=forward.Receiver(connection=conn)
recv_queue = Queue(maxsize=10240)
recv_thread = Thread(target=recv_thread_func, args=(recv_queue,))
work_thread = Thread(target=working_thread_func, args=('user_interact',))
recv_thread.daemon = True
recv_thread.start()
work_thread.daemon = True
work_thread.start()
while True:
time.sleep(0.1)
if (not config.recv_thread_alive) or (False in config.working_threads_alive.keys()):
print('sub process crashed! tring to restart connection...')
while True:
time.sleep(3)
try:
conn=forward.connect_to_fb_transfer(host="localhost",port=8000)
config.sender=forward.Sender(connection=conn)
config.receiver=forward.Receiver(connection=conn)
break
except Exception as e:
print(f'restart error : {e} ... continue retry')
| <filename>sample02_always_alive.py
from proxy import forward
from proxy import utils
from threading import Thread
from queue import Queue
import time
# 有两个子线程,一个负责停的解析数据,并通过 Queue 将解析结果在线程之间传递
# 另一个子线程的目仅仅负责发送指令
# 当任意子线程死亡时,主线程尝试重连
class Config(object):
def __init__(self) -> None:
self.recv_thread_alive=True
self.working_threads_alive={}
self.receiver=None
self.sender=None
config=Config()
def recv_thread_func(recv_queue:Queue):
while True:
while config.receiver is None:
time.sleep(1)
config.recv_thread_alive=True
print('recv thread activated!')
try:
while True:
bytes_msg,(packet_id,decoded_msg)=config.receiver()
if decoded_msg is None:
# 还未实现该类型数据的解析(会有很多很多的数据包!)
# print(f'unkown decode packet ({packet_id}): ',bytes_msg)
continue
else:
# 已经实现类型数据的解析
msg,sender_subclient,target_subclient=decoded_msg
print(msg)
recv_queue.put(msg)
except Exception as e:
print('Recv thread terminated!',e)
config.recv_thread_alive=False
config.receiver=None
config.sender=None
print('Recv thread waiting for restarting...')
time.sleep(3)
def working_thread_func(thread_name):
msg=None
while True:
while (config.sender is None) or (not config.recv_thread_alive):
time.sleep(1)
config.working_threads_alive[thread_name]=True
print(f'working thread [{thread_name}] activated!')
try:
while True:
if msg is None:
command=input('cmd:')
msg,uuid_bytes=utils.pack_ws_command(command,uuid=None)
print(uuid_bytes)
config.sender(msg)
msg=None
time.sleep(0.1)
except Exception as e:
print(f'Working thread [{thread_name}] terminated!',e)
config.working_threads_alive[thread_name]=False
config.receiver=None
config.sender=None
print('Working thread waiting for restarting...')
time.sleep(3)
conn=forward.connect_to_fb_transfer(host="localhost",port=8000)
config.sender=forward.Sender(connection=conn)
config.receiver=forward.Receiver(connection=conn)
recv_queue = Queue(maxsize=10240)
recv_thread = Thread(target=recv_thread_func, args=(recv_queue,))
work_thread = Thread(target=working_thread_func, args=('user_interact',))
recv_thread.daemon = True
recv_thread.start()
work_thread.daemon = True
work_thread.start()
while True:
time.sleep(0.1)
if (not config.recv_thread_alive) or (False in config.working_threads_alive.keys()):
print('sub process crashed! tring to restart connection...')
while True:
time.sleep(3)
try:
conn=forward.connect_to_fb_transfer(host="localhost",port=8000)
config.sender=forward.Sender(connection=conn)
config.receiver=forward.Receiver(connection=conn)
break
except Exception as e:
print(f'restart error : {e} ... continue retry')
| zh | 0.905708 | # 有两个子线程,一个负责停的解析数据,并通过 Queue 将解析结果在线程之间传递 # 另一个子线程的目仅仅负责发送指令 # 当任意子线程死亡时,主线程尝试重连 # 还未实现该类型数据的解析(会有很多很多的数据包!) # print(f'unkown decode packet ({packet_id}): ',bytes_msg) # 已经实现类型数据的解析 | 2.827014 | 3 |
nautobot_device_onboarding/netdev_keeper.py | tim-fiola/nautobot-plugin-device-onboarding | 0 | 6631401 | <reponame>tim-fiola/nautobot-plugin-device-onboarding
"""NetDev Keeper.
(c) 2020-2021 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import logging
import socket
from django.conf import settings
from napalm import get_network_driver
from napalm.base.exceptions import ConnectionException, CommandErrorException
from napalm.base.netmiko_helpers import netmiko_args
from netmiko.ssh_autodetect import SSHDetect
from netmiko.ssh_exception import NetMikoAuthenticationException
from netmiko.ssh_exception import NetMikoTimeoutException
from paramiko.ssh_exception import SSHException
from nautobot.dcim.models import Platform
from nautobot_device_onboarding.onboarding.onboarding import StandaloneOnboarding
from .constants import NETMIKO_TO_NAPALM_STATIC
from .exceptions import OnboardException
logger = logging.getLogger("rq.worker")
PLUGIN_SETTINGS = settings.PLUGINS_CONFIG["nautobot_device_onboarding"]
def get_mgmt_info(
hostname,
ip_ifs,
default_mgmt_if=PLUGIN_SETTINGS["default_management_interface"],
default_mgmt_pfxlen=PLUGIN_SETTINGS["default_management_prefix_length"],
):
"""Get the interface name and prefix length for the management interface.
Locate the interface assigned with the hostname value and retain
the interface name and IP prefix-length so that we can use it
when creating the IPAM IP-Address instance.
Note that in some cases (e.g., NAT) the hostname may differ than
the interface addresses present on the device. We need to handle this.
"""
for if_name, if_data in ip_ifs.items():
for if_addr, if_addr_data in if_data["ipv4"].items():
if if_addr == hostname:
return if_name, if_addr_data["prefix_length"]
return default_mgmt_if, default_mgmt_pfxlen
class NetdevKeeper:
"""Used to maintain information about the network device during the onboarding process."""
def __init__( # pylint: disable=R0913
self,
hostname,
port=None,
timeout=None,
username=None,
password=<PASSWORD>,
secret=None,
napalm_driver=None,
optional_args=None,
):
"""Initialize the network device keeper instance and ensure the required configuration parameters are provided.
Args:
hostname (str): IP Address or FQDN of an onboarded device
port (int): Port used to connect to an onboarded device
timeout (int): Connection timeout of an onboarded device
username (str): Device username (if unspecified, NAPALM_USERNAME settings variable will be used)
password (str): Device password (if unspecified, NAPALM_PASSWORD settings variable will be used)
secret (str): Device secret password (if unspecified, NAPALM_ARGS["secret"] settings variable will be used)
napalm_driver (str): Napalm driver name to use to onboard network device
optional_args (dict): Optional arguments passed to NAPALM and Netmiko
Raises:
OnboardException('fail-config'):
When any required config options are missing.
"""
# Attributes
self.hostname = hostname
self.port = port
self.timeout = timeout
self.username = username
self.password = password
self.secret = secret
self.napalm_driver = napalm_driver
# Netmiko and NAPALM expects optional_args to be a dictionary.
if isinstance(optional_args, dict):
self.optional_args = optional_args
elif optional_args is None:
self.optional_args = {}
else:
raise OnboardException(reason="fail-general", message="Optional arguments should be None or a dict")
self.facts = None
self.ip_ifs = None
self.netmiko_device_type = None
self.onboarding_class = StandaloneOnboarding
self.driver_addon_result = None
# Enable loading driver extensions
self.load_driver_extension = True
def check_reachability(self):
"""Ensure that the device at the mgmt-ipaddr provided is reachable.
We do this check before attempting other "show" commands so that we know we've got a
device that can be reached.
Raises:
OnboardException('fail-connect'):
When device unreachable
"""
logger.info("CHECK: IP %s:%s", self.hostname, self.port)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect((self.hostname, self.port))
except (socket.error, socket.timeout, ConnectionError):
raise OnboardException(
reason="fail-connect", message=f"ERROR device unreachable: {self.hostname}:{self.port}"
)
def guess_netmiko_device_type(self):
"""Guess the device type of host, based on Netmiko."""
guessed_device_type = None
netmiko_optional_args = netmiko_args(self.optional_args)
remote_device = {
"device_type": "autodetect",
"host": self.hostname,
"username": self.username,
"password": <PASSWORD>,
**netmiko_optional_args,
}
if self.secret:
remote_device["secret"] = self.secret
if self.port:
remote_device["port"] = self.port
if self.timeout:
remote_device["timeout"] = self.timeout
try:
logger.info("INFO guessing device type: %s", self.hostname)
guesser = SSHDetect(**remote_device)
guessed_device_type = guesser.autodetect()
logger.info("INFO guessed device type: %s", guessed_device_type)
except NetMikoAuthenticationException as err:
logger.error("ERROR %s", err)
raise OnboardException(reason="fail-login", message=f"ERROR: {str(err)}")
except (NetMikoTimeoutException, SSHException) as err:
logger.error("ERROR: %s", str(err))
raise OnboardException(reason="fail-connect", message=f"ERROR: {str(err)}")
except Exception as err:
logger.error("ERROR: %s", str(err))
raise OnboardException(reason="fail-general", message=f"ERROR: {str(err)}")
else:
if guessed_device_type is None:
logger.error("ERROR: Could not detect device type with SSHDetect")
raise OnboardException(
reason="fail-general", message="ERROR: Could not detect device type with SSHDetect"
)
return guessed_device_type
def set_napalm_driver_name(self):
"""Sets napalm driver name."""
if not self.napalm_driver:
netmiko_device_type = self.guess_netmiko_device_type()
logger.info("Guessed Netmiko Device Type: %s", netmiko_device_type)
self.netmiko_device_type = netmiko_device_type
platform_to_napalm_nautobot = {
platform.slug: platform.napalm_driver for platform in Platform.objects.all() if platform.napalm_driver
}
# Update Constants if Napalm driver is defined for Nautobot Platform
netmiko_to_napalm = {**NETMIKO_TO_NAPALM_STATIC, **platform_to_napalm_nautobot}
self.napalm_driver = netmiko_to_napalm.get(netmiko_device_type)
def check_napalm_driver_name(self):
"""Checks for napalm driver name."""
if not self.napalm_driver:
raise OnboardException(
reason="fail-general",
message=f"Onboarding for Platform {self.netmiko_device_type} not "
f"supported, as it has no specified NAPALM driver",
)
def get_onboarding_facts(self):
"""Gather information from the network device that is needed to onboard the device into the Nautobot system.
Raises:
OnboardException('fail-login'):
When unable to login to device
OnboardException('fail-execute'):
When unable to run commands to collect device information
OnboardException('fail-general'):
Any other unexpected device comms failure.
"""
self.check_reachability()
logger.info("COLLECT: device information %s", self.hostname)
try:
# Get Napalm Driver with Netmiko if needed
self.set_napalm_driver_name()
# Raise if no Napalm Driver not selected
self.check_napalm_driver_name()
driver = get_network_driver(self.napalm_driver)
# Create NAPALM optional arguments
napalm_optional_args = self.optional_args.copy()
if self.port:
napalm_optional_args["port"] = self.port
if self.secret:
napalm_optional_args["secret"] = self.secret
napalm_device = driver(
hostname=self.hostname,
username=self.username,
password=<PASSWORD>,
timeout=self.timeout,
optional_args=napalm_optional_args,
)
napalm_device.open()
logger.info("COLLECT: device facts")
self.facts = napalm_device.get_facts()
logger.info("COLLECT: device interface IPs")
self.ip_ifs = napalm_device.get_interfaces_ip()
module_name = PLUGIN_SETTINGS["onboarding_extensions_map"].get(self.napalm_driver)
if module_name and self.load_driver_extension:
try:
module = importlib.import_module(module_name)
driver_addon_class = module.OnboardingDriverExtensions(napalm_device=napalm_device)
self.onboarding_class = driver_addon_class.onboarding_class
self.driver_addon_result = driver_addon_class.ext_result
except ModuleNotFoundError:
raise OnboardException(
reason="fail-general",
message=f"ERROR: ModuleNotFoundError: Onboarding extension for napalm driver {self.napalm_driver} configured but can not be imported per configuration",
)
except ImportError as exc:
raise OnboardException(reason="fail-general", message="ERROR: ImportError: %s" % exc.args[0])
elif module_name and not self.load_driver_extension:
logger.info("INFO: Skipping execution of driver extension")
else:
logger.info(
"INFO: No onboarding extension defined for napalm driver %s, using default napalm driver",
self.napalm_driver,
)
except ConnectionException as exc:
raise OnboardException(reason="fail-login", message=exc.args[0])
except CommandErrorException as exc:
raise OnboardException(reason="fail-execute", message=exc.args[0])
except Exception as exc:
raise OnboardException(reason="fail-general", message=str(exc))
def get_netdev_dict(self):
"""Construct network device dict."""
netdev_dict = {
"netdev_hostname": self.facts["hostname"],
"netdev_vendor": self.facts["vendor"].title(),
"netdev_model": self.facts["model"].lower(),
"netdev_serial_number": self.facts["serial_number"],
"netdev_mgmt_ifname": get_mgmt_info(hostname=self.hostname, ip_ifs=self.ip_ifs)[0],
"netdev_mgmt_pflen": get_mgmt_info(hostname=self.hostname, ip_ifs=self.ip_ifs)[1],
"netdev_netmiko_device_type": self.netmiko_device_type,
"onboarding_class": self.onboarding_class,
"driver_addon_result": self.driver_addon_result,
}
return netdev_dict
| """NetDev Keeper.
(c) 2020-2021 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import logging
import socket
from django.conf import settings
from napalm import get_network_driver
from napalm.base.exceptions import ConnectionException, CommandErrorException
from napalm.base.netmiko_helpers import netmiko_args
from netmiko.ssh_autodetect import SSHDetect
from netmiko.ssh_exception import NetMikoAuthenticationException
from netmiko.ssh_exception import NetMikoTimeoutException
from paramiko.ssh_exception import SSHException
from nautobot.dcim.models import Platform
from nautobot_device_onboarding.onboarding.onboarding import StandaloneOnboarding
from .constants import NETMIKO_TO_NAPALM_STATIC
from .exceptions import OnboardException
logger = logging.getLogger("rq.worker")
PLUGIN_SETTINGS = settings.PLUGINS_CONFIG["nautobot_device_onboarding"]
def get_mgmt_info(
hostname,
ip_ifs,
default_mgmt_if=PLUGIN_SETTINGS["default_management_interface"],
default_mgmt_pfxlen=PLUGIN_SETTINGS["default_management_prefix_length"],
):
"""Get the interface name and prefix length for the management interface.
Locate the interface assigned with the hostname value and retain
the interface name and IP prefix-length so that we can use it
when creating the IPAM IP-Address instance.
Note that in some cases (e.g., NAT) the hostname may differ than
the interface addresses present on the device. We need to handle this.
"""
for if_name, if_data in ip_ifs.items():
for if_addr, if_addr_data in if_data["ipv4"].items():
if if_addr == hostname:
return if_name, if_addr_data["prefix_length"]
return default_mgmt_if, default_mgmt_pfxlen
class NetdevKeeper:
"""Used to maintain information about the network device during the onboarding process."""
def __init__( # pylint: disable=R0913
self,
hostname,
port=None,
timeout=None,
username=None,
password=<PASSWORD>,
secret=None,
napalm_driver=None,
optional_args=None,
):
"""Initialize the network device keeper instance and ensure the required configuration parameters are provided.
Args:
hostname (str): IP Address or FQDN of an onboarded device
port (int): Port used to connect to an onboarded device
timeout (int): Connection timeout of an onboarded device
username (str): Device username (if unspecified, NAPALM_USERNAME settings variable will be used)
password (str): Device password (if unspecified, NAPALM_PASSWORD settings variable will be used)
secret (str): Device secret password (if unspecified, NAPALM_ARGS["secret"] settings variable will be used)
napalm_driver (str): Napalm driver name to use to onboard network device
optional_args (dict): Optional arguments passed to NAPALM and Netmiko
Raises:
OnboardException('fail-config'):
When any required config options are missing.
"""
# Attributes
self.hostname = hostname
self.port = port
self.timeout = timeout
self.username = username
self.password = password
self.secret = secret
self.napalm_driver = napalm_driver
# Netmiko and NAPALM expects optional_args to be a dictionary.
if isinstance(optional_args, dict):
self.optional_args = optional_args
elif optional_args is None:
self.optional_args = {}
else:
raise OnboardException(reason="fail-general", message="Optional arguments should be None or a dict")
self.facts = None
self.ip_ifs = None
self.netmiko_device_type = None
self.onboarding_class = StandaloneOnboarding
self.driver_addon_result = None
# Enable loading driver extensions
self.load_driver_extension = True
def check_reachability(self):
"""Ensure that the device at the mgmt-ipaddr provided is reachable.
We do this check before attempting other "show" commands so that we know we've got a
device that can be reached.
Raises:
OnboardException('fail-connect'):
When device unreachable
"""
logger.info("CHECK: IP %s:%s", self.hostname, self.port)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect((self.hostname, self.port))
except (socket.error, socket.timeout, ConnectionError):
raise OnboardException(
reason="fail-connect", message=f"ERROR device unreachable: {self.hostname}:{self.port}"
)
def guess_netmiko_device_type(self):
"""Guess the device type of host, based on Netmiko."""
guessed_device_type = None
netmiko_optional_args = netmiko_args(self.optional_args)
remote_device = {
"device_type": "autodetect",
"host": self.hostname,
"username": self.username,
"password": <PASSWORD>,
**netmiko_optional_args,
}
if self.secret:
remote_device["secret"] = self.secret
if self.port:
remote_device["port"] = self.port
if self.timeout:
remote_device["timeout"] = self.timeout
try:
logger.info("INFO guessing device type: %s", self.hostname)
guesser = SSHDetect(**remote_device)
guessed_device_type = guesser.autodetect()
logger.info("INFO guessed device type: %s", guessed_device_type)
except NetMikoAuthenticationException as err:
logger.error("ERROR %s", err)
raise OnboardException(reason="fail-login", message=f"ERROR: {str(err)}")
except (NetMikoTimeoutException, SSHException) as err:
logger.error("ERROR: %s", str(err))
raise OnboardException(reason="fail-connect", message=f"ERROR: {str(err)}")
except Exception as err:
logger.error("ERROR: %s", str(err))
raise OnboardException(reason="fail-general", message=f"ERROR: {str(err)}")
else:
if guessed_device_type is None:
logger.error("ERROR: Could not detect device type with SSHDetect")
raise OnboardException(
reason="fail-general", message="ERROR: Could not detect device type with SSHDetect"
)
return guessed_device_type
def set_napalm_driver_name(self):
"""Sets napalm driver name."""
if not self.napalm_driver:
netmiko_device_type = self.guess_netmiko_device_type()
logger.info("Guessed Netmiko Device Type: %s", netmiko_device_type)
self.netmiko_device_type = netmiko_device_type
platform_to_napalm_nautobot = {
platform.slug: platform.napalm_driver for platform in Platform.objects.all() if platform.napalm_driver
}
# Update Constants if Napalm driver is defined for Nautobot Platform
netmiko_to_napalm = {**NETMIKO_TO_NAPALM_STATIC, **platform_to_napalm_nautobot}
self.napalm_driver = netmiko_to_napalm.get(netmiko_device_type)
def check_napalm_driver_name(self):
"""Checks for napalm driver name."""
if not self.napalm_driver:
raise OnboardException(
reason="fail-general",
message=f"Onboarding for Platform {self.netmiko_device_type} not "
f"supported, as it has no specified NAPALM driver",
)
def get_onboarding_facts(self):
"""Gather information from the network device that is needed to onboard the device into the Nautobot system.
Raises:
OnboardException('fail-login'):
When unable to login to device
OnboardException('fail-execute'):
When unable to run commands to collect device information
OnboardException('fail-general'):
Any other unexpected device comms failure.
"""
self.check_reachability()
logger.info("COLLECT: device information %s", self.hostname)
try:
# Get Napalm Driver with Netmiko if needed
self.set_napalm_driver_name()
# Raise if no Napalm Driver not selected
self.check_napalm_driver_name()
driver = get_network_driver(self.napalm_driver)
# Create NAPALM optional arguments
napalm_optional_args = self.optional_args.copy()
if self.port:
napalm_optional_args["port"] = self.port
if self.secret:
napalm_optional_args["secret"] = self.secret
napalm_device = driver(
hostname=self.hostname,
username=self.username,
password=<PASSWORD>,
timeout=self.timeout,
optional_args=napalm_optional_args,
)
napalm_device.open()
logger.info("COLLECT: device facts")
self.facts = napalm_device.get_facts()
logger.info("COLLECT: device interface IPs")
self.ip_ifs = napalm_device.get_interfaces_ip()
module_name = PLUGIN_SETTINGS["onboarding_extensions_map"].get(self.napalm_driver)
if module_name and self.load_driver_extension:
try:
module = importlib.import_module(module_name)
driver_addon_class = module.OnboardingDriverExtensions(napalm_device=napalm_device)
self.onboarding_class = driver_addon_class.onboarding_class
self.driver_addon_result = driver_addon_class.ext_result
except ModuleNotFoundError:
raise OnboardException(
reason="fail-general",
message=f"ERROR: ModuleNotFoundError: Onboarding extension for napalm driver {self.napalm_driver} configured but can not be imported per configuration",
)
except ImportError as exc:
raise OnboardException(reason="fail-general", message="ERROR: ImportError: %s" % exc.args[0])
elif module_name and not self.load_driver_extension:
logger.info("INFO: Skipping execution of driver extension")
else:
logger.info(
"INFO: No onboarding extension defined for napalm driver %s, using default napalm driver",
self.napalm_driver,
)
except ConnectionException as exc:
raise OnboardException(reason="fail-login", message=exc.args[0])
except CommandErrorException as exc:
raise OnboardException(reason="fail-execute", message=exc.args[0])
except Exception as exc:
raise OnboardException(reason="fail-general", message=str(exc))
def get_netdev_dict(self):
"""Construct network device dict."""
netdev_dict = {
"netdev_hostname": self.facts["hostname"],
"netdev_vendor": self.facts["vendor"].title(),
"netdev_model": self.facts["model"].lower(),
"netdev_serial_number": self.facts["serial_number"],
"netdev_mgmt_ifname": get_mgmt_info(hostname=self.hostname, ip_ifs=self.ip_ifs)[0],
"netdev_mgmt_pflen": get_mgmt_info(hostname=self.hostname, ip_ifs=self.ip_ifs)[1],
"netdev_netmiko_device_type": self.netmiko_device_type,
"onboarding_class": self.onboarding_class,
"driver_addon_result": self.driver_addon_result,
}
return netdev_dict | en | 0.781307 | NetDev Keeper. (c) 2020-2021 Network To Code Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Get the interface name and prefix length for the management interface. Locate the interface assigned with the hostname value and retain the interface name and IP prefix-length so that we can use it when creating the IPAM IP-Address instance. Note that in some cases (e.g., NAT) the hostname may differ than the interface addresses present on the device. We need to handle this. Used to maintain information about the network device during the onboarding process. # pylint: disable=R0913 Initialize the network device keeper instance and ensure the required configuration parameters are provided. Args: hostname (str): IP Address or FQDN of an onboarded device port (int): Port used to connect to an onboarded device timeout (int): Connection timeout of an onboarded device username (str): Device username (if unspecified, NAPALM_USERNAME settings variable will be used) password (str): Device password (if unspecified, NAPALM_PASSWORD settings variable will be used) secret (str): Device secret password (if unspecified, NAPALM_ARGS["secret"] settings variable will be used) napalm_driver (str): Napalm driver name to use to onboard network device optional_args (dict): Optional arguments passed to NAPALM and Netmiko Raises: OnboardException('fail-config'): When any required config options are missing. # Attributes # Netmiko and NAPALM expects optional_args to be a dictionary. # Enable loading driver extensions Ensure that the device at the mgmt-ipaddr provided is reachable. We do this check before attempting other "show" commands so that we know we've got a device that can be reached. Raises: OnboardException('fail-connect'): When device unreachable Guess the device type of host, based on Netmiko. Sets napalm driver name. # Update Constants if Napalm driver is defined for Nautobot Platform Checks for napalm driver name. Gather information from the network device that is needed to onboard the device into the Nautobot system. Raises: OnboardException('fail-login'): When unable to login to device OnboardException('fail-execute'): When unable to run commands to collect device information OnboardException('fail-general'): Any other unexpected device comms failure. # Get Napalm Driver with Netmiko if needed # Raise if no Napalm Driver not selected # Create NAPALM optional arguments Construct network device dict. | 1.771995 | 2 |
call_variants.py | CGL-Deeplearning/FRIDAY | 6 | 6631402 | import argparse
import sys
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
import multiprocessing
from torch.autograd import Variable
from modules.models.Seq2Seq_atn import EncoderCRNN, AttnDecoderRNN
from modules.core.dataloader_test import SequenceDataset
from modules.handlers.TextColor import TextColor
from collections import defaultdict
from modules.handlers.VcfWriter import VCFWriter
from modules.handlers.FileManager import FileManager
import operator
import pickle
from tqdm import tqdm
import os
import time
"""
This script uses a trained model to call variants on a given set of images generated from the genome.
The process is:
- Create a prediction table/dictionary using a trained neural network
- Convert those predictions to a VCF file
INPUT:
- A trained model
- Set of images for prediction
Output:
- A VCF file containing all the variants.
"""
FLANK_SIZE = 10
SNP = 1
IN = 2
DEL = 3
HOM = 0
HET = 1
HOM_ALT = 2
prediction_dict = defaultdict(list)
reference_dict = defaultdict(tuple)
def predict(test_file, batch_size, model_path, gpu_mode, num_workers):
"""
Create a prediction table/dictionary of an images set using a trained model.
:param test_file: File to predict on
:param batch_size: Batch size used for prediction
:param model_path: Path to a trained model
:param gpu_mode: If true, predictions will be done over GPU
:param num_workers: Number of workers to be used by the dataloader
:return: Prediction dictionary
"""
# the prediction table/dictionary
chromosome_name = ''
transformations = transforms.Compose([transforms.ToTensor()])
sys.stderr.write(TextColor.PURPLE + 'Loading data\n' + TextColor.END)
test_dset = SequenceDataset(test_file, transformations)
testloader = DataLoader(test_dset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers
)
sys.stderr.write(TextColor.PURPLE + 'Data loading finished\n' + TextColor.END)
# load the model
checkpoint = torch.load(model_path, map_location='cpu')
encoder_state_dict = checkpoint['encoder_state_dict']
decoder_state_dict = checkpoint['decoder_state_dict']
from collections import OrderedDict
new_encoder_state_dict = OrderedDict()
new_decoder_state_dict = OrderedDict()
for k, v in encoder_state_dict.items():
name = k
if k[0:7] == 'module.':
name = k[7:] # remove `module.`
new_encoder_state_dict[name] = v
for k, v in decoder_state_dict.items():
name = k
if k[0:7] == 'module.':
name = k[7:] # remove `module.`
new_decoder_state_dict[name] = v
hidden_size = 256
encoder_model = EncoderCRNN(image_channels=10, hidden_size=hidden_size)
decoder_model = AttnDecoderRNN(hidden_size=hidden_size, num_classes=6, max_length=1)
encoder_model.load_state_dict(new_encoder_state_dict)
decoder_model.load_state_dict(new_decoder_state_dict)
encoder_model.cpu()
decoder_model.cpu()
if gpu_mode:
encoder_model = encoder_model.cuda()
encoder_model = torch.nn.DataParallel(encoder_model).cuda()
decoder_model = decoder_model.cuda()
decoder_model = torch.nn.DataParallel(decoder_model).cuda()
# Change model to 'eval' mode (BN uses moving mean/var).
encoder_model.eval()
decoder_model.eval()
sys.stderr.write(TextColor.PURPLE + 'MODEL LOADED\n' + TextColor.END)
# TO HERE
with torch.no_grad():
for images, labels, positional_info in tqdm(testloader, file=sys.stdout, dynamic_ncols=True):
if gpu_mode:
# encoder_hidden = encoder_hidden.cuda()
images = images.cuda()
labels = labels.cuda()
decoder_input = torch.LongTensor(labels.size(0), 1).zero_()
encoder_hidden = torch.FloatTensor(labels.size(0), 2, hidden_size).zero_()
# if gpu_mode:
# decoder_input = decoder_input.cuda()
# encoder_hidden = encoder_hidden.cuda()
chr_name, start_positions, reference_seqs, allele_dict_paths = positional_info
window_size = images.size(2) - 2 * FLANK_SIZE
index_start = FLANK_SIZE
end_index = index_start + window_size
unrolling_genomic_position = np.zeros((images.size(0)), dtype=np.int64)
for seq_index in range(index_start, end_index):
x = images[:, :, seq_index - FLANK_SIZE:seq_index + FLANK_SIZE + 1, :]
output_enc, hidden_dec = encoder_model(x, encoder_hidden)
output_dec, decoder_hidden, attn = decoder_model(decoder_input, output_enc, hidden_dec)
encoder_hidden = decoder_hidden.detach()
topv, topi = output_dec.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
# One dimensional softmax is used to convert the logits to probability distribution
m = nn.Softmax(dim=1)
soft_probs = m(output_dec)
output_preds = soft_probs.cpu()
# record each of the predictions from a batch prediction
batches = images.size(0)
for batch in range(batches):
allele_dict_path = allele_dict_paths[batch]
chromosome_name = chr_name[batch]
reference_seq = reference_seqs[batch]
# current_genomic_position = int(start_positions[batch])
current_genomic_position = int(start_positions[batch]) + unrolling_genomic_position[batch]
ref_base = reference_seq[seq_index]
if ref_base == '*':
continue
# true_label = labels[batch, seq_index - index_start]
# fake_probs = [0.0] * 6
# fake_probs[true_label] = 1.0
# top_n, top_i = torch.FloatTensor(fake_probs).topk(1)
# predicted_label = top_i[0].item()
# reference_dict[current_genomic_position] = (ref_base, allele_dict_path)
# prediction_dict[current_genomic_position].append((predicted_label, fake_probs))
preds = output_preds[batch, :].data
top_n, top_i = preds.topk(1)
predicted_label = top_i[0].item()
reference_dict[current_genomic_position] = (ref_base, allele_dict_path)
prediction_dict[current_genomic_position].append((predicted_label, preds))
if ref_base != '*':
unrolling_genomic_position[batch] += 1
return chromosome_name
def get_record_from_prediction(pos, alleles):
predictions = prediction_dict[pos]
genotype, qual, gq = VCFWriter.process_prediction(pos, predictions)
alts = list(allele[0] for allele in alleles)
ref_base = reference_dict[pos][0][0]
return ref_base, alts, genotype, qual, gq
def produce_vcf_records(chromosome_name, output_dir, thread_no, pos_list):
"""
Convert prediction dictionary to a VCF file
:param: arg_tuple: Tuple of arguments containing these values:
- chromosome_name: Chromosome name
- pos_list: List of positions where we will search for variants
- prediction_dict: prediction dictionary containing predictions of each image records
- reference_dict: Dictionary containing reference information
- bam_file_path: Path to the BAM file
- sample_name: Name of the sample in the BAM file
- output_dir: Output directory
- thread_id: Unique id assigned to each thread
:return:
"""
# object that can write and handle VCF
# vcf_writer = VCFWriter(bam_file_path, sample_name, output_dir, thread_id)
# collate multi-allelic records to a single record
current_allele_dict = ''
allele_dict = {}
record_file = open(output_dir + chromosome_name + "_" + str(thread_no) + ".tsv", 'w')
for pos in pos_list:
allele_dict_path = reference_dict[pos][1]
if allele_dict_path != current_allele_dict:
allele_dict = pickle.load(open(allele_dict_path, 'rb'))
current_allele_dict = allele_dict_path
if pos not in allele_dict:
continue
alleles = allele_dict[pos]
record = get_record_from_prediction(pos, alleles)
if record is None:
continue
ref_base, alts, genotype, qual, gq = record
if genotype == '0/0':
continue
# print('BEFORE', record)
record = VCFWriter.get_proper_alleles(record)
ref, alts, qual, gq, genotype = record
# print('AFTER', record)
if len(alts) == 1:
alts.append('.')
rec_end = int(pos + len(ref) - 1)
record_string = chromosome_name + "\t" + str(pos) + "\t" + str(rec_end) + "\t" + ref + "\t" + '\t'.join(alts) \
+ "\t" + genotype + "\t" + str(qual) + "\t" + str(gq) + "\t" + "\n"
record_file.write(record_string)
def merge_call_files(vcf_file_directory):
filemanager_object = FileManager()
# get all bed file paths from the directory
file_paths = filemanager_object.get_file_paths_from_directory(vcf_file_directory)
all_records = []
for file_path in file_paths:
with open(file_path, 'r') as tsv:
for line in tsv:
chr_name, pos_st, pos_end, ref, alt1, alt2, genotype, qual, gq = line.strip().split('\t')
alts = []
pos_st, pos_end, qual, gq = int(pos_st), int(pos_end), float(qual), float(gq)
if alt1 != '.':
alts.append(alt1)
if alt2 != '.':
alts.append(alt2)
all_records.append((chr_name, pos_st, pos_end, ref, alts, genotype, qual, gq))
filemanager_object.delete_files(file_paths)
os.rmdir(vcf_file_directory)
return all_records
def call_variant(csv_file, batch_size, model_path, gpu_mode, num_workers, bam_file_path, sample_name, output_dir,
vcf_dir, max_threads):
program_start_time = time.time()
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "SAMPLE NAME: " + sample_name + "\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "PLEASE USE --sample_name TO CHANGE SAMPLE NAME.\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "OUTPUT DIRECTORY: " + output_dir + "\n")
chr_name = predict(csv_file, batch_size, model_path, gpu_mode, num_workers)
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "PREDICTION GENERATED SUCCESSFULLY.\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "COMPILING PREDICTIONS TO CALL VARIANTS.\n")
pos_list = list(prediction_dict.keys())
each_chunk_size = int(len(pos_list) / max_threads)
thread_no = 1
# produce_vcf_records(chr_name, vcf_dir, thread_no, pos_list)
# exit()
for i in tqdm(range(0, len(pos_list), each_chunk_size), file=sys.stdout, dynamic_ncols=True):
start_position = i
end_position = min(i + each_chunk_size, len(pos_list))
sub_pos = pos_list[start_position:end_position]
# gather all parameters
args = (chr_name, vcf_dir, thread_no, sub_pos)
p = multiprocessing.Process(target=produce_vcf_records, args=args)
p.start()
thread_no += 1
# wait until we have room for new processes to start
while True:
if len(multiprocessing.active_children()) < max_threads:
break
# wait until we have room for new processes to start
while True:
if len(multiprocessing.active_children()) == 0:
break
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "VARIANT CALLING COMPLETE.\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "MERGING FILES.\n")
all_calls = merge_call_files(vcf_dir)
# sort based on position
all_calls.sort(key=operator.itemgetter(1))
# print(all_calls)
last_end = 0
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "WRITING VCF.\n")
vcf_writer = VCFWriter(bam_file_path, sample_name, output_dir)
for record in all_calls:
# get the record filter ('PASS' or not)
rec_filter = VCFWriter.get_filter(record, last_end)
# get proper alleles. INDEL alleles are handled here.
# record = VCFWriter.get_proper_alleles(record)
chrm, st_pos, end_pos, ref, alt_field, genotype, phred_qual, phred_gq = record
# if genotype is not HOM keep track of where the previous record ended
if genotype != '0/0':
# HOM
last_end = end_pos
# add the record to VCF
vcf_writer.write_vcf_record(chrm, st_pos, end_pos, ref, alt_field, genotype, phred_qual, phred_gq, rec_filter)
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "VARIANT CALLING COMPLETE.\n")
program_end_time = time.time()
sys.stderr.write(TextColor.PURPLE + "TIME ELAPSED: " + str(program_end_time - program_start_time) + "\n")
def handle_output_directory(output_dir):
"""
Process the output directory and return a valid directory where we save the output
:param output_dir: Output directory path
:return:
"""
# process the output directory
if output_dir[-1] != "/":
output_dir += "/"
if not os.path.exists(output_dir):
os.mkdir(output_dir)
vcf_path = output_dir + "vcfs" + "/"
if not os.path.exists(vcf_path):
os.mkdir(vcf_path)
return output_dir, vcf_path
if __name__ == '__main__':
'''
Processes arguments and performs tasks.
'''
parser = argparse.ArgumentParser()
parser.add_argument(
"--csv_file",
type=str,
required=True,
help="CSV file containing all image segments for prediction."
)
parser.add_argument(
"--bam_file",
type=str,
required=True,
help="Path to the BAM file."
)
parser.add_argument(
"--batch_size",
type=int,
required=False,
default=100,
help="Batch size for testing, default is 100."
)
parser.add_argument(
"--num_workers",
type=int,
required=False,
default=4,
help="Batch size for testing, default is 100."
)
parser.add_argument(
"--model_path",
type=str,
default='./CNN.pkl',
help="Saved model path."
)
parser.add_argument(
"--gpu_mode",
type=bool,
default=False,
help="If true then cuda is on."
)
parser.add_argument(
"--sample_name",
type=str,
required=False,
default='NA12878',
help="Sample name of the sequence."
)
parser.add_argument(
"--output_dir",
type=str,
required=False,
default='vcf_output',
help="Output directory."
)
parser.add_argument(
"--max_threads",
type=int,
default=8,
help="Number of maximum threads for this region."
)
FLAGS, unparsed = parser.parse_known_args()
FLAGS.output_dir, vcf_dir = handle_output_directory(FLAGS.output_dir)
call_variant(FLAGS.csv_file,
FLAGS.batch_size,
FLAGS.model_path,
FLAGS.gpu_mode,
FLAGS.num_workers,
FLAGS.bam_file,
FLAGS.sample_name,
FLAGS.output_dir,
vcf_dir,
FLAGS.max_threads)
| import argparse
import sys
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
import multiprocessing
from torch.autograd import Variable
from modules.models.Seq2Seq_atn import EncoderCRNN, AttnDecoderRNN
from modules.core.dataloader_test import SequenceDataset
from modules.handlers.TextColor import TextColor
from collections import defaultdict
from modules.handlers.VcfWriter import VCFWriter
from modules.handlers.FileManager import FileManager
import operator
import pickle
from tqdm import tqdm
import os
import time
"""
This script uses a trained model to call variants on a given set of images generated from the genome.
The process is:
- Create a prediction table/dictionary using a trained neural network
- Convert those predictions to a VCF file
INPUT:
- A trained model
- Set of images for prediction
Output:
- A VCF file containing all the variants.
"""
FLANK_SIZE = 10
SNP = 1
IN = 2
DEL = 3
HOM = 0
HET = 1
HOM_ALT = 2
prediction_dict = defaultdict(list)
reference_dict = defaultdict(tuple)
def predict(test_file, batch_size, model_path, gpu_mode, num_workers):
"""
Create a prediction table/dictionary of an images set using a trained model.
:param test_file: File to predict on
:param batch_size: Batch size used for prediction
:param model_path: Path to a trained model
:param gpu_mode: If true, predictions will be done over GPU
:param num_workers: Number of workers to be used by the dataloader
:return: Prediction dictionary
"""
# the prediction table/dictionary
chromosome_name = ''
transformations = transforms.Compose([transforms.ToTensor()])
sys.stderr.write(TextColor.PURPLE + 'Loading data\n' + TextColor.END)
test_dset = SequenceDataset(test_file, transformations)
testloader = DataLoader(test_dset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers
)
sys.stderr.write(TextColor.PURPLE + 'Data loading finished\n' + TextColor.END)
# load the model
checkpoint = torch.load(model_path, map_location='cpu')
encoder_state_dict = checkpoint['encoder_state_dict']
decoder_state_dict = checkpoint['decoder_state_dict']
from collections import OrderedDict
new_encoder_state_dict = OrderedDict()
new_decoder_state_dict = OrderedDict()
for k, v in encoder_state_dict.items():
name = k
if k[0:7] == 'module.':
name = k[7:] # remove `module.`
new_encoder_state_dict[name] = v
for k, v in decoder_state_dict.items():
name = k
if k[0:7] == 'module.':
name = k[7:] # remove `module.`
new_decoder_state_dict[name] = v
hidden_size = 256
encoder_model = EncoderCRNN(image_channels=10, hidden_size=hidden_size)
decoder_model = AttnDecoderRNN(hidden_size=hidden_size, num_classes=6, max_length=1)
encoder_model.load_state_dict(new_encoder_state_dict)
decoder_model.load_state_dict(new_decoder_state_dict)
encoder_model.cpu()
decoder_model.cpu()
if gpu_mode:
encoder_model = encoder_model.cuda()
encoder_model = torch.nn.DataParallel(encoder_model).cuda()
decoder_model = decoder_model.cuda()
decoder_model = torch.nn.DataParallel(decoder_model).cuda()
# Change model to 'eval' mode (BN uses moving mean/var).
encoder_model.eval()
decoder_model.eval()
sys.stderr.write(TextColor.PURPLE + 'MODEL LOADED\n' + TextColor.END)
# TO HERE
with torch.no_grad():
for images, labels, positional_info in tqdm(testloader, file=sys.stdout, dynamic_ncols=True):
if gpu_mode:
# encoder_hidden = encoder_hidden.cuda()
images = images.cuda()
labels = labels.cuda()
decoder_input = torch.LongTensor(labels.size(0), 1).zero_()
encoder_hidden = torch.FloatTensor(labels.size(0), 2, hidden_size).zero_()
# if gpu_mode:
# decoder_input = decoder_input.cuda()
# encoder_hidden = encoder_hidden.cuda()
chr_name, start_positions, reference_seqs, allele_dict_paths = positional_info
window_size = images.size(2) - 2 * FLANK_SIZE
index_start = FLANK_SIZE
end_index = index_start + window_size
unrolling_genomic_position = np.zeros((images.size(0)), dtype=np.int64)
for seq_index in range(index_start, end_index):
x = images[:, :, seq_index - FLANK_SIZE:seq_index + FLANK_SIZE + 1, :]
output_enc, hidden_dec = encoder_model(x, encoder_hidden)
output_dec, decoder_hidden, attn = decoder_model(decoder_input, output_enc, hidden_dec)
encoder_hidden = decoder_hidden.detach()
topv, topi = output_dec.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
# One dimensional softmax is used to convert the logits to probability distribution
m = nn.Softmax(dim=1)
soft_probs = m(output_dec)
output_preds = soft_probs.cpu()
# record each of the predictions from a batch prediction
batches = images.size(0)
for batch in range(batches):
allele_dict_path = allele_dict_paths[batch]
chromosome_name = chr_name[batch]
reference_seq = reference_seqs[batch]
# current_genomic_position = int(start_positions[batch])
current_genomic_position = int(start_positions[batch]) + unrolling_genomic_position[batch]
ref_base = reference_seq[seq_index]
if ref_base == '*':
continue
# true_label = labels[batch, seq_index - index_start]
# fake_probs = [0.0] * 6
# fake_probs[true_label] = 1.0
# top_n, top_i = torch.FloatTensor(fake_probs).topk(1)
# predicted_label = top_i[0].item()
# reference_dict[current_genomic_position] = (ref_base, allele_dict_path)
# prediction_dict[current_genomic_position].append((predicted_label, fake_probs))
preds = output_preds[batch, :].data
top_n, top_i = preds.topk(1)
predicted_label = top_i[0].item()
reference_dict[current_genomic_position] = (ref_base, allele_dict_path)
prediction_dict[current_genomic_position].append((predicted_label, preds))
if ref_base != '*':
unrolling_genomic_position[batch] += 1
return chromosome_name
def get_record_from_prediction(pos, alleles):
predictions = prediction_dict[pos]
genotype, qual, gq = VCFWriter.process_prediction(pos, predictions)
alts = list(allele[0] for allele in alleles)
ref_base = reference_dict[pos][0][0]
return ref_base, alts, genotype, qual, gq
def produce_vcf_records(chromosome_name, output_dir, thread_no, pos_list):
"""
Convert prediction dictionary to a VCF file
:param: arg_tuple: Tuple of arguments containing these values:
- chromosome_name: Chromosome name
- pos_list: List of positions where we will search for variants
- prediction_dict: prediction dictionary containing predictions of each image records
- reference_dict: Dictionary containing reference information
- bam_file_path: Path to the BAM file
- sample_name: Name of the sample in the BAM file
- output_dir: Output directory
- thread_id: Unique id assigned to each thread
:return:
"""
# object that can write and handle VCF
# vcf_writer = VCFWriter(bam_file_path, sample_name, output_dir, thread_id)
# collate multi-allelic records to a single record
current_allele_dict = ''
allele_dict = {}
record_file = open(output_dir + chromosome_name + "_" + str(thread_no) + ".tsv", 'w')
for pos in pos_list:
allele_dict_path = reference_dict[pos][1]
if allele_dict_path != current_allele_dict:
allele_dict = pickle.load(open(allele_dict_path, 'rb'))
current_allele_dict = allele_dict_path
if pos not in allele_dict:
continue
alleles = allele_dict[pos]
record = get_record_from_prediction(pos, alleles)
if record is None:
continue
ref_base, alts, genotype, qual, gq = record
if genotype == '0/0':
continue
# print('BEFORE', record)
record = VCFWriter.get_proper_alleles(record)
ref, alts, qual, gq, genotype = record
# print('AFTER', record)
if len(alts) == 1:
alts.append('.')
rec_end = int(pos + len(ref) - 1)
record_string = chromosome_name + "\t" + str(pos) + "\t" + str(rec_end) + "\t" + ref + "\t" + '\t'.join(alts) \
+ "\t" + genotype + "\t" + str(qual) + "\t" + str(gq) + "\t" + "\n"
record_file.write(record_string)
def merge_call_files(vcf_file_directory):
filemanager_object = FileManager()
# get all bed file paths from the directory
file_paths = filemanager_object.get_file_paths_from_directory(vcf_file_directory)
all_records = []
for file_path in file_paths:
with open(file_path, 'r') as tsv:
for line in tsv:
chr_name, pos_st, pos_end, ref, alt1, alt2, genotype, qual, gq = line.strip().split('\t')
alts = []
pos_st, pos_end, qual, gq = int(pos_st), int(pos_end), float(qual), float(gq)
if alt1 != '.':
alts.append(alt1)
if alt2 != '.':
alts.append(alt2)
all_records.append((chr_name, pos_st, pos_end, ref, alts, genotype, qual, gq))
filemanager_object.delete_files(file_paths)
os.rmdir(vcf_file_directory)
return all_records
def call_variant(csv_file, batch_size, model_path, gpu_mode, num_workers, bam_file_path, sample_name, output_dir,
vcf_dir, max_threads):
program_start_time = time.time()
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "SAMPLE NAME: " + sample_name + "\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "PLEASE USE --sample_name TO CHANGE SAMPLE NAME.\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "OUTPUT DIRECTORY: " + output_dir + "\n")
chr_name = predict(csv_file, batch_size, model_path, gpu_mode, num_workers)
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "PREDICTION GENERATED SUCCESSFULLY.\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "COMPILING PREDICTIONS TO CALL VARIANTS.\n")
pos_list = list(prediction_dict.keys())
each_chunk_size = int(len(pos_list) / max_threads)
thread_no = 1
# produce_vcf_records(chr_name, vcf_dir, thread_no, pos_list)
# exit()
for i in tqdm(range(0, len(pos_list), each_chunk_size), file=sys.stdout, dynamic_ncols=True):
start_position = i
end_position = min(i + each_chunk_size, len(pos_list))
sub_pos = pos_list[start_position:end_position]
# gather all parameters
args = (chr_name, vcf_dir, thread_no, sub_pos)
p = multiprocessing.Process(target=produce_vcf_records, args=args)
p.start()
thread_no += 1
# wait until we have room for new processes to start
while True:
if len(multiprocessing.active_children()) < max_threads:
break
# wait until we have room for new processes to start
while True:
if len(multiprocessing.active_children()) == 0:
break
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "VARIANT CALLING COMPLETE.\n")
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "MERGING FILES.\n")
all_calls = merge_call_files(vcf_dir)
# sort based on position
all_calls.sort(key=operator.itemgetter(1))
# print(all_calls)
last_end = 0
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "WRITING VCF.\n")
vcf_writer = VCFWriter(bam_file_path, sample_name, output_dir)
for record in all_calls:
# get the record filter ('PASS' or not)
rec_filter = VCFWriter.get_filter(record, last_end)
# get proper alleles. INDEL alleles are handled here.
# record = VCFWriter.get_proper_alleles(record)
chrm, st_pos, end_pos, ref, alt_field, genotype, phred_qual, phred_gq = record
# if genotype is not HOM keep track of where the previous record ended
if genotype != '0/0':
# HOM
last_end = end_pos
# add the record to VCF
vcf_writer.write_vcf_record(chrm, st_pos, end_pos, ref, alt_field, genotype, phred_qual, phred_gq, rec_filter)
sys.stderr.write(TextColor.GREEN + "INFO: " + TextColor.END + "VARIANT CALLING COMPLETE.\n")
program_end_time = time.time()
sys.stderr.write(TextColor.PURPLE + "TIME ELAPSED: " + str(program_end_time - program_start_time) + "\n")
def handle_output_directory(output_dir):
"""
Process the output directory and return a valid directory where we save the output
:param output_dir: Output directory path
:return:
"""
# process the output directory
if output_dir[-1] != "/":
output_dir += "/"
if not os.path.exists(output_dir):
os.mkdir(output_dir)
vcf_path = output_dir + "vcfs" + "/"
if not os.path.exists(vcf_path):
os.mkdir(vcf_path)
return output_dir, vcf_path
if __name__ == '__main__':
'''
Processes arguments and performs tasks.
'''
parser = argparse.ArgumentParser()
parser.add_argument(
"--csv_file",
type=str,
required=True,
help="CSV file containing all image segments for prediction."
)
parser.add_argument(
"--bam_file",
type=str,
required=True,
help="Path to the BAM file."
)
parser.add_argument(
"--batch_size",
type=int,
required=False,
default=100,
help="Batch size for testing, default is 100."
)
parser.add_argument(
"--num_workers",
type=int,
required=False,
default=4,
help="Batch size for testing, default is 100."
)
parser.add_argument(
"--model_path",
type=str,
default='./CNN.pkl',
help="Saved model path."
)
parser.add_argument(
"--gpu_mode",
type=bool,
default=False,
help="If true then cuda is on."
)
parser.add_argument(
"--sample_name",
type=str,
required=False,
default='NA12878',
help="Sample name of the sequence."
)
parser.add_argument(
"--output_dir",
type=str,
required=False,
default='vcf_output',
help="Output directory."
)
parser.add_argument(
"--max_threads",
type=int,
default=8,
help="Number of maximum threads for this region."
)
FLAGS, unparsed = parser.parse_known_args()
FLAGS.output_dir, vcf_dir = handle_output_directory(FLAGS.output_dir)
call_variant(FLAGS.csv_file,
FLAGS.batch_size,
FLAGS.model_path,
FLAGS.gpu_mode,
FLAGS.num_workers,
FLAGS.bam_file,
FLAGS.sample_name,
FLAGS.output_dir,
vcf_dir,
FLAGS.max_threads)
| en | 0.695669 | This script uses a trained model to call variants on a given set of images generated from the genome. The process is: - Create a prediction table/dictionary using a trained neural network - Convert those predictions to a VCF file INPUT: - A trained model - Set of images for prediction Output: - A VCF file containing all the variants. Create a prediction table/dictionary of an images set using a trained model. :param test_file: File to predict on :param batch_size: Batch size used for prediction :param model_path: Path to a trained model :param gpu_mode: If true, predictions will be done over GPU :param num_workers: Number of workers to be used by the dataloader :return: Prediction dictionary # the prediction table/dictionary # load the model # remove `module.` # remove `module.` # Change model to 'eval' mode (BN uses moving mean/var). # TO HERE # encoder_hidden = encoder_hidden.cuda() # if gpu_mode: # decoder_input = decoder_input.cuda() # encoder_hidden = encoder_hidden.cuda() # detach from history as input # One dimensional softmax is used to convert the logits to probability distribution # record each of the predictions from a batch prediction # current_genomic_position = int(start_positions[batch]) # true_label = labels[batch, seq_index - index_start] # fake_probs = [0.0] * 6 # fake_probs[true_label] = 1.0 # top_n, top_i = torch.FloatTensor(fake_probs).topk(1) # predicted_label = top_i[0].item() # reference_dict[current_genomic_position] = (ref_base, allele_dict_path) # prediction_dict[current_genomic_position].append((predicted_label, fake_probs)) Convert prediction dictionary to a VCF file :param: arg_tuple: Tuple of arguments containing these values: - chromosome_name: Chromosome name - pos_list: List of positions where we will search for variants - prediction_dict: prediction dictionary containing predictions of each image records - reference_dict: Dictionary containing reference information - bam_file_path: Path to the BAM file - sample_name: Name of the sample in the BAM file - output_dir: Output directory - thread_id: Unique id assigned to each thread :return: # object that can write and handle VCF # vcf_writer = VCFWriter(bam_file_path, sample_name, output_dir, thread_id) # collate multi-allelic records to a single record # print('BEFORE', record) # print('AFTER', record) # get all bed file paths from the directory # produce_vcf_records(chr_name, vcf_dir, thread_no, pos_list) # exit() # gather all parameters # wait until we have room for new processes to start # wait until we have room for new processes to start # sort based on position # print(all_calls) # get the record filter ('PASS' or not) # get proper alleles. INDEL alleles are handled here. # record = VCFWriter.get_proper_alleles(record) # if genotype is not HOM keep track of where the previous record ended # HOM # add the record to VCF Process the output directory and return a valid directory where we save the output :param output_dir: Output directory path :return: # process the output directory Processes arguments and performs tasks. | 2.561951 | 3 |
utils/az_zone.py | dcalacci/labbox | 1 | 6631403 | import datetime
import numpy as np
import boto3
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
#import aws_spot_bot.config.default as uconf
from .. import configs
from configs import default as uconf
class AZZone():
def __init__(self, region, name):
self.region = region
self.name = name
boto3.setup_default_session(region_name=self.region)
self.client = boto3.client('ec2')
self.spot_pricing_history = None
self.score = None
@property
def spot_price_variance(self):
prices = [float(record['SpotPrice']) for record in self.spot_pricing_history]
return np.var(prices)
@property
def spot_price_mean(self):
prices = [float(record['SpotPrice']) for record in self.spot_pricing_history]
return np.mean(prices)
@property
def current_price(self):
if self.spot_pricing_history:
return float(self.spot_pricing_history[0]['SpotPrice'])
elif self.spot_pricing_history == []:
return None
else:
raise Exception("You must fetch the history before calling this property")
def get_spot_pricing_history(self, instance_types, product_descriptions=['Linux/UNIX']):
""" Returns the spot price history given a specified AZ and region."""
print("Getting spot prices for", self.name)
response = self.client.describe_spot_price_history(
DryRun=False,
StartTime=datetime.datetime.now() - datetime.timedelta(days=7),
EndTime=datetime.datetime.now(),
InstanceTypes=instance_types,
AvailabilityZone=self.name,
ProductDescriptions=product_descriptions)
self.spot_pricing_history = response.get('SpotPriceHistory', [])
def calculate_score(self, instance_types, bid, update=False):
if self.spot_pricing_history is None:
self.get_spot_pricing_history(instance_types)
elif update:
self.get_spot_pricing_history(instance_types)
# TODO: This should be removed but I am lazy and this is easier than catching exceptions
# @jgre can you fix?
if self.spot_pricing_history == []:
self.score = -1e10
return -1e10
# We are not interested in this AZ if its more than the bid, so lets just return
if self.current_price > bid:
self.score = 0
return 0
# Here we multiply each item by a weight.
# These weights are arbitrary and probably not ideal.
# There is much room for improvement on this scoring algorithm, but this algorithm
# works for most light use cases. Feel free to contribute!
current_price_s = bid - self.current_price
variance_s = -5 * (self.spot_price_variance * self.spot_price_mean)
mean_s = 0.5 * (bid - self.spot_price_mean)
self.score = current_price_s + variance_s + mean_s
return self.score
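# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Shows how the scoring above might be used to rank a few zones against a bid. The
# region, zone suffixes, instance type, and bid below are placeholder assumptions.
def _pick_best_zone_example():
    zones = [AZZone('us-east-1', 'us-east-1{}'.format(suffix)) for suffix in 'abc']
    bid = 0.50
    for zone in zones:
        # calculate_score fetches the 7-day price history on the first call
        zone.calculate_score(['m4.large'], bid)
    best = max(zones, key=lambda z: z.score)
    print('best zone:', best.name, 'score:', best.score, 'current price:', best.current_price)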
| import datetime
import numpy as np
import boto3
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
#import aws_spot_bot.config.default as uconf
from .. import configs
from configs import default as uconf
class AZZone():
def __init__(self, region, name):
self.region = region
self.name = name
boto3.setup_default_session(region_name=self.region)
self.client = boto3.client('ec2')
self.spot_pricing_history = None
self.score = None
@property
def spot_price_variance(self):
prices = [float(record['SpotPrice']) for record in self.spot_pricing_history]
return np.var(prices)
@property
def spot_price_mean(self):
prices = [float(record['SpotPrice']) for record in self.spot_pricing_history]
return np.mean(prices)
@property
def current_price(self):
if self.spot_pricing_history:
return float(self.spot_pricing_history[0]['SpotPrice'])
elif self.spot_pricing_history == []:
return None
else:
raise Exception("You must fetch the history before calling this property")
def get_spot_pricing_history(self, instance_types, product_descriptions=['Linux/UNIX']):
""" Returns the spot price history given a specified AZ and region."""
print("Getting spot prices for", self.name)
response = self.client.describe_spot_price_history(
DryRun=False,
StartTime=datetime.datetime.now() - datetime.timedelta(days=7),
EndTime=datetime.datetime.now(),
InstanceTypes=instance_types,
AvailabilityZone=self.name,
ProductDescriptions=product_descriptions)
self.spot_pricing_history = response.get('SpotPriceHistory', [])
def calculate_score(self, instance_types, bid, update=False):
if self.spot_pricing_history is None:
self.get_spot_pricing_history(instance_types)
elif update:
self.get_spot_pricing_history(instance_types)
# TODO: This should be removed but I am lazy and this is easier than catching exceptions
# @jgre can you fix?
if self.spot_pricing_history == []:
self.score = -1e10
return -1e10
# We are not interested in this AZ if its more than the bid, so lets just return
if self.current_price > bid:
self.score = 0
return 0
# Here we multiply each item by a weight.
# These weights are arbitrary and probably not ideal.
# There is much room for improvement on this scoring algorithm, but this algorithm
# works for most light use cases. Feel free to contribute!
current_price_s = bid - self.current_price
variance_s = -5 * (self.spot_price_variance * self.spot_price_mean)
mean_s = 0.5 * (bid - self.spot_price_mean)
self.score = current_price_s + variance_s + mean_s
return self.score
| en | 0.893165 | #import aws_spot_bot.config.default as uconf Returns the spot price history given a specified AZ and region. # TODO: This should be removed but I am lazy and this is easier than catching exceptions # @jgre can you fix? # We are not interested in this AZ if its more than the bid, so lets just return # Here we multiply each item by a weight. # These weights are arbitrary and probably not ideal. # There is much room for improvement on this scoring algorithm, but this algorithm # works for most light use cases. Feel free to contribute! | 2.38317 | 2 |
util/dataset.py | chunbolang/HPA | 3 | 6631404 | import os
import os.path
import cv2
import numpy as np
import copy
from torch.utils.data import Dataset
import torch.nn.functional as F
import torch
import random
import time
from tqdm import tqdm
from .get_weak_anns import transform_anns
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
filename_lower = filename.lower()
return any(filename_lower.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(split=0, data_root=None, data_list=None, sub_list=None, filter_intersection=False):
assert split in [0, 1, 2, 3]
if not os.path.isfile(data_list):
raise (RuntimeError("Image list file do not exist: " + data_list + "\n"))
# Shaban uses these lines to remove small objects:
# if util.change_coordinates(mask, 32.0, 0.0).sum() > 2:
# filtered_item.append(item)
# which means the mask will be downsampled to 1/32 of the original size and the valid area should be larger than 2,
# therefore the area in original size should be accordingly larger than 2 * 32 * 32
image_label_list = []
list_read = open(data_list).readlines()
print("Processing data...".format(sub_list))
sub_class_file_list = {}
for sub_c in sub_list:
sub_class_file_list[sub_c] = []
for l_idx in tqdm(range(len(list_read))):
line = list_read[l_idx]
line = line.strip()
line_split = line.split(' ')
image_name = os.path.join(data_root, line_split[0])
label_name = os.path.join(data_root, line_split[1])
item = (image_name, label_name)
label = cv2.imread(label_name, cv2.IMREAD_GRAYSCALE)
label_class = np.unique(label).tolist()
if 0 in label_class:
label_class.remove(0)
if 255 in label_class:
label_class.remove(255)
new_label_class = []
if filter_intersection:
if set(label_class).issubset(set(sub_list)):
for c in label_class:
if c in sub_list:
tmp_label = np.zeros_like(label)
target_pix = np.where(label == c)
tmp_label[target_pix[0],target_pix[1]] = 1
if tmp_label.sum() >= 2 * 32 * 32:
new_label_class.append(c)
else:
for c in label_class:
if c in sub_list:
tmp_label = np.zeros_like(label)
target_pix = np.where(label == c)
tmp_label[target_pix[0],target_pix[1]] = 1
if tmp_label.sum() >= 2 * 32 * 32:
new_label_class.append(c)
label_class = new_label_class
if len(label_class) > 0:
image_label_list.append(item)
for c in label_class:
if c in sub_list:
sub_class_file_list[c].append(item)
print("Checking image&label pair {} list done! ".format(split))
return image_label_list, sub_class_file_list
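# Note added for clarity (an assumption inferred from the parsing above): each line of
# the list file is expected to hold two space-separated paths relative to data_root, e.g.
#   JPEGImages/2007_000033.jpg SegmentationClassAug/2007_000033.png
# and a class is kept for an image only if its mask covers at least
# 2 * 32 * 32 = 2048 pixels at the original resolution.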
class SemData(Dataset):
def __init__(self, split=3, shot=1, data_root=None, data_list=None, data_set=None, use_split_coco=False, \
transform=None, mode='train', ann_type='mask', \
ft_transform=None, ft_aug_size=None, \
ms_transform=None):
assert mode in ['train', 'val', 'demo']
assert data_set in ['pascal', 'coco']
self.mode = mode
self.split = split
self.shot = shot
self.data_root = data_root
self.ann_type = ann_type
if data_set == 'pascal':
self.class_list = list(range(1, 21)) # [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
if self.split == 3:
self.sub_list = list(range(1, 16)) # [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
self.sub_val_list = list(range(16, 21)) # [16,17,18,19,20]
elif self.split == 2:
self.sub_list = list(range(1, 11)) + list(range(16, 21)) # [1,2,3,4,5,6,7,8,9,10,16,17,18,19,20]
self.sub_val_list = list(range(11, 16)) # [11,12,13,14,15]
elif self.split == 1:
self.sub_list = list(range(1, 6)) + list(range(11, 21)) # [1,2,3,4,5,11,12,13,14,15,16,17,18,19,20]
self.sub_val_list = list(range(6, 11)) # [6,7,8,9,10]
elif self.split == 0:
self.sub_list = list(range(6, 21)) # [6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
self.sub_val_list = list(range(1, 6)) # [1,2,3,4,5]
elif data_set == 'coco':
if use_split_coco:
print('INFO: using SPLIT COCO (FWB)')
self.class_list = list(range(1, 81))
if self.split == 3:
self.sub_val_list = list(range(4, 81, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
elif self.split == 2:
self.sub_val_list = list(range(3, 80, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
elif self.split == 1:
self.sub_val_list = list(range(2, 79, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
elif self.split == 0:
self.sub_val_list = list(range(1, 78, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
else:
print('INFO: using COCO (PANet)')
self.class_list = list(range(1, 81))
if self.split == 3:
self.sub_list = list(range(1, 61))
self.sub_val_list = list(range(61, 81))
elif self.split == 2:
self.sub_list = list(range(1, 41)) + list(range(61, 81))
self.sub_val_list = list(range(41, 61))
elif self.split == 1:
self.sub_list = list(range(1, 21)) + list(range(41, 81))
self.sub_val_list = list(range(21, 41))
elif self.split == 0:
self.sub_list = list(range(21, 81))
self.sub_val_list = list(range(1, 21))
print('sub_list: ', self.sub_list)
print('sub_val_list: ', self.sub_val_list)
# if self.mode == 'train':
# self.data_list, self.sub_class_file_list = make_dataset(split, data_root, data_list, self.sub_list, True)
# assert len(self.sub_class_file_list.keys()) == len(self.sub_list)
# elif self.mode == 'val' or self.mode == 'demo':
# self.data_list, self.sub_class_file_list = make_dataset(split, data_root, data_list, self.sub_val_list, False)
# assert len(self.sub_class_file_list.keys()) == len(self.sub_val_list)
fss_list_root = './lists/{}/fss_list/{}/'.format(data_set, self.mode)
fss_data_list_path = fss_list_root + 'data_list_{}.txt'.format(split)
fss_sub_class_file_list_path = fss_list_root + 'sub_class_file_list_{}.txt'.format(split)
# Write FSS Data
# with open(fss_data_list_path, 'w') as f:
# for item in self.data_list:
# img, label = item
# f.write(img + ' ')
# f.write(label + '\n')
# with open(fss_sub_class_file_list_path, 'w') as f:
# f.write(str(self.sub_class_file_list))
# Read FSS Data
with open(fss_data_list_path, 'r') as f:
f_str = f.readlines()
self.data_list = []
for line in f_str:
img, mask = line.split(' ')
self.data_list.append((img, mask.strip()))
with open(fss_sub_class_file_list_path, 'r') as f:
f_str = f.read()
self.sub_class_file_list = eval(f_str)
self.transform = transform
self.ft_transform = ft_transform
self.ft_aug_size = ft_aug_size
self.ms_transform_list = ms_transform
def __len__(self):
return len(self.data_list)
def __getitem__(self, index):
label_class = []
image_path, label_path = self.data_list[index]
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.float32(image)
label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
if image.shape[0] != label.shape[0] or image.shape[1] != label.shape[1]:
raise (RuntimeError("Query Image & label shape mismatch: " + image_path + " " + label_path + "\n"))
label_class = np.unique(label).tolist()
if 0 in label_class:
label_class.remove(0)
if 255 in label_class:
label_class.remove(255)
new_label_class = []
for c in label_class:
if c in self.sub_val_list:
if self.mode == 'val' or self.mode == 'demo':
new_label_class.append(c)
if c in self.sub_list:
if self.mode == 'train':
new_label_class.append(c)
label_class = new_label_class
assert len(label_class) > 0
class_chosen = label_class[random.randint(1,len(label_class))-1]
target_pix = np.where(label == class_chosen)
ignore_pix = np.where(label == 255)
label[:,:] = 0
if target_pix[0].shape[0] > 0:
label[target_pix[0],target_pix[1]] = 1
label[ignore_pix[0],ignore_pix[1]] = 255
file_class_chosen = self.sub_class_file_list[class_chosen]
num_file = len(file_class_chosen)
support_image_path_list = []
support_label_path_list = []
support_idx_list = []
for k in range(self.shot):
support_idx = random.randint(1,num_file)-1
support_image_path = image_path
support_label_path = label_path
while((support_image_path == image_path and support_label_path == label_path) or support_idx in support_idx_list):
support_idx = random.randint(1,num_file)-1
support_image_path, support_label_path = file_class_chosen[support_idx]
support_idx_list.append(support_idx)
support_image_path_list.append(support_image_path)
support_label_path_list.append(support_label_path)
support_image_list_ori = []
support_label_list_ori = []
support_label_list_ori_mask = []
subcls_list = []
for k in range(self.shot):
if self.mode == 'train':
subcls_list.append(self.sub_list.index(class_chosen))
else:
subcls_list.append(self.sub_val_list.index(class_chosen))
support_image_path = support_image_path_list[k]
support_label_path = support_label_path_list[k]
support_image = cv2.imread(support_image_path, cv2.IMREAD_COLOR)
support_image = cv2.cvtColor(support_image, cv2.COLOR_BGR2RGB)
support_image = np.float32(support_image)
support_label = cv2.imread(support_label_path, cv2.IMREAD_GRAYSCALE)
target_pix = np.where(support_label == class_chosen)
ignore_pix = np.where(support_label == 255)
support_label[:,:] = 0
support_label[target_pix[0],target_pix[1]] = 1
support_label, support_label_mask = transform_anns(support_label, self.ann_type)
support_label[ignore_pix[0],ignore_pix[1]] = 255
support_label_mask[ignore_pix[0],ignore_pix[1]] = 255
if support_image.shape[0] != support_label.shape[0] or support_image.shape[1] != support_label.shape[1]:
raise (RuntimeError("Support Image & label shape mismatch: " + support_image_path + " " + support_label_path + "\n"))
support_image_list_ori.append(support_image)
support_label_list_ori.append(support_label)
support_label_list_ori_mask.append(support_label_mask)
assert len(support_label_list_ori) == self.shot and len(support_image_list_ori) == self.shot
raw_image = image.copy()
raw_label = label.copy()
support_image_list = [[] for _ in range(self.shot)]
support_label_list = [[] for _ in range(self.shot)]
if self.transform is not None:
image, label = self.transform(image, label)
for k in range(self.shot):
support_image_list[k], support_label_list[k] = self.transform(support_image_list_ori[k], support_label_list_ori[k])
s_xs = support_image_list
s_ys = support_label_list
s_x = s_xs[0].unsqueeze(0)
for i in range(1, self.shot):
s_x = torch.cat([s_xs[i].unsqueeze(0), s_x], 0)
s_y = s_ys[0].unsqueeze(0)
for i in range(1, self.shot):
s_y = torch.cat([s_ys[i].unsqueeze(0), s_y], 0)
# Multi-Scale
if self.ms_transform_list is not None:
image_list = []
label_list = []
support_image_list = []
support_label_list = []
for ms_id in range(len(self.ms_transform_list)):
ms_transform_temp = self.ms_transform_list[ms_id]
scale_img, scale_label = ms_transform_temp(raw_image, raw_label)
scale_img_s, scale_label_s = ms_transform_temp(support_image_list_ori[0], support_label_list_ori[0])
s_x = scale_img_s.unsqueeze(0)
s_y = scale_label_s.unsqueeze(0)
for k in range(1, self.shot):
scale_img_s, scale_label_s = ms_transform_temp(support_image_list_ori[k], support_label_list_ori[k])
s_x = torch.cat([scale_img_s.unsqueeze(0), s_x], 0)
s_y = torch.cat([scale_label_s.unsqueeze(0), s_y], 0)
image_list.append(scale_img)
label_list.append(scale_label)
support_image_list.append(s_x)
support_label_list.append(s_y)
image = image_list
label = label_list
s_x = support_image_list
s_y = support_label_list
total_image_list = support_image_list_ori.copy()
total_image_list.append(raw_image)
# Return
if self.mode == 'train':
return image, label, s_x, s_y, subcls_list
elif self.mode == 'val':
return image, label, s_x, s_y, subcls_list, raw_label
elif self.mode == 'demo':
return image, label, s_x, s_y, subcls_list, total_image_list, support_label_list_ori, support_label_list_ori_mask, raw_label
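# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# The data_root and transform below are placeholder assumptions; in practice the
# accompanying train/val scripts build these and supply the fss_list files read above.
def _build_loader_example(train_transform):
    dataset = SemData(split=0, shot=1, data_root='./VOCdevkit/VOC2012',
                      data_set='pascal', mode='train', transform=train_transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
    # In 'train' mode each batch yields query image/label, support tensors, and class ids
    image, label, s_x, s_y, subcls_list = next(iter(loader))
    return image.shape, s_x.shape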
| import os
import os.path
import cv2
import numpy as np
import copy
from torch.utils.data import Dataset
import torch.nn.functional as F
import torch
import random
import time
from tqdm import tqdm
from .get_weak_anns import transform_anns
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
filename_lower = filename.lower()
return any(filename_lower.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(split=0, data_root=None, data_list=None, sub_list=None, filter_intersection=False):
assert split in [0, 1, 2, 3]
if not os.path.isfile(data_list):
raise (RuntimeError("Image list file do not exist: " + data_list + "\n"))
# Shaban uses these lines to remove small objects:
# if util.change_coordinates(mask, 32.0, 0.0).sum() > 2:
# filtered_item.append(item)
# which means the mask will be downsampled to 1/32 of the original size and the valid area should be larger than 2,
# therefore the area in original size should be accordingly larger than 2 * 32 * 32
image_label_list = []
list_read = open(data_list).readlines()
print("Processing data...".format(sub_list))
sub_class_file_list = {}
for sub_c in sub_list:
sub_class_file_list[sub_c] = []
for l_idx in tqdm(range(len(list_read))):
line = list_read[l_idx]
line = line.strip()
line_split = line.split(' ')
image_name = os.path.join(data_root, line_split[0])
label_name = os.path.join(data_root, line_split[1])
item = (image_name, label_name)
label = cv2.imread(label_name, cv2.IMREAD_GRAYSCALE)
label_class = np.unique(label).tolist()
if 0 in label_class:
label_class.remove(0)
if 255 in label_class:
label_class.remove(255)
new_label_class = []
if filter_intersection:
if set(label_class).issubset(set(sub_list)):
for c in label_class:
if c in sub_list:
tmp_label = np.zeros_like(label)
target_pix = np.where(label == c)
tmp_label[target_pix[0],target_pix[1]] = 1
if tmp_label.sum() >= 2 * 32 * 32:
new_label_class.append(c)
else:
for c in label_class:
if c in sub_list:
tmp_label = np.zeros_like(label)
target_pix = np.where(label == c)
tmp_label[target_pix[0],target_pix[1]] = 1
if tmp_label.sum() >= 2 * 32 * 32:
new_label_class.append(c)
label_class = new_label_class
if len(label_class) > 0:
image_label_list.append(item)
for c in label_class:
if c in sub_list:
sub_class_file_list[c].append(item)
print("Checking image&label pair {} list done! ".format(split))
return image_label_list, sub_class_file_list
class SemData(Dataset):
def __init__(self, split=3, shot=1, data_root=None, data_list=None, data_set=None, use_split_coco=False, \
transform=None, mode='train', ann_type='mask', \
ft_transform=None, ft_aug_size=None, \
ms_transform=None):
assert mode in ['train', 'val', 'demo']
assert data_set in ['pascal', 'coco']
self.mode = mode
self.split = split
self.shot = shot
self.data_root = data_root
self.ann_type = ann_type
if data_set == 'pascal':
self.class_list = list(range(1, 21)) # [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
if self.split == 3:
self.sub_list = list(range(1, 16)) # [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
self.sub_val_list = list(range(16, 21)) # [16,17,18,19,20]
elif self.split == 2:
self.sub_list = list(range(1, 11)) + list(range(16, 21)) # [1,2,3,4,5,6,7,8,9,10,16,17,18,19,20]
self.sub_val_list = list(range(11, 16)) # [11,12,13,14,15]
elif self.split == 1:
self.sub_list = list(range(1, 6)) + list(range(11, 21)) # [1,2,3,4,5,11,12,13,14,15,16,17,18,19,20]
self.sub_val_list = list(range(6, 11)) # [6,7,8,9,10]
elif self.split == 0:
self.sub_list = list(range(6, 21)) # [6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
self.sub_val_list = list(range(1, 6)) # [1,2,3,4,5]
elif data_set == 'coco':
if use_split_coco:
print('INFO: using SPLIT COCO (FWB)')
self.class_list = list(range(1, 81))
if self.split == 3:
self.sub_val_list = list(range(4, 81, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
elif self.split == 2:
self.sub_val_list = list(range(3, 80, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
elif self.split == 1:
self.sub_val_list = list(range(2, 79, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
elif self.split == 0:
self.sub_val_list = list(range(1, 78, 4))
self.sub_list = list(set(self.class_list) - set(self.sub_val_list))
else:
print('INFO: using COCO (PANet)')
self.class_list = list(range(1, 81))
if self.split == 3:
self.sub_list = list(range(1, 61))
self.sub_val_list = list(range(61, 81))
elif self.split == 2:
self.sub_list = list(range(1, 41)) + list(range(61, 81))
self.sub_val_list = list(range(41, 61))
elif self.split == 1:
self.sub_list = list(range(1, 21)) + list(range(41, 81))
self.sub_val_list = list(range(21, 41))
elif self.split == 0:
self.sub_list = list(range(21, 81))
self.sub_val_list = list(range(1, 21))
print('sub_list: ', self.sub_list)
print('sub_val_list: ', self.sub_val_list)
# if self.mode == 'train':
# self.data_list, self.sub_class_file_list = make_dataset(split, data_root, data_list, self.sub_list, True)
# assert len(self.sub_class_file_list.keys()) == len(self.sub_list)
# elif self.mode == 'val' or self.mode == 'demo':
# self.data_list, self.sub_class_file_list = make_dataset(split, data_root, data_list, self.sub_val_list, False)
# assert len(self.sub_class_file_list.keys()) == len(self.sub_val_list)
fss_list_root = './lists/{}/fss_list/{}/'.format(data_set, self.mode)
fss_data_list_path = fss_list_root + 'data_list_{}.txt'.format(split)
fss_sub_class_file_list_path = fss_list_root + 'sub_class_file_list_{}.txt'.format(split)
# Write FSS Data
# with open(fss_data_list_path, 'w') as f:
# for item in self.data_list:
# img, label = item
# f.write(img + ' ')
# f.write(label + '\n')
# with open(fss_sub_class_file_list_path, 'w') as f:
# f.write(str(self.sub_class_file_list))
# Read FSS Data
with open(fss_data_list_path, 'r') as f:
f_str = f.readlines()
self.data_list = []
for line in f_str:
img, mask = line.split(' ')
self.data_list.append((img, mask.strip()))
with open(fss_sub_class_file_list_path, 'r') as f:
f_str = f.read()
self.sub_class_file_list = eval(f_str)
self.transform = transform
self.ft_transform = ft_transform
self.ft_aug_size = ft_aug_size
self.ms_transform_list = ms_transform
def __len__(self):
return len(self.data_list)
def __getitem__(self, index):
label_class = []
image_path, label_path = self.data_list[index]
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.float32(image)
label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
if image.shape[0] != label.shape[0] or image.shape[1] != label.shape[1]:
raise (RuntimeError("Query Image & label shape mismatch: " + image_path + " " + label_path + "\n"))
label_class = np.unique(label).tolist()
if 0 in label_class:
label_class.remove(0)
if 255 in label_class:
label_class.remove(255)
new_label_class = []
for c in label_class:
if c in self.sub_val_list:
if self.mode == 'val' or self.mode == 'demo':
new_label_class.append(c)
if c in self.sub_list:
if self.mode == 'train':
new_label_class.append(c)
label_class = new_label_class
assert len(label_class) > 0
class_chosen = label_class[random.randint(1,len(label_class))-1]
target_pix = np.where(label == class_chosen)
ignore_pix = np.where(label == 255)
label[:,:] = 0
if target_pix[0].shape[0] > 0:
label[target_pix[0],target_pix[1]] = 1
label[ignore_pix[0],ignore_pix[1]] = 255
file_class_chosen = self.sub_class_file_list[class_chosen]
num_file = len(file_class_chosen)
support_image_path_list = []
support_label_path_list = []
support_idx_list = []
for k in range(self.shot):
support_idx = random.randint(1,num_file)-1
support_image_path = image_path
support_label_path = label_path
while((support_image_path == image_path and support_label_path == label_path) or support_idx in support_idx_list):
support_idx = random.randint(1,num_file)-1
support_image_path, support_label_path = file_class_chosen[support_idx]
support_idx_list.append(support_idx)
support_image_path_list.append(support_image_path)
support_label_path_list.append(support_label_path)
support_image_list_ori = []
support_label_list_ori = []
support_label_list_ori_mask = []
subcls_list = []
for k in range(self.shot):
if self.mode == 'train':
subcls_list.append(self.sub_list.index(class_chosen))
else:
subcls_list.append(self.sub_val_list.index(class_chosen))
support_image_path = support_image_path_list[k]
support_label_path = support_label_path_list[k]
support_image = cv2.imread(support_image_path, cv2.IMREAD_COLOR)
support_image = cv2.cvtColor(support_image, cv2.COLOR_BGR2RGB)
support_image = np.float32(support_image)
support_label = cv2.imread(support_label_path, cv2.IMREAD_GRAYSCALE)
target_pix = np.where(support_label == class_chosen)
ignore_pix = np.where(support_label == 255)
support_label[:,:] = 0
support_label[target_pix[0],target_pix[1]] = 1
support_label, support_label_mask = transform_anns(support_label, self.ann_type)
support_label[ignore_pix[0],ignore_pix[1]] = 255
support_label_mask[ignore_pix[0],ignore_pix[1]] = 255
if support_image.shape[0] != support_label.shape[0] or support_image.shape[1] != support_label.shape[1]:
raise (RuntimeError("Support Image & label shape mismatch: " + support_image_path + " " + support_label_path + "\n"))
support_image_list_ori.append(support_image)
support_label_list_ori.append(support_label)
support_label_list_ori_mask.append(support_label_mask)
assert len(support_label_list_ori) == self.shot and len(support_image_list_ori) == self.shot
raw_image = image.copy()
raw_label = label.copy()
support_image_list = [[] for _ in range(self.shot)]
support_label_list = [[] for _ in range(self.shot)]
if self.transform is not None:
image, label = self.transform(image, label)
for k in range(self.shot):
support_image_list[k], support_label_list[k] = self.transform(support_image_list_ori[k], support_label_list_ori[k])
s_xs = support_image_list
s_ys = support_label_list
s_x = s_xs[0].unsqueeze(0)
for i in range(1, self.shot):
s_x = torch.cat([s_xs[i].unsqueeze(0), s_x], 0)
s_y = s_ys[0].unsqueeze(0)
for i in range(1, self.shot):
s_y = torch.cat([s_ys[i].unsqueeze(0), s_y], 0)
# Multi-Scale
if self.ms_transform_list is not None:
image_list = []
label_list = []
support_image_list = []
support_label_list = []
for ms_id in range(len(self.ms_transform_list)):
ms_transform_temp = self.ms_transform_list[ms_id]
scale_img, scale_label = ms_transform_temp(raw_image, raw_label)
scale_img_s, scale_label_s = ms_transform_temp(support_image_list_ori[0], support_label_list_ori[0])
s_x = scale_img_s.unsqueeze(0)
s_y = scale_label_s.unsqueeze(0)
for k in range(1, self.shot):
scale_img_s, scale_label_s = ms_transform_temp(support_image_list_ori[k], support_label_list_ori[k])
s_x = torch.cat([scale_img_s.unsqueeze(0), s_x], 0)
s_y = torch.cat([scale_label_s.unsqueeze(0), s_y], 0)
image_list.append(scale_img)
label_list.append(scale_label)
support_image_list.append(s_x)
support_label_list.append(s_y)
image = image_list
label = label_list
s_x = support_image_list
s_y = support_label_list
total_image_list = support_image_list_ori.copy()
total_image_list.append(raw_image)
# Return
if self.mode == 'train':
return image, label, s_x, s_y, subcls_list
elif self.mode == 'val':
return image, label, s_x, s_y, subcls_list, raw_label
elif self.mode == 'demo':
return image, label, s_x, s_y, subcls_list, total_image_list, support_label_list_ori, support_label_list_ori_mask, raw_label
| en | 0.523007 | # Shaban uses these lines to remove small objects: # if util.change_coordinates(mask, 32.0, 0.0).sum() > 2: # filtered_item.append(item) # which means the mask will be downsampled to 1/32 of the original size and the valid area should be larger than 2, # therefore the area in original size should be accordingly larger than 2 * 32 * 32 # [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20] # [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] # [16,17,18,19,20] # [1,2,3,4,5,6,7,8,9,10,16,17,18,19,20] # [11,12,13,14,15] # [1,2,3,4,5,11,12,13,14,15,16,17,18,19,20] # [6,7,8,9,10] # [6,7,8,9,10,11,12,13,14,15,16,17,18,19,20] # [1,2,3,4,5] # if self.mode == 'train': # self.data_list, self.sub_class_file_list = make_dataset(split, data_root, data_list, self.sub_list, True) # assert len(self.sub_class_file_list.keys()) == len(self.sub_list) # elif self.mode == 'val' or self.mode == 'demo': # self.data_list, self.sub_class_file_list = make_dataset(split, data_root, data_list, self.sub_val_list, False) # assert len(self.sub_class_file_list.keys()) == len(self.sub_val_list) # Write FSS Data # with open(fss_data_list_path, 'w') as f: # for item in self.data_list: # img, label = item # f.write(img + ' ') # f.write(label + '\n') # with open(fss_sub_class_file_list_path, 'w') as f: # f.write(str(self.sub_class_file_list)) # Read FSS Data # Multi-Scale # Return | 2.70684 | 3 |
setup_competition.py | JakeRoggenbuck/server-public | 1 | 6631405 | #!/usr/bin/env python3
# Copyright (c) 2019 FRC Team 1678: Citrus Circuits
"""Sets up the MongoDB document for a competition, should be run before every competition."""
# External imports
import re
from pymongo import MongoClient
# Internal imports
import cloud_database_communicator
import local_database_communicator
import utils
utils.log_info('Competition setup started')
# Makes connection with local database through port 27017, the default listening port of MongoDB
DB = MongoClient('localhost', 27017).scouting_system
COMPETITION_KEY = input('Input the competition code from TBA: ')
# Use a regular expression to determine if competition code is in the correct format
# First capture group: Matches 4 digits
# Second capture group: Matches 1 or more letters
CODE_MATCH = re.fullmatch(r'(?P<year>[0-9]{4})(?P<comp_code>.+)', COMPETITION_KEY)
if CODE_MATCH is None:
raise ValueError('Competition code is not in the correct format')
# Creates the competition.txt file
# Also writes the competition code to it so it can be used in other scripts
utils.save_event_key(COMPETITION_KEY)
# Checks that the competition inputted by the user is not already in the database
if len(list(DB.competitions.find({'tba_event_key': COMPETITION_KEY}))) != 0:
raise Exception(f'The competition {COMPETITION_KEY} already exists in the database.')
# Inserts document into collection
local_database_communicator.add_competition(local_database_communicator.DB, COMPETITION_KEY)
cloud_database_communicator.add_competition_cloud(COMPETITION_KEY)
utils.log_info('Competition setup finished')
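# Illustrative check of the competition-code format (added for clarity; not part of the
# original script). '2019caln' is a hypothetical TBA-style event key:
#   re.fullmatch(r'(?P<year>[0-9]{4})(?P<comp_code>.+)', '2019caln')
#     -> match with year='2019', comp_code='caln'
#   re.fullmatch(r'(?P<year>[0-9]{4})(?P<comp_code>.+)', 'caln')
#     -> None, so the ValueError above would be raised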
| #!/usr/bin/env python3
# Copyright (c) 2019 FRC Team 1678: Citrus Circuits
"""Sets up the MongoDB document for a competition, should be run before every competition."""
# External imports
import re
from pymongo import MongoClient
# Internal imports
import cloud_database_communicator
import local_database_communicator
import utils
utils.log_info('Competition setup started')
# Makes connection with local database through port 27017, the default listening port of MongoDB
DB = MongoClient('localhost', 27017).scouting_system
COMPETITION_KEY = input('Input the competition code from TBA: ')
# Use a regular expression to determine if competition code is in the correct format
# First capture group: Matches 4 digits
# Second capture group: Matches 1 or more letters
CODE_MATCH = re.fullmatch(r'(?P<year>[0-9]{4})(?P<comp_code>.+)', COMPETITION_KEY)
if CODE_MATCH is None:
raise ValueError('Competition code is not in the correct format')
# Creates the competition.txt file
# Also writes the competition code to it so it can be used in other scripts
utils.save_event_key(COMPETITION_KEY)
# Checks that the competition inputted by the user is not already in the database
if len(list(DB.competitions.find({'tba_event_key': COMPETITION_KEY}))) != 0:
raise Exception(f'The competition {COMPETITION_KEY} already exists in the database.')
# Inserts document into collection
local_database_communicator.add_competition(local_database_communicator.DB, COMPETITION_KEY)
cloud_database_communicator.add_competition_cloud(COMPETITION_KEY)
utils.log_info('Competition setup finished')
| en | 0.818235 | #!/usr/bin/env python3 # Copyright (c) 2019 FRC Team 1678: Citrus Circuits Sets up the MongoDB document for a competition, should be run before every competition. # External imports # Internal imports # Makes connection with local database through port 27017, the default listening port of MongoDB # Use a regular expression to determine if competition code is in the correct format # First capture group: Matches 4 digits # Second capture group: Matches 1 or more letters # Creates the competition.txt file # Also writes the competition code to it so it can be used in other scripts # Checks that the competition inputted by the user is not already in the database # Inserts document into collection | 3.044832 | 3 |
helpmeplease/helpme.py | Max1993Liu/ask_for_help | 0 | 6631406 | import smtplib
import ssl
import json
import os
import socket
import functools
from pathlib import Path
from email.message import EmailMessage
from .trackerror import get_code
__all__ = ['ask_for_help', 'show_recipients', 'add_recipient', 'reset_my_email', 'init_setting']
_CONFIG_PATH = Path(__file__).absolute().parent / 'config.json'
def get_config():
with open(_CONFIG_PATH, 'rb') as f:
return json.load(f)
def write_config(config):
with open(_CONFIG_PATH, 'w') as f:
return json.dump(config, f)
def add_recipient(name, email):
    config = get_config()
if name in config['GOOD_PEOPLE']:
raise ValueError('{} is already in your recipient list.'.format(name))
config['GOOD_PEOPLE'][name] = email
write_config(config)
def show_recipients():
return get_config()['GOOD_PEOPLE']
def reset_my_email(email, password, host=''):
config = get_config()
config['MY_EMAIL'] = email
config['MY_PASSWORD'] = password
config['HOST'] = host or socket.getfqdn()
write_config(config)
def init_setting():
config = get_config()
if config['MY_EMAIL'] == '<EMAIL>':
addr = input('Enter your email address:')
pwd = input('Enter your email password:')
config['MY_EMAIL'] = addr
config['MY_PASSWORD'] = pwd
write_config(config)
def send_email(msg, address, use_ssl=False):
""" Send msg """
MY_EMAIL, MY_PASSWORD = get_config()['MY_EMAIL'], get_config()['MY_PASSWORD']
    MY_HOST = get_config()['HOST']
if use_ssl:
context = ssl.create_default_context()
with smtplib.SMTP_SSL(MY_HOST, port=465, context=context) as server:
server.login(MY_EMAIL, MY_PASSWORD)
server.send_message(msg, MY_EMAIL, [address])
server.close()
else:
with smtplib.SMTP(MY_HOST, port=587) as server:
server.starttls()
server.login(MY_EMAIL, MY_PASSWORD)
server.send_message(msg, MY_EMAIL, [address])
server.close()
def create_message(code, ex_msg, address):
""" Create an error report"""
msg = EmailMessage()
content = 'Error Message:\n' + ex_msg + '\n\nSource Code:\n' + code
msg.set_content(content.replace('\t', ' '*4)) # replace tab with spaces for better formatting
MY_EMAIL = get_config()['MY_EMAIL']
msg['Subject'] = '{} needs your help!'.format(MY_EMAIL.split('@')[0])
msg['From'] = MY_EMAIL
msg['To'] = address
return msg
class ask_for_help:
def __init__(self, who=None):
init_setting()
recipients = get_config()['GOOD_PEOPLE']
available = list(recipients.keys())
if who and who not in available:
raise ValueError('Please add {} to the recipients list using add_recipient.'.format(who))
if who is None:
who = available[0]
self.who = who
self.address = recipients[who]
def __call__(self, f):
f_name = f.__name__
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
# generate an error report
source_code = get_code(f)
ex_msg = str(e)
error_report = create_message(source_code, ex_msg, self.address)
send_email(error_report, self.address)
print('{} will help you!'.format(self.who))
return wrapped
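# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Assumes the sender e-mail has been configured and that a recipient named 'alice'
# was registered beforehand with add_recipient('alice', 'alice@example.com').
@ask_for_help(who='alice')
def _risky_division_example(a, b):
    return a / b
# Calling _risky_division_example(1, 0) triggers the except branch in the wrapper:
# the function's source and the error message are e-mailed to 'alice' and a
# confirmation line is printed instead of the exception propagating.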
| import smtplib
import ssl
import json
import os
import socket
import functools
from pathlib import Path
from email.message import EmailMessage
from .trackerror import get_code
__all__ = ['ask_for_help', 'show_recipients', 'add_recipient', 'reset_my_email', 'init_setting']
_CONFIG_PATH = Path(__file__).absolute().parent / 'config.json'
def get_config():
with open(_CONFIG_PATH, 'rb') as f:
return json.load(f)
def write_config(config):
with open(_CONFIG_PATH, 'w') as f:
return json.dump(config, f)
def add_recipient(name, email):
    config = get_config()
if name in config['GOOD_PEOPLE']:
raise ValueError('{} is already in your recipient list.'.format(name))
config['GOOD_PEOPLE'][name] = email
write_config(config)
def show_recipients():
return get_config()['GOOD_PEOPLE']
def reset_my_email(email, password, host=''):
config = get_config()
config['MY_EMAIL'] = email
config['MY_PASSWORD'] = password
config['HOST'] = host or socket.getfqdn()
write_config(config)
def init_setting():
config = get_config()
if config['MY_EMAIL'] == '<EMAIL>':
addr = input('Enter your email address:')
pwd = input('Enter your email password:')
config['MY_EMAIL'] = addr
config['MY_PASSWORD'] = pwd
write_config(config)
def send_email(msg, address, use_ssl=False):
""" Send msg """
MY_EMAIL, MY_PASSWORD = get_config()['MY_EMAIL'], get_config()['MY_PASSWORD']
    MY_HOST = get_config()['HOST']
if use_ssl:
context = ssl.create_default_context()
with smtplib.SMTP_SSL(MY_HOST, port=465, context=context) as server:
server.login(MY_EMAIL, MY_PASSWORD)
server.send_message(msg, MY_EMAIL, [address])
server.close()
else:
with smtplib.SMTP(MY_HOST, port=587) as server:
server.starttls()
server.login(MY_EMAIL, MY_PASSWORD)
server.send_message(msg, MY_EMAIL, [address])
server.close()
def create_message(code, ex_msg, address):
""" Create an error report"""
msg = EmailMessage()
content = 'Error Message:\n' + ex_msg + '\n\nSource Code:\n' + code
msg.set_content(content.replace('\t', ' '*4)) # replace tab with spaces for better formatting
MY_EMAIL = get_config()['MY_EMAIL']
msg['Subject'] = '{} needs your help!'.format(MY_EMAIL.split('@')[0])
msg['From'] = MY_EMAIL
msg['To'] = address
return msg
class ask_for_help:
def __init__(self, who=None):
init_setting()
recipients = get_config()['GOOD_PEOPLE']
available = list(recipients.keys())
if who and who not in available:
raise ValueError('Please add {} to the recipients list using add_recipient.'.format(who))
if who is None:
who = available[0]
self.who = who
self.address = recipients[who]
def __call__(self, f):
f_name = f.__name__
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
# generate an error report
source_code = get_code(f)
ex_msg = str(e)
error_report = create_message(source_code, ex_msg, self.address)
send_email(error_report, self.address)
print('{} will help you!'.format(self.who))
return wrapped
| en | 0.486614 | Send msg Create an error report # replace tab with spaces for better formatting # generate an error report | 2.700957 | 3 |
examples/rec_sys.py | getumen/oml | 1 | 6631407 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from matplotlib import pyplot as plt
from oml.datasouces.iterator import DictIterator
from oml.models.fm import FM, PoissonFM
from oml.models.regularizers import L2Sq
from oml.optimizers.sgd import Fobos
data = np.loadtxt('./ml-latest-small/ratings.csv', skiprows=1, delimiter=',')
np.random.shuffle(data)
data = data[:, :3].astype(int)
x = []
t = []
for line in data:
x.append({'u_{}'.format(line[0]): 1, 'i_{}'.format(line[1]): 1})
t.append(line[2])
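# Note added for clarity: each rating row becomes a sparse one-hot feature dict for the
# factorization machine, e.g. a user with id 1 rating item 31 with score 2.5 yields
#   x: {'u_1': 1, 'i_31': 1}    t: 2   (ratings were truncated to int above)
# The concrete ids are illustrative and depend on the MovieLens file contents.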
train_iter = DictIterator(x=x[:data.shape[0] // 5 * 4], t=t[:data.shape[0] // 5 * 4], batch_size=100)
test_iter = DictIterator(x=x[data.shape[0] // 5 * 4:], t=t[data.shape[0] // 5 * 4:], batch_size=1000)
results = {}
out = 'fm_out'
def opt_test(optimizer, label):
try:
os.mkdir(out)
except FileExistsError:
pass
if not os.path.isfile('./{}/{}_{}.csv'.format(out, label, 'rmse')):
print(label)
optimizer.optimize(train_iter, test_iter, show_evaluation=True, show_loss=True, epoch=5)
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'loss'), optimizer.loss, delimiter=',')
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'rmse'), optimizer.evaluation, delimiter=',')
results[label] = {
'loss': optimizer.loss,
'rmse': optimizer.evaluation
}
opt_test(Fobos(FM(reg=L2Sq())), 'Fobos')
def plot():
for i, title in enumerate(['loss', 'rmse']):
plt.subplot(1, 2, i + 1)
plt.title(title)
for method in results.keys():
r = np.loadtxt('./{}/{}_{}.csv'.format(out, method, title))
r = r[::max(len(r) // 100, 1)]
plt.plot(list(range(len(r))), r, label=method)
plt.legend()
plot()
results = {}
out = 'poisson_fm_out'
def opt_test(optimizer, label):
try:
os.mkdir(out)
except FileExistsError:
pass
if not os.path.isfile('./{}/{}_{}.csv'.format(out, label, 'loss')):
print(label)
optimizer.optimize(train_iter, test_iter, show_evaluation=True, epoch=5)
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'loss'), optimizer.loss, delimiter=',')
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'rmse'), optimizer.evaluation, delimiter=',')
results[label] = {
'loss': optimizer.loss,
'rmse': optimizer.evaluation
}
opt_test(Fobos(PoissonFM(reg=L2Sq())), 'Fobos')
def plot():
for i, title in enumerate(['loss', 'rmse']):
plt.subplot(1, 2, i + 1)
plt.title(title)
for method in results.keys():
r = np.loadtxt('./{}/{}_{}.csv'.format(out, method, title))
r = r[::max(len(r) // 100, 1)]
plt.plot(list(range(len(r))), r, label=method)
plt.legend()
plot()
plt.savefig('{}.png'.format('rec_sys'))
| from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from matplotlib import pyplot as plt
from oml.datasouces.iterator import DictIterator
from oml.models.fm import FM, PoissonFM
from oml.models.regularizers import L2Sq
from oml.optimizers.sgd import Fobos
data = np.loadtxt('./ml-latest-small/ratings.csv', skiprows=1, delimiter=',')
np.random.shuffle(data)
data = data[:, :3].astype(int)
x = []
t = []
for line in data:
x.append({'u_{}'.format(line[0]): 1, 'i_{}'.format(line[1]): 1})
t.append(line[2])
train_iter = DictIterator(x=x[:data.shape[0] // 5 * 4], t=t[:data.shape[0] // 5 * 4], batch_size=100)
test_iter = DictIterator(x=x[data.shape[0] // 5 * 4:], t=t[data.shape[0] // 5 * 4:], batch_size=1000)
results = {}
out = 'fm_out'
def opt_test(optimizer, label):
try:
os.mkdir(out)
except FileExistsError:
pass
if not os.path.isfile('./{}/{}_{}.csv'.format(out, label, 'rmse')):
print(label)
optimizer.optimize(train_iter, test_iter, show_evaluation=True, show_loss=True, epoch=5)
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'loss'), optimizer.loss, delimiter=',')
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'rmse'), optimizer.evaluation, delimiter=',')
results[label] = {
'loss': optimizer.loss,
'rmse': optimizer.evaluation
}
opt_test(Fobos(FM(reg=L2Sq())), 'Fobos')
def plot():
for i, title in enumerate(['loss', 'rmse']):
plt.subplot(1, 2, i + 1)
plt.title(title)
for method in results.keys():
r = np.loadtxt('./{}/{}_{}.csv'.format(out, method, title))
r = r[::max(len(r) // 100, 1)]
plt.plot(list(range(len(r))), r, label=method)
plt.legend()
plot()
results = {}
out = 'poisson_fm_out'
def opt_test(optimizer, label):
try:
os.mkdir(out)
except FileExistsError:
pass
if not os.path.isfile('./{}/{}_{}.csv'.format(out, label, 'loss')):
print(label)
optimizer.optimize(train_iter, test_iter, show_evaluation=True, epoch=5)
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'loss'), optimizer.loss, delimiter=',')
np.savetxt('./{}/{}_{}.csv'.format(out, label, 'rmse'), optimizer.evaluation, delimiter=',')
results[label] = {
'loss': optimizer.loss,
'rmse': optimizer.evaluation
}
opt_test(Fobos(PoissonFM(reg=L2Sq())), 'Fobos')
def plot():
for i, title in enumerate(['loss', 'rmse']):
plt.subplot(1, 2, i + 1)
plt.title(title)
for method in results.keys():
r = np.loadtxt('./{}/{}_{}.csv'.format(out, method, title))
r = r[::max(len(r) // 100, 1)]
plt.plot(list(range(len(r))), r, label=method)
plt.legend()
plot()
plt.savefig('{}.png'.format('rec_sys')) | none | 1 | 2.099518 | 2 |
|
Chapter 2/ch2_challenge3.py | MattSumrall/python-projects | 0 | 6631408 | <filename>Chapter 2/ch2_challenge3.py
# <NAME>
# ITEC 1250
# Chapter 2 Challenge 3
# Tipper Program
print("\nWhat is your bill total?")
total = float(input("\nEnter your food charge: "))
a = total * .15
b = total * .20
print("\n15% tip: $" + format(a, ",.2f"), "\n20% tip: $" + format(b, ",.2f"), sep = "\n")
input("\nPress enter key to continue")
| <filename>Chapter 2/ch2_challenge3.py
# <NAME>
# ITEC 1250
# Chapter 2 Challenge 3
# Tipper Program
print("\nWhat is your bill total?")
total = float(input("\nEnter your food charge: "))
a = total * .15
b = total * .20
print("\n15% tip: $" + format(a, ",.2f"), "\n20% tip: $" + format(b, ",.2f"), sep = "\n")
input("\nPress enter key to continue")
| en | 0.569313 | # <NAME> # ITEC 1250 # Chapter 2 Challenge 3 # Tipper Program | 3.921039 | 4 |
Platforms/Osu/main_osu.py | The-CJ/Phaazebot | 2 | 6631409 | <gh_stars>1-10
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from phaazebot import Phaazebot
import osu_irc
class PhaazebotOsu(osu_irc.Client):
def __init__(self, BASE:"Phaazebot", *args, **kwargs):
super().__init__(*args, **kwargs)
self.BASE:"Phaazebot" = BASE
def __bool__(self):
return self.BASE.IsReady.osu
async def onReady(self):
self.BASE.Logger.info("osu! connected")
self.BASE.IsReady.osu = True
async def onMessage(self, Message:osu_irc.Message):
pass
| from typing import TYPE_CHECKING
if TYPE_CHECKING:
from phaazebot import Phaazebot
import osu_irc
class PhaazebotOsu(osu_irc.Client):
def __init__(self, BASE:"Phaazebot", *args, **kwargs):
super().__init__(*args, **kwargs)
self.BASE:"Phaazebot" = BASE
def __bool__(self):
return self.BASE.IsReady.osu
async def onReady(self):
self.BASE.Logger.info("osu! connected")
self.BASE.IsReady.osu = True
async def onMessage(self, Message:osu_irc.Message):
pass | none | 1 | 2.836995 | 3 |
|
test/test_packet.py | beckjake/pyssh | 0 | 6631410 | <filename>test/test_packet.py
import unittest
import pytest
import io
from pyssh import packet
from pyssh.crypto import hashers, symmetric
from pyssh import compression
from builtins import int, bytes
class DummyCipher(object):
def __init__(self, block_size):
self.block_size = block_size
class Object(object):
pass
class TestPadding(unittest.TestCase):
"""Test padding messages to some length."""
def _some_eam_pad(self, num):
encryptor = DummyCipher(num)
hasher = Object()
hasher.ENCRYPT_FIRST = False
compressor = Object()
builder = packet.PacketBuilder(encryptor, hasher, compressor)
padded_length = len(builder.pad_packet(b'\x00', True))
assert padded_length % num == 0
# secondary goal
assert 4 <= (padded_length - 6) <= 4 + num
def _some_etm_pad(self, num):
encryptor = DummyCipher(num)
hasher = Object()
hasher.ENCRYPT_FIRST = True
compressor = Object()
builder = packet.PacketBuilder(encryptor, hasher, compressor)
padded_length = len(builder.pad_packet(b'\x00', False))
assert padded_length % num == 4
# secondary goal
assert 4 <= (padded_length - 6) <= 4 + num
def _some_pad(self, num):
self._some_etm_pad(num)
self._some_eam_pad(num)
def test_pad_8(self):
self._some_pad(8)
def test_pad_16(self):
self._some_pad(16)
def test_pad_12(self):
self._some_pad(12)
def test_pad_24(self):
self._some_pad(24)
def test_pad_32(self):
self._some_pad(32)
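# Worked example of the padding rule exercised above (added for clarity), assuming a
# 1-byte payload and an 8-byte block size; pad counts must lie between 4 and 4 + block:
#   encrypt-and-MAC: 4 (length) + 1 (pad-len) + 1 (payload) + pad must fill whole
#     blocks, so pad = 10 and the packet is 16 bytes (16 % 8 == 0).
#   encrypt-then-MAC: the 4-byte length field stays outside the cipher, so pad = 6
#     gives 12 bytes total, of which 8 are encrypted (12 % 8 == 4, as asserted).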
class ROT128Cipher(symmetric.BaseCipher):
NAME = 'rot128'
def process_block(self, data):
data = bytes(data)
ret = []
for byte in data:
val = (byte + 128) % 256
ret.append(bytes([val]))
return b''.join(ret)
# return b''.join((bytes[(c+128 % 256)] for c in bytes(data)))
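# Example of the rot-128 transform used in the tests below (added for clarity): each
# byte is shifted by 128 modulo 256, so b'\x00\x01\x02\x03' becomes b'\x80\x81\x82\x83',
# and applying the cipher twice restores the original bytes.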
class TestNoneBidi(unittest.TestCase):
def setUp(self):
encryptor = symmetric.NoneCipher(None, None, None)
hasher = hashers.NoneHasher()
compressor = compression.NoneCompressor()
decryptor = symmetric.NoneCipher(None, None, None)
validator = hashers.NoneHasher()
decompressor = compression.NoneCompressor()
self.builder = packet.PacketBuilder(encryptor, hasher, compressor)
self.packet_reader = packet.PacketReader(decryptor, validator, decompressor)
def test_create(self):
payload = b'\x00'
expect = b'\x00\x00\x00\x0C\x0A\x00'
built = self.builder.create_packet(payload)
assert built.startswith(expect)
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
def test_toolong(self):
payload = b'\x00'* (1024 * (2 ** 10))
with pytest.raises(ValueError):
self.builder.create_packet(payload)
class TestBidi(unittest.TestCase):
def setUp(self):
encryptor = ROT128Cipher()
hasher = hashers.MD5Hasher(b'\x00'*16)
compressor = compression.NoneCompressor()
self.builder = packet.PacketBuilder(encryptor, hasher, compressor)
decryptor = ROT128Cipher()
validator = hashers.MD5Hasher(b'\x00'*16)
decompressor = compression.NoneCompressor()
self.packet_reader = packet.PacketReader(decryptor, validator, decompressor)
# TODO: fix this test.
#@pytest.<EMAIL>.xfail
def test_create(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x80\x80\x80\x8C\x87'
built = self.builder.create_packet(payload)
assert built.startswith(expect)
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
def test_write(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x80\x80\x80\x8C\x87'
writer = io.BytesIO(b'')
self.builder.write_packet(writer, payload)
assert writer.getvalue().startswith(expect)
def test_read(self):
payload = b'\x00\x01\x02\x03'
built = b'\x80\x80\x80\x90\x8B\x80\x81\x82\x83\x82\x78\x13\xA9\xF4\x2A\xC4\x97\x6A\x8C\xE1\x4A\x99\xD7\xF1\xEA\x71\x91\x3B\x7E\xB2\xC8\xF1\x18\x93\xA8\x56'
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
class TestBidiETM(unittest.TestCase):
def setUp(self):
encryptor = ROT128Cipher()
hasher = hashers.MD5ETMHasher(b'\x00'*16)
compressor = compression.NoneCompressor()
self.builder = packet.PacketBuilder(encryptor, hasher, compressor)
decryptor = ROT128Cipher()
validator = hashers.MD5ETMHasher(b'\x00'*16)
decompressor = compression.NoneCompressor()
self.packet_reader = packet.PacketReader(decryptor, validator, decompressor)
def test_create(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x00\x00\x00\x10\x8B'
built = self.builder.create_packet(payload)
assert built.startswith(expect)
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
def test_write(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x00\x00\x00\x10\x8B'
writer = io.BytesIO(b'')
self.builder.write_packet(writer, payload)
assert writer.getvalue().startswith(expect)
def test_read(self):
payload = b'\x00\x01\x02\x03'
built = b'\x00\x00\x00\x10\x8B\x80\x81\x82\x83\x6C\x0B\x80\x55\x11\xD0\xF1\x89\x0C\x53\x31\x67\x82\xBA\x6D\x2A\x7E\x57\x8D\xEB\xAB\xD5\x70\x83\x9C\xC5\x67'
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
| <filename>test/test_packet.py
import unittest
import pytest
import io
from pyssh import packet
from pyssh.crypto import hashers, symmetric
from pyssh import compression
from builtins import int, bytes
class DummyCipher(object):
def __init__(self, block_size):
self.block_size = block_size
class Object(object):
pass
class TestPadding(unittest.TestCase):
"""Test padding messages to some length."""
def _some_eam_pad(self, num):
encryptor = DummyCipher(num)
hasher = Object()
hasher.ENCRYPT_FIRST = False
compressor = Object()
builder = packet.PacketBuilder(encryptor, hasher, compressor)
padded_length = len(builder.pad_packet(b'\x00', True))
assert padded_length % num == 0
# secondary goal
assert 4 <= (padded_length - 6) <= 4 + num
def _some_etm_pad(self, num):
encryptor = DummyCipher(num)
hasher = Object()
hasher.ENCRYPT_FIRST = True
compressor = Object()
builder = packet.PacketBuilder(encryptor, hasher, compressor)
padded_length = len(builder.pad_packet(b'\x00', False))
assert padded_length % num == 4
# secondary goal
assert 4 <= (padded_length - 6) <= 4 + num
def _some_pad(self, num):
self._some_etm_pad(num)
self._some_eam_pad(num)
def test_pad_8(self):
self._some_pad(8)
def test_pad_16(self):
self._some_pad(16)
def test_pad_12(self):
self._some_pad(12)
def test_pad_24(self):
self._some_pad(24)
def test_pad_32(self):
self._some_pad(32)
class ROT128Cipher(symmetric.BaseCipher):
NAME = 'rot128'
def process_block(self, data):
data = bytes(data)
ret = []
for byte in data:
val = (byte + 128) % 256
ret.append(bytes([val]))
return b''.join(ret)
# return b''.join((bytes[(c+128 % 256)] for c in bytes(data)))
class TestNoneBidi(unittest.TestCase):
def setUp(self):
encryptor = symmetric.NoneCipher(None, None, None)
hasher = hashers.NoneHasher()
compressor = compression.NoneCompressor()
decryptor = symmetric.NoneCipher(None, None, None)
validator = hashers.NoneHasher()
decompressor = compression.NoneCompressor()
self.builder = packet.PacketBuilder(encryptor, hasher, compressor)
self.packet_reader = packet.PacketReader(decryptor, validator, decompressor)
def test_create(self):
payload = b'\x00'
expect = b'\x00\x00\x00\x0C\x0A\x00'
built = self.builder.create_packet(payload)
assert built.startswith(expect)
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
def test_toolong(self):
payload = b'\x00'* (1024 * (2 ** 10))
with pytest.raises(ValueError):
self.builder.create_packet(payload)
class TestBidi(unittest.TestCase):
def setUp(self):
encryptor = ROT128Cipher()
hasher = hashers.MD5Hasher(b'\x00'*16)
compressor = compression.NoneCompressor()
self.builder = packet.PacketBuilder(encryptor, hasher, compressor)
decryptor = ROT128Cipher()
validator = hashers.MD5Hasher(b'\x00'*16)
decompressor = compression.NoneCompressor()
self.packet_reader = packet.PacketReader(decryptor, validator, decompressor)
# TODO: fix this test.
#@pytest.<EMAIL>.xfail
def test_create(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x80\x80\x80\x8C\x87'
built = self.builder.create_packet(payload)
assert built.startswith(expect)
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
def test_write(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x80\x80\x80\x8C\x87'
writer = io.BytesIO(b'')
self.builder.write_packet(writer, payload)
assert writer.getvalue().startswith(expect)
def test_read(self):
payload = b'\x00\x01\x02\x03'
built = b'\x80\x80\x80\x90\x8B\x80\x81\x82\x83\x82\x78\x13\xA9\xF4\x2A\xC4\x97\x6A\x8C\xE1\x4A\x99\xD7\xF1\xEA\x71\x91\x3B\x7E\xB2\xC8\xF1\x18\x93\xA8\x56'
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
class TestBidiETM(unittest.TestCase):
def setUp(self):
encryptor = ROT128Cipher()
hasher = hashers.MD5ETMHasher(b'\x00'*16)
compressor = compression.NoneCompressor()
self.builder = packet.PacketBuilder(encryptor, hasher, compressor)
decryptor = ROT128Cipher()
validator = hashers.MD5ETMHasher(b'\x00'*16)
decompressor = compression.NoneCompressor()
self.packet_reader = packet.PacketReader(decryptor, validator, decompressor)
def test_create(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x00\x00\x00\x10\x8B'
built = self.builder.create_packet(payload)
assert built.startswith(expect)
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
def test_write(self):
payload = b'\x00\x01\x02\x03'
expect = b'\x00\x00\x00\x10\x8B'
writer = io.BytesIO(b'')
self.builder.write_packet(writer, payload)
assert writer.getvalue().startswith(expect)
def test_read(self):
payload = b'\x00\x01\x02\x03'
built = b'\x00\x00\x00\x10\x8B\x80\x81\x82\x83\x6C\x0B\x80\x55\x11\xD0\xF1\x89\x0C\x53\x31\x67\x82\xBA\x6D\x2A\x7E\x57\x8D\xEB\xAB\xD5\x70\x83\x9C\xC5\x67'
reader = io.BytesIO(built)
assert self.packet_reader.read_packet(reader) == payload
| en | 0.506551 | Test padding messages to some length. # secondary goal # secondary goal # return b''.join((bytes[(c+128 % 256)] for c in bytes(data))) # TODO: fix this test. #@pytest.<EMAIL>.xfail | 2.903814 | 3 |
testscripts/RDKB/component/HAL_Ethsw/TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port.py | cablelabs/tools-tdkb | 0 | 6631411 | <reponame>cablelabs/tools-tdkb
##########################################################################
# Copyright 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>7</version>
<name>TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port</name>
<primitive_test_id/>
<primitive_test_name>ethsw_stub_hal_SetPortAdminStatus</primitive_test_name>
<primitive_test_version>2</primitive_test_version>
<status>FREE</status>
<synopsis>To validate Ethsw HAL API CcspHalEthSwSetPortAdminStatus() if it returns FAILURE when setting the port status to up for a disabled/disconnected port.</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_HAL_Ethsw_9</test_case_id>
<test_objective>To validate Ethsw HAL API CcspHalEthSwSetPortAdminStatus() if it returns FAILURE when setting the port status to up for a disabled/disconnected port.</test_objective>
<test_type>Negative</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state of DUT
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>CcspHalEthSwSetPortAdminStatus, CcspHalEthSwGetPortAdminStatus</api_or_interface_used>
<input_parameters>PortID, adminstatus</input_parameters>
<automation_approch>1. Load halethsw module.
2. From script invoke ethsw_stub_hal_SetPortAdminStatus().
3. Set the value of Admin port status
4. Validation of the result is done within the python script and send the result status to Test Manager.
5. Test Manager will publish the result in GUI as PASS/FAILURE based on the response from HAL_Ethsw stub.</automation_approch>
<except_output>API should return FAILURE.</except_output>
<priority>High</priority>
<test_stub_interface>HAL_Ethsw</test_stub_interface>
<test_script>TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
#LIbrary funtions
import tdklib;
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
#No CPE should be connected to testPort
testPort = 4;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("halethsw","RDKB");
obj.configureTestCase(ip,port,'TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port');
#Get the result of connection with test component and STB
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus;
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
#Script to load the configuration file of the component
tdkTestObj = obj.createTestStep("ethsw_stub_hal_Get_Port_Admin_Status");
tdkTestObj.addParameter("PortID",testPort);
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult and details:
currPortStatus = details;
print "TEST STEP 1: Retrieve the current Ethsw_Get_Port_Admin_Status";
print "EXPECTED RESULT 1: Should retrieve the Ethsw_Get_Port_Admin_Status successfully";
print "ACTUAL RESULT 1: Current port status is %s" %currPortStatus;
print "[TEST EXECUTION RESULT] : %s" %actualresult;
#if port status is disconnected then validate the test
if currPortStatus == "CCSP_HAL_ETHSW_AdminDown":
tdkTestObj = obj.createTestStep("ethsw_stub_hal_SetPortAdminStatus");
tdkTestObj.addParameter("PortID",testPort);
tdkTestObj.addParameter("adminstatus","CCSP_HAL_ETHSW_AdminUp");
expectedresult = "FAILURE";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
tdkTestObj = obj.createTestStep("ethsw_stub_hal_Get_Port_Admin_Status");
tdkTestObj.addParameter("PortID",testPort);
tdkTestObj.executeTestCase("SUCCESS");
portStatusAfterSet = tdkTestObj.getResultDetails();
if expectedresult in actualresult or portStatusAfterSet == currPortStatus:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Retrieve the EthSw_SetPortAdminStatus of a port - %d" %testPort;
print "EXPECTED RESULT 2: As the port is down, EthSw_SetPortAdminStatus should be failed";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult;
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Retrieve the EthSw_SetPortAdminStatus of a down port - %d" %testPort;
print "EXPECTED RESULT 2:As the port is down, EthSw_SetPortAdminStatus should be failed";
print "ACTUAL RESULT 2: %s" %details;
print "[TEST EXECUTION RESULT] : Failure";
else:
tdkTestObj.setResultStatus("FAILURE");
print "It seems port is connected to CPE, so test cannot be validated"
print "Please disconnect the port %d before validating the test" %testPort;
else:
print "TEST STEP 1: Retrieve the current Ethsw_Get_Port_Admin_Status";
print "EXPECTED RESULT 1: Should retrieve the Ethsw_Get_Port_Admin_Status successfully";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : %s" %actualresult;
obj.unloadModule("halethsw");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| ##########################################################################
# Copyright 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>7</version>
<name>TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port</name>
<primitive_test_id/>
<primitive_test_name>ethsw_stub_hal_SetPortAdminStatus</primitive_test_name>
<primitive_test_version>2</primitive_test_version>
<status>FREE</status>
<synopsis>To validate Ethsw HAL API CcspHalEthSwSetPortAdminStatus() if it returns FAILURE when setting the port status to up for a disabled/disconnected port.</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_HAL_Ethsw_9</test_case_id>
<test_objective>To validate Ethsw HAL API CcspHalEthSwSetPortAdminStatus() if it returns FAILURE when setting the port status to up for a disabled/disconnected port.</test_objective>
<test_type>Negative</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state of DUT
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>CcspHalEthSwSetPortAdminStatus, CcspHalEthSwGetPortAdminStatus</api_or_interface_used>
<input_parameters>PortID, adminstatus</input_parameters>
<automation_approch>1. Load halethsw module.
2. From script invoke ethsw_stub_hal_SetPortAdminStatus().
3. Set the value of Admin port status
4. Validation of the result is done within the python script and send the result status to Test Manager.
5. Test Manager will publish the result in GUI as PASS/FAILURE based on the response from HAL_Ethsw stub.</automation_approch>
<except_output>API should return FAILURE.</except_output>
<priority>High</priority>
<test_stub_interface>HAL_Ethsw</test_stub_interface>
<test_script>TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
#LIbrary funtions
import tdklib;
#IP and Port of box, No need to change,
#This will be replaced with correspoing Box Ip and port while executing script
ip = <ipaddress>
port = <port>
#No CPE should be connected to testPort
testPort = 4;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("halethsw","RDKB");
obj.configureTestCase(ip,port,'TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port');
#Get the result of connection with test component and STB
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus;
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
#Script to load the configuration file of the component
tdkTestObj = obj.createTestStep("ethsw_stub_hal_Get_Port_Admin_Status");
tdkTestObj.addParameter("PortID",testPort);
expectedresult = "SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult and details:
currPortStatus = details;
print "TEST STEP 1: Retrieve the current Ethsw_Get_Port_Admin_Status";
print "EXPECTED RESULT 1: Should retrieve the Ethsw_Get_Port_Admin_Status successfully";
print "ACTUAL RESULT 1: Current port status is %s" %currPortStatus;
print "[TEST EXECUTION RESULT] : %s" %actualresult;
#if port status is disconnected then validate the test
if currPortStatus == "CCSP_HAL_ETHSW_AdminDown":
tdkTestObj = obj.createTestStep("ethsw_stub_hal_SetPortAdminStatus");
tdkTestObj.addParameter("PortID",testPort);
tdkTestObj.addParameter("adminstatus","CCSP_HAL_ETHSW_AdminUp");
expectedresult = "FAILURE";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
tdkTestObj = obj.createTestStep("ethsw_stub_hal_Get_Port_Admin_Status");
tdkTestObj.addParameter("PortID",testPort);
tdkTestObj.executeTestCase("SUCCESS");
portStatusAfterSet = tdkTestObj.getResultDetails();
if expectedresult in actualresult or portStatusAfterSet == currPortStatus:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Retrieve the EthSw_SetPortAdminStatus of a port - %d" %testPort;
print "EXPECTED RESULT 2: As the port is down, EthSw_SetPortAdminStatus should be failed";
print "ACTUAL RESULT 2: %s" %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : %s" %actualresult;
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Retrieve the EthSw_SetPortAdminStatus of a down port - %d" %testPort;
print "EXPECTED RESULT 2:As the port is down, EthSw_SetPortAdminStatus should be failed";
print "ACTUAL RESULT 2: %s" %details;
print "[TEST EXECUTION RESULT] : Failure";
else:
tdkTestObj.setResultStatus("FAILURE");
print "It seems port is connected to CPE, so test cannot be validated"
print "Please disconnect the port %d before validating the test" %testPort;
else:
print "TEST STEP 1: Retrieve the current Ethsw_Get_Port_Admin_Status";
print "EXPECTED RESULT 1: Should retrieve the Ethsw_Get_Port_Admin_Status successfully";
print "ACTUAL RESULT 1: %s" %details;
print "[TEST EXECUTION RESULT] : %s" %actualresult;
obj.unloadModule("halethsw");
else:
print "Failed to load the module";
obj.setLoadModuleStatus("FAILURE");
print "Module loading failed"; | en | 0.486629 | ########################################################################## # Copyright 2016-2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ########################################################################## <?xml version="1.0" encoding="UTF-8"?><xml> <id/> <version>7</version> <name>TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port</name> <primitive_test_id/> <primitive_test_name>ethsw_stub_hal_SetPortAdminStatus</primitive_test_name> <primitive_test_version>2</primitive_test_version> <status>FREE</status> <synopsis>To validate Ethsw HAL API CcspHalEthSwSetPortAdminStatus() if it return FAILURE in case of setting port status to up for disabled/disconnected port.</synopsis> <groups_id/> <execution_time>1</execution_time> <long_duration>false</long_duration> <remarks/> <skip>false</skip> <box_types> <box_type>Broadband</box_type> </box_types> <rdk_versions> <rdk_version>RDKB</rdk_version> </rdk_versions> <test_cases> <test_case_id>TC_HAL_Ethsw_9</test_case_id> <test_objective>To validate Ethsw HAL API CcspHalEthSwSetPortAdminStatus() if it return FAILURE in case of setting port status to up for disabled/disconnected port.</test_objective> <test_type>Negative</test_type> <test_setup>Broadband</test_setup> <pre_requisite>1.Ccsp Components should be in a running state of DUT 2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite> <api_or_interface_used>CcspHalEthSwSetPortAdminStatus, CcspHalEthSwGetPortAdminStatus</api_or_interface_used> <input_parameters>PortID, adminstatus</input_parameters> <automation_approch>1. Load halethsw module. 2. From script invoke ethsw_stub_hal_SetPortAdminStatus(). 3. Set the value of Admin port status 4. Validation of the result is done within the python script and send the result status to Test Manager. 5. Test Manager will publish the result in GUI as PASS/FAILURE based on the response from HAL_Ethsw stub.</automation_approch> <except_output>API should return FAILURE.</except_output> <priority>High</priority> <test_stub_interface>HAL_Ethsw</test_stub_interface> <test_script>TS_ethsw_stub_hal_Set_Port_Admin_Status_True_Disabled_Port</test_script> <skipped>No</skipped> <release_version/> <remarks/> </test_cases> <script_tags/> </xml> #LIbrary funtions #IP and Port of box, No need to change, #This will be replaced with correspoing Box Ip and port while executing script #No CPE should be connected to testPort #Test component to be tested #Get the result of connection with test component and STB #Script to load the configuration file of the component #if port status is disconnected then validate the test #Set the result status of execution #Get the result of execution | 1.123058 | 1 |
API/src/main/resources/Lib/robot/errors.py | TagExpress/SikuliX1 | 0 | 6631412 | # Copyright (c) 2010-2020, sikuli.org, sikulix.com - MIT license
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions and return codes used internally.
External libraries should not use exceptions defined here.
"""
try:
unicode
except NameError:
unicode = str
# Return codes from Robot and Rebot.
# RC below 250 is the number of failed critical tests and exactly 250
# means that number or more such failures.
INFO_PRINTED = 251 # --help or --version
DATA_ERROR = 252 # Invalid data or cli args
STOPPED_BY_USER = 253 # KeyboardInterrupt or SystemExit
FRAMEWORK_ERROR = 255 # Unexpected error
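# For example, a run with 7 failed critical tests exits with return code 7,
# while 250 or more such failures all map to return code 250; the named codes
# above are reserved for the special situations noted in their comments.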
class RobotError(Exception):
"""Base class for Robot Framework errors.
Do not raise this exception directly but use more specific errors instead.
"""
def __init__(self, message='', details=''):
Exception.__init__(self, message)
self.details = details
@property
def message(self):
return unicode(self)
class FrameworkError(RobotError):
"""Can be used when the core framework goes to unexpected state.
It is good to explicitly raise a FrameworkError if some framework
component is used incorrectly. This is pretty much same as
'Internal Error' and should of course never happen.
"""
class DataError(RobotError):
"""Used when the provided test data is invalid.
DataErrors are not caught by keywords that run other keywords
(e.g. `Run Keyword And Expect Error`).
"""
class VariableError(DataError):
"""Used when variable does not exist.
VariableErrors are caught by keywords that run other keywords
(e.g. `Run Keyword And Expect Error`).
"""
class KeywordError(DataError):
"""Used when no keyword is found or there is more than one match.
KeywordErrors are caught by keywords that run other keywords
(e.g. `Run Keyword And Expect Error`).
"""
class TimeoutError(RobotError):
"""Used when a test or keyword timeout occurs.
This exception is handled specially so that execution of the
current test is always stopped immediately and it is not caught by
keywords executing other keywords (e.g. `Run Keyword And Expect
Error`).
"""
def __init__(self, message='', test_timeout=True):
RobotError.__init__(self, message)
self.test_timeout = test_timeout
@property
def keyword_timeout(self):
return not self.test_timeout
class Information(RobotError):
"""Used by argument parser with --help or --version."""
class ExecutionStatus(RobotError):
"""Base class for exceptions communicating status in test execution."""
def __init__(self, message, test_timeout=False, keyword_timeout=False,
syntax=False, exit=False, continue_on_failure=False,
return_value=None):
if '\r\n' in message:
message = message.replace('\r\n', '\n')
from robot.utils import cut_long_message
RobotError.__init__(self, cut_long_message(message))
self.test_timeout = test_timeout
self.keyword_timeout = keyword_timeout
self.syntax = syntax
self.exit = exit
self._continue_on_failure = continue_on_failure
self.return_value = return_value
@property
def timeout(self):
return self.test_timeout or self.keyword_timeout
@property
def dont_continue(self):
return self.timeout or self.syntax or self.exit
@property
def continue_on_failure(self):
return self._continue_on_failure
@continue_on_failure.setter
def continue_on_failure(self, continue_on_failure):
self._continue_on_failure = continue_on_failure
for child in getattr(self, '_errors', []):
if child is not self:
child.continue_on_failure = continue_on_failure
def can_continue(self, teardown=False, templated=False, dry_run=False):
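# Continuation rules, in order of precedence: dry runs always continue;
# syntax errors, exit-on-failure errors and test timeouts never do;
# templated tests continue; keyword timeouts do not; teardowns continue;
# otherwise fall back to the continue_on_failure flag.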
if dry_run:
return True
if self.syntax or self.exit or self.test_timeout:
return False
if templated:
return True
if self.keyword_timeout:
return False
if teardown:
return True
return self.continue_on_failure
def get_errors(self):
return [self]
@property
def status(self):
return 'FAIL'
class ExecutionFailed(ExecutionStatus):
"""Used for communicating failures in test execution."""
class HandlerExecutionFailed(ExecutionFailed):
def __init__(self, details):
error = details.error
timeout = isinstance(error, TimeoutError)
test_timeout = timeout and error.test_timeout
keyword_timeout = timeout and error.keyword_timeout
syntax = (isinstance(error, DataError)
and not isinstance(error, (KeywordError, VariableError)))
exit_on_failure = self._get(error, 'EXIT_ON_FAILURE')
continue_on_failure = self._get(error, 'CONTINUE_ON_FAILURE')
ExecutionFailed.__init__(self, details.message, test_timeout,
keyword_timeout, syntax, exit_on_failure,
continue_on_failure)
self.full_message = details.message
self.traceback = details.traceback
def _get(self, error, attr):
return bool(getattr(error, 'ROBOT_' + attr, False))
class ExecutionFailures(ExecutionFailed):
def __init__(self, errors, message=None):
message = message or self._format_message([e.message for e in errors])
ExecutionFailed.__init__(self, message, **self._get_attrs(errors))
self._errors = errors
def _format_message(self, messages):
if len(messages) == 1:
return messages[0]
prefix = 'Several failures occurred:'
if any(msg.startswith('*HTML*') for msg in messages):
prefix = '*HTML* ' + prefix
messages = self._format_html_messages(messages)
return '\n\n'.join(
[prefix] +
['%d) %s' % (i, m) for i, m in enumerate(messages, start=1)]
)
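# For two or more plain-text messages this produces output of the form:
#   Several failures occurred:
#
#   1) first error
#
#   2) second error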
def _format_html_messages(self, messages):
from robot.utils import html_escape
for msg in messages:
if msg.startswith('*HTML*'):
yield msg[6:].lstrip()
else:
yield html_escape(msg)
def _get_attrs(self, errors):
return {
'test_timeout': any(e.test_timeout for e in errors),
'keyword_timeout': any(e.keyword_timeout for e in errors),
'syntax': any(e.syntax for e in errors),
'exit': any(e.exit for e in errors),
'continue_on_failure': all(e.continue_on_failure for e in errors)
}
def get_errors(self):
return self._errors
class UserKeywordExecutionFailed(ExecutionFailures):
def __init__(self, run_errors=None, teardown_errors=None):
errors = self._get_active_errors(run_errors, teardown_errors)
message = self._get_message(run_errors, teardown_errors)
ExecutionFailures.__init__(self, errors, message)
if run_errors and not teardown_errors:
self._errors = run_errors.get_errors()
else:
self._errors = [self]
def _get_active_errors(self, *errors):
return [err for err in errors if err]
def _get_message(self, run_errors, teardown_errors):
run_msg = run_errors.message if run_errors else ''
td_msg = teardown_errors.message if teardown_errors else ''
if not td_msg:
return run_msg
if not run_msg:
return 'Keyword teardown failed:\n%s' % td_msg
return '%s\n\nAlso keyword teardown failed:\n%s' % (run_msg, td_msg)
class ExecutionPassed(ExecutionStatus):
"""Base class for all exceptions communicating that execution passed.
Should not be raised directly, but more detailed exceptions used instead.
"""
def __init__(self, message=None, **kwargs):
ExecutionStatus.__init__(self, message or self._get_message(), **kwargs)
self._earlier_failures = []
def _get_message(self):
from robot.utils import printable_name
return ("Invalid '%s' usage."
% printable_name(type(self).__name__, code_style=True))
def set_earlier_failures(self, failures):
if failures:
self._earlier_failures = list(failures) + self._earlier_failures
@property
def earlier_failures(self):
if not self._earlier_failures:
return None
return ExecutionFailures(self._earlier_failures)
@property
def status(self):
return 'PASS' if not self._earlier_failures else 'FAIL'
class PassExecution(ExecutionPassed):
"""Used by 'Pass Execution' keyword."""
def __init__(self, message):
ExecutionPassed.__init__(self, message)
class ContinueForLoop(ExecutionPassed):
"""Used by 'Continue For Loop' keyword."""
class ExitForLoop(ExecutionPassed):
"""Used by 'Exit For Loop' keyword."""
class ReturnFromKeyword(ExecutionPassed):
"""Used by 'Return From Keyword' keyword."""
def __init__(self, return_value=None, failures=None):
ExecutionPassed.__init__(self, return_value=return_value)
if failures:
self.set_earlier_failures(failures)
class RemoteError(RobotError):
"""Used by Remote library to report remote errors."""
def __init__(self, message='', details='', fatal=False, continuable=False):
RobotError.__init__(self, message, details)
self.ROBOT_EXIT_ON_FAILURE = fatal
self.ROBOT_CONTINUE_ON_FAILURE = continuable
| # Copyright (c) 2010-2020, sikuli.org, sikulix.com - MIT license
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions and return codes used internally.
External libraries should not use exceptions defined here.
"""
try:
unicode
except NameError:
unicode = str
# Return codes from Robot and Rebot.
# RC below 250 is the number of failed critical tests and exactly 250
# means that number or more such failures.
INFO_PRINTED = 251 # --help or --version
DATA_ERROR = 252 # Invalid data or cli args
STOPPED_BY_USER = 253 # KeyboardInterrupt or SystemExit
FRAMEWORK_ERROR = 255 # Unexpected error
class RobotError(Exception):
"""Base class for Robot Framework errors.
Do not raise this exception directly but use more specific errors instead.
"""
def __init__(self, message='', details=''):
Exception.__init__(self, message)
self.details = details
@property
def message(self):
return unicode(self)
class FrameworkError(RobotError):
"""Can be used when the core framework goes to unexpected state.
It is good to explicitly raise a FrameworkError if some framework
component is used incorrectly. This is pretty much same as
'Internal Error' and should of course never happen.
"""
class DataError(RobotError):
"""Used when the provided test data is invalid.
DataErrors are not caught by keywords that run other keywords
(e.g. `Run Keyword And Expect Error`).
"""
class VariableError(DataError):
"""Used when variable does not exist.
VariableErrors are caught by keywords that run other keywords
(e.g. `Run Keyword And Expect Error`).
"""
class KeywordError(DataError):
"""Used when no keyword is found or there is more than one match.
KeywordErrors are caught by keywords that run other keywords
(e.g. `Run Keyword And Expect Error`).
"""
class TimeoutError(RobotError):
"""Used when a test or keyword timeout occurs.
This exception is handled specially so that execution of the
current test is always stopped immediately and it is not caught by
keywords executing other keywords (e.g. `Run Keyword And Expect
Error`).
"""
def __init__(self, message='', test_timeout=True):
RobotError.__init__(self, message)
self.test_timeout = test_timeout
@property
def keyword_timeout(self):
return not self.test_timeout
class Information(RobotError):
"""Used by argument parser with --help or --version."""
class ExecutionStatus(RobotError):
"""Base class for exceptions communicating status in test execution."""
def __init__(self, message, test_timeout=False, keyword_timeout=False,
syntax=False, exit=False, continue_on_failure=False,
return_value=None):
if '\r\n' in message:
message = message.replace('\r\n', '\n')
from robot.utils import cut_long_message
RobotError.__init__(self, cut_long_message(message))
self.test_timeout = test_timeout
self.keyword_timeout = keyword_timeout
self.syntax = syntax
self.exit = exit
self._continue_on_failure = continue_on_failure
self.return_value = return_value
@property
def timeout(self):
return self.test_timeout or self.keyword_timeout
@property
def dont_continue(self):
return self.timeout or self.syntax or self.exit
@property
def continue_on_failure(self):
return self._continue_on_failure
@continue_on_failure.setter
def continue_on_failure(self, continue_on_failure):
self._continue_on_failure = continue_on_failure
for child in getattr(self, '_errors', []):
if child is not self:
child.continue_on_failure = continue_on_failure
def can_continue(self, teardown=False, templated=False, dry_run=False):
if dry_run:
return True
if self.syntax or self.exit or self.test_timeout:
return False
if templated:
return True
if self.keyword_timeout:
return False
if teardown:
return True
return self.continue_on_failure
def get_errors(self):
return [self]
@property
def status(self):
return 'FAIL'
class ExecutionFailed(ExecutionStatus):
"""Used for communicating failures in test execution."""
class HandlerExecutionFailed(ExecutionFailed):
def __init__(self, details):
error = details.error
timeout = isinstance(error, TimeoutError)
test_timeout = timeout and error.test_timeout
keyword_timeout = timeout and error.keyword_timeout
syntax = (isinstance(error, DataError)
and not isinstance(error, (KeywordError, VariableError)))
exit_on_failure = self._get(error, 'EXIT_ON_FAILURE')
continue_on_failure = self._get(error, 'CONTINUE_ON_FAILURE')
ExecutionFailed.__init__(self, details.message, test_timeout,
keyword_timeout, syntax, exit_on_failure,
continue_on_failure)
self.full_message = details.message
self.traceback = details.traceback
def _get(self, error, attr):
return bool(getattr(error, 'ROBOT_' + attr, False))
class ExecutionFailures(ExecutionFailed):
def __init__(self, errors, message=None):
message = message or self._format_message([e.message for e in errors])
ExecutionFailed.__init__(self, message, **self._get_attrs(errors))
self._errors = errors
def _format_message(self, messages):
if len(messages) == 1:
return messages[0]
prefix = 'Several failures occurred:'
if any(msg.startswith('*HTML*') for msg in messages):
prefix = '*HTML* ' + prefix
messages = self._format_html_messages(messages)
return '\n\n'.join(
[prefix] +
['%d) %s' % (i, m) for i, m in enumerate(messages, start=1)]
)
def _format_html_messages(self, messages):
from robot.utils import html_escape
for msg in messages:
if msg.startswith('*HTML*'):
yield msg[6:].lstrip()
else:
yield html_escape(msg)
def _get_attrs(self, errors):
return {
'test_timeout': any(e.test_timeout for e in errors),
'keyword_timeout': any(e.keyword_timeout for e in errors),
'syntax': any(e.syntax for e in errors),
'exit': any(e.exit for e in errors),
'continue_on_failure': all(e.continue_on_failure for e in errors)
}
def get_errors(self):
return self._errors
class UserKeywordExecutionFailed(ExecutionFailures):
def __init__(self, run_errors=None, teardown_errors=None):
errors = self._get_active_errors(run_errors, teardown_errors)
message = self._get_message(run_errors, teardown_errors)
ExecutionFailures.__init__(self, errors, message)
if run_errors and not teardown_errors:
self._errors = run_errors.get_errors()
else:
self._errors = [self]
def _get_active_errors(self, *errors):
return [err for err in errors if err]
def _get_message(self, run_errors, teardown_errors):
run_msg = run_errors.message if run_errors else ''
td_msg = teardown_errors.message if teardown_errors else ''
if not td_msg:
return run_msg
if not run_msg:
return 'Keyword teardown failed:\n%s' % td_msg
return '%s\n\nAlso keyword teardown failed:\n%s' % (run_msg, td_msg)
class ExecutionPassed(ExecutionStatus):
"""Base class for all exceptions communicating that execution passed.
Should not be raised directly, but more detailed exceptions used instead.
"""
def __init__(self, message=None, **kwargs):
ExecutionStatus.__init__(self, message or self._get_message(), **kwargs)
self._earlier_failures = []
def _get_message(self):
from robot.utils import printable_name
return ("Invalid '%s' usage."
% printable_name(type(self).__name__, code_style=True))
def set_earlier_failures(self, failures):
if failures:
self._earlier_failures = list(failures) + self._earlier_failures
@property
def earlier_failures(self):
if not self._earlier_failures:
return None
return ExecutionFailures(self._earlier_failures)
@property
def status(self):
return 'PASS' if not self._earlier_failures else 'FAIL'
class PassExecution(ExecutionPassed):
"""Used by 'Pass Execution' keyword."""
def __init__(self, message):
ExecutionPassed.__init__(self, message)
class ContinueForLoop(ExecutionPassed):
"""Used by 'Continue For Loop' keyword."""
class ExitForLoop(ExecutionPassed):
"""Used by 'Exit For Loop' keyword."""
class ReturnFromKeyword(ExecutionPassed):
"""Used by 'Return From Keyword' keyword."""
def __init__(self, return_value=None, failures=None):
ExecutionPassed.__init__(self, return_value=return_value)
if failures:
self.set_earlier_failures(failures)
class RemoteError(RobotError):
"""Used by Remote library to report remote errors."""
def __init__(self, message='', details='', fatal=False, continuable=False):
RobotError.__init__(self, message, details)
self.ROBOT_EXIT_ON_FAILURE = fatal
self.ROBOT_CONTINUE_ON_FAILURE = continuable
| en | 0.816489 | # Copyright (c) 2010-2020, sikuli.org, sikulix.com - MIT license # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Exceptions and return codes used internally. External libraries should not used exceptions defined here. # Return codes from Robot and Rebot. # RC below 250 is the number of failed critical tests and exactly 250 # means that number or more such failures. # --help or --version # Invalid data or cli args # KeyboardInterrupt or SystemExit # Unexpected error Base class for Robot Framework errors. Do not raise this method but use more specific errors instead. Can be used when the core framework goes to unexpected state. It is good to explicitly raise a FrameworkError if some framework component is used incorrectly. This is pretty much same as 'Internal Error' and should of course never happen. Used when the provided test data is invalid. DataErrors are not caught by keywords that run other keywords (e.g. `Run Keyword And Expect Error`). Used when variable does not exist. VariableErrors are caught by keywords that run other keywords (e.g. `Run Keyword And Expect Error`). Used when no keyword is found or there is more than one match. KeywordErrors are caught by keywords that run other keywords (e.g. `Run Keyword And Expect Error`). Used when a test or keyword timeout occurs. This exception is handled specially so that execution of the current test is always stopped immediately and it is not caught by keywords executing other keywords (e.g. `Run Keyword And Expect Error`). Used by argument parser with --help or --version. Base class for exceptions communicating status in test execution. Used for communicating failures in test execution. Base class for all exceptions communicating that execution passed. Should not be raised directly, but more detailed exceptions used instead. Used by 'Pass Execution' keyword. Used by 'Continue For Loop' keyword. Used by 'Exit For Loop' keyword. Used by 'Return From Keyword' keyword. Used by Remote library to report remote errors. | 2.442519 | 2 |
dbReports/iondb/rundb/data/archive_report.py | sequencer2014/TS | 0 | 6631413 | <gh_stars>0
#!/usr/bin/env python
# Copyright (C) 2014 Ion Torrent Systems, Inc. All Rights Reserved
#
# List all Reports and status of file categories, showing archive location
#
import sys
from iondb.bin import djangoinit
from iondb.rundb.models import Results
from iondb.rundb.data import dmactions_types
# Write the column headers
sys.stdout.write("Report Name," + ",".join(dmactions_types.FILESET_TYPES) + "\n")
# Get list of Result objects from database
results = Results.objects.all().order_by('timeStamp')
for result in results:
sys.stdout.write(result.resultsName)
# Get DMFileStat objects for this Report
for dm_type in dmactions_types.FILESET_TYPES:
dmfilestat = result.get_filestat(dm_type)
sys.stdout.write(",")
sys.stdout.write(str(dmfilestat.archivepath))
print
| #!/usr/bin/env python
# Copyright (C) 2014 Ion Torrent Systems, Inc. All Rights Reserved
#
# List all Reports and status of file categories, showing archive location
#
import sys
from iondb.bin import djangoinit
from iondb.rundb.models import Results
from iondb.rundb.data import dmactions_types
# Write the column headers
sys.stdout.write("Report Name," + ",".join(dmactions_types.FILESET_TYPES) + "\n")
# Get list of Result objects from database
results = Results.objects.all().order_by('timeStamp')
for result in results:
sys.stdout.write(result.resultsName)
# Get DMFileStat objects for this Report
for dm_type in dmactions_types.FILESET_TYPES:
dmfilestat = result.get_filestat(dm_type)
sys.stdout.write(",")
sys.stdout.write(str(dmfilestat.archivepath))
print | en | 0.696278 | #!/usr/bin/env python # Copyright (C) 2014 Ion Torrent Systems, Inc. All Rights Reserved # # List all Reports and status of file categories, showing archive location # # Write the column headers # Get list of Result objects from database # Get DMFileStat objects for this Report | 1.969358 | 2 |
tests/clients/test_alerts.py | ryanvanasse/py42 | 0 | 6631414 | <gh_stars>0
import pytest
from py42.clients.alerts import AlertsClient
from py42.sdk.queries.alerts.alert_query import AlertQuery
from py42.services.alertrules import AlertRulesService
from py42.services.alerts import AlertService
@pytest.fixture
def mock_alerts_service(mocker):
return mocker.MagicMock(spec=AlertService)
@pytest.fixture
def mock_alert_rules_service(mocker):
return mocker.MagicMock(spec=AlertRulesService)
@pytest.fixture
def mock_alert_query(mocker):
return mocker.MagicMock(spec=AlertQuery)
class TestAlertsClient(object):
_alert_ids = [u"test-id1", u"test-id2"]
def test_rules_returns_rules_client(
self, mock_alerts_service, mock_alert_rules_service
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
assert alert_client.rules
def test_alerts_client_calls_search_with_expected_value(
self, mock_alerts_service, mock_alert_rules_service, mock_alert_query,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.search(mock_alert_query)
mock_alerts_service.search.assert_called_once_with(mock_alert_query, 1, None)
def test_alerts_client_calls_get_details_with_expected_value(
self, mock_alerts_service, mock_alert_rules_service
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.get_details(self._alert_ids)
mock_alerts_service.get_details.assert_called_once_with(self._alert_ids)
def test_alerts_client_calls_update_state_with_resolve_state_and_expected_value(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.resolve(self._alert_ids)
mock_alerts_service.update_state.assert_called_once_with(
"RESOLVED", self._alert_ids, note=None
)
def test_alerts_client_calls_update_state_with_reopen_state_and_expected_value(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.reopen(self._alert_ids)
mock_alerts_service.update_state.assert_called_once_with(
"OPEN", self._alert_ids, note=None
)
def test_alerts_client_calls_update_state_with_state_and_expected_value(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.update_state("RESOLVED", self._alert_ids)
mock_alerts_service.update_state.assert_called_once_with(
"RESOLVED", self._alert_ids, note=None
)
def test_alerts_client_calls_update_note_with_expected_value_and_param(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.update_note("alert-id", "a note")
mock_alerts_service.update_note.assert_called_once_with("alert-id", "a note")
def test_alerts_client_calls_search_all_pages_with_expected_value_and_param(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
query = '{"test": "data"}}'
alert_client.search_all_pages(query)
mock_alerts_service.search_all_pages.assert_called_once_with(query)
def test_alerts_client_calls_get_aggregate_data_with_expected_value_and_param(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.get_aggregate_data("alert-id")
mock_alerts_service.get_aggregate_data.assert_called_once_with("alert-id")
| import pytest
from py42.clients.alerts import AlertsClient
from py42.sdk.queries.alerts.alert_query import AlertQuery
from py42.services.alertrules import AlertRulesService
from py42.services.alerts import AlertService
@pytest.fixture
def mock_alerts_service(mocker):
return mocker.MagicMock(spec=AlertService)
@pytest.fixture
def mock_alert_rules_service(mocker):
return mocker.MagicMock(spec=AlertRulesService)
@pytest.fixture
def mock_alert_query(mocker):
return mocker.MagicMock(spec=AlertQuery)
class TestAlertsClient(object):
_alert_ids = [u"test-id1", u"test-id2"]
def test_rules_returns_rules_client(
self, mock_alerts_service, mock_alert_rules_service
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
assert alert_client.rules
def test_alerts_client_calls_search_with_expected_value(
self, mock_alerts_service, mock_alert_rules_service, mock_alert_query,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.search(mock_alert_query)
mock_alerts_service.search.assert_called_once_with(mock_alert_query, 1, None)
def test_alerts_client_calls_get_details_with_expected_value(
self, mock_alerts_service, mock_alert_rules_service
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.get_details(self._alert_ids)
mock_alerts_service.get_details.assert_called_once_with(self._alert_ids)
def test_alerts_client_calls_update_state_with_resolve_state_and_expected_value(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.resolve(self._alert_ids)
mock_alerts_service.update_state.assert_called_once_with(
"RESOLVED", self._alert_ids, note=None
)
def test_alerts_client_calls_update_state_with_reopen_state_and_expected_value(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.reopen(self._alert_ids)
mock_alerts_service.update_state.assert_called_once_with(
"OPEN", self._alert_ids, note=None
)
def test_alerts_client_calls_update_state_with_state_and_expected_value(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.update_state("RESOLVED", self._alert_ids)
mock_alerts_service.update_state.assert_called_once_with(
"RESOLVED", self._alert_ids, note=None
)
def test_alerts_client_calls_update_note_with_expected_value_and_param(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.update_note("alert-id", "a note")
mock_alerts_service.update_note.assert_called_once_with("alert-id", "a note")
def test_alerts_client_calls_search_all_pages_with_expected_value_and_param(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
query = '{"test": "data"}}'
alert_client.search_all_pages(query)
mock_alerts_service.search_all_pages.assert_called_once_with(query)
def test_alerts_client_calls_get_aggregate_data_with_expected_value_and_param(
self, mock_alerts_service, mock_alert_rules_service,
):
alert_client = AlertsClient(mock_alerts_service, mock_alert_rules_service)
alert_client.get_aggregate_data("alert-id")
mock_alerts_service.get_aggregate_data.assert_called_once_with("alert-id") | none | 1 | 2.145568 | 2 |
|
will/plugins/friendly/random_topic.py | Ashex/will | 349 | 6631415 | <reponame>Ashex/will
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
import requests
class RandomTopicPlugin(WillPlugin):
@respond_to("new topic")
def give_us_somethin_to_talk_about(self, message):
"""new topic: set the room topic to a random conversation starter."""
r = requests.get("http://www.chatoms.com/chatom.json?Normal=1&Fun=2&Philosophy=3&Out+There=4")
data = r.json()
self.set_topic(data["text"], message=message)
| from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
import requests
class RandomTopicPlugin(WillPlugin):
@respond_to("new topic")
def give_us_somethin_to_talk_about(self, message):
"""new topic: set the room topic to a random conversation starter."""
r = requests.get("http://www.chatoms.com/chatom.json?Normal=1&Fun=2&Philosophy=3&Out+There=4")
data = r.json()
self.set_topic(data["text"], message=message) | en | 0.833777 | new topic: set the room topic to a random conversation starter. | 2.787349 | 3 |
chemprop/data/vocab.py | wengong-jin/chemprop | 77 | 6631416 | <reponame>wengong-jin/chemprop
from argparse import Namespace
from copy import deepcopy
from functools import partial
from multiprocessing import Pool
import random
from typing import Callable, List, FrozenSet, Set, Tuple, Union
from collections import Counter
from rdkit import Chem
import torch
from chemprop.features import atom_features, bond_features, get_atom_fdim, FunctionalGroupFeaturizer
class Vocab:
def __init__(self, args: Namespace, smiles: List[str]):
self.substructure_sizes = args.bert_substructure_sizes
self.vocab_func = partial(
atom_vocab,
vocab_func=args.bert_vocab_func,
substructure_sizes=self.substructure_sizes,
args=args
)
if args.bert_vocab_func == 'feature_vector':
self.unk = None
self.output_size = get_atom_fdim(args, is_output=True)
return # don't need a real vocab list here
self.unk = 'unk'
self.smiles = smiles
self.vocab = get_vocab(args, self.vocab_func, self.smiles)
self.vocab.add(self.unk)
self.vocab_size = len(self.vocab)
self.vocab_mapping = {word: i for i, word in enumerate(sorted(self.vocab))}
self.output_size = self.vocab_size
def w2i(self, word: str) -> int:
if self.unk is None:
return word # in this case, we didn't map to a vocab at all; we're just predicting the original features
return self.vocab_mapping[word] if word in self.vocab_mapping else self.vocab_mapping[self.unk]
def smiles2indices(self, smiles: str) -> Tuple[List[int], List[List[int]]]:
features, nb_indices = self.vocab_func(smiles, nb_info=True)
return [self.w2i(word) for word in features], nb_indices
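# smiles2indices thus returns the vocabulary index (or the raw feature vector in
# 'feature_vector' mode) for every word the vocab_func produces for the molecule,
# plus the per-atom neighbour index lists.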
def get_substructures_from_atom(atom: Chem.Atom,
max_size: int,
substructure: Set[int] = None) -> Set[FrozenSet[int]]:
"""
Recursively gets all substructures up to a maximum size starting from an atom in a substructure.
:param atom: The atom to start at.
:param max_size: The maximum size of the substructure to find.
:param substructure: The current substructure that atom is in.
:return: A set of substructures starting at atom where each substructure is a frozenset of indices.
"""
assert max_size >= 1
if substructure is None:
substructure = {atom.GetIdx()}
substructures = {frozenset(substructure)}
if len(substructure) == max_size:
return substructures
# Get neighbors which are not already in the substructure
new_neighbors = [neighbor for neighbor in atom.GetNeighbors() if neighbor.GetIdx() not in substructure]
for neighbor in new_neighbors:
# Define new substructure with neighbor
new_substructure = deepcopy(substructure)
new_substructure.add(neighbor.GetIdx())
# Skip if new substructure has already been considered
if frozenset(new_substructure) in substructures:
continue
# Recursively get substructures including this substructure plus neighbor
new_substructures = get_substructures_from_atom(neighbor, max_size, new_substructure)
# Add those substructures to current set of substructures
substructures |= new_substructures
return substructures
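# A minimal usage sketch (illustrative only; the SMILES string is an arbitrary
# example, not something taken from this project's data):
#
#     mol = Chem.MolFromSmiles('CCO')
#     subs = get_substructures_from_atom(mol.GetAtomWithIdx(0), max_size=2)
#     # subs == {frozenset({0}), frozenset({0, 1})}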
def get_substructures(atoms: List[Chem.Atom],
sizes: List[int],
max_count: int = None) -> Set[FrozenSet[int]]:
"""
Gets up to max_count substructures (frozenset of atom indices) from a molecule.
Note: Uses randomness to guarantee that the first max_count substructures
found are a random sample of the substructures in the molecule.
(It's not perfectly random, depending on the graph structure, but probably good enough
for our purposes. There's a bit of bias toward substructures on the periphery.)
:param atoms: A list of atoms in the molecule.
:param sizes: The sizes of substructures to find.
:param max_count: The maximum number of substructures to find.
:return: A set of substructures where each substructure is a frozenset of indices.
"""
max_count = max_count or float('inf')
random.shuffle(atoms)
substructures = set()
for atom in atoms:
# Get all substructures up to max size starting from atom
new_substructures = get_substructures_from_atom(atom, max(sizes))
# Filter substructures to those which are one of the desired sizes
new_substructures = [substructure for substructure in new_substructures if len(substructure) in sizes]
for new_substructure in new_substructures:
if len(substructures) >= max_count:
return substructures
substructures.add(new_substructure)
return substructures
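# Note: random.shuffle above mutates the `atoms` list passed in, so callers that
# care about atom ordering should pass a copy (as atom_vocab does via list(atoms)).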
def substructure_to_feature(mol: Chem.Mol,
substructure: FrozenSet[int],
fg_features: List[List[int]] = None) -> str:
"""
Converts a substructure (set of atom indices) to a feature string
by sorting and concatenating atom and bond feature vectors.
:param mol: A molecule.
:param substructure: A set of atom indices representing a substructure.
:param fg_features: A list of k-hot vector indicating the functional groups the atom belongs to.
:return: A string representing the featurization of the substructure.
"""
if fg_features is None:
fg_features = [None] * mol.GetNumAtoms()
substructure = list(substructure)
atoms = [Chem.Mol.GetAtomWithIdx(mol, idx) for idx in substructure]
bonds = []
for i in range(len(substructure)):
for j in range(i + 1, len(substructure)):
a1, a2 = substructure[i], substructure[j]
bond = mol.GetBondBetweenAtoms(a1, a2)
if bond is not None:
bonds.append(bond)
features = [str(atom_features(atom, fg_features[atom.GetIdx()])) for atom in atoms] + \
[str(bond_features(bond)) for bond in bonds]
features.sort() # ensure identical feature string for different atom/bond ordering
features = str(features)
return features
def atom_vocab(smiles: str,
vocab_func: str,
args: Namespace = None,
substructure_sizes: List[int] = None,
nb_info: bool = False) -> Union[List[str],
Tuple[List[str], List[List[int]]]]:
if vocab_func not in ['atom', 'atom_features', 'feature_vector', 'substructure']:
raise ValueError(f'vocab_func "{vocab_func}" not supported.')
mol = Chem.MolFromSmiles(smiles)
atoms = mol.GetAtoms()
if args is not None and \
('functional_group' in args.additional_atom_features or
'functional_group' in args.additional_output_features):
fg_featurizer = FunctionalGroupFeaturizer(args)
fg_features = fg_featurizer.featurize(mol)
else:
fg_features = [None] * len(atoms)
if vocab_func == 'feature_vector':
features = [atom_features(atom, fg) for atom, fg in zip(atoms, fg_features)]
elif vocab_func == 'atom_features':
features = [str(atom_features(atom, fg)) for atom, fg in zip(atoms, fg_features)]
elif vocab_func == 'atom':
features = [str(atom.GetAtomicNum()) for atom in atoms]
elif vocab_func == 'substructure':
substructures = get_substructures(list(atoms), substructure_sizes)
features = [substructure_to_feature(mol, substructure, fg_features) for substructure in substructures]
else:
raise ValueError(f'vocab_func "{vocab_func}" not supported.')
if nb_info:
nb_indices = []
for atom in atoms:
nb_indices.append([nb.GetIdx() for nb in atom.GetNeighbors()]) # atoms are sorted by idx
return features, nb_indices
return features
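# Illustrative example (assumes RDKit can parse the SMILES; benzene has six
# aromatic carbons, and the 'atom' mode uses atomic numbers as vocabulary words):
#
#     words = atom_vocab('c1ccccc1', vocab_func='atom')
#     # words == ['6', '6', '6', '6', '6', '6']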
def vocab(pair: Tuple[Callable, str, bool]) -> Set[str]:
vocab_func, smiles, as_set = pair
return set(vocab_func(smiles, nb_info=False)) if as_set else vocab_func(smiles, nb_info=False)
def get_vocab(args: Namespace, vocab_func: Callable, smiles: List[str]) -> Set[str]:
sequential, max_vocab_size, smiles_to_sample = args.sequential, args.bert_max_vocab_size, args.bert_smiles_to_sample
if smiles_to_sample > 0 and smiles_to_sample < len(smiles):
random.shuffle(smiles)
smiles = smiles[:smiles_to_sample]
pairs = [(vocab_func, smile, max_vocab_size == 0) for smile in smiles]
if max_vocab_size == 0:
if sequential:
return set.union(*map(vocab, pairs))
with Pool() as pool:
return set.union(*pool.map(vocab, pairs))
else:
if sequential:
vocab_lists = map(vocab, pairs)
else:
with Pool() as pool:
vocab_lists = pool.map(vocab, pairs)
counter = Counter()
for elt_list in vocab_lists:
counter.update(elt_list)
return set([elt for elt, count in counter.most_common(max_vocab_size)])
def load_vocab(path: str) -> Vocab:
"""
Loads the Vocab a model was trained with.
:param path: Path where the model checkpoint is saved.
:return: The Vocab object that the model was trained with.
"""
return torch.load(path, map_location=lambda storage, loc: storage)['args'].vocab
| from argparse import Namespace
from copy import deepcopy
from functools import partial
from multiprocessing import Pool
import random
from typing import Callable, List, FrozenSet, Set, Tuple, Union
from collections import Counter
from rdkit import Chem
import torch
from chemprop.features import atom_features, bond_features, get_atom_fdim, FunctionalGroupFeaturizer
class Vocab:
def __init__(self, args: Namespace, smiles: List[str]):
self.substructure_sizes = args.bert_substructure_sizes
self.vocab_func = partial(
atom_vocab,
vocab_func=args.bert_vocab_func,
substructure_sizes=self.substructure_sizes,
args=args
)
if args.bert_vocab_func == 'feature_vector':
self.unk = None
self.output_size = get_atom_fdim(args, is_output=True)
return # don't need a real vocab list here
self.unk = 'unk'
self.smiles = smiles
self.vocab = get_vocab(args, self.vocab_func, self.smiles)
self.vocab.add(self.unk)
self.vocab_size = len(self.vocab)
self.vocab_mapping = {word: i for i, word in enumerate(sorted(self.vocab))}
self.output_size = self.vocab_size
def w2i(self, word: str) -> int:
if self.unk is None:
return word # in this case, we didn't map to a vocab at all; we're just predicting the original features
return self.vocab_mapping[word] if word in self.vocab_mapping else self.vocab_mapping[self.unk]
def smiles2indices(self, smiles: str) -> Tuple[List[int], List[List[int]]]:
features, nb_indices = self.vocab_func(smiles, nb_info=True)
return [self.w2i(word) for word in features], nb_indices
def get_substructures_from_atom(atom: Chem.Atom,
max_size: int,
substructure: Set[int] = None) -> Set[FrozenSet[int]]:
"""
Recursively gets all substructures up to a maximum size starting from an atom in a substructure.
:param atom: The atom to start at.
:param max_size: The maximum size of the substructure to find.
:param substructure: The current substructure that atom is in.
:return: A set of substructures starting at atom where each substructure is a frozenset of indices.
"""
assert max_size >= 1
if substructure is None:
substructure = {atom.GetIdx()}
substructures = {frozenset(substructure)}
if len(substructure) == max_size:
return substructures
# Get neighbors which are not already in the substructure
new_neighbors = [neighbor for neighbor in atom.GetNeighbors() if neighbor.GetIdx() not in substructure]
for neighbor in new_neighbors:
# Define new substructure with neighbor
new_substructure = deepcopy(substructure)
new_substructure.add(neighbor.GetIdx())
# Skip if new substructure has already been considered
if frozenset(new_substructure) in substructures:
continue
# Recursively get substructures including this substructure plus neighbor
new_substructures = get_substructures_from_atom(neighbor, max_size, new_substructure)
# Add those substructures to current set of substructures
substructures |= new_substructures
return substructures
def get_substructures(atoms: List[Chem.Atom],
sizes: List[int],
max_count: int = None) -> Set[FrozenSet[int]]:
"""
Gets up to max_count substructures (frozenset of atom indices) from a molecule.
Note: Uses randomness to guarantee that the first max_count substructures
found are a random sample of the substructures in the molecule.
(It's not perfectly random, depending on the graph structure, but probably good enough
for our purposes. There's a bit of bias toward substructures on the periphery.)
:param atoms: A list of atoms in the molecule.
:param sizes: The sizes of substructures to find.
:param max_count: The maximum number of substructures to find.
:return: A set of substructures where each substructure is a frozenset of indices.
"""
max_count = max_count or float('inf')
random.shuffle(atoms)
substructures = set()
for atom in atoms:
# Get all substructures up to max size starting from atom
new_substructures = get_substructures_from_atom(atom, max(sizes))
# Filter substructures to those which are one of the desired sizes
new_substructures = [substructure for substructure in new_substructures if len(substructure) in sizes]
for new_substructure in new_substructures:
if len(substructures) >= max_count:
return substructures
substructures.add(new_substructure)
return substructures
def substructure_to_feature(mol: Chem.Mol,
substructure: FrozenSet[int],
fg_features: List[List[int]] = None) -> str:
"""
Converts a substructure (set of atom indices) to a feature string
by sorting and concatenating atom and bond feature vectors.
:param mol: A molecule.
:param substructure: A set of atom indices representing a substructure.
:param fg_features: A list of k-hot vector indicating the functional groups the atom belongs to.
:return: A string representing the featurization of the substructure.
"""
if fg_features is None:
fg_features = [None] * mol.GetNumAtoms()
substructure = list(substructure)
atoms = [Chem.Mol.GetAtomWithIdx(mol, idx) for idx in substructure]
bonds = []
for i in range(len(substructure)):
for j in range(i + 1, len(substructure)):
a1, a2 = substructure[i], substructure[j]
bond = mol.GetBondBetweenAtoms(a1, a2)
if bond is not None:
bonds.append(bond)
features = [str(atom_features(atom, fg_features[atom.GetIdx()])) for atom in atoms] + \
[str(bond_features(bond)) for bond in bonds]
features.sort() # ensure identical feature string for different atom/bond ordering
features = str(features)
return features
def atom_vocab(smiles: str,
vocab_func: str,
args: Namespace = None,
substructure_sizes: List[int] = None,
nb_info: bool = False) -> Union[List[str],
Tuple[List[str], List[List[int]]]]:
if vocab_func not in ['atom', 'atom_features', 'feature_vector', 'substructure']:
raise ValueError(f'vocab_func "{vocab_func}" not supported.')
mol = Chem.MolFromSmiles(smiles)
atoms = mol.GetAtoms()
if args is not None and \
('functional_group' in args.additional_atom_features or
'functional_group' in args.additional_output_features):
fg_featurizer = FunctionalGroupFeaturizer(args)
fg_features = fg_featurizer.featurize(mol)
else:
fg_features = [None] * len(atoms)
if vocab_func == 'feature_vector':
features = [atom_features(atom, fg) for atom, fg in zip(atoms, fg_features)]
elif vocab_func == 'atom_features':
features = [str(atom_features(atom, fg)) for atom, fg in zip(atoms, fg_features)]
elif vocab_func == 'atom':
features = [str(atom.GetAtomicNum()) for atom in atoms]
elif vocab_func == 'substructure':
substructures = get_substructures(list(atoms), substructure_sizes)
features = [substructure_to_feature(mol, substructure, fg_features) for substructure in substructures]
else:
raise ValueError(f'vocab_func "{vocab_func}" not supported.')
if nb_info:
nb_indices = []
for atom in atoms:
nb_indices.append([nb.GetIdx() for nb in atom.GetNeighbors()]) # atoms are sorted by idx
return features, nb_indices
return features
def vocab(pair: Tuple[Callable, str, bool]) -> Set[str]:
vocab_func, smiles, as_set = pair
return set(vocab_func(smiles, nb_info=False)) if as_set else vocab_func(smiles, nb_info=False)
def get_vocab(args: Namespace, vocab_func: Callable, smiles: List[str]) -> Set[str]:
sequential, max_vocab_size, smiles_to_sample = args.sequential, args.bert_max_vocab_size, args.bert_smiles_to_sample
if smiles_to_sample > 0 and smiles_to_sample < len(smiles):
random.shuffle(smiles)
smiles = smiles[:smiles_to_sample]
pairs = [(vocab_func, smile, max_vocab_size == 0) for smile in smiles]
if max_vocab_size == 0:
if sequential:
return set.union(*map(vocab, pairs))
with Pool() as pool:
return set.union(*pool.map(vocab, pairs))
else:
if sequential:
vocab_lists = map(vocab, pairs)
else:
with Pool() as pool:
vocab_lists = pool.map(vocab, pairs)
counter = Counter()
for elt_list in vocab_lists:
counter.update(elt_list)
return set([elt for elt, count in counter.most_common(max_vocab_size)])
def load_vocab(path: str) -> Vocab:
"""
Loads the Vocab a model was trained with.
:param path: Path where the model checkpoint is saved.
:return: The Vocab object that the model was trained with.
"""
return torch.load(path, map_location=lambda storage, loc: storage)['args'].vocab | en | 0.878891 | # don't need a real vocab list here # in this case, we didn't map to a vocab at all; we're just predicting the original features Recursively gets all substructures up to a maximum size starting from an atom in a substructure. :param atom: The atom to start at. :param max_size: The maximum size of the substructure to fine. :param substructure: The current substructure that atom is in. :return: A set of substructures starting at atom where each substructure is a frozenset of indices. # Get neighbors which are not already in the substructure # Define new substructure with neighbor # Skip if new substructure has already been considered # Recursively get substructures including this substructure plus neighbor # Add those substructures to current set of substructures Gets up to max_count substructures (frozenset of atom indices) from a molecule. Note: Uses randomness to guarantee that the first max_count substructures found are a random sample of the substructures in the molecule. (It's not perfectly random, depending on the graph structure, but probably good enough for our purposes. There's a bit of bias toward substructures on the periphery.) :param atoms: A list of atoms in the molecule. :param sizes: The sizes of substructures to find. :param max_count: The maximum number of substructures to find. :return: A set of substructures where each substructure is a frozenset of indices. # Get all substructures up to max size starting from atom # Filter substructures to those which are one of the desired sizes Converts a substructure (set of atom indices) to a feature string by sorting and concatenating atom and bond feature vectors. :param mol: A molecule. :param substructure: A set of atom indices representing a substructure. :param fg_features: A list of k-hot vector indicating the functional groups the atom belongs to. :return: A string representing the featurization of the substructure. # ensure identical feature string for different atom/bond ordering # atoms are sorted by idx Loads the Vocab a model was trained with. :param path: Path where the model checkpoint is saved. :return: The Vocab object that the model was trained with. | 2.105777 | 2 |
demo/app01/models.py | General-ITer/Django-Introduction | 0 | 6631417 | from django.db import models
# Create your models here.
class ap1(models.Model):
username = models.CharField(max_length=30)
class Meta:
        app_label = 'app02'  # If specified, this model's table is created in the database corresponding to app02 (see the router sketch below)
class ap2(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
    birth_date = models.DateField()
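
# Note: Meta.app_label on its own only assigns ap1 to the app02 app; sending its table to a
# separate database also requires a router registered through DATABASE_ROUTERS in
# settings.py. A minimal sketch of such a router, assuming a database alias named
# 'app02_db' exists in settings.DATABASES (the alias and class name are illustrative):
class App02Router:
    """Route models whose app_label is 'app02' to the 'app02_db' connection."""

    route_app_labels = {'app02'}

    def db_for_read(self, model, **hints):
        return 'app02_db' if model._meta.app_label in self.route_app_labels else None

    def db_for_write(self, model, **hints):
        return 'app02_db' if model._meta.app_label in self.route_app_labels else None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label in self.route_app_labels:
            return db == 'app02_db'
        return None

# Registered in settings.py (dotted path assumed from this file's location):
#   DATABASE_ROUTERS = ['app01.models.App02Router']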
terroroftinytown/tracker/bootstrap.py | Flashfire42/terroroftinytown | 59 | 6631418 |
# encoding=utf-8
import argparse
import configparser
import logging
import signal
import redis
import tornado.httpserver
import tornado.ioloop
from terroroftinytown.tracker.app import Application
from terroroftinytown.tracker.database import Database
from terroroftinytown.tracker.logs import GzipTimedRotatingFileHandler, \
LogFilter
from terroroftinytown.tracker.stats import Stats
logger = logging.getLogger(__name__)
class Bootstrap:
def __init__(self):
self.arg_parser = argparse.ArgumentParser()
self.config = configparser.ConfigParser()
def start(self, args=None):
self.setup_args()
self.parse_args(args=args)
self.load_config()
self.setup_database()
def setup_args(self):
self.arg_parser.add_argument('config')
self.arg_parser.add_argument('--debug', action='store_true')
def parse_args(self, args=None):
self.args = self.arg_parser.parse_args(args=args)
def load_config(self):
self.config.read([self.args.config])
def setup_database(self):
self.database = Database(
path=self.config['database']['path'],
)
def setup_redis(self):
kwargs = {
'db': self.config.getint('redis', 'db', fallback=0),
'password': self.config.get('redis', 'password', fallback=None),
}
if self.config['redis']['unix']:
kwargs['unix_socket_path'] = self.config['redis']['unix']
else:
kwargs['host'] = self.config.get('redis', 'host', fallback='localhost')
kwargs['port'] = self.config.getint('redis', 'port', fallback=6379)
self.redis = redis.Redis(**kwargs)
def setup_stats(self):
self.stats = Stats(
self.redis,
self.config.get('redis', 'prefix', fallback=''),
self.config.getint('redis', 'max_stats', fallback=30)
)
def setup_logging(self):
log_path = self.config.get('logging', 'path', fallback=None)
if not log_path:
return
if self.args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
handler = GzipTimedRotatingFileHandler(
filename=log_path,
backupCount=self.config.get('logging', 'backup_count', fallback=52),
encoding='utf-8')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
log_filter = LogFilter()
handler.addFilter(log_filter)
class ApplicationBootstrap(Bootstrap):
def start(self):
super().start()
self.setup_redis()
self.setup_stats()
self.setup_application()
self.setup_logging()
self.setup_signal_handlers()
self.boot()
def setup_application(self):
self.application = Application(
self.database,
self.redis,
debug=self.args.debug,
cookie_secret=self.config['web']['cookie_secret'],
maintenance_sentinel=self.config['web'].get('maintenance_sentinel_file'),
)
def boot(self):
host = self.config['web'].get('host', 'localhost')
port = int(self.config['web']['port'])
xheaders = self.config.getboolean('web', 'xheaders', fallback=False)
logger.info('Application booting. Listen on %s:%s', host, port)
if xheaders:
logger.info('Using xheaders.')
self.server = tornado.httpserver.HTTPServer(
self.application, xheaders=xheaders
)
self.server.listen(port, address=host)
tornado.ioloop.IOLoop.instance().start()
def setup_signal_handlers(self):
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
def _signal_handler(self, signal_number, stack_frame):
logger.info('Shutting down.')
io_loop = tornado.ioloop.IOLoop.instance()
io_loop.add_callback_from_signal(self.stop)
def stop(self):
io_loop = tornado.ioloop.IOLoop.instance()
self.server.stop()
io_loop.call_later(1, io_loop.stop)
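
# A minimal sketch of how this module is typically driven: an entry-point script passes a
# config path on the command line, and the config sections/keys below are inferred from
# load_config() and the setup_*() methods above (all values are placeholders):
#
#   [database]
#   path = <path or URL expected by Database()>
#
#   [redis]
#   unix =
#   host = localhost
#   port = 6379
#   prefix = tott:
#
#   [web]
#   host = 127.0.0.1
#   port = 8080
#   cookie_secret = <random secret>
#   xheaders = 0
if __name__ == '__main__':
    ApplicationBootstrap().start()  # argparse reads the config path from sys.argv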
addons/mod.py | Ha1vorsen/Kurisu | 0 | 6631419 | import datetime
import discord
import json
import re
import time
from discord.ext import commands
from subprocess import call
class Mod:
"""
Staff commands.
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
async def add_restriction(self, member, rst):
with open("data/restrictions.json", "r") as f:
rsts = json.load(f)
if member.id not in rsts:
rsts[member.id] = []
if rst not in rsts[member.id]:
rsts[member.id].append(rst)
with open("data/restrictions.json", "w") as f:
json.dump(rsts, f)
async def remove_restriction(self, member, rst):
with open("data/restrictions.json", "r") as f:
rsts = json.load(f)
if member.id not in rsts:
rsts[member.id] = []
if rst in rsts[member.id]:
rsts[member.id].remove(rst)
with open("data/restrictions.json", "w") as f:
json.dump(rsts, f)
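
    # Illustrative shape of data/restrictions.json maintained by the two helpers above
    # (the user ID is made up; the values are the restriction names used below):
    #   {"123456789012345678": ["Muted", "No-Embed"]}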
@commands.has_permissions(administrator=True)
@commands.command()
async def quit(self, *gamename):
"""Stops the bot."""
await self.bot.say("👋 Bye bye!")
await self.bot.close()
@commands.has_permissions(manage_server=True)
@commands.command(hidden=True)
async def pull(self, *gamename):
"""Pull new changes from GitHub and restart."""
await self.bot.say("Pulling changes...")
call(['git', 'pull'])
await self.bot.say("👋 Restarting bot!")
await self.bot.close()
@commands.command(pass_context=True, hidden=True)
async def userinfo(self, ctx, user):
"""Gets user info. Staff and Helpers only."""
issuer = ctx.message.author
if (self.bot.helpers_role not in issuer.roles) and (self.bot.staff_role not in issuer.roles):
msg = "{0} This command is limited to Staff and Helpers.".format(issuer.mention)
await self.bot.say(msg)
return
u = ctx.message.mentions[0]
role = u.top_role.name
if role == "@everyone":
role = "@ everyone"
await self.bot.say("name = {}\nid = {}\ndiscriminator = {}\navatar = {}\nbot = {}\navatar_url = {}\ndefault_avatar = {}\ndefault_avatar_url = <{}>\ncreated_at = {}\ndisplay_name = {}\njoined_at = {}\nstatus = {}\ngame = {}\ncolour = {}\ntop_role = {}\n".format(u.name, u.id, u.discriminator, u.avatar, u.bot, u.avatar_url, u.default_avatar, u.default_avatar_url, u.created_at, u.display_name, u.joined_at, u.status, u.game, u.colour, role))
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, hidden=True)
async def matchuser(self, ctx, *, rgx: str):
"""Match users by regex."""
author = ctx.message.author
msg = "```\nmembers:\n"
for m in self.bot.server.members:
if bool(re.search(rgx, m.name, re.IGNORECASE)):
msg += "{} - {}#{}\n".format(m.id, m.name, m.discriminator)
msg += "```"
await self.bot.send_message(author, msg)
@commands.has_permissions(administrator=True)
@commands.command(pass_context=True, hidden=True)
async def multiban(self, ctx, *, members: str):
"""Multi-ban users."""
author = ctx.message.author
msg = "```\nbanned:\n"
for m in ctx.message.mentions:
msg += "{} - {}#{}\n".format(m.id, m.name, m.discriminator)
try:
await self.bot.ban(m)
            except discord.errors.NotFound:
pass
msg += "```"
await self.bot.send_message(author, msg)
@commands.has_permissions(administrator=True)
@commands.command(pass_context=True, hidden=True)
async def multibanre(self, ctx, *, rgx: str):
"""Multi-ban users by regex."""
author = ctx.message.author
msg = "```\nbanned:\n"
toban = [] # because "dictionary changed size during iteration"
for m in self.bot.server.members:
if bool(re.search(rgx, m.name, re.IGNORECASE)):
msg += "{} - {}#{}\n".format(m.id, m.name, m.discriminator)
toban.append(m)
for m in toban:
try:
await self.bot.ban(m)
            except discord.errors.NotFound:
pass
msg += "```"
await self.bot.send_message(author, msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="clear")
async def purge(self, ctx, limit: int):
"""Clears a given number of messages. Staff only."""
try:
await self.bot.purge_from(ctx.message.channel, limit=limit)
msg = "🗑 **Cleared**: {} cleared {} messages in {}".format(ctx.message.author.mention, limit, ctx.message.channel.mention)
await self.bot.send_message(self.bot.modlogs_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="mute")
async def mute(self, ctx, user, *, reason=""):
"""Mutes a user so they can't speak. Staff only."""
try:
member = ctx.message.mentions[0]
await self.add_restriction(member, "Muted")
await self.bot.add_roles(member, self.bot.muted_role)
msg_user = "You were muted!"
if reason != "":
msg_user += " The given reason is: " + reason
try:
await self.bot.send_message(member, msg_user)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
await self.bot.say("{} can no longer speak.".format(member.mention))
msg = "🔇 **Muted**: {} muted {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += "\nPlease add an explanation below. In the future, it is recommended to use `.mute <user> [reason]` as the reason is automatically sent to the user."
await self.bot.send_message(self.bot.modlogs_channel, msg)
# change to permanent mute
if member.id in self.bot.timemutes:
self.bot.timemutes.pop(member.id)
with open("data/timemutes.json", "r") as f:
timemutes = json.load(f)
timemutes.pop(member.id)
with open("data/timemutes.json", "w") as f:
json.dump(timemutes, f)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="timemute")
async def timemute(self, ctx, user, length, *, reason=""):
"""Mutes a user for a limited period of time so they can't speak. Staff only.\n\nLength format: #d#h#m#s"""
try:
member = ctx.message.mentions[0]
await self.add_restriction(member, "Muted")
await self.bot.add_roles(member, self.bot.muted_role)
issuer = ctx.message.author
# thanks Luc#5653
units = {
"d": 86400,
"h": 3600,
"m": 60,
"s": 1
}
seconds = 0
match = re.findall("([0-9]+[smhd])", length) # Thanks to 3dshax server's former bot
if match is None:
return None
for item in match:
seconds += int(item[:-1]) * units[item[-1]]
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=seconds)
unmute_time = timestamp + delta
unmute_time_string = unmute_time.strftime("%Y-%m-%d %H:%M:%S")
with open("data/timemutes.json", "r") as f:
timemutes = json.load(f)
timemutes[member.id] = unmute_time_string
self.bot.timemutes[member.id] = [unmute_time, False] # last variable is "notified", for <=10 minute notifications
with open("data/timemutes.json", "w") as f:
json.dump(timemutes, f)
msg_user = "You were muted!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nThis mute expires {} {}.".format(unmute_time_string, time.tzname[0])
try:
await self.bot.send_message(member, msg_user)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
await self.bot.say("{} can no longer speak.".format(member.mention))
msg = "🔇 **Timed mute**: {} muted {} until {} | {}#{}".format(issuer.mention, member.mention, unmute_time_string, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += "\nPlease add an explanation below. In the future, it is recommended to use `.timemute <user> <length> [reason]` as the reason is automatically sent to the user."
await self.bot.send_message(self.bot.modlogs_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="unmute")
async def unmute(self, ctx, user):
"""Unmutes a user so they can speak. Staff only."""
try:
member = ctx.message.mentions[0]
await self.remove_restriction(member, "Muted")
await self.bot.remove_roles(member, self.bot.muted_role)
await self.bot.say("{} can now speak again.".format(member.mention))
msg = "🔈 **Unmuted**: {} unmuted {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
await self.bot.send_message(self.bot.modlogs_channel, msg)
if member.id in self.bot.timemutes:
self.bot.timemutes.pop(member.id)
with open("data/timemutes.json", "r") as f:
timemutes = json.load(f)
timemutes.pop(member.id)
with open("data/timemutes.json", "w") as f:
json.dump(timemutes, f)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="noembed")
async def noembed(self, ctx, user, *, reason=""):
"""Removes embed permissions from a user. Staff only."""
try:
member = ctx.message.mentions[0]
await self.add_restriction(member, "No-Embed")
await self.bot.add_roles(member, self.bot.noembed_role)
msg_user = "You lost embed and upload permissions!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
try:
await self.bot.send_message(member, msg_user)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
await self.bot.say("{} can no longer embed links or attach files.".format(member.mention))
msg = "🚫 **Removed Embed**: {} removed embed from {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += "\nPlease add an explanation below. In the future, it is recommended to use `.noembed <user> [reason]` as the reason is automatically sent to the user."
await self.bot.send_message(self.bot.modlogs_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="embed")
async def embed(self, ctx, user):
"""Restore embed permissios for a user. Staff only."""
try:
member = ctx.message.mentions[0]
await self.remove_restriction(member, "No-Embed")
await self.bot.remove_roles(member, self.bot.noembed_role)
await self.bot.say("{} can now embed links and attach files again.".format(member.mention))
msg = "⭕️ **Restored Embed**: {} restored embed to {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
await self.bot.send_message(self.bot.modlogs_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.command(pass_context=True, name="takehelp")
async def takehelp(self, ctx, user, *, reason=""):
"""Remove access to help-and-questions. Staff and Helpers only."""
author = ctx.message.author
if (self.bot.helpers_role not in author.roles) and (self.bot.staff_role not in author.roles):
msg = "{} You cannot use this command.".format(author.mention)
await self.bot.say(msg)
return
try:
member = ctx.message.mentions[0]
await self.add_restriction(member, "No-Help")
await self.bot.add_roles(member, self.bot.nohelp_role)
msg_user = "You lost access to help channels!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
try:
await self.bot.send_message(member, msg_user)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
await self.bot.say("{} can no longer access the help channels.".format(member.mention))
msg = "🚫 **Help access removed**: {} removed access to help channels from {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += "\nPlease add an explanation below. In the future, it is recommended to use `.takehelp <user> [reason]` as the reason is automatically sent to the user."
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.helpers_channel, msg)
            # clear any existing timed help restriction so this permanent removal replaces it
if member.id in self.bot.timenohelp:
self.bot.timenohelp.pop(member.id)
with open("data/timenohelp.json", "r") as f:
timenohelp = json.load(f)
timenohelp.pop(member.id)
with open("data/timenohelp.json", "w") as f:
json.dump(timenohelp, f)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.command(pass_context=True, name="givehelp")
async def givehelp(self, ctx, user):
"""Restore access to help-and-questions. Staff and Helpers only."""
author = ctx.message.author
if (self.bot.helpers_role not in author.roles) and (self.bot.staff_role not in author.roles):
msg = "{} You cannot use this command.".format(author.mention)
await self.bot.say(msg)
return
try:
member = ctx.message.mentions[0]
await self.remove_restriction(member, "No-Help")
await self.bot.remove_roles(member, self.bot.nohelp_role)
await self.bot.say("{} can access the help channels again.".format(member.mention))
msg = "⭕️ **Help access restored**: {} restored access to help channels to {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.helpers_channel, msg)
            # clear any pending timed help restriction now that access is restored
if member.id in self.bot.timenohelp:
self.bot.timenohelp.pop(member.id)
with open("data/timenohelp.json", "r") as f:
timenohelp = json.load(f)
timenohelp.pop(member.id)
with open("data/timenohelp.json", "w") as f:
json.dump(timenohelp, f)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.command(pass_context=True, name="timetakehelp")
async def timetakehelp(self, ctx, user, length, *, reason=""):
"""Restricts a user from Assistance Channels for a limited period of time. Staff and Helpers only.\n\nLength format: #d#h#m#s"""
author = ctx.message.author
if (self.bot.helpers_role not in author.roles) and (self.bot.staff_role not in author.roles):
msg = "{} You cannot use this command.".format(author.mention)
await self.bot.say(msg)
return
try:
member = ctx.message.mentions[0]
await self.add_restriction(member, "No-Help")
await self.bot.add_roles(member, self.bot.nohelp_role)
issuer = ctx.message.author
# thanks Luc#5653
units = {
"d": 86400,
"h": 3600,
"m": 60,
"s": 1
}
seconds = 0
match = re.findall("([0-9]+[smhd])", length) # Thanks to 3dshax server's former bot
if match is None:
return None
for item in match:
seconds += int(item[:-1]) * units[item[-1]]
timestamp = datetime.datetime.now()
delta = datetime.timedelta(seconds=seconds)
unnohelp_time = timestamp + delta
unnohelp_time_string = unnohelp_time.strftime("%Y-%m-%d %H:%M:%S")
with open("data/timenohelp.json", "r") as f:
timenohelp = json.load(f)
timenohelp[member.id] = unnohelp_time_string
self.bot.timenohelp[member.id] = [unnohelp_time, False] # last variable is "notified", for <=10 minute notifications
with open("data/timenohelp.json", "w") as f:
json.dump(timenohelp, f)
msg_user = "You lost access to help channels temporarily!"
if reason != "":
msg_user += " The given reason is: " + reason
msg_user += "\n\nIf you feel this was unjustified, you may appeal in <#270890866820775946>."
msg_user += "\n\nThis restriction expires {} {}.".format(unnohelp_time_string, time.tzname[0])
try:
await self.bot.send_message(member, msg_user)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
await self.bot.say("{} can no longer speak in Assistance Channels.".format(member.mention))
msg = "🚫 **Timed No-Help**: {} restricted {} until {} | {}#{}".format(issuer.mention, member.mention, unnohelp_time_string, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += "\nPlease add an explanation below. In the future, it is recommended to use `.timetakehelp <user> <length> [reason]` as the reason is automatically sent to the user."
await self.bot.send_message(self.bot.modlogs_channel, msg)
await self.bot.send_message(self.bot.helpers_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("?? I don't have permission to do this.")
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="probate")
async def probate(self, ctx, user, *, reason=""):
"""Probate a user. Staff only."""
try:
member = ctx.message.mentions[0]
await self.add_restriction(member, "Probation")
await self.bot.add_roles(member, self.bot.probation_role)
msg_user = "You are under probation!"
if reason != "":
msg_user += " The given reason is: " + reason
try:
await self.bot.send_message(member, msg_user)
except discord.errors.Forbidden:
pass # don't fail in case user has DMs disabled for this server, or blocked the bot
await self.bot.say("{} is now in probation.".format(member.mention))
msg = "🚫 **Probated**: {} probated {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
if reason != "":
msg += "\n✏️ __Reason__: " + reason
else:
msg += "\nPlease add an explanation below. In the future, it is recommended to use `.probate <user> [reason]` as the reason is automatically sent to the user."
await self.bot.send_message(self.bot.modlogs_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True, name="unprobate")
async def unprobate(self, ctx, user):
"""Unprobate a user. Staff only."""
try:
member = ctx.message.mentions[0]
await self.remove_restriction(member, "Probation")
await self.bot.remove_roles(member, self.bot.probation_role)
await self.bot.say("{} is out of probation.".format(member.mention))
msg = "⭕️ **Un-probated**: {} un-probated {} | {}#{}".format(ctx.message.author.mention, member.mention, self.bot.escape_name(member.name), self.bot.escape_name(member.discriminator))
await self.bot.send_message(self.bot.modlogs_channel, msg)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True)
async def playing(self, ctx, *gamename):
"""Sets playing message. Staff only."""
try:
await self.bot.change_presence(game=discord.Game(name='{}'.format(" ".join(gamename))))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True)
async def status(self, ctx, status):
"""Sets status. Staff only."""
try:
if status == "online":
await self.bot.change_presence(status=discord.Status.online)
elif status == "offline":
await self.bot.change_presence(status=discord.Status.offline)
elif status == "idle":
await self.bot.change_presence(status=discord.Status.idle)
elif status == "dnd":
await self.bot.change_presence(status=discord.Status.dnd)
elif status == "invisible":
await self.bot.change_presence(status=discord.Status.invisible)
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
@commands.has_permissions(ban_members=True)
@commands.command(pass_context=True, hidden=True)
async def username(self, ctx, *, username):
"""Sets bot name. Staff only."""
try:
await self.bot.edit_profile(username=('{}'.format(username)))
except discord.errors.Forbidden:
await self.bot.say("💢 I don't have permission to do this.")
def setup(bot):
bot.add_cog(Mod(bot))
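
# A small self-contained sketch of the "#d#h#m#s" duration parsing used by timemute and
# timetakehelp above, handy for checking the regex and unit table in isolation.
def _parse_duration(length: str) -> int:
    """Return the number of seconds encoded by a string such as '1d2h30m'."""
    units = {"d": 86400, "h": 3600, "m": 60, "s": 1}
    return sum(int(item[:-1]) * units[item[-1]] for item in re.findall("([0-9]+[smhd])", length))

# Example: _parse_duration("1d2h30m") == 86400 + 7200 + 1800 == 95400.
# The bot would register this cog in the usual discord.py way, e.g.
# bot.load_extension("addons.mod") (extension path assumed from this file's location).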
PhysicsTools/PatExamples/test/analyzePatBTag_cfg.py | ckamtsikis/cmssw | 852 | 6631420 | import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatUtils.bJetOperatingPointsParameters_cfi import *
process = cms.Process("PatBTagAnalyzer")
process.source = cms.Source("PoolSource",
#fileNames = cms.untracked.vstring('file:PATLayer1_Output.fromAOD_full_ttbar.root')
fileNames = cms.untracked.vstring('/store/relval/2008/7/21/RelVal-RelValTTbar-1216579481-IDEAL_V5-2nd/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/CMSSW_2_1_0_pre9-RelVal-1216579481-IDEAL_V5-2nd-unmerged/0000/00BCD825-6E57-DD11-8C1F-000423D98EA8.root')
)
process.MessageLogger = cms.Service("MessageLogger")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = cms.string('IDEAL_V5::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
# PAT Layer 1
process.load("PhysicsTools.PatAlgos.patLayer0_cff") # need to load this
process.load("PhysicsTools.PatAlgos.patLayer1_cff") # even if we run only layer 1
process.TFileService = cms.Service("TFileService",
fileName = cms.string('btagpatanalyzerpy.root')
)
# request a summary at the end of the file
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.PatBTagAnalyzerTC2 = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(True),
tagger = cms.untracked.string('TC2'),
purity = cms.string('Loose'),
discriminator = cms.string('trackCountingHighEffBJetTags'),
maxdiscriminatorcut = cms.untracked.double(30.0),
mindiscriminatorcut = cms.untracked.double(-10.0)
)
)
process.PatBTagAnalyzerTC3 = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('TC3'),
purity = cms.string('Loose'),
discriminator = cms.string('trackCountingHighPurBJetTags'),
maxdiscriminatorcut = cms.untracked.double(30.0),
mindiscriminatorcut = cms.untracked.double(-10.0)
)
)
process.PatBTagAnalyzerTP = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('TP'),
purity = cms.string('Loose'),
discriminator = cms.string('jetProbabilityBJetTags'),
maxdiscriminatorcut = cms.untracked.double(2.6),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerBTP = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('BTP'),
purity = cms.string('Loose'),
discriminator = cms.string('jetBProbabilityBJetTags'),
maxdiscriminatorcut = cms.untracked.double(8.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSSV = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SSV'),
purity = cms.string('Loose'),
discriminator = cms.string('simpleSecondaryVertexBJetTags'),
maxdiscriminatorcut = cms.untracked.double(8.0),
mindiscriminatorcut = cms.untracked.double(0.0)
)
)
process.PatBTagAnalyzerCSV = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('CSV'),
purity = cms.string('Loose'),
discriminator = cms.string('combinedSecondaryVertexBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerMSV = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('MSV'),
purity = cms.string('Loose'),
discriminator = cms.string('combinedSecondaryVertexMVABJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerIPM = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('IPM'),
purity = cms.string('Loose'),
discriminator = cms.string('impactParameterMVABJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSET = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SET'),
purity = cms.string('Loose'),
discriminator = cms.string('softElectronBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSMT = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SMT'),
purity = cms.string('Loose'),
discriminator = cms.string('softMuonBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSMNIPT = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SMNIPT'),
purity = cms.string('Loose'),
discriminator = cms.string('softMuonNoIPBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.p = cms.Path(
process.patLayer0 *
process.patLayer1 *
process.PatBTagAnalyzerTC2 *
process.PatBTagAnalyzerTC3 *
process.PatBTagAnalyzerBTP *
process.PatBTagAnalyzerSSV *
process.PatBTagAnalyzerCSV *
process.PatBTagAnalyzerMSV *
process.PatBTagAnalyzerIPM *
process.PatBTagAnalyzerSET *
process.PatBTagAnalyzerSMT *
process.PatBTagAnalyzerSMNIPT *
process.PatBTagAnalyzerTP
)
| import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatUtils.bJetOperatingPointsParameters_cfi import *
process = cms.Process("PatBTagAnalyzer")
process.source = cms.Source("PoolSource",
#fileNames = cms.untracked.vstring('file:PATLayer1_Output.fromAOD_full_ttbar.root')
fileNames = cms.untracked.vstring('/store/relval/2008/7/21/RelVal-RelValTTbar-1216579481-IDEAL_V5-2nd/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/CMSSW_2_1_0_pre9-RelVal-1216579481-IDEAL_V5-2nd-unmerged/0000/00BCD825-6E57-DD11-8C1F-000423D98EA8.root')
)
process.MessageLogger = cms.Service("MessageLogger")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = cms.string('IDEAL_V5::All')
process.load("Configuration.StandardSequences.MagneticField_cff")
# PAT Layer 1
process.load("PhysicsTools.PatAlgos.patLayer0_cff") # need to load this
process.load("PhysicsTools.PatAlgos.patLayer1_cff") # even if we run only layer 1
process.TFileService = cms.Service("TFileService",
fileName = cms.string('btagpatanalyzerpy.root')
)
# request a summary at the end of the file
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.PatBTagAnalyzerTC2 = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(True),
tagger = cms.untracked.string('TC2'),
purity = cms.string('Loose'),
discriminator = cms.string('trackCountingHighEffBJetTags'),
maxdiscriminatorcut = cms.untracked.double(30.0),
mindiscriminatorcut = cms.untracked.double(-10.0)
)
)
process.PatBTagAnalyzerTC3 = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('TC3'),
purity = cms.string('Loose'),
discriminator = cms.string('trackCountingHighPurBJetTags'),
maxdiscriminatorcut = cms.untracked.double(30.0),
mindiscriminatorcut = cms.untracked.double(-10.0)
)
)
process.PatBTagAnalyzerTP = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('TP'),
purity = cms.string('Loose'),
discriminator = cms.string('jetProbabilityBJetTags'),
maxdiscriminatorcut = cms.untracked.double(2.6),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerBTP = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('BTP'),
purity = cms.string('Loose'),
discriminator = cms.string('jetBProbabilityBJetTags'),
maxdiscriminatorcut = cms.untracked.double(8.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSSV = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SSV'),
purity = cms.string('Loose'),
discriminator = cms.string('simpleSecondaryVertexBJetTags'),
maxdiscriminatorcut = cms.untracked.double(8.0),
mindiscriminatorcut = cms.untracked.double(0.0)
)
)
process.PatBTagAnalyzerCSV = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('CSV'),
purity = cms.string('Loose'),
discriminator = cms.string('combinedSecondaryVertexBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerMSV = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('MSV'),
purity = cms.string('Loose'),
discriminator = cms.string('combinedSecondaryVertexMVABJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerIPM = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('IPM'),
purity = cms.string('Loose'),
discriminator = cms.string('impactParameterMVABJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSET = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SET'),
purity = cms.string('Loose'),
discriminator = cms.string('softElectronBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSMT = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SMT'),
purity = cms.string('Loose'),
discriminator = cms.string('softMuonBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.PatBTagAnalyzerSMNIPT = cms.EDAnalyzer("PatBTagAnalyzer",
BJetOperatingPointsParameters,
jetTag = cms.untracked.InputTag("selectedLayer1Jets"),
BjetTag = cms.PSet(
verbose = cms.untracked.bool(False),
tagger = cms.untracked.string('SMNIPT'),
purity = cms.string('Loose'),
discriminator = cms.string('softMuonNoIPBJetTags'),
maxdiscriminatorcut = cms.untracked.double(1.1),
mindiscriminatorcut = cms.untracked.double(-0.1)
)
)
process.p = cms.Path(
process.patLayer0 *
process.patLayer1 *
process.PatBTagAnalyzerTC2 *
process.PatBTagAnalyzerTC3 *
process.PatBTagAnalyzerBTP *
process.PatBTagAnalyzerSSV *
process.PatBTagAnalyzerCSV *
process.PatBTagAnalyzerMSV *
process.PatBTagAnalyzerIPM *
process.PatBTagAnalyzerSET *
process.PatBTagAnalyzerSMT *
process.PatBTagAnalyzerSMNIPT *
process.PatBTagAnalyzerTP
)
| en | 0.540353 | #fileNames = cms.untracked.vstring('file:PATLayer1_Output.fromAOD_full_ttbar.root') # PAT Layer 1 # need to load this # even if we run only layer 1 # request a summary at the end of the file | 1.668577 | 2 |
soccer/gameplay/fsm.py | Alex-Gurung/robocup-software | 1 | 6631421 |
import logging
from enum import Enum
import graphviz as gv
from typing import Union, Callable
## @brief generic hierarchical state machine class.
#
# states can have substates. If the machine is in a state, then it is also implicitly in that state's parent state
# this basically provides for polymorphism/subclassing of state machines
#
# There are three methods corresponding to each state:
# * on_enter_STATE
# * execute_STATE
# * on_exit_STATE
#
# Subclasses of StateMachine can optionally implement them and they will automatically be called at the appropriate times.
class StateMachine:
def __init__(self, start_state):
# stores all states in the form _state_hierarchy[state] = parent_state
self._state_hierarchy = {}
self._transitions = {}
self._start_state = start_state
self._state = None
@property
    def start_state(self):
return self._start_state
## Resets the FSM back into the start state
def restart(self) -> None:
self.transition(self.start_state)
## Registers a new state (which can optionally be a substate of an existing state)
def add_state(self, state, parent_state=None):
if not isinstance(state, Enum):
raise TypeError("State should be an Enum type")
self._state_hierarchy[state] = parent_state
## Runs the FSM
# checks transition conditions for all edges leading away from the current state
# if one evaluates to true, we transition to it
    # if more than one evaluates to true, we log a warning and take the first one
def spin(self):
s1 = self.state
# call execute_STATENAME
if self.state is not None:
for state in self.ancestors_of_state(self.state) + [self.state]:
method_name = "execute_" + state.name
state_method = None
try:
state_method = getattr(self, method_name)
except AttributeError:
pass
if state_method is not None:
state_method()
if self.state is None:
self.transition(self.start_state)
else:
# transition if an 'event' fires
next_states = []
if self.state in self._transitions:
for next_state, transition in self._transitions[
self.state].items():
if transition['condition']():
next_states += [next_state]
if len(next_states) > 1:
                logging.warning(
                    "Ambiguous fsm transitions from state '" + str(self.state) +
                    "'. The following states are reachable now: " + str(
                        next_states) +
                    "; Proceeding by taking the first option.")
if len(next_states) > 0:
self.transition(next_states[0])
# if a transition occurred during the spin, we'll spin again
# note: this could potentially cause infinite recursion (although it shouldn't)
if s1 != self.state:
StateMachine.spin(self)
# if you add a transition that already exists, the old one will be overwritten
def add_transition(self, from_state, to_state,
condition: Union[bool, Callable],
event_name: str):
        if isinstance(condition, bool):
            # capture the boolean value now; rebinding `condition` to the lambda
            # below would otherwise make the closure return the lambda itself,
            # which is always truthy
            bool_value = condition
            condition = lambda: bool_value
if from_state not in self._transitions:
self._transitions[from_state] = {}
self._transitions[from_state][to_state] = {'condition': condition,
'name': event_name}
# sets @state to the new_state given
# calls 'on_exit_STATENAME()' if it exists
# calls 'on_enter_STATENAME()' if it exists
def transition(self, new_state):
# print("TRANSITION: " + str(self.__class__.__name__) + ": " + str(self.state) + " -> " + str(new_state))
if self.state is not None:
for state in self.ancestors_of_state(self.state) + [self.state]:
if not self.state_is_substate(new_state, state):
method_name = "on_exit_" + state.name
state_method = None
try:
state_method = getattr(self, method_name) # call the transition FROM method if it exists
except AttributeError:
pass
if state_method is not None:
state_method()
for state in self.ancestors_of_state(new_state) + [new_state]:
if not self.state_is_substate(self.state, state):
method_name = "on_enter_" + state.name
state_method = None
try:
state_method = getattr(self, method_name) # call the transition TO method if it exists
except AttributeError:
pass
if state_method is not None:
state_method()
self._state = new_state
# traverses the state hierarchy to see if it's in @state or one of @state's descendent states
def is_in_state(self, state):
return self.state_is_substate(self.state, state)
def state_is_substate(self, state, possible_parent):
ancestor = state
while ancestor is not None:
if possible_parent == ancestor: return True
ancestor = self._state_hierarchy[ancestor]
return False
# looks at the list @ancestors and returns the one that the current state is a descendant of
# returns None if the current state doesn't descend from one in the list
def corresponding_ancestor_state(self, ancestors):
state = self.state
while state is not None:
if state in ancestors:
return state
state = self._state_hierarchy[state]
return None
# returns a list of the ancestors of the given state
# if B is a child state of A and C is a child state of B, ancestors_of_state(C) == [A, B]
# if @state has no ancestors, returns an empty list
def ancestors_of_state(self, state):
ancestors = []
state = self._state_hierarchy[state]
while state is not None:
ancestors.insert(0, state)
state = self._state_hierarchy[state]
return ancestors
# returns a graphviz.Digraph object
def as_graphviz(self):
g = gv.Digraph(self.__class__.__name__, format='png')
cluster_index = 0
subgraphs = {}
subgraphs[None] = g
for state in self._state_hierarchy:
if state not in subgraphs and state in self._state_hierarchy.values(
):
sg = gv.Digraph(
'cluster_' + str(cluster_index),
graph_attr={'label': state.__module__ + "::" + state.name,
'style': 'dotted'})
cluster_index += 1
subgraphs[state] = sg
for state in self._state_hierarchy:
has_children = state in self._state_hierarchy.values()
if not has_children:
enclosing_graph = subgraphs[self._state_hierarchy[state]]
shape = 'diamond' if state == self.start_state else 'ellipse'
enclosing_graph.node(
state.name,
label=state.__module__ + "::" + state.name,
shape=shape)
for state, subgraph in subgraphs.items():
if state is not None:
subgraphs[self._state_hierarchy[state]].subgraph(subgraph)
for start in self._transitions:
for end, event in self._transitions[start].items():
g.edge(start.name,
end.name,
label=event['name'],
decorate='True')
return g
# writes a png file of the graphviz output to the specified location
def write_diagram_png(self, filename: str):
g = self.as_graphviz()
g.render(filename=filename, cleanup=True)
@property
def state(self):
return self._state
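

# --- Minimal usage sketch (illustrative addition, not part of the original module) ---
# Shows the convention described in the class comment above: subclass StateMachine,
# register states, add transitions, and implement on_enter_/execute_/on_exit_ hooks
# by name. The state names and class names here are hypothetical.
class _ExampleStates(Enum):
    waiting = 1
    running = 2


class _ExampleMachine(StateMachine):
    def __init__(self):
        super().__init__(start_state=_ExampleStates.waiting)
        self.add_state(_ExampleStates.waiting)
        self.add_state(_ExampleStates.running)
        # transition fires as soon as its condition callable returns True
        self.add_transition(_ExampleStates.waiting, _ExampleStates.running,
                            lambda: True, 'start requested')

    def on_enter_running(self):
        logging.info("entered running")

    def execute_running(self):
        # called on every spin() while in the running state
        pass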
| import logging
from enum import Enum
import graphviz as gv
from typing import Union, Callable
## @brief generic hierarchical state machine class.
#
# states can have substates. If the machine is in a state, then it is also implicitly in that state's parent state
# this basically provides for polymorphism/subclassing of state machines
#
# There are three methods corresponding to each state:
# * on_enter_STATE
# * execute_STATE
# * on_exit_STATE
#
# Subclasses of StateMachine can optionally implement them and they will automatically be called at the appropriate times.
class StateMachine:
def __init__(self, start_state):
# stores all states in the form _state_hierarchy[state] = parent_state
self._state_hierarchy = {}
self._transitions = {}
self._start_state = start_state
self._state = None
@property
    def start_state(self):
return self._start_state
## Resets the FSM back into the start state
def restart(self) -> None:
self.transition(self.start_state)
## Registers a new state (which can optionally be a substate of an existing state)
def add_state(self, state, parent_state=None):
if not isinstance(state, Enum):
raise TypeError("State should be an Enum type")
self._state_hierarchy[state] = parent_state
## Runs the FSM
# checks transition conditions for all edges leading away from the current state
# if one evaluates to true, we transition to it
    # if more than one evaluates to true, we log a warning and take the first one
def spin(self):
s1 = self.state
# call execute_STATENAME
if self.state is not None:
for state in self.ancestors_of_state(self.state) + [self.state]:
method_name = "execute_" + state.name
state_method = None
try:
state_method = getattr(self, method_name)
except AttributeError:
pass
if state_method is not None:
state_method()
if self.state is None:
self.transition(self.start_state)
else:
# transition if an 'event' fires
next_states = []
if self.state in self._transitions:
for next_state, transition in self._transitions[
self.state].items():
if transition['condition']():
next_states += [next_state]
if len(next_states) > 1:
                logging.warning(
                    "Ambiguous fsm transitions from state '" + str(self.state) +
                    "'. The following states are reachable now: " + str(
                        next_states) +
                    "; Proceeding by taking the first option.")
if len(next_states) > 0:
self.transition(next_states[0])
# if a transition occurred during the spin, we'll spin again
# note: this could potentially cause infinite recursion (although it shouldn't)
if s1 != self.state:
StateMachine.spin(self)
# if you add a transition that already exists, the old one will be overwritten
def add_transition(self, from_state, to_state,
condition: Union[bool, Callable],
event_name: str):
        if isinstance(condition, bool):
            # capture the boolean value now; rebinding `condition` to the lambda
            # below would otherwise make the closure return the lambda itself,
            # which is always truthy
            bool_value = condition
            condition = lambda: bool_value
if from_state not in self._transitions:
self._transitions[from_state] = {}
self._transitions[from_state][to_state] = {'condition': condition,
'name': event_name}
# sets @state to the new_state given
# calls 'on_exit_STATENAME()' if it exists
# calls 'on_enter_STATENAME()' if it exists
def transition(self, new_state):
# print("TRANSITION: " + str(self.__class__.__name__) + ": " + str(self.state) + " -> " + str(new_state))
if self.state is not None:
for state in self.ancestors_of_state(self.state) + [self.state]:
if not self.state_is_substate(new_state, state):
method_name = "on_exit_" + state.name
state_method = None
try:
state_method = getattr(self, method_name) # call the transition FROM method if it exists
except AttributeError:
pass
if state_method is not None:
state_method()
for state in self.ancestors_of_state(new_state) + [new_state]:
if not self.state_is_substate(self.state, state):
method_name = "on_enter_" + state.name
state_method = None
try:
state_method = getattr(self, method_name) # call the transition TO method if it exists
except AttributeError:
pass
if state_method is not None:
state_method()
self._state = new_state
# traverses the state hierarchy to see if it's in @state or one of @state's descendent states
def is_in_state(self, state):
return self.state_is_substate(self.state, state)
def state_is_substate(self, state, possible_parent):
ancestor = state
while ancestor is not None:
if possible_parent == ancestor: return True
ancestor = self._state_hierarchy[ancestor]
return False
# looks at the list @ancestors and returns the one that the current state is a descendant of
# returns None if the current state doesn't descend from one in the list
def corresponding_ancestor_state(self, ancestors):
state = self.state
while state is not None:
if state in ancestors:
return state
state = self._state_hierarchy[state]
return None
# returns a list of the ancestors of the given state
# if B is a child state of A and C is a child state of B, ancestors_of_state(C) == [A, B]
# if @state has no ancestors, returns an empty list
def ancestors_of_state(self, state):
ancestors = []
state = self._state_hierarchy[state]
while state is not None:
ancestors.insert(0, state)
state = self._state_hierarchy[state]
return ancestors
# returns a graphviz.Digraph object
def as_graphviz(self):
g = gv.Digraph(self.__class__.__name__, format='png')
cluster_index = 0
subgraphs = {}
subgraphs[None] = g
for state in self._state_hierarchy:
if state not in subgraphs and state in self._state_hierarchy.values(
):
sg = gv.Digraph(
'cluster_' + str(cluster_index),
graph_attr={'label': state.__module__ + "::" + state.name,
'style': 'dotted'})
cluster_index += 1
subgraphs[state] = sg
for state in self._state_hierarchy:
has_children = state in self._state_hierarchy.values()
if not has_children:
enclosing_graph = subgraphs[self._state_hierarchy[state]]
shape = 'diamond' if state == self.start_state else 'ellipse'
enclosing_graph.node(
state.name,
label=state.__module__ + "::" + state.name,
shape=shape)
for state, subgraph in subgraphs.items():
if state is not None:
subgraphs[self._state_hierarchy[state]].subgraph(subgraph)
for start in self._transitions:
for end, event in self._transitions[start].items():
g.edge(start.name,
end.name,
label=event['name'],
decorate='True')
return g
# writes a png file of the graphviz output to the specified location
def write_diagram_png(self, filename: str):
g = self.as_graphviz()
g.render(filename=filename, cleanup=True)
@property
def state(self):
return self._state | en | 0.815515 | ## @brief generic hierarchial state machine class. # # states can have substates. If the machine is in a state, then it is also implicitly in that state's parent state # this basically provides for polymorphism/subclassing of state machines # # There are three methods corresponding to each state: # * on_enter_STATE # * execute_STATE # * on_exit_STATE # # Subclasses of StateMachine can optionally implement them and they will automatically be called at the appropriate times. # stores all states in the form _state_hierarchy[state] = parent_state ## Resets the FSM back into the start state ## Registers a new state (which can optionally be a substate of an existing state) ## Runs the FSM # checks transition conditions for all edges leading away from the current state # if one evaluates to true, we transition to it # if more than one evaluates to true, we throw a RuntimeError # call execute_STATENAME # transition if an 'event' fires # if a transition occurred during the spin, we'll spin again # note: this could potentially cause infinite recursion (although it shouldn't) # if you add a transition that already exists, the old one will be overwritten # sets @state to the new_state given # calls 'on_exit_STATENAME()' if it exists # calls 'on_enter_STATENAME()' if it exists # print("TRANSITION: " + str(self.__class__.__name__) + ": " + str(self.state) + " -> " + str(new_state)) # call the transition FROM method if it exists # call the transition TO method if it exists # traverses the state hierarchy to see if it's in @state or one of @state's descendent states # looks at the list @ancestors and returns the one that the current state is a descendant of # returns None if the current state doesn't descend from one in the list # returns a list of the ancestors of the given state # if B is a child state of A and C is a child state of B, ancestors_of_state(C) == [A, B] # if @state has no ancestors, returns an empty list # returns a graphviz.Digraph object # writes a png file of the graphviz output to the specified location | 2.805237 | 3 |
gmpm.py | eyalbetzalel/pytorch-generative-v6 | 0 | 6631422 | import h5py
import numpy as np
import os
def load_h5_dataset(directory):
print(" --------------------------------- ")
print("Start loading Datasat from H5DF files...")
data = []
flagOneFile = 0
for filename in os.listdir(directory):
if flagOneFile:
break
if filename.endswith(".h5"):
            with h5py.File(os.path.join(directory, filename), "r") as f:
a_group_key = list(f.keys())[0]
# Get the data
temp = list(f[a_group_key])
data.append(temp[1:])
flagOneFile = 0
continue
else:
continue
data_flat = [item for sublist in data for item in sublist]
data_flat = np.stack(data_flat, axis=0)
    percent_train_test_split = 0.7
    train = data_flat[:int(np.floor(percent_train_test_split * data_flat.shape[0])), :]
    test = data_flat[int(np.floor(percent_train_test_split * data_flat.shape[0])) + 1:, :]
    if not os.path.isfile('test_imagegpt.h5'):
        print("Saving HDF5 files...")
test_h5 = h5py.File('test_imagegpt.h5', 'w')
test_h5.create_dataset('test', data=test)
train_h5 = h5py.File('train_imagegpt.h5', 'w')
train_h5.create_dataset('train', data=train)
print(" --------------------------------- ")
print("Finish loading Datasat from H5DF files...")
return train, test
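

# Illustrative helper (added sketch, not part of the original script): reads the
# train/test splits written by load_h5_dataset back from disk. It assumes the file
# names and dataset keys ('train'/'test') used above; nothing below calls it.
def load_saved_splits(train_path='train_imagegpt.h5', test_path='test_imagegpt.h5'):
    with h5py.File(train_path, 'r') as f:
        train_back = f['train'][:]
    with h5py.File(test_path, 'r') as f:
        test_back = f['test'][:]
    return train_back, test_back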
directory = "./"
train, test = load_h5_dataset(directory) | import h5py
import numpy as np
import os
def load_h5_dataset(directory):
print(" --------------------------------- ")
print("Start loading Datasat from H5DF files...")
data = []
flagOneFile = 0
for filename in os.listdir(directory):
if flagOneFile:
break
if filename.endswith(".h5"):
            with h5py.File(os.path.join(directory, filename), "r") as f:
a_group_key = list(f.keys())[0]
# Get the data
temp = list(f[a_group_key])
data.append(temp[1:])
flagOneFile = 0
continue
else:
continue
data_flat = [item for sublist in data for item in sublist]
data_flat = np.stack(data_flat, axis=0)
    percent_train_test_split = 0.7
    train = data_flat[:int(np.floor(percent_train_test_split * data_flat.shape[0])), :]
    test = data_flat[int(np.floor(percent_train_test_split * data_flat.shape[0])) + 1:, :]
    if not os.path.isfile('test_imagegpt.h5'):
        print("Saving HDF5 files...")
test_h5 = h5py.File('test_imagegpt.h5', 'w')
test_h5.create_dataset('test', data=test)
train_h5 = h5py.File('train_imagegpt.h5', 'w')
train_h5.create_dataset('train', data=train)
print(" --------------------------------- ")
print("Finish loading Datasat from H5DF files...")
return train, test
directory = "./"
train, test = load_h5_dataset(directory) | en | 0.33085 | # Get the data | 2.466875 | 2 |
XML_parser.py | arkasarius/python-IMDB-TFG | 1 | 6631423 | import xml.etree.ElementTree as ET
import os
import json
import functions as fun
tree = ET.parse('The_Matrix.xml')
root = tree.getroot()
# For every detected face, append its embedding vector to a per-person text file.
for face in root.iter('Face'):
    name = int(face.attrib.get('person_id'))
    f = open("thematrix/" + str(name) + ".txt", "a+")
    # Strip the list brackets and commas so the embedding is stored as space-separated numbers.
    n = face.attrib.get('face_embedding').replace("[", "").replace(",", "").replace("]", "")
    f.write(n + '\n')
    f.close()
| import xml.etree.ElementTree as ET
import os
import json
import functions as fun
tree = ET.parse('The_Matrix.xml')
root = tree.getroot()
# For every detected face, append its embedding vector to a per-person text file.
for face in root.iter('Face'):
    name = int(face.attrib.get('person_id'))
    f = open("thematrix/" + str(name) + ".txt", "a+")
    # Strip the list brackets and commas so the embedding is stored as space-separated numbers.
    n = face.attrib.get('face_embedding').replace("[", "").replace(",", "").replace("]", "")
    f.write(n + '\n')
    f.close()
| none | 1 | 2.687968 | 3 |
|
plenum/test/pool_transactions/test_nodes_ha_change_back.py | steptan/indy-plenum | 0 | 6631424 | from plenum.common.constants import ALIAS, NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT
from plenum.test.pool_transactions.helper import updateNodeData
from plenum.test.test_node import TestNode, checkNodesConnected
from stp_core.network.port_dispenser import genHa
from plenum.common.config_helper import PNodeConfigHelper
def testChangeNodeHaBack(looper, txnPoolNodeSet, tdir, tconf,
steward1, stewardWallet, nodeThetaAdded):
"""
The case:
    The Node HA is updated with some HA (let's call it the 'correct' HA).
    Then the Steward makes a mistake and sends a NODE txn with a different
    HA (the 'wrong' HA). The Steward then restores the 'correct' HA by
    sending yet another NODE txn.
"""
steward, stewardWallet, theta = nodeThetaAdded
clientHa = theta.cliNodeReg['ThetaC'] # use the same client HA
# do all exercises without the Node
theta.stop()
looper.removeProdable(name=theta.name)
# step 1: set 'correct' HA
correctNodeHa = genHa(1)
op = {
ALIAS: theta.name,
NODE_IP: correctNodeHa.host,
NODE_PORT: correctNodeHa.port,
CLIENT_IP: clientHa.host,
CLIENT_PORT: clientHa.port,
}
updateNodeData(looper, steward, stewardWallet, theta,
op)
# step 2: set 'wrong' HA
wrongNodeHa = genHa(1)
op.update({NODE_IP: wrongNodeHa.host, NODE_PORT: wrongNodeHa.port})
updateNodeData(looper, steward, stewardWallet, theta,
op)
# step 3: set 'correct' HA back
op.update({NODE_IP: correctNodeHa.host, NODE_PORT: correctNodeHa.port})
updateNodeData(looper, steward, stewardWallet, theta,
op)
    # In order to save time, the pool connection is not maintained
    # during the steps; only the final result is checked.
config_helper = PNodeConfigHelper(theta.name, tconf, chroot=tdir)
restartedNode = TestNode(theta.name,
config_helper=config_helper,
config=tconf, ha=correctNodeHa, cliha=clientHa)
looper.add(restartedNode)
txnPoolNodeSet[-1] = restartedNode
looper.run(checkNodesConnected(txnPoolNodeSet))
# check Theta HA
for n in txnPoolNodeSet:
assert n.nodeReg['Theta'] == correctNodeHa
| from plenum.common.constants import ALIAS, NODE_IP, NODE_PORT, CLIENT_IP, CLIENT_PORT
from plenum.test.pool_transactions.helper import updateNodeData
from plenum.test.test_node import TestNode, checkNodesConnected
from stp_core.network.port_dispenser import genHa
from plenum.common.config_helper import PNodeConfigHelper
def testChangeNodeHaBack(looper, txnPoolNodeSet, tdir, tconf,
steward1, stewardWallet, nodeThetaAdded):
"""
The case:
    The Node HA is updated with some HA (let's call it the 'correct' HA).
    Then the Steward makes a mistake and sends a NODE txn with a different
    HA (the 'wrong' HA). The Steward then restores the 'correct' HA by
    sending yet another NODE txn.
"""
steward, stewardWallet, theta = nodeThetaAdded
clientHa = theta.cliNodeReg['ThetaC'] # use the same client HA
# do all exercises without the Node
theta.stop()
looper.removeProdable(name=theta.name)
# step 1: set 'correct' HA
correctNodeHa = genHa(1)
op = {
ALIAS: theta.name,
NODE_IP: correctNodeHa.host,
NODE_PORT: correctNodeHa.port,
CLIENT_IP: clientHa.host,
CLIENT_PORT: clientHa.port,
}
updateNodeData(looper, steward, stewardWallet, theta,
op)
# step 2: set 'wrong' HA
wrongNodeHa = genHa(1)
op.update({NODE_IP: wrongNodeHa.host, NODE_PORT: wrongNodeHa.port})
updateNodeData(looper, steward, stewardWallet, theta,
op)
# step 3: set 'correct' HA back
op.update({NODE_IP: correctNodeHa.host, NODE_PORT: correctNodeHa.port})
updateNodeData(looper, steward, stewardWallet, theta,
op)
    # In order to save time, the pool connection is not maintained
    # during the steps; only the final result is checked.
config_helper = PNodeConfigHelper(theta.name, tconf, chroot=tdir)
restartedNode = TestNode(theta.name,
config_helper=config_helper,
config=tconf, ha=correctNodeHa, cliha=clientHa)
looper.add(restartedNode)
txnPoolNodeSet[-1] = restartedNode
looper.run(checkNodesConnected(txnPoolNodeSet))
# check Theta HA
for n in txnPoolNodeSet:
assert n.nodeReg['Theta'] == correctNodeHa
| en | 0.781484 | The case: The Node HA is updated with some HA (let's name it 'correct' HA). Then the Steward makes a mistake and sends the NODE txn with other HA ('wrong' HA). The Steward replaces back 'wrong' HA by 'correct' HA sending yet another one NODE txn. # use the same client HA # do all exercises without the Node # step 1: set 'correct' HA # step 2: set 'wrong' HA # step 3: set 'correct' HA back # In order to save the time the pool connection is not maintaining # during the steps, only the final result is checked. # check Theta HA | 1.883327 | 2 |
Analyze_other_models.py | panda0881/Selectional_Preference | 0 | 6631425 |
import os
import json
from scipy.stats import spearmanr
def analyze_model(model_name):
print('We are working on model:', model_name)
tmp_dobj_scores = list()
with open('Other_model_result/' + model_name + '_verb_dobj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_scores.append(0)
else:
tmp_dobj_scores.append(float(words[2]))
confident_dobj_annotation = list()
confident_dobj_scores = list()
for i in dobj_confident_position:
confident_dobj_annotation.append(dobj_annotations[i])
confident_dobj_scores.append(tmp_dobj_scores[i])
print('dobj:', spearmanr(confident_dobj_annotation, confident_dobj_scores)[0])
tmp_nsubj_scores = list()
with open('Other_model_result/' + model_name + '_verb_nsubj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_scores.append(0)
else:
tmp_nsubj_scores.append(float(words[2]))
confident_nsubj_annotation = list()
confident_nsubj_scores = list()
for i in nsubj_confident_position:
# if tmp_nsubj_scores[i] == 0:
# continue
confident_nsubj_annotation.append(nsubj_annotations[i])
confident_nsubj_scores.append(tmp_nsubj_scores[i])
print('nsubj:', spearmanr(confident_nsubj_annotation, confident_nsubj_scores)[0])
tmp_amod_scores = list()
with open('Other_model_result/' + model_name + '_noun_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_amod_scores.append(0)
else:
tmp_amod_scores.append(float(words[2]))
confident_amod_annotation = list()
confident_amod_scores = list()
for i in amod_confident_position:
confident_amod_annotation.append(amod_annotations[i])
confident_amod_scores.append(tmp_amod_scores[i])
print('amod:', spearmanr(confident_amod_annotation, confident_amod_scores)[0])
tmp_dobj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_dobj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_dobj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_amod_scores.append(0)
else:
tmp_dobj_amod_scores.append(float(words[2]))
confident_dobj_amod_annotation = list()
confident_dobj_amod_scores = list()
for i in dobj_amod_confident_position:
confident_dobj_amod_annotation.append(dobj_amod_annotations[i])
confident_dobj_amod_scores.append(tmp_dobj_amod_scores[i])
print('dobj_amod:', spearmanr(confident_dobj_amod_annotation, confident_dobj_amod_scores)[0])
else:
print('dobj_amod: -')
tmp_nsubj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_nsubj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_nsubj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_amod_scores.append(0)
else:
tmp_nsubj_amod_scores.append(float(words[2]))
confident_nsubj_amod_annotation = list()
confident_nsubj_amod_scores = list()
for i in nsubj_amod_confident_position:
confident_nsubj_amod_annotation.append(nsubj_amod_annotations[i])
confident_nsubj_amod_scores.append(tmp_nsubj_amod_scores[i])
print('nsubj_amod:', spearmanr(confident_nsubj_amod_annotation, confident_nsubj_amod_scores)[0])
else:
print('nsubj_amod: -')
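

# Added sketch (not part of the original script): the score-file parsing above is
# repeated for every relation; a small helper like this captures the shared pattern.
# The function name is hypothetical and nothing in this script calls it.
def read_model_scores(model_name, relation):
    scores = list()
    with open('Other_model_result/' + model_name + '_' + relation + '_result', 'r') as f:
        for line in f:
            words = line[:-1].split('\t')
            # 'NAN' entries are mapped to 0, matching the handling above
            scores.append(0 if words[2] == 'NAN' else float(words[2]))
    return scores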
def analyze_model_by_pair(model_name):
print('We are working on model:', model_name)
tmp_dobj_scores = list()
with open('Other_model_result/' + model_name + '_verb_dobj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_scores.append(0)
else:
tmp_dobj_scores.append(float(words[2]))
confident_dobj_annotation = list()
confident_dobj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in dobj_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_annotation.append(tmp_annotation)
confident_dobj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_annotations[i])
tmp_score.append(tmp_dobj_scores[i])
spearmans = list()
for i in range(len(confident_dobj_annotation)):
        tmp_spearman = spearmanr(confident_dobj_annotation[i], confident_dobj_scores[i])[0]
        # spearmanr yields NaN for constant inputs; NaN fails this comparison, so those pairs are skipped
        if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj:', sum(spearmans)/len(spearmans))
tmp_nsubj_scores = list()
with open('Other_model_result/' + model_name + '_verb_nsubj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_scores.append(0)
else:
tmp_nsubj_scores.append(float(words[2]))
confident_nsubj_annotation = list()
confident_nsubj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in nsubj_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_annotation.append(tmp_annotation)
confident_nsubj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_annotations[i])
tmp_score.append(tmp_nsubj_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_annotation)):
tmp_spearman = spearmanr(confident_nsubj_annotation[i], confident_nsubj_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj:', sum(spearmans)/len(spearmans))
tmp_amod_scores = list()
with open('Other_model_result/' + model_name + '_noun_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_amod_scores.append(0)
else:
tmp_amod_scores.append(float(words[2]))
confident_amod_annotation = list()
confident_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in amod_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_amod_annotation.append(tmp_annotation)
confident_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(amod_annotations[i])
tmp_score.append(tmp_amod_scores[i])
spearmans = list()
for i in range(len(confident_amod_annotation)):
tmp_spearman = spearmanr(confident_amod_annotation[i], confident_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('amod:', sum(spearmans)/len(spearmans))
tmp_dobj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_dobj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_dobj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_amod_scores.append(0)
else:
tmp_dobj_amod_scores.append(float(words[2]))
confident_dobj_amod_annotation = list()
confident_dobj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in dobj_amod_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_amod_annotation.append(tmp_annotation)
confident_dobj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_amod_annotations[i])
tmp_score.append(tmp_dobj_amod_scores[i])
spearmans = list()
for i in range(len(confident_dobj_amod_annotation)):
tmp_spearman = spearmanr(confident_dobj_amod_annotation[i], confident_dobj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj_amod:', sum(spearmans)/len(spearmans))
else:
print('dobj_amod: -')
tmp_nsubj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_nsubj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_nsubj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_amod_scores.append(0)
else:
tmp_nsubj_amod_scores.append(float(words[2]))
confident_nsubj_amod_annotation = list()
confident_nsubj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in nsubj_amod_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_amod_annotation.append(tmp_annotation)
confident_nsubj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_amod_annotations[i])
tmp_score.append(tmp_nsubj_amod_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_amod_annotation)):
tmp_spearman = spearmanr(confident_nsubj_amod_annotation[i], confident_nsubj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj_amod:', sum(spearmans)/len(spearmans))
else:
print('nsubj_amod: -')
def analyze_model_by_pair_all(model_name):
print('We are working on model:', model_name)
tmp_dobj_scores = list()
with open('Other_model_result/' + model_name + '_verb_dobj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_scores.append(0)
else:
tmp_dobj_scores.append(float(words[2]))
confident_dobj_annotation = list()
confident_dobj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_annotation.append(tmp_annotation)
confident_dobj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_annotations[i])
tmp_score.append(tmp_dobj_scores[i])
spearmans = list()
for i in range(len(confident_dobj_annotation)):
tmp_spearman = spearmanr(confident_dobj_annotation[i], confident_dobj_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj:', sum(spearmans)/len(spearmans))
tmp_nsubj_scores = list()
with open('Other_model_result/' + model_name + '_verb_nsubj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_scores.append(0)
else:
tmp_nsubj_scores.append(float(words[2]))
confident_nsubj_annotation = list()
confident_nsubj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_annotation.append(tmp_annotation)
confident_nsubj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_annotations[i])
tmp_score.append(tmp_nsubj_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_annotation)):
tmp_spearman = spearmanr(confident_nsubj_annotation[i], confident_nsubj_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj:', sum(spearmans)/len(spearmans))
tmp_amod_scores = list()
with open('Other_model_result/' + model_name + '_noun_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_amod_scores.append(0)
else:
tmp_amod_scores.append(float(words[2]))
confident_amod_annotation = list()
confident_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_amod_annotation.append(tmp_annotation)
confident_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(amod_annotations[i])
tmp_score.append(tmp_amod_scores[i])
spearmans = list()
for i in range(len(confident_amod_annotation)):
tmp_spearman = spearmanr(confident_amod_annotation[i], confident_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('amod:', sum(spearmans)/len(spearmans))
tmp_dobj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_dobj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_dobj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_amod_scores.append(0)
else:
tmp_dobj_amod_scores.append(float(words[2]))
confident_dobj_amod_annotation = list()
confident_dobj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_amod_annotation.append(tmp_annotation)
confident_dobj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_amod_annotations[i])
tmp_score.append(tmp_dobj_amod_scores[i])
spearmans = list()
for i in range(len(confident_dobj_amod_annotation)):
tmp_spearman = spearmanr(confident_dobj_amod_annotation[i], confident_dobj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj_amod:', sum(spearmans)/len(spearmans))
else:
print('dobj_amod: -')
tmp_nsubj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_nsubj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_nsubj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_amod_scores.append(0)
else:
tmp_nsubj_amod_scores.append(float(words[2]))
confident_nsubj_amod_annotation = list()
confident_nsubj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_amod_annotation.append(tmp_annotation)
confident_nsubj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_amod_annotations[i])
tmp_score.append(tmp_nsubj_amod_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_amod_annotation)):
tmp_spearman = spearmanr(confident_nsubj_amod_annotation[i], confident_nsubj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj_amod:', sum(spearmans)/len(spearmans))
else:
print('nsubj_amod: -')
with open('confident_pairs.json', 'r') as f:
confident_pairs = json.load(f)
with open('difficult_pairs.json', 'r') as f:
difficult_pairs = json.load(f)
dobj_annotations = list()
dobj_confident_position = list()
with open('dobj_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
dobj_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['dobj']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
dobj_confident_position.append((p_pos-1)*4+a_pos-1)
dobj_confident_position.sort()
nsubj_annotations = list()
nsubj_confident_position = list()
with open('nsubj_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
nsubj_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['nsubj']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
nsubj_confident_position.append((p_pos-1)*4+a_pos-1)
nsubj_confident_position.sort()
amod_annotations = list()
amod_confident_position = list()
with open('amod_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
amod_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['amod']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('n')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
amod_confident_position.append((p_pos-1)*4+a_pos-1)
amod_confident_position.sort()
dobj_amod_annotations = list()
dobj_amod_confident_position = list()
with open('dobj_amod_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
dobj_amod_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['dobj_amod']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
dobj_amod_confident_position.append((p_pos-1)*4+a_pos-1)
dobj_amod_confident_position.sort()
nsubj_amod_annotations = list()
nsubj_amod_confident_position = list()
with open('nsubj_amod_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
nsubj_amod_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['nsubj_amod']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
nsubj_amod_confident_position.append((p_pos-1)*4+a_pos-1)
nsubj_amod_confident_position.sort()
# analyze_model('depemb')
# analyze_model('word2vec')
# analyze_model('glove')
# analyze_model('depcontext')
#
# print('')
# analyze_model('wiki_pp')
# analyze_model('yelp_pp')
# analyze_model('nyt_pp')
# print('')
# analyze_model('wiki_ds')
# analyze_model('yelp_ds')
# analyze_model('nyt_ds')
# print('')
# analyze_model('wiki')
# analyze_model('yelp')
# analyze_model('nyt')
# print('')
# analyze_model('filter_wiki')
# analyze_model('filter_yelp')
# analyze_model('filter_nyt')
# print('')
# print('')
# print('')
#
# analyze_model_by_pair('word2vec')
# analyze_model_by_pair('glove')
# analyze_model_by_pair('depcontext')
#
# print('')
# analyze_model_by_pair('wiki_pp')
# analyze_model_by_pair('yelp_pp')
# analyze_model_by_pair('nyt_pp')
# print('')
# analyze_model_by_pair('wiki_ds')
# analyze_model_by_pair('yelp_ds')
# analyze_model_by_pair('nyt_ds')
# print('')
# analyze_model_by_pair('wiki')
# analyze_model_by_pair('yelp')
# analyze_model_by_pair('nyt')
# print('')
# analyze_model_by_pair('filter_wiki')
# analyze_model_by_pair('filter_yelp')
# analyze_model_by_pair('filter_nyt')
print('')
print('')
print('')
analyze_model_by_pair_all('word2vec')
analyze_model_by_pair_all('glove')
analyze_model_by_pair_all('depcontext')
print('')
analyze_model_by_pair_all('wiki_pp')
analyze_model_by_pair_all('yelp_pp')
analyze_model_by_pair_all('nyt_pp')
print('')
analyze_model_by_pair_all('wiki_ds')
analyze_model_by_pair_all('yelp_ds')
analyze_model_by_pair_all('nyt_ds')
print('')
analyze_model_by_pair_all('wiki')
analyze_model_by_pair_all('yelp')
analyze_model_by_pair_all('nyt')
print('')
analyze_model_by_pair_all('filter_wiki')
analyze_model_by_pair_all('filter_yelp')
analyze_model_by_pair_all('filter_nyt')
| import os
import json
from scipy.stats import spearmanr
def analyze_model(model_name):
print('We are working on model:', model_name)
tmp_dobj_scores = list()
with open('Other_model_result/' + model_name + '_verb_dobj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_scores.append(0)
else:
tmp_dobj_scores.append(float(words[2]))
confident_dobj_annotation = list()
confident_dobj_scores = list()
for i in dobj_confident_position:
confident_dobj_annotation.append(dobj_annotations[i])
confident_dobj_scores.append(tmp_dobj_scores[i])
print('dobj:', spearmanr(confident_dobj_annotation, confident_dobj_scores)[0])
tmp_nsubj_scores = list()
with open('Other_model_result/' + model_name + '_verb_nsubj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_scores.append(0)
else:
tmp_nsubj_scores.append(float(words[2]))
confident_nsubj_annotation = list()
confident_nsubj_scores = list()
for i in nsubj_confident_position:
# if tmp_nsubj_scores[i] == 0:
# continue
confident_nsubj_annotation.append(nsubj_annotations[i])
confident_nsubj_scores.append(tmp_nsubj_scores[i])
print('nsubj:', spearmanr(confident_nsubj_annotation, confident_nsubj_scores)[0])
tmp_amod_scores = list()
with open('Other_model_result/' + model_name + '_noun_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_amod_scores.append(0)
else:
tmp_amod_scores.append(float(words[2]))
confident_amod_annotation = list()
confident_amod_scores = list()
for i in amod_confident_position:
confident_amod_annotation.append(amod_annotations[i])
confident_amod_scores.append(tmp_amod_scores[i])
print('amod:', spearmanr(confident_amod_annotation, confident_amod_scores)[0])
tmp_dobj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_dobj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_dobj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_amod_scores.append(0)
else:
tmp_dobj_amod_scores.append(float(words[2]))
confident_dobj_amod_annotation = list()
confident_dobj_amod_scores = list()
for i in dobj_amod_confident_position:
confident_dobj_amod_annotation.append(dobj_amod_annotations[i])
confident_dobj_amod_scores.append(tmp_dobj_amod_scores[i])
print('dobj_amod:', spearmanr(confident_dobj_amod_annotation, confident_dobj_amod_scores)[0])
else:
print('dobj_amod: -')
tmp_nsubj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_nsubj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_nsubj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_amod_scores.append(0)
else:
tmp_nsubj_amod_scores.append(float(words[2]))
confident_nsubj_amod_annotation = list()
confident_nsubj_amod_scores = list()
for i in nsubj_amod_confident_position:
confident_nsubj_amod_annotation.append(nsubj_amod_annotations[i])
confident_nsubj_amod_scores.append(tmp_nsubj_amod_scores[i])
print('nsubj_amod:', spearmanr(confident_nsubj_amod_annotation, confident_nsubj_amod_scores)[0])
else:
print('nsubj_amod: -')
def analyze_model_by_pair(model_name):
print('We are working on model:', model_name)
tmp_dobj_scores = list()
with open('Other_model_result/' + model_name + '_verb_dobj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_scores.append(0)
else:
tmp_dobj_scores.append(float(words[2]))
confident_dobj_annotation = list()
confident_dobj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in dobj_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_annotation.append(tmp_annotation)
confident_dobj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_annotations[i])
tmp_score.append(tmp_dobj_scores[i])
spearmans = list()
for i in range(len(confident_dobj_annotation)):
tmp_spearman = spearmanr(confident_dobj_annotation[i], confident_dobj_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj:', sum(spearmans)/len(spearmans))
tmp_nsubj_scores = list()
with open('Other_model_result/' + model_name + '_verb_nsubj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_scores.append(0)
else:
tmp_nsubj_scores.append(float(words[2]))
confident_nsubj_annotation = list()
confident_nsubj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in nsubj_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_annotation.append(tmp_annotation)
confident_nsubj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_annotations[i])
tmp_score.append(tmp_nsubj_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_annotation)):
tmp_spearman = spearmanr(confident_nsubj_annotation[i], confident_nsubj_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj:', sum(spearmans)/len(spearmans))
tmp_amod_scores = list()
with open('Other_model_result/' + model_name + '_noun_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_amod_scores.append(0)
else:
tmp_amod_scores.append(float(words[2]))
confident_amod_annotation = list()
confident_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in amod_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_amod_annotation.append(tmp_annotation)
confident_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(amod_annotations[i])
tmp_score.append(tmp_amod_scores[i])
spearmans = list()
for i in range(len(confident_amod_annotation)):
tmp_spearman = spearmanr(confident_amod_annotation[i], confident_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('amod:', sum(spearmans)/len(spearmans))
tmp_dobj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_dobj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_dobj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_amod_scores.append(0)
else:
tmp_dobj_amod_scores.append(float(words[2]))
confident_dobj_amod_annotation = list()
confident_dobj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in dobj_amod_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_amod_annotation.append(tmp_annotation)
confident_dobj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_amod_annotations[i])
tmp_score.append(tmp_dobj_amod_scores[i])
spearmans = list()
for i in range(len(confident_dobj_amod_annotation)):
tmp_spearman = spearmanr(confident_dobj_amod_annotation[i], confident_dobj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj_amod:', sum(spearmans)/len(spearmans))
else:
print('dobj_amod: -')
tmp_nsubj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_nsubj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_nsubj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_amod_scores.append(0)
else:
tmp_nsubj_amod_scores.append(float(words[2]))
confident_nsubj_amod_annotation = list()
confident_nsubj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in nsubj_amod_confident_position:
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_amod_annotation.append(tmp_annotation)
confident_nsubj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_amod_annotations[i])
tmp_score.append(tmp_nsubj_amod_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_amod_annotation)):
tmp_spearman = spearmanr(confident_nsubj_amod_annotation[i], confident_nsubj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj_amod:', sum(spearmans)/len(spearmans))
else:
print('nsubj_amod: -')
def analyze_model_by_pair_all(model_name):
print('We are working on model:', model_name)
tmp_dobj_scores = list()
with open('Other_model_result/' + model_name + '_verb_dobj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_scores.append(0)
else:
tmp_dobj_scores.append(float(words[2]))
confident_dobj_annotation = list()
confident_dobj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_annotation.append(tmp_annotation)
confident_dobj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_annotations[i])
tmp_score.append(tmp_dobj_scores[i])
spearmans = list()
for i in range(len(confident_dobj_annotation)):
tmp_spearman = spearmanr(confident_dobj_annotation[i], confident_dobj_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj:', sum(spearmans)/len(spearmans))
tmp_nsubj_scores = list()
with open('Other_model_result/' + model_name + '_verb_nsubj_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_scores.append(0)
else:
tmp_nsubj_scores.append(float(words[2]))
confident_nsubj_annotation = list()
confident_nsubj_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_annotation.append(tmp_annotation)
confident_nsubj_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_annotations[i])
tmp_score.append(tmp_nsubj_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_annotation)):
tmp_spearman = spearmanr(confident_nsubj_annotation[i], confident_nsubj_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj:', sum(spearmans)/len(spearmans))
tmp_amod_scores = list()
with open('Other_model_result/' + model_name + '_noun_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_amod_scores.append(0)
else:
tmp_amod_scores.append(float(words[2]))
confident_amod_annotation = list()
confident_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_amod_annotation.append(tmp_annotation)
confident_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(amod_annotations[i])
tmp_score.append(tmp_amod_scores[i])
spearmans = list()
for i in range(len(confident_amod_annotation)):
tmp_spearman = spearmanr(confident_amod_annotation[i], confident_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('amod:', sum(spearmans)/len(spearmans))
tmp_dobj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_dobj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_dobj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_dobj_amod_scores.append(0)
else:
tmp_dobj_amod_scores.append(float(words[2]))
confident_dobj_amod_annotation = list()
confident_dobj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_dobj_amod_annotation.append(tmp_annotation)
confident_dobj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(dobj_amod_annotations[i])
tmp_score.append(tmp_dobj_amod_scores[i])
spearmans = list()
for i in range(len(confident_dobj_amod_annotation)):
tmp_spearman = spearmanr(confident_dobj_amod_annotation[i], confident_dobj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('dobj_amod:', sum(spearmans)/len(spearmans))
else:
print('dobj_amod: -')
tmp_nsubj_amod_scores = list()
if os.path.isfile('Other_model_result/' + model_name + '_verb_nsubj_amod_result'):
with open('Other_model_result/' + model_name + '_verb_nsubj_amod_result', 'r') as f:
for line in f:
words = line[:-1].split('\t')
if words[2] == 'NAN':
tmp_nsubj_amod_scores.append(0)
else:
tmp_nsubj_amod_scores.append(float(words[2]))
confident_nsubj_amod_annotation = list()
confident_nsubj_amod_scores = list()
tmp_annotation = list()
tmp_score = list()
last_predict = 0
for i in range(2000):
if int(i / 4) > last_predict:
if len(tmp_annotation) > 1:
confident_nsubj_amod_annotation.append(tmp_annotation)
confident_nsubj_amod_scores.append(tmp_score)
tmp_annotation = list()
tmp_score = list()
last_predict = int(i/4)
tmp_annotation.append(nsubj_amod_annotations[i])
tmp_score.append(tmp_nsubj_amod_scores[i])
spearmans = list()
for i in range(len(confident_nsubj_amod_annotation)):
tmp_spearman = spearmanr(confident_nsubj_amod_annotation[i], confident_nsubj_amod_scores[i])[0]
if tmp_spearman > -1.5:
spearmans.append(tmp_spearman)
print('nsubj_amod:', sum(spearmans)/len(spearmans))
else:
print('nsubj_amod: -')
with open('confident_pairs.json', 'r') as f:
confident_pairs = json.load(f)
with open('difficult_pairs.json', 'r') as f:
difficult_pairs = json.load(f)
dobj_annotations = list()
dobj_confident_position = list()
with open('dobj_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
dobj_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['dobj']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
dobj_confident_position.append((p_pos-1)*4+a_pos-1)
dobj_confident_position.sort()
nsubj_annotations = list()
nsubj_confident_position = list()
with open('nsubj_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
nsubj_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['nsubj']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
nsubj_confident_position.append((p_pos-1)*4+a_pos-1)
nsubj_confident_position.sort()
amod_annotations = list()
amod_confident_position = list()
with open('amod_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
amod_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['amod']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('n')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
amod_confident_position.append((p_pos-1)*4+a_pos-1)
amod_confident_position.sort()
dobj_amod_annotations = list()
dobj_amod_confident_position = list()
with open('dobj_amod_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
dobj_amod_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['dobj_amod']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
dobj_amod_confident_position.append((p_pos-1)*4+a_pos-1)
dobj_amod_confident_position.sort()
nsubj_amod_annotations = list()
nsubj_amod_confident_position = list()
with open('nsubj_amod_annotation.txt', 'r') as f:
for line in f:
words = line[:-1].split('\t')
nsubj_amod_annotations.append(float(words[2]))
tmp_confident_pairs = confident_pairs['nsubj_amod']
for pair in tmp_confident_pairs:
p_pos = int(pair.split('v')[1].split('_')[0])
tmp = pair.split('_')
a_pos = int(tmp[-1])
nsubj_amod_confident_position.append((p_pos-1)*4+a_pos-1)
nsubj_amod_confident_position.sort()
# analyze_model('depemb')
# analyze_model('word2vec')
# analyze_model('glove')
# analyze_model('depcontext')
#
# print('')
# analyze_model('wiki_pp')
# analyze_model('yelp_pp')
# analyze_model('nyt_pp')
# print('')
# analyze_model('wiki_ds')
# analyze_model('yelp_ds')
# analyze_model('nyt_ds')
# print('')
# analyze_model('wiki')
# analyze_model('yelp')
# analyze_model('nyt')
# print('')
# analyze_model('filter_wiki')
# analyze_model('filter_yelp')
# analyze_model('filter_nyt')
# print('')
# print('')
# print('')
#
# analyze_model_by_pair('word2vec')
# analyze_model_by_pair('glove')
# analyze_model_by_pair('depcontext')
#
# print('')
# analyze_model_by_pair('wiki_pp')
# analyze_model_by_pair('yelp_pp')
# analyze_model_by_pair('nyt_pp')
# print('')
# analyze_model_by_pair('wiki_ds')
# analyze_model_by_pair('yelp_ds')
# analyze_model_by_pair('nyt_ds')
# print('')
# analyze_model_by_pair('wiki')
# analyze_model_by_pair('yelp')
# analyze_model_by_pair('nyt')
# print('')
# analyze_model_by_pair('filter_wiki')
# analyze_model_by_pair('filter_yelp')
# analyze_model_by_pair('filter_nyt')
print('')
print('')
print('')
analyze_model_by_pair_all('word2vec')
analyze_model_by_pair_all('glove')
analyze_model_by_pair_all('depcontext')
print('')
analyze_model_by_pair_all('wiki_pp')
analyze_model_by_pair_all('yelp_pp')
analyze_model_by_pair_all('nyt_pp')
print('')
analyze_model_by_pair_all('wiki_ds')
analyze_model_by_pair_all('yelp_ds')
analyze_model_by_pair_all('nyt_ds')
print('')
analyze_model_by_pair_all('wiki')
analyze_model_by_pair_all('yelp')
analyze_model_by_pair_all('nyt')
print('')
analyze_model_by_pair_all('filter_wiki')
analyze_model_by_pair_all('filter_yelp')
analyze_model_by_pair_all('filter_nyt') | en | 0.26733 | # if tmp_nsubj_scores[i] == 0: # continue # analyze_model('depemb') # analyze_model('word2vec') # analyze_model('glove') # analyze_model('depcontext') # # print('') # analyze_model('wiki_pp') # analyze_model('yelp_pp') # analyze_model('nyt_pp') # print('') # analyze_model('wiki_ds') # analyze_model('yelp_ds') # analyze_model('nyt_ds') # print('') # analyze_model('wiki') # analyze_model('yelp') # analyze_model('nyt') # print('') # analyze_model('filter_wiki') # analyze_model('filter_yelp') # analyze_model('filter_nyt') # print('') # print('') # print('') # # analyze_model_by_pair('word2vec') # analyze_model_by_pair('glove') # analyze_model_by_pair('depcontext') # # print('') # analyze_model_by_pair('wiki_pp') # analyze_model_by_pair('yelp_pp') # analyze_model_by_pair('nyt_pp') # print('') # analyze_model_by_pair('wiki_ds') # analyze_model_by_pair('yelp_ds') # analyze_model_by_pair('nyt_ds') # print('') # analyze_model_by_pair('wiki') # analyze_model_by_pair('yelp') # analyze_model_by_pair('nyt') # print('') # analyze_model_by_pair('filter_wiki') # analyze_model_by_pair('filter_yelp') # analyze_model_by_pair('filter_nyt') | 2.733311 | 3 |
diddiparser/lib/__init__.py | DiddiLeija/diddiparser | 1 | 6631426 | <filename>diddiparser/lib/__init__.py<gh_stars>1-10
"Standard lib for DiddiScript."
from diddiparser.lib import lang_runners
from diddiparser.lib import diddi_stdfuncs
from diddiparser.lib.lang_runners import __all__ as lang_runners_all
from diddiparser.lib.diddi_stdfuncs import __all__ as diddi_stdfuncs_all
__all__ = lang_runners_all + diddi_stdfuncs_all
# add here the known functions
STD_FUNCS = tuple(__all__)
KNOWN_FUNCS = {"pyrun": lang_runners.pyrun,
"ramz_goto": diddi_stdfuncs.ramz_goto,
"openfile": diddi_stdfuncs.openfile,
"subprocess_run": diddi_stdfuncs.subprocess_run}
__all__.append("KNOWN_FUNCS")
| <filename>diddiparser/lib/__init__.py<gh_stars>1-10
"Standard lib for DiddiScript."
from diddiparser.lib import lang_runners
from diddiparser.lib import diddi_stdfuncs
from diddiparser.lib.lang_runners import __all__ as lang_runners_all
from diddiparser.lib.diddi_stdfuncs import __all__ as diddi_stdfuncs_all
__all__ = lang_runners_all + diddi_stdfuncs_all
# add here the known functions
STD_FUNCS = tuple(__all__)
KNOWN_FUNCS = {"pyrun": lang_runners.pyrun,
"ramz_goto": diddi_stdfuncs.ramz_goto,
"openfile": diddi_stdfuncs.openfile,
"subprocess_run": diddi_stdfuncs.subprocess_run}
__all__.append("KNOWN_FUNCS")
| en | 0.924323 | # add here the known functions | 1.915649 | 2 |
PlanIt/notebooks/cost_handling.py | awoodwa/PlanIt | 0 | 6631427 | def cost_of_wind(turbines):
'''
This function takes the number of turbines to be installed and
calculates the cost of installation.
Inputs
turbines : integer value of turbines (1.3M USD per turbine)
Outputs
cost : float value of dollars
'''
# 1 turbine costs approximately 1.3 M USD
cost = turbines * 1.3e6
return cost
def cost_of_solar(annual_solar_mean):
'''
This function calculates the cost of a solar panel installed in a
given location that has some annual solar intake.
Inputs
annual_solar_mean : float of output of solar handling function (kWh)
Outputs
cost : float in USD
'''
# solar cost is calculated by kW
daily_solar = 1000 * annual_solar_mean / 8760 # daily solar power in W
cost = 3.14 * daily_solar
return cost
| def cost_of_wind(turbines):
'''
This function takes the number of turbines to be installed and
calculates the cost of installation.
Inputs
turbines : integer value of turbines (1.3M USD per turbine)
Outputs
cost : float value of dollars
'''
# 1 turbine costs approximately 1.3 M USD
cost = turbines * 1.3e6
return cost
def cost_of_solar(annual_solar_mean):
'''
This function calculates the cost of a solar panel installed in a
given location that has some annual solar intake.
Inputs
annual_solar_mean : float of output of solar handling function (kWh)
Outputs
cost : float in USD
'''
# solar cost is calculated by kW
daily_solar = 1000 * annual_solar_mean / 8760 # daily solar power in W
cost = 3.14 * daily_solar
return cost
| en | 0.817378 | This function takes the number of turbines to be installed and calculates the cost of installation. Inputs turbines : integer value of turbines (1.3M USD per turbine) Outputs cost : float value of dollars # 1 turbine costs approximately 1.3 M USD This function calculates the cost of a solar panel installed in a given location that has some annual solar intake. Inputs annual_solar_mean : float of output of solar handling function (kWh) Outputs cost : float in USD # solar cost is calculated by kW # daily solar power in W | 4.018326 | 4 |
jetpack/functional.py | vasudevanv/jetpack | 0 | 6631428 | <filename>jetpack/functional.py
import functools
def first(x):
return x[0]
def last(x):
return x[-1]
def compose(*functions):
return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def _polyval(coeffs, x):
p = coeffs[0]
for c in coeffs[1:]:
p = c + x*p
return p
| <filename>jetpack/functional.py
import functools
def first(x):
return x[0]
def last(x):
return x[-1]
def compose(*functions):
return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def _polyval(coeffs, x):
p = coeffs[0]
for c in coeffs[1:]:
p = c + x*p
return p
| none | 1 | 2.884258 | 3 |
|
plugins/ghetto.py | Arna-Maity/corobo | 81 | 6631429 | import re
import requests
from errbot import BotPlugin, re_botcmd
class Ghetto(BotPlugin):
"""
Real talk yo
"""
@re_botcmd(pattern=r'ghetto\s+(.+)',
re_cmd_name_help='ghetto <sentence>',
flags=re.IGNORECASE)
def ghetto(self, msg, match):
"""
Real talk yo
"""
rq = requests.post('http://www.gizoogle.net/textilizer.php',
data={'translatetext': match.group(1)})
translated_text = re.search(
r'<textarea .*;\"/>(.+)</textarea>', rq.text)
if translated_text is not None:
return translated_text.group(1)
else:
return 'Shiznit happens!'
| import re
import requests
from errbot import BotPlugin, re_botcmd
class Ghetto(BotPlugin):
"""
Real talk yo
"""
@re_botcmd(pattern=r'ghetto\s+(.+)',
re_cmd_name_help='ghetto <sentence>',
flags=re.IGNORECASE)
def ghetto(self, msg, match):
"""
Real talk yo
"""
rq = requests.post('http://www.gizoogle.net/textilizer.php',
data={'translatetext': match.group(1)})
translated_text = re.search(
r'<textarea .*;\"/>(.+)</textarea>', rq.text)
if translated_text is not None:
return translated_text.group(1)
else:
return 'Shiznit happens!'
| en | 0.895787 | Real talk yo Real talk yo | 2.680181 | 3 |
pyro/distributions/transforms/polynomial.py | akern40/pyro | 1 | 6631430 | <filename>pyro/distributions/transforms/polynomial.py<gh_stars>1-10
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
import torch.nn as nn
from torch.distributions import constraints
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions.util import copy_docs_from
from pyro.nn import AutoRegressiveNN
@copy_docs_from(TransformModule)
class Polynomial(TransformModule):
"""
An autoregressive bijective transform as described in Jaini et al. (2019)
applying following equation element-wise,
:math:`y_n = c_n + \\int^{x_n}_0\\sum^K_{k=1}\\left(\\sum^R_{r=0}a^{(n)}_{r,k}u^r\\right)du`
where :math:`x_n` is the :math:`n`th input, :math:`y_n` is the :math:`n`th
output, and :math:`c_n\\in\\mathbb{R}`,
:math:`\\left\\{a^{(n)}_{r,k}\\in\\mathbb{R}\\right\\}` are learnable parameters
that are the output of an autoregressive NN inputting
:math:`x_{\\prec n}={x_1,x_2,\\ldots,x_{n-1}}`.
Together with :class:`~pyro.distributions.TransformedDistribution` this provides
a way to create richer variational approximations.
Example usage:
>>> from pyro.nn import AutoRegressiveNN
>>> input_dim = 10
>>> count_degree = 4
>>> count_sum = 3
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [(count_degree + 1)*count_sum]
>>> arn = AutoRegressiveNN(input_dim, [input_dim*10], param_dims)
>>> transform = Polynomial(arn, input_dim=input_dim, count_degree=count_degree,
... count_sum=count_sum)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
>>> flow_dist.sample() # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using a polynomial transform can be
scored.
:param autoregressive_nn: an autoregressive neural network whose forward call
returns a tensor of real-valued
numbers of size (batch_size, (count_degree+1)*count_sum, input_dim)
:type autoregressive_nn: nn.Module
:param count_degree: The degree of the polynomial to use for each element-wise
transformation.
:type count_degree: int
:param count_sum: The number of polynomials to sum in each element-wise
transformation.
:type count_sum: int
References:
[1] <NAME>, <NAME>, <NAME>. Sum-of-squares polynomial flow.
[arXiv:1905.02325]
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
autoregressive = True
def __init__(self, autoregressive_nn, input_dim, count_degree, count_sum):
super().__init__(cache_size=1)
self.arn = autoregressive_nn
self.input_dim = input_dim
self.count_degree = count_degree
self.count_sum = count_sum
self._cached_logDetJ = None
self.c = nn.Parameter(torch.Tensor(input_dim))
self.reset_parameters()
# Vector of powers of input dimension
powers = torch.arange(1, count_degree + 2, dtype=torch.get_default_dtype())
self.register_buffer('powers', powers)
# Build mask of constants
mask = self.powers + torch.arange(count_degree + 1).unsqueeze(-1).type_as(powers)
power_mask = mask
mask = mask.reciprocal()
self.register_buffer('power_mask', power_mask)
self.register_buffer('mask', mask)
def reset_parameters(self):
stdv = 1. / math.sqrt(self.c.size(0))
self.c.data.uniform_(-stdv, stdv)
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x=>y; in the prototypical context of a
:class:`~pyro.distributions.TransformedDistribution` `x` is a sample from
the base distribution (or the output of a previous transform)
"""
# Calculate the polynomial coefficients
# ~ (batch_size, count_sum, count_degree+1, input_dim)
A = self.arn(x).view(-1, self.count_sum, self.count_degree + 1, self.input_dim)
# Take cross product of coefficients across degree dim
# ~ (batch_size, count_sum, count_degree+1, count_degree+1, input_dim)
coefs = A.unsqueeze(-2) * A.unsqueeze(-3)
# Calculate output as sum-of-squares polynomial
x_view = x.view(-1, 1, 1, self.input_dim)
x_pow_matrix = x_view.pow(self.power_mask.unsqueeze(-1)).unsqueeze(-4)
# Eq (8) from the paper, expanding the squared term and integrating
# NOTE: The view_as is necessary because the batch dimensions were collapsed previously
y = self.c + (coefs * x_pow_matrix * self.mask.unsqueeze(-1)).sum((1, 2, 3)).view_as(x)
# log(|det(J)|) is calculated by the fundamental theorem of calculus, i.e. remove the constant
# term and the integral from eq (8) (the equation for this isn't given in the paper)
x_pow_matrix = x_view.pow(self.power_mask.unsqueeze(-1) - 1).unsqueeze(-4)
self._cached_logDetJ = torch.log((coefs * x_pow_matrix).sum((1, 2, 3)).view_as(x) + 1e-8).sum(-1)
return y
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. As noted above, this implementation is incapable of
inverting arbitrary values `y`; rather it assumes `y` is the result of a
previously computed application of the bijector to some `x` (which was
cached on the forward call)
"""
raise KeyError("Polynomial object expected to find key in intermediates cache but didn't")
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log Jacobian
"""
x_old, y_old = self._cached_x_y
if x is not x_old or y is not y_old:
# This call to the parent class Transform will update the cache
# as well as calling self._call and recalculating y and log_detJ
self(x)
return self._cached_logDetJ
def polynomial(input_dim, hidden_dims=None):
"""
A helper function to create a :class:`~pyro.distributions.transforms.Polynomial`
object that takes care of constructing an autoregressive network with the
correct input/output dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param hidden_dims: The desired hidden dimensions of of the autoregressive
network. Defaults to using [input_dim * 10]
"""
count_degree = 4
count_sum = 3
if hidden_dims is None:
hidden_dims = [input_dim * 10]
arn = AutoRegressiveNN(input_dim, hidden_dims, param_dims=[(count_degree + 1) * count_sum])
return Polynomial(arn, input_dim=input_dim, count_degree=count_degree, count_sum=count_sum)
| <filename>pyro/distributions/transforms/polynomial.py<gh_stars>1-10
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
import torch.nn as nn
from torch.distributions import constraints
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions.util import copy_docs_from
from pyro.nn import AutoRegressiveNN
@copy_docs_from(TransformModule)
class Polynomial(TransformModule):
"""
An autoregressive bijective transform as described in Jaini et al. (2019)
applying following equation element-wise,
:math:`y_n = c_n + \\int^{x_n}_0\\sum^K_{k=1}\\left(\\sum^R_{r=0}a^{(n)}_{r,k}u^r\\right)du`
where :math:`x_n` is the :math:`n`th input, :math:`y_n` is the :math:`n`th
output, and :math:`c_n\\in\\mathbb{R}`,
:math:`\\left\\{a^{(n)}_{r,k}\\in\\mathbb{R}\\right\\}` are learnable parameters
that are the output of an autoregressive NN inputting
:math:`x_{\\prec n}={x_1,x_2,\\ldots,x_{n-1}}`.
Together with :class:`~pyro.distributions.TransformedDistribution` this provides
a way to create richer variational approximations.
Example usage:
>>> from pyro.nn import AutoRegressiveNN
>>> input_dim = 10
>>> count_degree = 4
>>> count_sum = 3
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [(count_degree + 1)*count_sum]
>>> arn = AutoRegressiveNN(input_dim, [input_dim*10], param_dims)
>>> transform = Polynomial(arn, input_dim=input_dim, count_degree=count_degree,
... count_sum=count_sum)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
>>> flow_dist.sample() # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using a polynomial transform can be
scored.
:param autoregressive_nn: an autoregressive neural network whose forward call
returns a tensor of real-valued
numbers of size (batch_size, (count_degree+1)*count_sum, input_dim)
:type autoregressive_nn: nn.Module
:param count_degree: The degree of the polynomial to use for each element-wise
transformation.
:type count_degree: int
:param count_sum: The number of polynomials to sum in each element-wise
transformation.
:type count_sum: int
References:
[1] <NAME>, <NAME>, <NAME>. Sum-of-squares polynomial flow.
[arXiv:1905.02325]
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
autoregressive = True
def __init__(self, autoregressive_nn, input_dim, count_degree, count_sum):
super().__init__(cache_size=1)
self.arn = autoregressive_nn
self.input_dim = input_dim
self.count_degree = count_degree
self.count_sum = count_sum
self._cached_logDetJ = None
self.c = nn.Parameter(torch.Tensor(input_dim))
self.reset_parameters()
# Vector of powers of input dimension
powers = torch.arange(1, count_degree + 2, dtype=torch.get_default_dtype())
self.register_buffer('powers', powers)
# Build mask of constants
mask = self.powers + torch.arange(count_degree + 1).unsqueeze(-1).type_as(powers)
power_mask = mask
mask = mask.reciprocal()
self.register_buffer('power_mask', power_mask)
self.register_buffer('mask', mask)
def reset_parameters(self):
stdv = 1. / math.sqrt(self.c.size(0))
self.c.data.uniform_(-stdv, stdv)
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x=>y; in the prototypical context of a
:class:`~pyro.distributions.TransformedDistribution` `x` is a sample from
the base distribution (or the output of a previous transform)
"""
# Calculate the polynomial coefficients
# ~ (batch_size, count_sum, count_degree+1, input_dim)
A = self.arn(x).view(-1, self.count_sum, self.count_degree + 1, self.input_dim)
# Take cross product of coefficients across degree dim
# ~ (batch_size, count_sum, count_degree+1, count_degree+1, input_dim)
coefs = A.unsqueeze(-2) * A.unsqueeze(-3)
# Calculate output as sum-of-squares polynomial
x_view = x.view(-1, 1, 1, self.input_dim)
x_pow_matrix = x_view.pow(self.power_mask.unsqueeze(-1)).unsqueeze(-4)
# Eq (8) from the paper, expanding the squared term and integrating
# NOTE: The view_as is necessary because the batch dimensions were collapsed previously
y = self.c + (coefs * x_pow_matrix * self.mask.unsqueeze(-1)).sum((1, 2, 3)).view_as(x)
# log(|det(J)|) is calculated by the fundamental theorem of calculus, i.e. remove the constant
# term and the integral from eq (8) (the equation for this isn't given in the paper)
x_pow_matrix = x_view.pow(self.power_mask.unsqueeze(-1) - 1).unsqueeze(-4)
self._cached_logDetJ = torch.log((coefs * x_pow_matrix).sum((1, 2, 3)).view_as(x) + 1e-8).sum(-1)
return y
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. As noted above, this implementation is incapable of
inverting arbitrary values `y`; rather it assumes `y` is the result of a
previously computed application of the bijector to some `x` (which was
cached on the forward call)
"""
raise KeyError("Polynomial object expected to find key in intermediates cache but didn't")
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log Jacobian
"""
x_old, y_old = self._cached_x_y
if x is not x_old or y is not y_old:
# This call to the parent class Transform will update the cache
# as well as calling self._call and recalculating y and log_detJ
self(x)
return self._cached_logDetJ
def polynomial(input_dim, hidden_dims=None):
"""
A helper function to create a :class:`~pyro.distributions.transforms.Polynomial`
object that takes care of constructing an autoregressive network with the
correct input/output dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param hidden_dims: The desired hidden dimensions of of the autoregressive
network. Defaults to using [input_dim * 10]
"""
count_degree = 4
count_sum = 3
if hidden_dims is None:
hidden_dims = [input_dim * 10]
arn = AutoRegressiveNN(input_dim, hidden_dims, param_dims=[(count_degree + 1) * count_sum])
return Polynomial(arn, input_dim=input_dim, count_degree=count_degree, count_sum=count_sum)
| en | 0.767842 | # Copyright (c) 2017-2019 Uber Technologies, Inc. # SPDX-License-Identifier: Apache-2.0 An autoregressive bijective transform as described in Jaini et al. (2019) applying following equation element-wise, :math:`y_n = c_n + \\int^{x_n}_0\\sum^K_{k=1}\\left(\\sum^R_{r=0}a^{(n)}_{r,k}u^r\\right)du` where :math:`x_n` is the :math:`n`th input, :math:`y_n` is the :math:`n`th output, and :math:`c_n\\in\\mathbb{R}`, :math:`\\left\\{a^{(n)}_{r,k}\\in\\mathbb{R}\\right\\}` are learnable parameters that are the output of an autoregressive NN inputting :math:`x_{\\prec n}={x_1,x_2,\\ldots,x_{n-1}}`. Together with :class:`~pyro.distributions.TransformedDistribution` this provides a way to create richer variational approximations. Example usage: >>> from pyro.nn import AutoRegressiveNN >>> input_dim = 10 >>> count_degree = 4 >>> count_sum = 3 >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim)) >>> param_dims = [(count_degree + 1)*count_sum] >>> arn = AutoRegressiveNN(input_dim, [input_dim*10], param_dims) >>> transform = Polynomial(arn, input_dim=input_dim, count_degree=count_degree, ... count_sum=count_sum) >>> pyro.module("my_transform", transform) # doctest: +SKIP >>> flow_dist = dist.TransformedDistribution(base_dist, [transform]) >>> flow_dist.sample() # doctest: +SKIP The inverse of this transform does not possess an analytical solution and is left unimplemented. However, the inverse is cached when the forward operation is called during sampling, and so samples drawn using a polynomial transform can be scored. :param autoregressive_nn: an autoregressive neural network whose forward call returns a tensor of real-valued numbers of size (batch_size, (count_degree+1)*count_sum, input_dim) :type autoregressive_nn: nn.Module :param count_degree: The degree of the polynomial to use for each element-wise transformation. :type count_degree: int :param count_sum: The number of polynomials to sum in each element-wise transformation. :type count_sum: int References: [1] <NAME>, <NAME>, <NAME>. Sum-of-squares polynomial flow. [arXiv:1905.02325] # Vector of powers of input dimension # Build mask of constants :param x: the input into the bijection :type x: torch.Tensor Invokes the bijection x=>y; in the prototypical context of a :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from the base distribution (or the output of a previous transform) # Calculate the polynomial coefficients # ~ (batch_size, count_sum, count_degree+1, input_dim) # Take cross product of coefficients across degree dim # ~ (batch_size, count_sum, count_degree+1, count_degree+1, input_dim) # Calculate output as sum-of-squares polynomial # Eq (8) from the paper, expanding the squared term and integrating # NOTE: The view_as is necessary because the batch dimensions were collapsed previously # log(|det(J)|) is calculated by the fundamental theorem of calculus, i.e. remove the constant # term and the integral from eq (8) (the equation for this isn't given in the paper) :param y: the output of the bijection :type y: torch.Tensor Inverts y => x. 
As noted above, this implementation is incapable of inverting arbitrary values `y`; rather it assumes `y` is the result of a previously computed application of the bijector to some `x` (which was cached on the forward call) Calculates the elementwise determinant of the log Jacobian # This call to the parent class Transform will update the cache # as well as calling self._call and recalculating y and log_detJ A helper function to create a :class:`~pyro.distributions.transforms.Polynomial` object that takes care of constructing an autoregressive network with the correct input/output dimensions. :param input_dim: Dimension of input variable :type input_dim: int :param hidden_dims: The desired hidden dimensions of of the autoregressive network. Defaults to using [input_dim * 10] | 2.270287 | 2 |
astor_real_estate/astor_housing.py | deanchristakos/astor_real_estate | 0 | 6631431 | <gh_stars>0
import logging
logging.basicConfig(format='%(asctime)s %(funcName)s %(message)s', filename='/var/log/astor_square/astor_housing.log',level=logging.DEBUG)
from astor_schemas import *
import math
from astor_square_utils import *
class UnitTaxInfo(object):
def __init__(self, bbl=None, connection_pool=None):
self.connection_pool = connection_pool
self.query = None
self.bbl = bbl
self.neighborhood = None
self.building_class = None
self.borough_block_lot = None
self.address = None
self.year_built = None
self.total_units = None
self.gross_square_feet = None
self.estimated_gross_income = None
self.gross_income_per_square_foot = None
self.estimated_expense = None
self.expense_per_square_foot = None
self.net_operating_income = None
self.net_operating_income_per_square_foot = None
self.full_market_value = None
self.market_value_per_square_foot = None
self.net_present_value = None
self.net_present_value_per_square_foot = None
self.last_year_annual_tax = None
self.this_year_annual_tax = None
self.full_addr = None
@property
def full_address(self):
if self.full_addr is None and self.address is not None:
borough = self.bbl[0]
city = get_borough_city(borough)
state = 'NY'
            zip_code = ''  # zip lookup (getzipcode(self.address, city, state)) is disabled upstream; avoid shadowing built-in zip()
            self.full_addr = self.address + ' ' + city + ', ' + state + ' ' + zip_code
        return self.full_addr.strip() if self.full_addr is not None else None
class Comparable(UnitTaxInfo):
def __init__(self, bbl=None, connection_pool=None):
UnitTaxInfo.__init__(self, bbl, connection_pool)
self.query = 'select DISTINCT * from tax_analysis_city_comparables where borough_block_lot = %s'
self.bbl = None
self.neighborhood = None
self.building_class = None
self.borough_block_lot = None
self.address = None
self.year_built = None
self.total_units = None
self.gross_square_feet = None
self.estimated_gross_income = None
self.gross_income_per_square_foot = None
self.estimated_expense = None
self.expense_per_square_foot = None
self.net_operating_income = None
self.full_market_value = None
self.market_value_per_square_foot = None
self.comparablebbl = None
self.annual_tax = None
self.comp_quality = None
self.year = None
self.fiscal_year = None
self.lat = None
self.long = None
def __repr__(self):
return "<Comparable(bbl={self.bbl!r},comparablebbl={self.comparablebbl!r})>".format(self=self)
def create_comparable_from_row(self, row):
self.neighborhood = row[0]
self.building_class = row[1]
self.borough_block_lot = row[2]
self.bbl = self.borough_block_lot.replace('-','') if self.bbl is None else self.bbl
logging.debug('bbl set to ' + self.bbl + ' from ' + self.borough_block_lot)
self.address = row[3]
self.year_built = row[4]
self.total_units = row[5]
self.gross_square_feet = row[6]
self.estimated_gross_income = row[7]
self.gross_income_per_square_foot = row[8]
self.estimated_expense = row[9]
self.expense_per_square_foot = row[10]
self.net_operating_income = row[11]
if self.net_operating_income is not None and self.gross_square_feet is not None:
self.net_operating_income_per_square_foot = self.net_operating_income / self.gross_square_feet
self.full_market_value = row[12]
self.market_value_per_square_foot = row[13]
self.distance_from_subject_in_miles = row[14]
self.comparablebbl = row[15]
self.year = row[16]
self.fiscal_year = row[17]
self.comp_quality = row[18]
self.lat = row[19]
self.long = row[20]
return
def load_comparable_attributes(self):
if self.bbl is None:
return
query_bbl = create_dashed_bbl(self.bbl)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(self.query, (query_bbl,))
        row = cursor.fetchone()
        if row is None:
            logging.error('no comparable data for borough_block_lot ' + str(query_bbl))
            self.connection_pool.putconn(dbconnection)
            return
        self.neighborhood = row[0]
self.building_class = row[1]
self.borough_block_lot = row[2]
self.bbl = self.borough_block_lot.replace('-','')
self.address = row[3]
self.year_built = row[4]
self.total_units = row[5]
self.gross_square_feet = row[6]
self.estimated_gross_income = row[7]
self.gross_income_per_square_foot = row[8]
self.estimated_expense = row[9]
self.expense_per_square_foot = row[10]
self.net_operating_income = row[11]
if self.net_operating_income is not None and self.gross_square_feet is not None:
self.net_operating_income_per_square_foot = self.net_operating_income / self.gross_square_feet
self.full_market_value = row[12]
self.market_value_per_square_foot = row[13]
        self.comparablebbl = row[14]
        self.connection_pool.putconn(dbconnection)
        return
def get_json(self):
if self.bbl is None and self.connection_pool is not None:
logging.debug('loading comparable attributes')
self.load_comparable_attributes()
elif self.bbl is None and self.connection_pool is None:
logging.debug('No bbl. Returning blank result')
return '{}'
schema = ComparableSchema()
return schema.dump(self)
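# Usage sketch (not part of the original module): load one comparable and serialize it through
# its marshmallow schema. The connection pool is assumed to be a psycopg2 pool managed by the
# caller, and the BBL is assumed to exist in tax_analysis_city_comparables; both are illustrative.
def _example_dump_comparable(connection_pool, bbl):
    comp = Comparable(bbl=bbl, connection_pool=connection_pool)
    comp.load_comparable_attributes()
    return comp.get_json()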
class PropertyTaxAnalysis(UnitTaxInfo):
def __init__(self, bbl=None, connection_pool=None):
UnitTaxInfo.__init__(self, bbl, connection_pool)
self.query = '''SELECT *
FROM building_tax_analysis b
LEFT JOIN bbl_locations l ON
b.borough_block_lot = l.borough_block_lot
WHERE b.borough_block_lot = %s AND fiscal_year IS NOT NULL ORDER BY fiscal_year DESC'''
self.bbl = bbl
self.last_year_total_market_value = None
self.this_year_total_market_value = None
self.last_year_assessed_value = None
self.this_year_assessed_value = None
self.last_year_transitional_assessed_value = None
self.this_year_transitional_assessed_value = None
self.lat = None
self.long = None
def __repr__(self):
return "<PropertyTaxAnalysis(bbl={self.bbl!r})>".format(self=self)
def load_tax_analysis_attributes(self):
if self.bbl is None:
return
query_bbl = create_dashed_bbl(self.bbl)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(self.query, (query_bbl,))
        row = cursor.fetchone()
        if row is None:
            logging.error('no tax analysis data for borough_block_lot ' + str(query_bbl))
            self.connection_pool.putconn(dbconnection)
            return
        self.neighborhood = row[0]
self.building_class = row[1]
self.borough_block_lot = row[2]
self.address = row[3]
self.year_built = row[4]
self.total_units = row[5]
self.gross_square_feet = row[6]
self.estimated_gross_income = row[7]
self.gross_income_per_square_foot = row[8]
self.estimated_expense = row[9]
self.expense_per_square_foot = row[10]
self.net_operating_income = row[11]
if self.net_operating_income is not None and self.gross_square_feet is not None:
self.net_operating_income_per_square_foot = self.net_operating_income / self.gross_square_feet
self.full_market_value = row[12]
self.market_value_per_square_foot = row[13]
self.last_year_total_market_value = row[14]
self.this_year_total_market_value = row[15]
self.last_year_assessed_value = row[16]
self.this_year_assessed_value = row[17]
self.last_year_transitional_assessed_value = row[18]
self.this_year_transitional_assessed_value = row[19]
self.last_year_annual_tax = row[20]
self.this_year_annual_tax = row[21]
self.lat = row[28]
self.long = row[29]
self.connection_pool.putconn(dbconnection)
return
def get_json(self):
if self.neighborhood is None and self.connection_pool is not None:
self.load_tax_analysis_attributes()
elif self.neighborhood is None and self.connection_pool is None:
return ''
try:
schema = PropertyTaxAnalysisSchema()
result = schema.dump(self)
except Exception as e:
logging.error('problem getting schema: ' + str(e))
result = {}
        return result
class CondoTaxAnalysis(PropertyTaxAnalysis):
def __init__(self, bbl=None, connection_pool=None):
PropertyTaxAnalysis.__init__(self, bbl, connection_pool)
self.query = 'select * from condo_tax_analysis where borough_block_lot = %s'
def __repr__(self):
return "<CondoTaxAnalysis(bbl={self.bbl!r})>".format(self=self)
class UnitAndBuildingTaxAnalysis(object):
def __init__(self, unit_tax_analysis, building_tax_analysis):
self.unit_tax_analysis = unit_tax_analysis
if self.unit_tax_analysis.neighborhood is None and self.unit_tax_analysis.connection_pool is not None:
self.unit_tax_analysis.load_tax_analysis_attributes()
self.building_tax_analysis = building_tax_analysis
if self.building_tax_analysis.neighborhood is None and self.building_tax_analysis.connection_pool is not None:
self.building_tax_analysis.load_tax_analysis_attributes()
def __repr__(self):
return "<UnitAndBuildingTaxAnalysis(unit_tax_analysis={self.unit_tax_analysis!r}, building_tax_analysis={self.building_tax_analysis!r})>".format(self=self)
def get_json(self):
schema = UnitAndBuildingTaxAnalysisSchema()
return schema.dump(self)
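# Usage sketch (illustrative, not original code): combine a condo unit's analysis with its
# building's analysis in a single payload, the way the schema above expects. The unit and
# building BBLs are hypothetical placeholders and `pool` is an assumed psycopg2 connection pool.
def _example_unit_and_building(pool, unit_bbl, building_bbl):
    unit = CondoTaxAnalysis(bbl=unit_bbl, connection_pool=pool)
    building = PropertyTaxAnalysis(bbl=building_bbl, connection_pool=pool)
    return UnitAndBuildingTaxAnalysis(unit, building).get_json()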
class CityComparable(Comparable):
def __init__(self, bbl=None, connection_pool=None):
Comparable.__init__(self, bbl, connection_pool)
self.unadjusted_income_query = '''SELECT
estimated_gross_income,
gross_income_per_square_foot,
estimated_expense,
expense_per_square_foot,
net_operating_income,
full_market_value,
market_value_per_square_foot
FROM city_comparables_unadjusted
WHERE year = %s
AND borough_block_lot = %s'''
self.unadjusted_income_query_alt = '''SELECT
estimated_gross_income,
gross_income_per_square_foot,
estimated_expense,
expense_per_square_foot,
net_operating_income,
full_market_value,
market_value_per_square_foot
FROM building_tax_analysis
WHERE year = %s
AND borough_block_lot = %s'''
self.unadjusted_estimated_gross_income = None
self.unadjusted_gross_income_per_square_foot = None
self.unadjusted_estimated_expense = None
self.unadjusted_expense_per_square_foot = None
self.unadjusted_net_operating_income = None
self.unadjusted_full_market_value = None
self.unadjusted_market_value_per_square_foot = None
def add_unadjusted_data_from_row(self, row):
self.unadjusted_estimated_gross_income = row[0]
self.unadjusted_gross_income_per_square_foot = row[1]
self.unadjusted_estimated_expense = row[2]
self.unadjusted_expense_per_square_foot = row[3]
self.unadjusted_net_operating_income = row[4]
self.unadjusted_full_market_value = row[5]
self.unadjusted_market_value_per_square_foot = row[6]
def get_json(self):
if self.bbl is None and self.connection_pool is not None:
logging.debug('loading comparable attributes')
self.load_comparable_attributes()
elif self.bbl is None and self.connection_pool is None:
logging.debug('No bbl. Returning blank result')
return '{}'
schema = CityComparableSchema()
return schema.dump(self)
class CityComparables(object):
def __init__(self, bbl=None, connection_pool=None):
self.query = """SELECT DISTINCT
c.neighborhood,
c.building_class,
c.borough_block_lot,
c.address,
c.year_built,
c.total_units,
c.gross_square_feet,
c.estimated_gross_income,
c.gross_income_per_square_foot,
c.estimated_expense,
c.expense_per_square_foot,
c.net_operating_income,
c.full_market_value,
c.market_value_per_square_foot,
c.distance_from_subject_in_miles,
c.comparableof,
c.year,
c.fiscal_year,
s.score,
l.lat,
l.long
FROM tax_analysis_city_comparables c
LEFT JOIN similar_bbls s on REPLACE(c.borough_block_lot, '-', '') = s.similar_bbl
AND REPLACE(c.comparableof, '-','') = s.bbl AND s.city_comp = True
LEFT JOIN bbl_locations l ON l.borough_block_lot = c.borough_block_lot
where c.comparableof = %s"""
self.comparables = []
self.comparableof = bbl
self.connection_pool = connection_pool
query_bbl = create_dashed_bbl(self.comparableof)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
logging.debug('executing query ' + self.query + ' with argument ' + query_bbl)
cursor.execute(self.query, (query_bbl,))
rows = cursor.fetchall()
logging.debug('got ' + str(len(rows)) + ' comparable results')
for row in rows:
comparable = CityComparable()
comparable.create_comparable_from_row(row)
cursor.execute(comparable.unadjusted_income_query, (comparable.year, comparable.borough_block_lot))
unadjusted_row = cursor.fetchone()
if unadjusted_row is not None:
comparable.add_unadjusted_data_from_row(unadjusted_row)
else:
cursor.execute(comparable.unadjusted_income_query_alt, (comparable.year, comparable.borough_block_lot))
unadjusted_row = cursor.fetchone()
if unadjusted_row is not None:
comparable.add_unadjusted_data_from_row(unadjusted_row)
self.comparables.append(comparable)
self.connection_pool.putconn(dbconnection)
return
def get_json(self):
        result = [c.get_json() for c in self.comparables]
        # return the list of serialized comparables; callers can json.dumps() it if a string is needed
        return result
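# Usage sketch (assumptions flagged): fetch the city-selected comparables for a subject BBL and
# serialize them for an API response. The undashed subject BBL and the pool are illustrative;
# dashed/undashed conversion is handled by create_dashed_bbl() inside the class.
def _example_city_comparables(pool, subject_bbl):
    comps = CityComparables(bbl=subject_bbl, connection_pool=pool)
    return comps.get_json()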
class RecommendedComparables(object):
def __init__(self, bbl=None, year=None, connection_pool=None):
self.comparable_bbls_query = 'SELECT DISTINCT similar_bbl, score FROM similar_bbls WHERE bbl = %s'
query_template = 'select DISTINCT * from tax_analysis_recommended_comparables where borough_block_lot IN ('
query_template = '''SELECT DISTINCT
c.neighborhood,
c.building_class,
c.borough_block_lot,
c.address,
c.year_built,
c.total_units,
c.gross_square_feet,
c.estimated_gross_income,
c.gross_income_per_square_foot,
c.estimated_expense,
c.expense_per_square_foot,
c.net_operating_income,
c.full_market_value,
c.market_value_per_square_foot,
c.distance_from_subject_in_miles,
c.annual_tax,
c.comparableof,
c.year,
c.fiscal_year,
l.lat,
l.long
FROM tax_analysis_recommended_comparables c
LEFT JOIN bbl_locations l ON l.borough_block_lot = c.borough_block_lot
where c.borough_block_lot IN (
'''
self.comparables = []
self.comparableof = bbl
self.connection_pool = connection_pool
self.year = year
query_bbl = create_dashed_bbl(self.comparableof)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
logging.debug('executing query ' + self.comparable_bbls_query + ' with argument ' + bbl)
cursor.execute(self.comparable_bbls_query, (bbl,))
rows = cursor.fetchall()
        if rows is None or len(rows) == 0:
            self.connection_pool.putconn(dbconnection)
            return
recommended_bbls = [create_dashed_bbl(row[0]) for row in rows]
scores = {}
for row in rows:
scores[row[0]] = row[1]
self.query = query_template + ','.join(['%s']*len(recommended_bbls)) + ')'
if year is not None:
self.query = self.query + " AND year = %s"
logging.debug('executing query ' + self.query + ' with argument ' + str(recommended_bbls))
if year is None:
cursor.execute(self.query, tuple(recommended_bbls))
else:
cursor.execute(self.query, tuple(recommended_bbls) + tuple([year]))
rows = cursor.fetchall()
logging.debug('got ' + str(len(rows)) + ' comparable results')
for row in rows:
comparable = Comparable()
self.create_recommended_comparable_from_row(comparable, row)
if comparable.borough_block_lot.replace('-','') in scores.keys():
comparable.comp_quality = scores[comparable.borough_block_lot.replace('-','')]
self.comparables.append(comparable)
self.connection_pool.putconn(dbconnection)
return
def create_recommended_comparable_from_row(self, comparable, row):
comparable.neighborhood = row[0]
comparable.building_class = row[1]
comparable.borough_block_lot = row[2]
comparable.bbl = comparable.borough_block_lot.replace('-','') if comparable.bbl is None else comparable.bbl
logging.debug('bbl set to ' + comparable.bbl + ' from ' + comparable.borough_block_lot)
comparable.address = row[3]
comparable.year_built = row[4]
comparable.total_units = row[5]
comparable.gross_square_feet = row[6]
comparable.estimated_gross_income = row[7]
comparable.gross_income_per_square_foot = row[8]
comparable.estimated_expense = row[9]
comparable.expense_per_square_foot = row[10]
comparable.net_operating_income = row[11]
        if comparable.net_operating_income is not None and comparable.gross_square_feet is not None and comparable.gross_square_feet != 0:
            comparable.net_operating_income_per_square_foot = comparable.net_operating_income / comparable.gross_square_feet
            # NOI capitalized at a flat 4% (.06 - .02), presumably a 6% rate less 2% growth;
            # e.g. an NOI of 400,000 capitalizes to 10,000,000
            comparable.net_present_value = comparable.net_operating_income / (.06 - .02)
            comparable.net_present_value_per_square_foot = comparable.net_present_value / comparable.gross_square_feet
comparable.full_market_value = row[12]
comparable.market_value_per_square_foot = row[13]
comparable.distance_from_subject_in_miles = row[14]
comparable.annual_tax = row[15]
comparable.comparableof = row[16]
comparable.year = row[17]
comparable.fiscal_year = row[18]
comparable.lat = row[19]
comparable.long = row[20]
def get_json(self):
        result = [c.get_json() for c in self.comparables]
        # return the list of serialized comparables; callers can json.dumps() it if a string is needed
        return result
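# Usage sketch (hypothetical inputs): recommended comparables are driven by the similar_bbls
# table and can be restricted to a single year. The subject BBL and year below are placeholders.
def _example_recommended_comparables(pool, subject_bbl, year=None):
    recs = RecommendedComparables(bbl=subject_bbl, year=year, connection_pool=pool)
    return recs.get_json()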
'''
neighborhood | text | | |
building_class | text | | |
borough_block_lot | character varying(15) | | |
address | text | | |
year_built | integer | | |
total_units | integer | | |
gross_square_feet | double precision | | |
estimated_gross_income | double precision | | |
gross_income_per_square_foot | double precision | | |
estimated_expense | double precision | | |
expense_per_square_foot | double precision | | |
net_operating_income | double precision | | |
full_market_value | double precision | | |
market_value_per_square_foot | double precision | | |
last_year_total_market_value | double precision | | |
this_year_total_market_value | double precision | | |
last_year_assessed_value | double precision | | |
this_year_assessed_value | double precision | | |
last_year_transitional_assessed_value | double precision | | |
this_year_transitional_assessed_value | double precision
'''
class Building(object):
def __init__(self, bbl=None, connection_pool = None):
self.bbl = bbl
self.connection_pool = connection_pool
self._init()
def _init(self):
self.dbconnection = None
self.address = None
self.lotarea = None
self.bldgarea = None
self.comarea = None
self.resarea = None
self.officearea = None
self.retailarea = None
self.garagearea = None
self.strgearea = None
self.factryarea = None
self.otherarea = None
self.numfloors = None
self.unitsres = None
self.unitstotal = None
self.yearbuilt = None
self.yearalter1 = None
self.yearalter2 = None
self.xcoord = None
self.ycoord = None
self.gr_sqft = None
self.property_tax = None
self.nearby_buildings = []
self.sales = []
return
def __repr__(self):
return "<Bulding(bbl={self.bbl!r})>".format(self=self)
def load_building_attributes(self):
query = """SELECT bbl, address, zipcode, lotarea, bldgarea, comarea, resarea, officearea, retailarea, garagearea, strgearea, factryarea, otherarea,
numfloors, unitsres, unitstotal, yearbuilt, yearalter1, yearalter2, xcoord, ycoord FROM pluto WHERE bbl = %s"""
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
results = cursor.fetchone()
        if results is None:
            logging.error('no data for bbl ' + self.bbl)
            self.connection_pool.putconn(dbconnection)
            return
self.address = results[1] + ' NEW YORK, NY ' + str(results[2])
self.lotarea = results[3]
self.bldgarea = results[4]
self.comarea = results[5]
self.resarea = results[6]
self.officearea = results[7]
self.retailarea = results[8]
self.garagearea = results[9]
self.strgearea = results[10]
self.factryarea = results[11]
self.otherarea = results[12]
self.numfloors = results[13]
self.unitsres = results[14]
self.unitstotal = results[15]
self.yearbuilt = results[16]
self.yearalter1 = results[17]
self.yearalter2 = results[18]
self.xcoord = results[19]
self.ycoord = results[20]
query = 'SELECT gr_sqft FROM tc234 WHERE bble=%s'
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is None:
query = 'SELECT gr_sqft FROM tc1 WHERE bble=%s'
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is not None:
self.gr_sqft = row[0]
tax_query = 'SELECT tax_year, tax_bill FROM tax_records WHERE bbl=%s AND tax_bill IS NOT NULL ORDER BY bill_date DESC;'
cursor.execute(tax_query, (self.bbl,))
row = cursor.fetchone()
if row is not None:
self.property_tax = row[1]
self.connection_pool.putconn(dbconnection)
return
def get_attributes_as_array(self):
        attribute_array = [
            self.lotarea,
            self.bldgarea,
            self.comarea,
            self.resarea,
            self.officearea,
            self.retailarea,
            self.garagearea,
            self.strgearea,
            self.factryarea,
            self.otherarea,
            self.numfloors,
            self.unitsres,
            self.unitstotal,
            self.yearbuilt,
            self.yearalter1,
            self.yearalter2,
        ]
return attribute_array
def get_json(self):
if self.xcoord is None and self.connection_pool is not None:
self.load_building_attributes()
elif self.xcoord is None and self.connection_pool is None:
return ''
schema = BuildingSchema()
return schema.dump(self)
def _get_location_of_bbl(self, bbl):
query = '''select xcoord, ycoord FROM pluto WHERE bbl = %s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (bbl,))
result = cursor.fetchone()
if result is None:
logging.error('no location for bbl ' + bbl)
return
self.connection_pool.putconn(dbconnection)
return (result[0], result[1])
def _distance(self, x1, y1, x2, y2):
return math.sqrt( (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) )
def load_nearby_buildings(self, distance=750):
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
if (self.xcoord is None):
coords = self._get_location_of_bbl(self.bbl)
self.xcoord = coords[0]
self.ycoord = coords[1]
x1 = self.xcoord + distance
x2 = self.xcoord - distance
y1 = self.ycoord + distance
y2 = self.ycoord - distance
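        # Query a square bounding box around the subject parcel first; the
        # exact radial distance filter is applied per candidate further down.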
borough = int(self.bbl[0])
borough_string = get_borough_string(borough)
query = '''select
borough, block, lot, bbl::text AS bbl, address, zipcode,
gross_square_feet, stories, residential_units, total_units,
lot_area, residential_area, retail_area, office_area, common_area, storage_area, garage_area,
factory_area, building_area, other_area,
yearbuilt, last_year_altered, xcoord, ycoord
from building_test WHERE borough = %s AND
xcoord > %s AND xcoord < %s AND ycoord > %s AND ycoord < %s'''
cursor.execute(query, (borough_string, x2, x1, y2, y1))
rows = cursor.fetchall()
for results in rows:
bbl = results[3]
if bbl == self.bbl:
continue
if results[4] is None:
continue
bldg = Building(bbl)
bldg.address = results[4] + ' NEW YORK, NY ' + str(results[5])
bldg.lotarea = results[6]
bldg.bldgarea = results[7]
bldg.comarea = results[8]
bldg.resarea = results[9]
bldg.officearea = results[10]
bldg.retailarea = results[11]
bldg.garagearea = results[12]
bldg.strgearea = results[13]
bldg.factryarea = results[14]
bldg.otherarea = results[15]
bldg.numfloors = results[16]
bldg.unitsres = results[17]
bldg.unitstotal = results[18]
bldg.yearbuilt = results[19]
bldg.yearalter1 = results[20]
bldg.yearalter2 = results[21]
bldg.xcoord = results[22]
bldg.ycoord = results[23]
            dist = self._distance(self.xcoord, self.ycoord, bldg.xcoord, bldg.ycoord)
            if 0 < dist <= distance:
self.nearby_buildings.append(bldg)
query = 'SELECT gr_sqft FROM tc234 WHERE bble=%s'
cursor.execute(query, (bbl,))
row = cursor.fetchone()
if row is None:
query = 'SELECT gr_sqft FROM tc1 WHERE bble=%s'
cursor.execute(query, (bbl,))
row = cursor.fetchone()
if row is not None:
bldg.gr_sqft = row[0]
tax_query = 'SELECT tax_year, tax_bill FROM tax_records WHERE bbl=%s AND tax_bill IS NOT NULL ORDER BY bill_date DESC;'
cursor.execute(tax_query, (bbl,))
row = cursor.fetchone()
if row is not None:
bldg.property_tax = row[1]
self.connection_pool.putconn(dbconnection)
# will be quicker to calculate radius here, anyway
def get_units_in_building(self):
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
borough = self.bbl[0]
block = int(self.bbl[1:6])
lot = int(self.bbl[6:10])
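        # A BBL is a ten-digit NYC parcel id: one digit for the borough, five
        # for the block and four for the lot. Lot numbers above 7500 are used
        # for condominium base lots, which is what the check below relies on.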
units = []
unit_bbls = []
if lot > 7500:
# this is a condo building
address_query = '''SELECT hnum_lo, hnum_hi, str_name FROM tc234 WHERE bble=%s'''
cursor.execute(address_query, (self.bbl,))
row = cursor.fetchone()
hnum_lo = row[0]
hnum_hi = row[1]
str_name = row[2]
unit_query = "SELECT bble FROM tc234 WHERE bble LIKE %s AND (hnum_lo=%s OR hnum_hi=%s) AND str_name=%s"
cursor.execute(unit_query, (self.bbl[0:6]+'%', hnum_lo, hnum_hi, str_name,))
rows = cursor.fetchall()
self.connection_pool.putconn(dbconnection)
unit_bbls = [r[0] for r in rows]
for unit_bbl in unit_bbls:
condo_unit = CondoUnit(unit_bbl, self.bbl, self.connection_pool)
units.append(condo_unit)
self.units = units
return units
class ApartmentBuilding(Building):
def __init__(self, bbl=None, connection_pool=None):
Building.__init__(self, bbl, connection_pool)
self._init()
def _init(self):
self.cur_fv_l = None
self.cur_fv_t = None
self.new_fv_l = None
self.new_fv_t = None
self.curavl = None
self.curavt = None
self.curexl = None
self.curext = None
self.curavl_a = None
self.curavt_a = None
self.curexl_a = None
self.curext_a = None
self.tn_avt = None
self.tn_avl = None
self.tn_ext = None
self.tn_avl_a = None
self.tn_avt_a = None
self.tn_exl_a = None
self.tn_ext_a = None
self.fn_avl = None
self.fn_avt = None
self.fn_exl = None
self.fn_avl_a = None
self.fn_avt_a = None
self.fn_exl_a = None
self.fn_ext_a = None
def load_building_attributes(self):
Building.load_building_attributes(self)
query = '''SELECT * FROM tc234 WHERE bble=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
for varname in vars(self).keys():
try:
idx = column_names.index(varname)
except ValueError:
continue
vars(self)[varname] = row[idx]
def _load(self):
if self.connection_pool is None:
return None
self.load_building_attributes()
query = '''SELECT * FROM tc234 WHERE bble=%s'''
altquery = '''SELECT * FROM tc1 WHERE bble=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is None:
cursor.execute(altquery, (self.bbl,))
row = cursor.fetchone()
if row is None:
return
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
for varname in vars(self).keys():
try:
idx = column_names.index(varname)
except ValueError:
continue
vars(self)[varname] = row[idx]
def get_json(self):
if self.xcoord is None and self.connection_pool is not None:
self.load_building_attributes()
elif self.xcoord is None and self.connection_pool is None:
return ''
schema = ApartmentBuildingSchema()
return schema.dump(self)
class CoopBuilding(ApartmentBuilding):
def __init__(self, bbl=None, connection_pool=None):
ApartmentBuilding.__init__(self, bbl, connection_pool)
pass
class CondoBuilding(Building):
def __init__(self, bbl=None, connection_pool=None):
Building.__init__(self, bbl, connection_pool)
def get_json(self):
if self.xcoord is None and self.connection_pool is not None:
self.load_building_attributes()
elif self.xcoord is None and self.connection_pool is None:
return ''
schema = CondoBuildingSchema()
return schema.dump(self)
class Unit(object):
def __init__(self, id=None, building_bbl=None, connection_pool=None):
self.id = id
self.building_bbl = building_bbl
self.connection_pool = connection_pool
self._init()
def _init(self):
self.gr_sqft = None
self.aptno = None
class CoopUnit(Unit):
def __init__(self, bbl=None, building_bbl=None, connection_pool=None):
self.bbl = bbl
self.building_bbl = building_bbl
Unit.__init__(self, bbl, connection_pool)
self.sales = []
class CondoUnit(Unit):
def __init__(self, bbl=None, building_bbl=None, connection_pool=None):
self.bbl = bbl
super(CondoUnit, self).__init__(bbl, building_bbl, connection_pool)
self._init()
self._load()
def _load(self):
if self.connection_pool is None:
return None
query = '''SELECT * FROM tc234 WHERE bble=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
for varname in vars(self).keys():
try:
idx = column_names.index(varname)
except ValueError:
continue
vars(self)[varname] = row[idx]
sales_queries = """SELECT DocumentId, doctype, borough, block, lot,
DocDate, DocAmount, PartyType, PartyName FROM getallsales(%s,%s,%s);"""
borough = int(self.bbl[0])
block = str(int(self.bbl[1:6]))
lot = str(int(self.bbl[6:10]))
cursor.execute(sales_queries, (borough, block, lot,))
rows = cursor.fetchall()
sales = {}
for row in rows:
#def __init__(self, price=None, date=None, seller=None, buyer=None):
docid = row[0]
if docid not in sales.keys():
sale = {}
sale['price'] = row[6]
sale['date'] = row[5]
if row[7] == '2':
sale['buyer'] = row[8]
else:
sale['seller'] = row[8]
sales[docid] = sale
else:
sale = sales[docid]
if row[7] == '2':
sale['buyer'] = row[8]
else:
                    sale['seller'] = row[8]
        for docid, sale in sales.items():
property_sale = PropertySale(sale['price'], sale['date'], sale['seller'], sale['buyer'])
self.sales.append(property_sale)
self.sales.sort(key=lambda x: x.date)
tax_query = 'SELECT tax_year, tax_bill FROM tax_records WHERE bbl=%s AND tax_bill IS NOT NULL ORDER BY bill_date DESC;'
cursor.execute(tax_query, (self.bbl,))
row = cursor.fetchone()
        if row is not None:
            self.property_tax = row[1]
self.connection_pool.putconn(dbconnection)
def _init(self):
super(CondoUnit, self)._init()
self.cur_fv_l = None
self.cur_fv_t = None
self.new_fv_l = None
self.new_fv_t = None
self.curavl = None
self.curavt = None
self.curexl = None
self.curext = None
self.curavl_a = None
self.curavt_a = None
self.curexl_a = None
self.curext_a = None
self.tn_avt = None
self.tn_avl = None
self.tn_ext = None
self.tn_avl_a = None
self.tn_avt_a = None
self.tn_exl_a = None
self.tn_ext_a = None
self.fn_avl = None
self.fn_avt = None
self.fn_exl = None
self.fn_avl_a = None
self.fn_avt_a = None
self.fn_exl_a = None
self.fn_ext_a = None
self.property_tax = None
self.sales = []
def get_last_sale(self):
if len(self.sales) > 0:
return self.sales[-1]
else:
return None
def get_json(self):
schema = CondoUnitSchema()
return schema.dump(self)
class PropertySale:
def __init__(self, price=None, date=None, seller=None, buyer=None):
self.price = price
self.date = date
self.seller = seller
self.buyer = buyer
class MailingAddress:
def __init__(self, bbl, connection_pool=None):
self.bbl = bbl
self.address = None
self.connection_pool = connection_pool
def _load(self):
if self.connection_pool is None:
return None
query = '''SELECT bbl, address FROM mailing_addresses WHERE bbl=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is not None:
self.address = row[1]
def get_json(self):
if self.address is None:
self._load()
schema = MailingAddressSchema()
return schema.dump(self)
| import logging
logging.basicConfig(format='%(asctime)s %(funcName)s %(message)s', filename='/var/log/astor_square/astor_housing.log',level=logging.DEBUG)
from astor_schemas import *
import math
from astor_square_utils import *
class UnitTaxInfo(object):
def __init__(self, bbl=None, connection_pool=None):
self.connection_pool = connection_pool
self.query = None
self.bbl = bbl
self.neighborhood = None
self.building_class = None
self.borough_block_lot = None
self.address = None
self.year_built = None
self.total_units = None
self.gross_square_feet = None
self.estimated_gross_income = None
self.gross_income_per_square_foot = None
self.estimated_expense = None
self.expense_per_square_foot = None
self.net_operating_income = None
self.net_operating_income_per_square_foot = None
self.full_market_value = None
self.market_value_per_square_foot = None
self.net_present_value = None
self.net_present_value_per_square_foot = None
self.last_year_annual_tax = None
self.this_year_annual_tax = None
self.full_addr = None
@property
def full_address(self):
if self.full_addr is None and self.address is not None:
borough = self.bbl[0]
city = get_borough_city(borough)
state = 'NY'
zip = None #getzipcode(self.address, city, state)
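            # The geocoded ZIP lookup is left disabled here; the fallback below
            # substitutes an empty string so the formatted address stays valid.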
if zip is None:
zip = ''
self.full_addr = self.address + ' ' + city + ', ' + state + ' ' + zip
return self.full_addr.strip()
class Comparable(UnitTaxInfo):
def __init__(self, bbl=None, connection_pool=None):
UnitTaxInfo.__init__(self, bbl, connection_pool)
self.query = 'select DISTINCT * from tax_analysis_city_comparables where borough_block_lot = %s'
self.bbl = None
self.neighborhood = None
self.building_class = None
self.borough_block_lot = None
self.address = None
self.year_built = None
self.total_units = None
self.gross_square_feet = None
self.estimated_gross_income = None
self.gross_income_per_square_foot = None
self.estimated_expense = None
self.expense_per_square_foot = None
self.net_operating_income = None
self.full_market_value = None
self.market_value_per_square_foot = None
self.comparablebbl = None
self.annual_tax = None
self.comp_quality = None
self.year = None
self.fiscal_year = None
self.lat = None
self.long = None
def __repr__(self):
return "<Comparable(bbl={self.bbl!r},comparablebbl={self.comparablebbl!r})>".format(self=self)
def create_comparable_from_row(self, row):
self.neighborhood = row[0]
self.building_class = row[1]
self.borough_block_lot = row[2]
self.bbl = self.borough_block_lot.replace('-','') if self.bbl is None else self.bbl
logging.debug('bbl set to ' + self.bbl + ' from ' + self.borough_block_lot)
self.address = row[3]
self.year_built = row[4]
self.total_units = row[5]
self.gross_square_feet = row[6]
self.estimated_gross_income = row[7]
self.gross_income_per_square_foot = row[8]
self.estimated_expense = row[9]
self.expense_per_square_foot = row[10]
self.net_operating_income = row[11]
if self.net_operating_income is not None and self.gross_square_feet is not None:
self.net_operating_income_per_square_foot = self.net_operating_income / self.gross_square_feet
self.full_market_value = row[12]
self.market_value_per_square_foot = row[13]
self.distance_from_subject_in_miles = row[14]
self.comparablebbl = row[15]
self.year = row[16]
self.fiscal_year = row[17]
self.comp_quality = row[18]
self.lat = row[19]
self.long = row[20]
return
def load_comparable_attributes(self):
if self.bbl is None:
return
query_bbl = create_dashed_bbl(self.bbl)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(self.query, (query_bbl,))
row = cursor.fetchone()
self.neighborhood = row[0]
self.building_class = row[1]
self.borough_block_lot = row[2]
self.bbl = self.borough_block_lot.replace('-','')
self.address = row[3]
self.year_built = row[4]
self.total_units = row[5]
self.gross_square_feet = row[6]
self.estimated_gross_income = row[7]
self.gross_income_per_square_foot = row[8]
self.estimated_expense = row[9]
self.expense_per_square_foot = row[10]
self.net_operating_income = row[11]
if self.net_operating_income is not None and self.gross_square_feet is not None:
self.net_operating_income_per_square_foot = self.net_operating_income / self.gross_square_feet
self.full_market_value = row[12]
self.market_value_per_square_foot = row[13]
self.comparablebbl = row[14]
def get_json(self):
if self.bbl is None and self.connection_pool is not None:
logging.debug('loading comparable attributes')
self.load_comparable_attributes()
elif self.bbl is None and self.connection_pool is None:
logging.debug('No bbl. Returning blank result')
return '{}'
schema = ComparableSchema()
return schema.dump(self)
class PropertyTaxAnalysis(UnitTaxInfo):
def __init__(self, bbl=None, connection_pool=None):
UnitTaxInfo.__init__(self, bbl, connection_pool)
self.query = '''SELECT *
FROM building_tax_analysis b
LEFT JOIN bbl_locations l ON
b.borough_block_lot = l.borough_block_lot
WHERE b.borough_block_lot = %s AND fiscal_year IS NOT NULL ORDER BY fiscal_year DESC'''
self.bbl = bbl
self.last_year_total_market_value = None
self.this_year_total_market_value = None
self.last_year_assessed_value = None
self.this_year_assessed_value = None
self.last_year_transitional_assessed_value = None
self.this_year_transitional_assessed_value = None
self.lat = None
self.long = None
def __repr__(self):
return "<PropertyTaxAnalysis(bbl={self.bbl!r})>".format(self=self)
def load_tax_analysis_attributes(self):
if self.bbl is None:
return
query_bbl = create_dashed_bbl(self.bbl)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(self.query, (query_bbl,))
row = cursor.fetchone()
self.neighborhood = row[0]
self.building_class = row[1]
self.borough_block_lot = row[2]
self.address = row[3]
self.year_built = row[4]
self.total_units = row[5]
self.gross_square_feet = row[6]
self.estimated_gross_income = row[7]
self.gross_income_per_square_foot = row[8]
self.estimated_expense = row[9]
self.expense_per_square_foot = row[10]
self.net_operating_income = row[11]
if self.net_operating_income is not None and self.gross_square_feet is not None:
self.net_operating_income_per_square_foot = self.net_operating_income / self.gross_square_feet
self.full_market_value = row[12]
self.market_value_per_square_foot = row[13]
self.last_year_total_market_value = row[14]
self.this_year_total_market_value = row[15]
self.last_year_assessed_value = row[16]
self.this_year_assessed_value = row[17]
self.last_year_transitional_assessed_value = row[18]
self.this_year_transitional_assessed_value = row[19]
self.last_year_annual_tax = row[20]
self.this_year_annual_tax = row[21]
self.lat = row[28]
self.long = row[29]
self.connection_pool.putconn(dbconnection)
return
def get_json(self):
if self.neighborhood is None and self.connection_pool is not None:
self.load_tax_analysis_attributes()
elif self.neighborhood is None and self.connection_pool is None:
return ''
try:
schema = PropertyTaxAnalysisSchema()
result = schema.dump(self)
except Exception as e:
logging.error('problem getting schema: ' + str(e))
result = {}
        return result
class CondoTaxAnalysis(PropertyTaxAnalysis):
def __init__(self, bbl=None, connection_pool=None):
PropertyTaxAnalysis.__init__(self, bbl, connection_pool)
self.query = 'select * from condo_tax_analysis where borough_block_lot = %s'
def __repr__(self):
return "<CondoTaxAnalysis(bbl={self.bbl!r})>".format(self=self)
class UnitAndBuildingTaxAnalysis(object):
def __init__(self, unit_tax_analysis, building_tax_analysis):
self.unit_tax_analysis = unit_tax_analysis
if self.unit_tax_analysis.neighborhood is None and self.unit_tax_analysis.connection_pool is not None:
self.unit_tax_analysis.load_tax_analysis_attributes()
self.building_tax_analysis = building_tax_analysis
if self.building_tax_analysis.neighborhood is None and self.building_tax_analysis.connection_pool is not None:
self.building_tax_analysis.load_tax_analysis_attributes()
def __repr__(self):
return "<UnitAndBuildingTaxAnalysis(unit_tax_analysis={self.unit_tax_analysis!r}, building_tax_analysis={self.building_tax_analysis!r})>".format(self=self)
def get_json(self):
schema = UnitAndBuildingTaxAnalysisSchema()
return schema.dump(self)
class CityComparable(Comparable):
def __init__(self, bbl=None, connection_pool=None):
Comparable.__init__(self, bbl, connection_pool)
self.unadjusted_income_query = '''SELECT
estimated_gross_income,
gross_income_per_square_foot,
estimated_expense,
expense_per_square_foot,
net_operating_income,
full_market_value,
market_value_per_square_foot
FROM city_comparables_unadjusted
WHERE year = %s
AND borough_block_lot = %s'''
self.unadjusted_income_query_alt = '''SELECT
estimated_gross_income,
gross_income_per_square_foot,
estimated_expense,
expense_per_square_foot,
net_operating_income,
full_market_value,
market_value_per_square_foot
FROM building_tax_analysis
WHERE year = %s
AND borough_block_lot = %s'''
self.unadjusted_estimated_gross_income = None
self.unadjusted_gross_income_per_square_foot = None
self.unadjusted_estimated_expense = None
self.unadjusted_expense_per_square_foot = None
self.unadjusted_net_operating_income = None
self.unadjusted_full_market_value = None
self.unadjusted_market_value_per_square_foot = None
def add_unadjusted_data_from_row(self, row):
self.unadjusted_estimated_gross_income = row[0]
self.unadjusted_gross_income_per_square_foot = row[1]
self.unadjusted_estimated_expense = row[2]
self.unadjusted_expense_per_square_foot = row[3]
self.unadjusted_net_operating_income = row[4]
self.unadjusted_full_market_value = row[5]
self.unadjusted_market_value_per_square_foot = row[6]
def get_json(self):
if self.bbl is None and self.connection_pool is not None:
logging.debug('loading comparable attributes')
self.load_comparable_attributes()
elif self.bbl is None and self.connection_pool is None:
logging.debug('No bbl. Returning blank result')
return '{}'
schema = CityComparableSchema()
return schema.dump(self)
class CityComparables(object):
def __init__(self, bbl=None, connection_pool=None):
self.query = """SELECT DISTINCT
c.neighborhood,
c.building_class,
c.borough_block_lot,
c.address,
c.year_built,
c.total_units,
c.gross_square_feet,
c.estimated_gross_income,
c.gross_income_per_square_foot,
c.estimated_expense,
c.expense_per_square_foot,
c.net_operating_income,
c.full_market_value,
c.market_value_per_square_foot,
c.distance_from_subject_in_miles,
c.comparableof,
c.year,
c.fiscal_year,
s.score,
l.lat,
l.long
FROM tax_analysis_city_comparables c
LEFT JOIN similar_bbls s on REPLACE(c.borough_block_lot, '-', '') = s.similar_bbl
AND REPLACE(c.comparableof, '-','') = s.bbl AND s.city_comp = True
LEFT JOIN bbl_locations l ON l.borough_block_lot = c.borough_block_lot
where c.comparableof = %s"""
self.comparables = []
self.comparableof = bbl
self.connection_pool = connection_pool
query_bbl = create_dashed_bbl(self.comparableof)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
logging.debug('executing query ' + self.query + ' with argument ' + query_bbl)
cursor.execute(self.query, (query_bbl,))
rows = cursor.fetchall()
logging.debug('got ' + str(len(rows)) + ' comparable results')
for row in rows:
comparable = CityComparable()
comparable.create_comparable_from_row(row)
cursor.execute(comparable.unadjusted_income_query, (comparable.year, comparable.borough_block_lot))
unadjusted_row = cursor.fetchone()
if unadjusted_row is not None:
comparable.add_unadjusted_data_from_row(unadjusted_row)
else:
cursor.execute(comparable.unadjusted_income_query_alt, (comparable.year, comparable.borough_block_lot))
unadjusted_row = cursor.fetchone()
if unadjusted_row is not None:
comparable.add_unadjusted_data_from_row(unadjusted_row)
self.comparables.append(comparable)
self.connection_pool.putconn(dbconnection)
return
def get_json(self):
result = [c.get_json() for c in self.comparables]
return result
class RecommendedComparables(object):
def __init__(self, bbl=None, year=None, connection_pool=None):
self.comparable_bbls_query = 'SELECT DISTINCT similar_bbl, score FROM similar_bbls WHERE bbl = %s'
query_template = 'select DISTINCT * from tax_analysis_recommended_comparables where borough_block_lot IN ('
query_template = '''SELECT DISTINCT
c.neighborhood,
c.building_class,
c.borough_block_lot,
c.address,
c.year_built,
c.total_units,
c.gross_square_feet,
c.estimated_gross_income,
c.gross_income_per_square_foot,
c.estimated_expense,
c.expense_per_square_foot,
c.net_operating_income,
c.full_market_value,
c.market_value_per_square_foot,
c.distance_from_subject_in_miles,
c.annual_tax,
c.comparableof,
c.year,
c.fiscal_year,
l.lat,
l.long
FROM tax_analysis_recommended_comparables c
LEFT JOIN bbl_locations l ON l.borough_block_lot = c.borough_block_lot
where c.borough_block_lot IN (
'''
self.comparables = []
self.comparableof = bbl
self.connection_pool = connection_pool
self.year = year
query_bbl = create_dashed_bbl(self.comparableof)
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
logging.debug('executing query ' + self.comparable_bbls_query + ' with argument ' + bbl)
cursor.execute(self.comparable_bbls_query, (bbl,))
rows = cursor.fetchall()
if rows is None or len(rows) == 0:
return
recommended_bbls = [create_dashed_bbl(row[0]) for row in rows]
scores = {}
for row in rows:
scores[row[0]] = row[1]
self.query = query_template + ','.join(['%s']*len(recommended_bbls)) + ')'
if year is not None:
self.query = self.query + " AND year = %s"
logging.debug('executing query ' + self.query + ' with argument ' + str(recommended_bbls))
if year is None:
cursor.execute(self.query, tuple(recommended_bbls))
else:
cursor.execute(self.query, tuple(recommended_bbls) + tuple([year]))
rows = cursor.fetchall()
logging.debug('got ' + str(len(rows)) + ' comparable results')
for row in rows:
comparable = Comparable()
self.create_recommended_comparable_from_row(comparable, row)
if comparable.borough_block_lot.replace('-','') in scores.keys():
comparable.comp_quality = scores[comparable.borough_block_lot.replace('-','')]
self.comparables.append(comparable)
self.connection_pool.putconn(dbconnection)
return
def create_recommended_comparable_from_row(self, comparable, row):
comparable.neighborhood = row[0]
comparable.building_class = row[1]
comparable.borough_block_lot = row[2]
comparable.bbl = comparable.borough_block_lot.replace('-','') if comparable.bbl is None else comparable.bbl
logging.debug('bbl set to ' + comparable.bbl + ' from ' + comparable.borough_block_lot)
comparable.address = row[3]
comparable.year_built = row[4]
comparable.total_units = row[5]
comparable.gross_square_feet = row[6]
comparable.estimated_gross_income = row[7]
comparable.gross_income_per_square_foot = row[8]
comparable.estimated_expense = row[9]
comparable.expense_per_square_foot = row[10]
comparable.net_operating_income = row[11]
if comparable.net_operating_income is not None and comparable.gross_square_feet is not None and comparable.gross_square_feet != 0:
comparable.net_operating_income_per_square_foot = comparable.net_operating_income / comparable.gross_square_feet
            comparable.net_present_value = comparable.net_operating_income / (0.06 - 0.02)
comparable.net_present_value_per_square_foot = comparable.net_present_value / comparable.gross_square_feet
comparable.full_market_value = row[12]
comparable.market_value_per_square_foot = row[13]
comparable.distance_from_subject_in_miles = row[14]
comparable.annual_tax = row[15]
comparable.comparableof = row[16]
comparable.year = row[17]
comparable.fiscal_year = row[18]
comparable.lat = row[19]
comparable.long = row[20]
def get_json(self):
result = [c.get_json() for c in self.comparables]
return result
'''
neighborhood | text | | |
building_class | text | | |
borough_block_lot | character varying(15) | | |
address | text | | |
year_built | integer | | |
total_units | integer | | |
gross_square_feet | double precision | | |
estimated_gross_income | double precision | | |
gross_income_per_square_foot | double precision | | |
estimated_expense | double precision | | |
expense_per_square_foot | double precision | | |
net_operating_income | double precision | | |
full_market_value | double precision | | |
market_value_per_square_foot | double precision | | |
last_year_total_market_value | double precision | | |
this_year_total_market_value | double precision | | |
last_year_assessed_value | double precision | | |
this_year_assessed_value | double precision | | |
last_year_transitional_assessed_value | double precision | | |
this_year_transitional_assessed_value | double precision
'''
class Building(object):
def __init__(self, bbl=None, connection_pool = None):
self.bbl = bbl
self.connection_pool = connection_pool
self._init()
def _init(self):
self.dbconnection = None
self.address = None
self.lotarea = None
self.bldgarea = None
self.comarea = None
self.resarea = None
self.officearea = None
self.retailarea = None
self.garagearea = None
self.strgearea = None
self.factryarea = None
self.otherarea = None
self.numfloors = None
self.unitsres = None
self.unitstotal = None
self.yearbuilt = None
self.yearalter1 = None
self.yearalter2 = None
self.xcoord = None
self.ycoord = None
self.gr_sqft = None
self.property_tax = None
self.nearby_buildings = []
self.sales = []
return
def __repr__(self):
return "<Bulding(bbl={self.bbl!r})>".format(self=self)
def load_building_attributes(self):
query = """SELECT bbl, address, zipcode, lotarea, bldgarea, comarea, resarea, officearea, retailarea, garagearea, strgearea, factryarea, otherarea,
numfloors, unitsres, unitstotal, yearbuilt, yearalter1, yearalter2, xcoord, ycoord FROM pluto WHERE bbl = %s"""
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
results = cursor.fetchone()
if results is None:
logging.error('no data for bbl ' + self.bbl)
return
self.address = results[1] + ' NEW YORK, NY ' + str(results[2])
self.lotarea = results[3]
self.bldgarea = results[4]
self.comarea = results[5]
self.resarea = results[6]
self.officearea = results[7]
self.retailarea = results[8]
self.garagearea = results[9]
self.strgearea = results[10]
self.factryarea = results[11]
self.otherarea = results[12]
self.numfloors = results[13]
self.unitsres = results[14]
self.unitstotal = results[15]
self.yearbuilt = results[16]
self.yearalter1 = results[17]
self.yearalter2 = results[18]
self.xcoord = results[19]
self.ycoord = results[20]
query = 'SELECT gr_sqft FROM tc234 WHERE bble=%s'
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is None:
query = 'SELECT gr_sqft FROM tc1 WHERE bble=%s'
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is not None:
self.gr_sqft = row[0]
tax_query = 'SELECT tax_year, tax_bill FROM tax_records WHERE bbl=%s AND tax_bill IS NOT NULL ORDER BY bill_date DESC;'
cursor.execute(tax_query, (self.bbl,))
row = cursor.fetchone()
if row is not None:
self.property_tax = row[1]
self.connection_pool.putconn(dbconnection)
return
def get_attributes_as_array(self):
attribute_array = [ \
self.lotarea, \
self.bldgarea, \
self.comarea, \
self.resarea, \
self.officearea, \
self.retailarea, \
self.garagearea, \
self.strgearea, \
self.factryarea, \
self.otherarea, \
self.numfloors, \
self.unitsres, \
self.unitstotal, \
self.yearbuilt, \
self.yearalter1, \
self.yearalter2 \
]
return attribute_array
def get_json(self):
if self.xcoord is None and self.connection_pool is not None:
self.load_building_attributes()
elif self.xcoord is None and self.connection_pool is None:
return ''
schema = BuildingSchema()
return schema.dump(self)
def _get_location_of_bbl(self, bbl):
query = '''select xcoord, ycoord FROM pluto WHERE bbl = %s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (bbl,))
result = cursor.fetchone()
if result is None:
logging.error('no location for bbl ' + bbl)
return
self.connection_pool.putconn(dbconnection)
return (result[0], result[1])
def _distance(self, x1, y1, x2, y2):
return math.sqrt( (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1) )
def load_nearby_buildings(self, distance=750):
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
if (self.xcoord is None):
coords = self._get_location_of_bbl(self.bbl)
self.xcoord = coords[0]
self.ycoord = coords[1]
x1 = self.xcoord + distance
x2 = self.xcoord - distance
y1 = self.ycoord + distance
y2 = self.ycoord - distance
borough = int(self.bbl[0])
borough_string = get_borough_string(borough)
query = '''select
borough, block, lot, bbl::text AS bbl, address, zipcode,
gross_square_feet, stories, residential_units, total_units,
lot_area, residential_area, retail_area, office_area, common_area, storage_area, garage_area,
factory_area, building_area, other_area,
yearbuilt, last_year_altered, xcoord, ycoord
from building_test WHERE borough = %s AND
xcoord > %s AND xcoord < %s AND ycoord > %s AND ycoord < %s'''
cursor.execute(query, (borough_string, x2, x1, y2, y1))
rows = cursor.fetchall()
for results in rows:
bbl = results[3]
if bbl == self.bbl:
continue
if results[4] is None:
continue
bldg = Building(bbl)
bldg.address = results[4] + ' NEW YORK, NY ' + str(results[5])
bldg.lotarea = results[6]
bldg.bldgarea = results[7]
bldg.comarea = results[8]
bldg.resarea = results[9]
bldg.officearea = results[10]
bldg.retailarea = results[11]
bldg.garagearea = results[12]
bldg.strgearea = results[13]
bldg.factryarea = results[14]
bldg.otherarea = results[15]
bldg.numfloors = results[16]
bldg.unitsres = results[17]
bldg.unitstotal = results[18]
bldg.yearbuilt = results[19]
bldg.yearalter1 = results[20]
bldg.yearalter2 = results[21]
bldg.xcoord = results[22]
bldg.ycoord = results[23]
            dist = self._distance(self.xcoord, self.ycoord, bldg.xcoord, bldg.ycoord)
            if 0 < dist <= distance:
self.nearby_buildings.append(bldg)
query = 'SELECT gr_sqft FROM tc234 WHERE bble=%s'
cursor.execute(query, (bbl,))
row = cursor.fetchone()
if row is None:
query = 'SELECT gr_sqft FROM tc1 WHERE bble=%s'
cursor.execute(query, (bbl,))
row = cursor.fetchone()
if row is not None:
bldg.gr_sqft = row[0]
tax_query = 'SELECT tax_year, tax_bill FROM tax_records WHERE bbl=%s AND tax_bill IS NOT NULL ORDER BY bill_date DESC;'
cursor.execute(tax_query, (bbl,))
row = cursor.fetchone()
if row is not None:
bldg.property_tax = row[1]
self.connection_pool.putconn(dbconnection)
# will be quicker to calculate radius here, anyway
def get_units_in_building(self):
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
borough = self.bbl[0]
block = int(self.bbl[1:6])
lot = int(self.bbl[6:10])
units = []
unit_bbls = []
if lot > 7500:
# this is a condo building
address_query = '''SELECT hnum_lo, hnum_hi, str_name FROM tc234 WHERE bble=%s'''
cursor.execute(address_query, (self.bbl,))
row = cursor.fetchone()
hnum_lo = row[0]
hnum_hi = row[1]
str_name = row[2]
unit_query = "SELECT bble FROM tc234 WHERE bble LIKE %s AND (hnum_lo=%s OR hnum_hi=%s) AND str_name=%s"
cursor.execute(unit_query, (self.bbl[0:6]+'%', hnum_lo, hnum_hi, str_name,))
rows = cursor.fetchall()
self.connection_pool.putconn(dbconnection)
unit_bbls = [r[0] for r in rows]
for unit_bbl in unit_bbls:
condo_unit = CondoUnit(unit_bbl, self.bbl, self.connection_pool)
units.append(condo_unit)
self.units = units
return units
class ApartmentBuilding(Building):
def __init__(self, bbl=None, connection_pool=None):
Building.__init__(self, bbl, connection_pool)
self._init()
def _init(self):
self.cur_fv_l = None
self.cur_fv_t = None
self.new_fv_l = None
self.new_fv_t = None
self.curavl = None
self.curavt = None
self.curexl = None
self.curext = None
self.curavl_a = None
self.curavt_a = None
self.curexl_a = None
self.curext_a = None
self.tn_avt = None
self.tn_avl = None
self.tn_ext = None
self.tn_avl_a = None
self.tn_avt_a = None
self.tn_exl_a = None
self.tn_ext_a = None
self.fn_avl = None
self.fn_avt = None
self.fn_exl = None
self.fn_avl_a = None
self.fn_avt_a = None
self.fn_exl_a = None
self.fn_ext_a = None
def load_building_attributes(self):
Building.load_building_attributes(self)
query = '''SELECT * FROM tc234 WHERE bble=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
for varname in vars(self).keys():
try:
idx = column_names.index(varname)
except ValueError:
continue
vars(self)[varname] = row[idx]
def _load(self):
if self.connection_pool is None:
return None
self.load_building_attributes()
query = '''SELECT * FROM tc234 WHERE bble=%s'''
altquery = '''SELECT * FROM tc1 WHERE bble=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is None:
cursor.execute(altquery, (self.bbl,))
row = cursor.fetchone()
if row is None:
return
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
for varname in vars(self).keys():
try:
idx = column_names.index(varname)
except ValueError:
continue
vars(self)[varname] = row[idx]
def get_json(self):
if self.xcoord is None and self.connection_pool is not None:
self.load_building_attributes()
elif self.xcoord is None and self.connection_pool is None:
return ''
schema = ApartmentBuildingSchema()
return schema.dump(self)
class CoopBuilding(ApartmentBuilding):
def __init__(self, bbl=None, connection_pool=None):
ApartmentBuilding.__init__(self, bbl, connection_pool)
pass
class CondoBuilding(Building):
def __init__(self, bbl=None, connection_pool=None):
Building.__init__(self, bbl, connection_pool)
def get_json(self):
if self.xcoord is None and self.connection_pool is not None:
self.load_building_attributes()
elif self.xcoord is None and self.connection_pool is None:
return ''
schema = CondoBuildingSchema()
return schema.dump(self)
class Unit(object):
def __init__(self, id=None, building_bbl=None, connection_pool=None):
self.id = id
self.building_bbl = building_bbl
self.connection_pool = connection_pool
self._init()
def _init(self):
self.gr_sqft = None
self.aptno = None
class CoopUnit(Unit):
def __init__(self, bbl=None, building_bbl=None, connection_pool=None):
self.bbl = bbl
self.building_bbl = building_bbl
Unit.__init__(self, bbl, connection_pool)
self.sales = []
class CondoUnit(Unit):
def __init__(self, bbl=None, building_bbl=None, connection_pool=None):
self.bbl = bbl
super(CondoUnit, self).__init__(bbl, building_bbl, connection_pool)
self._init()
self._load()
def _load(self):
if self.connection_pool is None:
return None
query = '''SELECT * FROM tc234 WHERE bble=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
description = cursor.description
column_names = [d[0] for d in description]
column_types = [d[1] for d in description]
for varname in vars(self).keys():
try:
idx = column_names.index(varname)
except ValueError:
continue
vars(self)[varname] = row[idx]
sales_queries = """SELECT DocumentId, doctype, borough, block, lot,
DocDate, DocAmount, PartyType, PartyName FROM getallsales(%s,%s,%s);"""
borough = int(self.bbl[0])
block = str(int(self.bbl[1:6]))
lot = str(int(self.bbl[6:10]))
cursor.execute(sales_queries, (borough, block, lot,))
rows = cursor.fetchall()
sales = {}
for row in rows:
#def __init__(self, price=None, date=None, seller=None, buyer=None):
docid = row[0]
if docid not in sales.keys():
sale = {}
sale['price'] = row[6]
sale['date'] = row[5]
if row[7] == '2':
sale['buyer'] = row[8]
else:
sale['seller'] = row[8]
sales[docid] = sale
else:
sale = sales[docid]
if row[7] == '2':
sale['buyer'] = row[8]
else:
                    sale['seller'] = row[8]
        for docid, sale in sales.items():
property_sale = PropertySale(sale['price'], sale['date'], sale['seller'], sale['buyer'])
self.sales.append(property_sale)
self.sales.sort(key=lambda x: x.date)
tax_query = 'SELECT tax_year, tax_bill FROM tax_records WHERE bbl=%s AND tax_bill IS NOT NULL ORDER BY bill_date DESC;'
cursor.execute(tax_query, (self.bbl,))
row = cursor.fetchone()
        if row is not None:
            self.property_tax = row[1]
self.connection_pool.putconn(dbconnection)
def _init(self):
super(CondoUnit, self)._init()
self.cur_fv_l = None
self.cur_fv_t = None
self.new_fv_l = None
self.new_fv_t = None
self.curavl = None
self.curavt = None
self.curexl = None
self.curext = None
self.curavl_a = None
self.curavt_a = None
self.curexl_a = None
self.curext_a = None
self.tn_avt = None
self.tn_avl = None
self.tn_ext = None
self.tn_avl_a = None
self.tn_avt_a = None
self.tn_exl_a = None
self.tn_ext_a = None
self.fn_avl = None
self.fn_avt = None
self.fn_exl = None
self.fn_avl_a = None
self.fn_avt_a = None
self.fn_exl_a = None
self.fn_ext_a = None
self.property_tax = None
self.sales = []
def get_last_sale(self):
if len(self.sales) > 0:
return self.sales[-1]
else:
return None
def get_json(self):
schema = CondoUnitSchema()
return schema.dump(self)
class PropertySale:
def __init__(self, price=None, date=None, seller=None, buyer=None):
self.price = price
self.date = date
self.seller = seller
self.buyer = buyer
class MailingAddress:
def __init__(self, bbl, connection_pool=None):
self.bbl = bbl
self.address = None
self.connection_pool = connection_pool
def _load(self):
if self.connection_pool is None:
return None
query = '''SELECT bbl, address FROM mailing_addresses WHERE bbl=%s'''
dbconnection = self.connection_pool.getconn()
cursor = dbconnection.cursor()
cursor.execute(query, (self.bbl,))
row = cursor.fetchone()
if row is not None:
self.address = row[1]
def get_json(self):
if self.address is None:
self._load()
schema = MailingAddressSchema()
return schema.dump(self) | en | 0.606753 | #getzipcode(self.address, city, state) SELECT * FROM building_tax_analysis b LEFT JOIN bbl_locations l ON b.borough_block_lot = l.borough_block_lot WHERE b.borough_block_lot = %s AND fiscal_year IS NOT NULL ORDER BY fiscal_year DESC SELECT estimated_gross_income, gross_income_per_square_foot, estimated_expense, expense_per_square_foot, net_operating_income, full_market_value, market_value_per_square_foot FROM city_comparables_unadjusted WHERE year = %s AND borough_block_lot = %s SELECT estimated_gross_income, gross_income_per_square_foot, estimated_expense, expense_per_square_foot, net_operating_income, full_market_value, market_value_per_square_foot FROM building_tax_analysis WHERE year = %s AND borough_block_lot = %s SELECT DISTINCT c.neighborhood, c.building_class, c.borough_block_lot, c.address, c.year_built, c.total_units, c.gross_square_feet, c.estimated_gross_income, c.gross_income_per_square_foot, c.estimated_expense, c.expense_per_square_foot, c.net_operating_income, c.full_market_value, c.market_value_per_square_foot, c.distance_from_subject_in_miles, c.comparableof, c.year, c.fiscal_year, s.score, l.lat, l.long FROM tax_analysis_city_comparables c LEFT JOIN similar_bbls s on REPLACE(c.borough_block_lot, '-', '') = s.similar_bbl AND REPLACE(c.comparableof, '-','') = s.bbl AND s.city_comp = True LEFT JOIN bbl_locations l ON l.borough_block_lot = c.borough_block_lot where c.comparableof = %s SELECT DISTINCT c.neighborhood, c.building_class, c.borough_block_lot, c.address, c.year_built, c.total_units, c.gross_square_feet, c.estimated_gross_income, c.gross_income_per_square_foot, c.estimated_expense, c.expense_per_square_foot, c.net_operating_income, c.full_market_value, c.market_value_per_square_foot, c.distance_from_subject_in_miles, c.annual_tax, c.comparableof, c.year, c.fiscal_year, l.lat, l.long FROM tax_analysis_recommended_comparables c LEFT JOIN bbl_locations l ON l.borough_block_lot = c.borough_block_lot where c.borough_block_lot IN ( neighborhood | text | | | building_class | text | | | borough_block_lot | character varying(15) | | | address | text | | | year_built | integer | | | total_units | integer | | | gross_square_feet | double precision | | | estimated_gross_income | double precision | | | gross_income_per_square_foot | double precision | | | estimated_expense | double precision | | | expense_per_square_foot | double precision | | | net_operating_income | double precision | | | full_market_value | double precision | | | market_value_per_square_foot | double precision | | | last_year_total_market_value | double precision | | | this_year_total_market_value | double precision | | | last_year_assessed_value | double precision | | | this_year_assessed_value | double precision | | | last_year_transitional_assessed_value | double precision | | | this_year_transitional_assessed_value | double precision SELECT bbl, address, zipcode, lotarea, bldgarea, comarea, resarea, officearea, retailarea, garagearea, strgearea, factryarea, otherarea, numfloors, unitsres, unitstotal, yearbuilt, yearalter1, yearalter2, xcoord, ycoord FROM pluto WHERE bbl = %s select xcoord, ycoord FROM pluto WHERE bbl = %s select borough, block, lot, bbl::text AS bbl, address, zipcode, gross_square_feet, stories, residential_units, total_units, lot_area, residential_area, retail_area, office_area, common_area, storage_area, garage_area, factory_area, building_area, other_area, yearbuilt, last_year_altered, xcoord, ycoord from building_test WHERE borough = %s AND 
xcoord > %s AND xcoord < %s AND ycoord > %s AND ycoord < %s # will be quicker to calculate radius here, anyway # this is a condo building SELECT hnum_lo, hnum_hi, str_name FROM tc234 WHERE bble=%s SELECT * FROM tc234 WHERE bble=%s SELECT * FROM tc234 WHERE bble=%s SELECT * FROM tc1 WHERE bble=%s SELECT * FROM tc234 WHERE bble=%s SELECT DocumentId, doctype, borough, block, lot, DocDate, DocAmount, PartyType, PartyName FROM getallsales(%s,%s,%s); #def __init__(self, price=None, date=None, seller=None, buyer=None): SELECT bbl, address FROM mailing_addresses WHERE bbl=%s | 2.319651 | 2 |
dask_geomodeling/raster/misc.py | wietzesuijker/dask-geomodeling | 0 | 6631432 | <reponame>wietzesuijker/dask-geomodeling
"""
Module containing miscellaneous raster blocks.
"""
from osgeo import ogr
import numpy as np
import random
from geopandas import GeoSeries
from shapely.geometry import box
from shapely.errors import WKTReadingError
from shapely.wkt import loads as load_wkt
from dask import config
from dask_geomodeling.geometry import GeometryBlock
from dask_geomodeling import utils
from .base import RasterBlock, BaseSingle
__all__ = [
"Clip",
"Classify",
"Reclassify",
"Mask",
"MaskAbove",
"MaskBelow",
"MaskRandom",
"Step",
"Rasterize",
"RasterizeWKT",
]
class Clip(BaseSingle):
"""
Clip one raster to the extent of another raster.
Takes two raster inputs, one raster ('store') whose values are returned in
the output and one raster ('source') that is used as the extent. Cells of
the 'store' raster are replaced with 'no data' if there is no data in the
'source' raster.
If the 'source' raster is a boolean raster, False will result in 'no data'.
Args:
store (RasterBlock): Raster whose values are clipped
source (RasterBlock): Raster that is used as the clipping mask
Returns:
RasterBlock with clipped values.
"""
def __init__(self, store, source):
if not isinstance(source, RasterBlock):
raise TypeError("'{}' object is not allowed".format(type(store)))
super(Clip, self).__init__(store, source)
@property
def source(self):
return self.args[1]
@staticmethod
def process(data, source_data):
""" Mask store_data where source_data has no data """
if data is None:
return None
if "values" not in data:
return data
# check if values contain data
if np.all(data["values"] == data["no_data_value"]):
return data
# make the boolean mask
if source_data is None:
return None
if source_data["values"].dtype == np.dtype("bool"):
mask = ~source_data["values"]
else:
mask = source_data["values"] == source_data["no_data_value"]
# adjust values
values = data["values"].copy()
values[mask] = data["no_data_value"]
return {"values": values, "no_data_value": data["no_data_value"]}
@property
def extent(self):
"""Intersection of bounding boxes of 'store' and 'source'. """
result, mask = [s.extent for s in self.args]
if result is None or mask is None:
return
# return the overlapping box
x1 = max(result[0], mask[0])
y1 = max(result[1], mask[1])
x2 = min(result[2], mask[2])
y2 = min(result[3], mask[3])
if x2 <= x1 or y2 <= y1:
return None # no overlap
else:
return x1, y1, x2, y2
@property
def geometry(self):
"""Intersection of geometries of 'store' and 'source'. """
result, mask = [x.geometry for x in self.args]
if result is None or mask is None:
return
sr = result.GetSpatialReference()
if not mask.GetSpatialReference().IsSame(sr):
mask = mask.Clone()
mask.TransformTo(sr)
result = result.Intersection(mask)
if result.GetArea() == 0.0:
return
return result
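# Editor's usage sketch (not part of the original module): the raster names are
# hypothetical placeholders for any two RasterBlock sources.
def _example_clip(dem, landuse_mask):
    # Keep `dem` values only where `landuse_mask` has data (or True, for a
    # boolean raster); all other cells become 'no data' in the result.
    return Clip(store=dem, source=landuse_mask)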
class Mask(BaseSingle):
"""
Replace values in a raster with a single constant value. 'no data' values
are preserved.
Args:
store (RasterBlock): The raster whose values are to be converted.
value (number): The constant value to be given to 'data' values.
Returns:
RasterBlock containing a single value
"""
def __init__(self, store, value):
if not isinstance(value, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(value)))
super(Mask, self).__init__(store, value)
@property
def value(self):
return self.args[1]
@property
def fillvalue(self):
return 1 if self.value == 0 else 0
@property
def dtype(self):
return "float32" if isinstance(self.value, float) else "uint8"
@staticmethod
def process(data, value):
if data is None or "values" not in data:
return data
index = utils.get_index(
values=data["values"], no_data_value=data["no_data_value"]
)
fillvalue = 1 if value == 0 else 0
dtype = "float32" if isinstance(value, float) else "uint8"
values = np.full_like(data["values"], fillvalue, dtype=dtype)
values[index] = value
return {"values": values, "no_data_value": fillvalue}
class MaskAbove(BaseSingle):
"""
Converts raster cells above the supplied value to 'no data'.
Raster cells with values lower than or equal to the supplied value are
returned unchanged.
Args:
store (RasterBlock): The raster whose values are to be masked.
value (number): The constant value above which values are masked.
Returns:
      RasterBlock with cells above the input value converted to 'no data'.
"""
def __init__(self, store, value):
if not isinstance(value, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(value)))
super(MaskAbove, self).__init__(store, value)
@staticmethod
def process(data, value):
if data is None or "values" not in data:
return data
values, no_data_value = data["values"].copy(), data["no_data_value"]
values[values > value] = no_data_value
return {"values": values, "no_data_value": no_data_value}
class MaskBelow(BaseSingle):
"""
Converts raster cells below the supplied value to 'no data'.
Raster cells with values greater than or equal to the supplied value are
returned unchanged.
Args:
store (RasterBlock): The raster whose values are to be masked.
value (number): The constant value below which values are masked.
Returns:
RasterBlock with cells below the input value converted to 'no data'.
"""
def __init__(self, store, value):
if not isinstance(value, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(value)))
super(MaskBelow, self).__init__(store, value)
@staticmethod
def process(data, value):
if data is None or "values" not in data:
return data
values, no_data_value = data["values"].copy(), data["no_data_value"]
values[values < value] = no_data_value
return {"values": values, "no_data_value": no_data_value}
class MaskRandom(BaseSingle):
"""
Replace values in a raster with a random number between 0 and 1. 'no data' values
are preserved.
Args:
store (RasterBlock): The raster whose values are to be converted.
Returns:
RasterBlock containing a single value
"""
def __init__(self, store):
super(MaskRandom, self).__init__(store)
@property
def fillvalue(self):
return 255
@property
def dtype(self):
return "float32"
@staticmethod
def process(data):
if data is None or "values" not in data:
return data
index = utils.get_index(
values=data["values"], no_data_value=data["no_data_value"]
)
fillvalue = 255
dtype = "float32"
values = np.full_like(data["values"], fillvalue, dtype=dtype)
values[index] = random.random()
return {"values": values, "no_data_value": fillvalue}
class Step(BaseSingle):
"""
Apply a step function to a raster.
This operation classifies the elements of a raster into three categories:
less than, equal to, and greater than a value.
The step function is defined as follows, with x being the value of a raster
cell:
- 'left' if *x < value*
- 'at' if *x == value*
- 'right' if *x > value*
Args:
store (RasterBlock): The input raster
left (number): Value given to cells lower than the input value,
defaults to 0
right (number): Value given to cells higher than the input value,
defaults to 1
value (number): The constant value which raster cells are compared to,
defaults to 0
at (number): Value given to cells equal to the input value, defaults to
the average of left and right
Returns:
RasterBlock containing three values; left, right and at.
"""
def __init__(self, store, left=0, right=1, value=0, at=None):
at = (left + right) / 2 if at is None else at
for x in left, right, value, at:
if not isinstance(x, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(x)))
super(Step, self).__init__(store, left, right, value, at)
@property
def left(self):
return self.args[1]
@property
def right(self):
return self.args[2]
@property
def value(self):
return self.args[3]
@property
def at(self):
return self.args[4]
@staticmethod
def process(data, left, right, location, at):
if data is None or "values" not in data:
return data
values, no_data_value = data["values"].copy(), data["no_data_value"]
# determine boolean index arrays
mask = values == no_data_value
left_index = values < location
at_index = values == location
right_index = values > location
# perform mapping
values[left_index] = left
values[at_index] = at
values[right_index] = right
# put no data values back
values[mask] = no_data_value
return {"values": values, "no_data_value": no_data_value}
class Classify(BaseSingle):
"""
Classify raster data into binned categories
Takes a RasterBlock and classifies its values based on bins. The bins are
supplied as a list of increasing bin edges.
For each raster cell this operation returns the index of the bin to which
the raster cell belongs. The lowest possible output cell value is 0, which
means that the input value was lower than the lowest bin edge. The highest
possible output value is equal to the number of supplied bin edges.
Args:
store (RasterBlock): The raster whose cell values are to be classified
bins (list): An increasing list of bin edges
right (boolean): Whether the intervals include the right or the left bin
edge, defaults to False.
Returns:
RasterBlock with classified values
"""
def __init__(self, store, bins, right=False):
if not isinstance(store, RasterBlock):
raise TypeError("'{}' object is not allowed".format(type(store)))
if not hasattr(bins, "__iter__"):
raise TypeError("'{}' object is not allowed".format(type(bins)))
bins_arr = np.asarray(bins)
if bins_arr.ndim != 1:
raise TypeError("'bins' should be one-dimensional")
if not np.issubdtype(bins_arr.dtype, np.number):
raise TypeError("'bins' should be numeric")
bins_diff = np.diff(bins)
if not np.all(bins_diff > 0) or np.all(bins_diff < 0):
raise TypeError("'bins' should be monotonic")
super(Classify, self).__init__(store, bins_arr.tolist(), right)
@property
def bins(self):
return self.args[1]
@property
def right(self):
return self.args[2]
@property
def dtype(self):
# with 254 bin edges, we have 255 bins, and we need 256 possible values
# to include no_data
return utils.get_uint_dtype(len(self.bins) + 2)
@property
def fillvalue(self):
return utils.get_dtype_max(self.dtype)
@staticmethod
def process(data, bins, right):
if data is None or "values" not in data:
return data
values = data["values"]
dtype = utils.get_uint_dtype(len(bins) + 2)
fillvalue = utils.get_dtype_max(dtype)
result_values = np.digitize(values, bins, right).astype(dtype)
result_values[values == data["no_data_value"]] = fillvalue
return {"values": result_values, "no_data_value": fillvalue}
class Reclassify(BaseSingle):
"""
Reclassify a raster of integer values.
This operation can be used to reclassify a classified raster into desired
values. Reclassification is done by supplying a list of [from, to] pairs.
Args:
store (RasterBlock): The raster whose cell values are to be reclassified
bins (list): A list of [from, to] pairs defining the reclassification.
The from values can be of bool or int datatype; the to values can be of
int or float datatype
select (boolean): Whether to set all non-reclassified cells to 'no data',
defaults to False.
Returns:
RasterBlock with reclassified values
"""
def __init__(self, store, data, select=False):
dtype = store.dtype
        if dtype != bool and not np.issubdtype(dtype, np.integer):
raise TypeError("The store must be of boolean or integer datatype")
# validate "data"
if not hasattr(data, "__iter__"):
raise TypeError("'{}' object is not allowed".format(type(data)))
try:
source, target = map(np.asarray, zip(*data))
except ValueError:
raise ValueError("Please supply a list of [from, to] values")
# "from" can have bool or int dtype, "to" can also be float
        if source.dtype != bool and not np.issubdtype(source.dtype, np.integer):
raise TypeError(
"Cannot reclassify from value with type '{}'".format(source.dtype)
)
if len(np.unique(source)) != len(source):
raise ValueError("There are duplicates in the reclassify values")
if not np.issubdtype(target.dtype, np.number):
raise TypeError(
"Cannot reclassify to value with type '{}'".format(target.dtype)
)
# put 'data' into a list with consistent dtypes
data = [list(x) for x in zip(source.tolist(), target.tolist())]
if select is not True and select is not False:
raise TypeError("'{}' object is not allowed".format(type(select)))
super().__init__(store, data, select)
@property
def data(self):
return self.args[1]
@property
def select(self):
return self.args[2]
@property
def dtype(self):
_, target = map(np.asarray, zip(*self.data))
return target.dtype
@property
def fillvalue(self):
return utils.get_dtype_max(self.dtype)
def get_sources_and_requests(self, **request):
process_kwargs = {
"dtype": self.dtype.str,
"fillvalue": self.fillvalue,
"data": self.data,
"select": self.select,
}
return [(self.store, request), (process_kwargs, None)]
@staticmethod
def process(store_data, process_kwargs):
if store_data is None or "values" not in store_data:
return store_data
no_data_value = store_data["no_data_value"]
values = store_data["values"]
source, target = map(np.asarray, zip(*process_kwargs["data"]))
dtype = np.dtype(process_kwargs["dtype"])
fillvalue = process_kwargs["fillvalue"]
# add the nodata value to the source array and map it to the target
# nodata
if no_data_value is not None and no_data_value not in source:
source = np.append(source, no_data_value)
target = np.append(target, fillvalue)
# sort the source and target values
inds = np.argsort(source)
source = source[inds]
target = target[inds]
# create the result array
if process_kwargs["select"]: # select = True: initialize with nodata
result = np.full(values.shape, fillvalue, dtype=dtype)
else: # select = False: initialize with the existing data
result = values.astype(dtype) # makes a copy
# find all values in the source data that are to be mapped
mask = np.in1d(values.ravel(), source)
mask.shape = values.shape
# place the target values (this also maps nodata values)
result[mask] = target[np.searchsorted(source, values[mask])]
return {"values": result, "no_data_value": fillvalue}
class Rasterize(RasterBlock):
"""
Converts geometry source to raster
This operation is used to transform GeometryBlocks into RasterBlocks. Here
geometries (from for example a shapefile) are converted to a raster, using
the values from one of the columns.
Note that to rasterize floating point values, it is necessary to pass
``dtype="float"``.
Args:
source (GeometryBlock): The geometry source to be rasterized
column_name (string): The name of the column whose values will be
returned in the raster. If column_name is not provided, a boolean
raster will be generated indicating where there are geometries.
dtype (string): A numpy datatype specification to return the array.
Defaults to 'int32' if column_name is provided, or to 'bool' otherwise.
limit (int): The maximum number of geometries to rasterize per request;
defaults to the global "geomodeling.geometry-limit" setting.
Returns:
RasterBlock with values from 'column_name' or a boolean raster.
See also:
https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
The global geometry-limit setting can be adapted as follows:
>>> from dask import config
>>> config.set({"geomodeling.geometry-limit": 100000})
"""
def __init__(self, source, column_name=None, dtype=None, limit=None):
if not isinstance(source, GeometryBlock):
raise TypeError("'{}' object is not allowed".format(type(source)))
if column_name is not None and not isinstance(column_name, str):
raise TypeError("'{}' object is not allowed".format(type(column_name)))
if dtype is None: # set default values
dtype = "bool" if column_name is None else "int32"
else: # parse to numpy dtype and back to string
dtype = str(np.dtype(dtype))
if limit and not isinstance(limit, int):
raise TypeError("'{}' object is not allowed".format(type(limit)))
if limit and limit < 1:
raise ValueError("Limit should be greater than 1")
super(Rasterize, self).__init__(source, column_name, dtype, limit)
@property
def source(self):
return self.args[0]
@property
def column_name(self):
return self.args[1]
@property
def limit(self):
return self.args[3]
@property
def dtype(self):
return np.dtype(self.args[2])
@property
def fillvalue(self):
return None if self.dtype == np.bool else utils.get_dtype_max(self.dtype)
@property
def period(self):
return (self.DEFAULT_ORIGIN,) * 2
@property
def extent(self):
return None
@property
def timedelta(self):
return None
@property
def geometry(self):
return None
@property
def projection(self):
return None
@property
def geo_transform(self):
return None
def get_sources_and_requests(self, **request):
# first handle the 'time' and 'meta' requests
mode = request["mode"]
if mode == "time":
return [(self.period[-1], None), ({"mode": "time"}, None)]
elif mode == "meta":
return [(None, None), ({"mode": "meta"}, None)]
elif mode != "vals":
raise ValueError("Unknown mode '{}'".format(mode))
# build the request to be sent to the geometry source
x1, y1, x2, y2 = request["bbox"]
width, height = request["width"], request["height"]
# be strict about the bbox; an invalid one may lead to segfaults
if x2 == x1 and y2 == y1: # point
min_size = None
elif x1 < x2 and y1 < y2:
min_size = min((x2 - x1) / width, (y2 - y1) / height)
else:
raise ValueError("Invalid bbox ({})".format(request["bbox"]))
limit = self.limit
if self.limit is None:
limit = config.get("geomodeling.geometry-limit")
geom_request = {
"mode": "intersects",
"geometry": box(*request["bbox"]),
"projection": request["projection"],
"min_size": min_size,
"limit": limit,
"start": request.get("start"),
"stop": request.get("stop"),
}
# keep some variables for use in process()
process_kwargs = {
"mode": "vals",
"column_name": self.column_name,
"dtype": self.dtype,
"no_data_value": self.fillvalue,
"width": width,
"height": height,
"bbox": request["bbox"],
}
return [(self.source, geom_request), (process_kwargs, None)]
@staticmethod
def process(data, process_kwargs):
# first handle the time and meta requests
mode = process_kwargs["mode"]
if mode == "time":
return {"time": [data]}
elif mode == "meta":
return {"meta": [None]}
column_name = process_kwargs["column_name"]
height = process_kwargs["height"]
width = process_kwargs["width"]
no_data_value = process_kwargs["no_data_value"]
dtype = process_kwargs["dtype"]
f = data["features"]
# get the value column to rasterize
if column_name is None:
values = None
else:
try:
values = f[column_name]
except KeyError:
if f.index.name == column_name:
values = f.index.to_series()
else:
values = False
if len(f) == 0 or values is False: # there is no data to rasterize
values = np.full((1, height, width), no_data_value, dtype=dtype)
return {"values": values, "no_data_value": no_data_value}
result = utils.rasterize_geoseries(
geoseries=f["geometry"] if "geometry" in f else None,
values=values,
bbox=process_kwargs["bbox"],
projection=data["projection"],
height=height,
width=width,
)
values = result["values"]
# cast to the expected dtype if necessary
cast_values = values.astype(process_kwargs["dtype"])
# replace the nodata value if necessary
if result["no_data_value"] != no_data_value:
cast_values[values == result["no_data_value"]] = no_data_value
return {"values": cast_values, "no_data_value": no_data_value}
class RasterizeWKT(RasterBlock):
"""Converts a single geometry to a raster mask
Args:
wkt (string): the WKT representation of a geometry
projection (string): the projection of the geometry
Returns:
RasterBlock with True for cells that are inside the geometry.
"""
def __init__(self, wkt, projection):
if not isinstance(wkt, str):
raise TypeError("'{}' object is not allowed".format(type(wkt)))
if not isinstance(projection, str):
raise TypeError("'{}' object is not allowed".format(type(projection)))
try:
load_wkt(wkt)
except WKTReadingError:
raise ValueError("The provided geometry is not a valid WKT")
try:
utils.get_sr(projection)
except TypeError:
raise ValueError("The provided projection is not a valid WKT")
super().__init__(wkt, projection)
@property
def wkt(self):
return self.args[0]
@property
def projection(self):
return self.args[1]
@property
def dtype(self):
return np.dtype("bool")
@property
def fillvalue(self):
return None
@property
def period(self):
return (self.DEFAULT_ORIGIN,) * 2
@property
def extent(self):
return tuple(
utils.shapely_transform(
load_wkt(self.wkt), self.projection, "EPSG:4326"
).bounds
)
@property
def timedelta(self):
return None
@property
def geometry(self):
return ogr.CreateGeometryFromWkt(self.wkt, utils.get_sr(self.projection))
@property
def geo_transform(self):
return None
def get_sources_and_requests(self, **request):
# first handle the 'time' and 'meta' requests
mode = request["mode"]
if mode == "time":
data = self.period[-1]
elif mode == "meta":
data = None
elif mode == "vals":
data = {"wkt": self.wkt, "projection": self.projection}
else:
raise ValueError("Unknown mode '{}'".format(mode))
return [(data, None), (request, None)]
@staticmethod
def process(data, request):
mode = request["mode"]
if mode == "time":
return {"time": [data]}
elif mode == "meta":
return {"meta": [None]}
# load the geometry and transform it into the requested projection
geometry = load_wkt(data["wkt"])
if data["projection"] != request["projection"]:
geometry = utils.shapely_transform(
geometry, data["projection"], request["projection"]
)
# take a shortcut when the geometry does not intersect the bbox
if not geometry.intersects(box(*request["bbox"])):
return {
"values": np.full(
(1, request["height"], request["width"]), False, dtype=np.bool
),
"no_data_value": None,
}
return utils.rasterize_geoseries(
geoseries=GeoSeries([geometry]) if not geometry.is_empty else None,
bbox=request["bbox"],
projection=request["projection"],
height=request["height"],
width=request["width"],
)
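# Hypothetical usage sketch, not taken from the library documentation: rasterizing a
# single WKT polygon into a boolean mask. The polygon, the projection and the use of
# the generic get_data() request interface are assumptions made for illustration.
def _example_rasterize_wkt_usage():
    mask = RasterizeWKT("POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))", "EPSG:28992")
    return mask.get_data(
        mode="vals",
        bbox=(0.0, 0.0, 10.0, 10.0),
        projection="EPSG:28992",
        width=64,
        height=64,
    )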
| """
Module containing miscellaneous raster blocks.
"""
from osgeo import ogr
import numpy as np
import random
from geopandas import GeoSeries
from shapely.geometry import box
from shapely.errors import WKTReadingError
from shapely.wkt import loads as load_wkt
from dask import config
from dask_geomodeling.geometry import GeometryBlock
from dask_geomodeling import utils
from .base import RasterBlock, BaseSingle
__all__ = [
"Clip",
"Classify",
"Reclassify",
"Mask",
"MaskAbove",
"MaskBelow",
"MaskRandom",
"Step",
"Rasterize",
"RasterizeWKT",
]
class Clip(BaseSingle):
"""
Clip one raster to the extent of another raster.
Takes two raster inputs, one raster ('store') whose values are returned in
the output and one raster ('source') that is used as the extent. Cells of
the 'store' raster are replaced with 'no data' if there is no data in the
'source' raster.
If the 'source' raster is a boolean raster, False will result in 'no data'.
Args:
store (RasterBlock): Raster whose values are clipped
source (RasterBlock): Raster that is used as the clipping mask
Returns:
RasterBlock with clipped values.
"""
def __init__(self, store, source):
if not isinstance(source, RasterBlock):
raise TypeError("'{}' object is not allowed".format(type(store)))
super(Clip, self).__init__(store, source)
@property
def source(self):
return self.args[1]
@staticmethod
def process(data, source_data):
""" Mask store_data where source_data has no data """
if data is None:
return None
if "values" not in data:
return data
# check if values contain data
if np.all(data["values"] == data["no_data_value"]):
return data
# make the boolean mask
if source_data is None:
return None
if source_data["values"].dtype == np.dtype("bool"):
mask = ~source_data["values"]
else:
mask = source_data["values"] == source_data["no_data_value"]
# adjust values
values = data["values"].copy()
values[mask] = data["no_data_value"]
return {"values": values, "no_data_value": data["no_data_value"]}
@property
def extent(self):
"""Intersection of bounding boxes of 'store' and 'source'. """
result, mask = [s.extent for s in self.args]
if result is None or mask is None:
return
# return the overlapping box
x1 = max(result[0], mask[0])
y1 = max(result[1], mask[1])
x2 = min(result[2], mask[2])
y2 = min(result[3], mask[3])
if x2 <= x1 or y2 <= y1:
return None # no overlap
else:
return x1, y1, x2, y2
@property
def geometry(self):
"""Intersection of geometries of 'store' and 'source'. """
result, mask = [x.geometry for x in self.args]
if result is None or mask is None:
return
sr = result.GetSpatialReference()
if not mask.GetSpatialReference().IsSame(sr):
mask = mask.Clone()
mask.TransformTo(sr)
result = result.Intersection(mask)
if result.GetArea() == 0.0:
return
return result
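# Hypothetical helper, added here for illustration only (not part of dask_geomodeling):
# a minimal sketch of what Clip.process does to plain numpy payloads when the clipping
# source is boolean, as described in the docstring above.
def _example_clip_process():
    data = {"values": np.array([[1.0, 2.0], [3.0, 4.0]]), "no_data_value": -9999.0}
    source = {"values": np.array([[True, False], [True, True]]), "no_data_value": None}
    result = Clip.process(data, source)
    # the cell where the source mask is False is replaced by the no-data value
    return result["values"]  # expected: [[1.0, -9999.0], [3.0, 4.0]]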
class Mask(BaseSingle):
"""
Replace values in a raster with a single constant value. 'no data' values
are preserved.
Args:
store (RasterBlock): The raster whose values are to be converted.
value (number): The constant value to be given to 'data' values.
Returns:
RasterBlock containing a single value
"""
def __init__(self, store, value):
if not isinstance(value, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(value)))
super(Mask, self).__init__(store, value)
@property
def value(self):
return self.args[1]
@property
def fillvalue(self):
return 1 if self.value == 0 else 0
@property
def dtype(self):
return "float32" if isinstance(self.value, float) else "uint8"
@staticmethod
def process(data, value):
if data is None or "values" not in data:
return data
index = utils.get_index(
values=data["values"], no_data_value=data["no_data_value"]
)
fillvalue = 1 if value == 0 else 0
dtype = "float32" if isinstance(value, float) else "uint8"
values = np.full_like(data["values"], fillvalue, dtype=dtype)
values[index] = value
return {"values": values, "no_data_value": fillvalue}
class MaskAbove(BaseSingle):
"""
Converts raster cells above the supplied value to 'no data'.
Raster cells with values lower than or equal to the supplied value are
returned unchanged.
Args:
store (RasterBlock): The raster whose values are to be masked.
value (number): The constant value above which values are masked.
Returns:
RasterBlock with cells above the input value converted to 'no data'.
"""
def __init__(self, store, value):
if not isinstance(value, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(value)))
super(MaskAbove, self).__init__(store, value)
@staticmethod
def process(data, value):
if data is None or "values" not in data:
return data
values, no_data_value = data["values"].copy(), data["no_data_value"]
values[values > value] = no_data_value
return {"values": values, "no_data_value": no_data_value}
class MaskBelow(BaseSingle):
"""
Converts raster cells below the supplied value to 'no data'.
Raster cells with values greater than or equal to the supplied value are
returned unchanged.
Args:
store (RasterBlock): The raster whose values are to be masked.
value (number): The constant value below which values are masked.
Returns:
RasterBlock with cells below the input value converted to 'no data'.
"""
def __init__(self, store, value):
if not isinstance(value, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(value)))
super(MaskBelow, self).__init__(store, value)
@staticmethod
def process(data, value):
if data is None or "values" not in data:
return data
values, no_data_value = data["values"].copy(), data["no_data_value"]
values[values < value] = no_data_value
return {"values": values, "no_data_value": no_data_value}
class MaskRandom(BaseSingle):
"""
Replace all data values in a raster with a single random number between 0 and 1.
'no data' values are preserved.
Args:
store (RasterBlock): The raster whose values are to be converted.
Returns:
RasterBlock containing a single value
"""
def __init__(self, store):
super(MaskRandom, self).__init__(store)
@property
def fillvalue(self):
return 255
@property
def dtype(self):
return "float32"
@staticmethod
def process(data):
if data is None or "values" not in data:
return data
index = utils.get_index(
values=data["values"], no_data_value=data["no_data_value"]
)
fillvalue = 255
dtype = "float32"
values = np.full_like(data["values"], fillvalue, dtype=dtype)
values[index] = random.random()
return {"values": values, "no_data_value": fillvalue}
class Step(BaseSingle):
"""
Apply a step function to a raster.
This operation classifies the elements of a raster into three categories:
less than, equal to, and greater than a value.
The step function is defined as follows, with x being the value of a raster
cell:
- 'left' if *x < value*
- 'at' if *x == value*
- 'right' if *x > value*
Args:
store (RasterBlock): The input raster
left (number): Value given to cells lower than the input value,
defaults to 0
right (number): Value given to cells higher than the input value,
defaults to 1
value (number): The constant value which raster cells are compared to,
defaults to 0
at (number): Value given to cells equal to the input value, defaults to
the average of left and right
Returns:
RasterBlock containing three values; left, right and at.
"""
def __init__(self, store, left=0, right=1, value=0, at=None):
at = (left + right) / 2 if at is None else at
for x in left, right, value, at:
if not isinstance(x, (float, int)):
raise TypeError("'{}' object is not allowed".format(type(x)))
super(Step, self).__init__(store, left, right, value, at)
@property
def left(self):
return self.args[1]
@property
def right(self):
return self.args[2]
@property
def value(self):
return self.args[3]
@property
def at(self):
return self.args[4]
@staticmethod
def process(data, left, right, location, at):
if data is None or "values" not in data:
return data
values, no_data_value = data["values"].copy(), data["no_data_value"]
# determine boolean index arrays
mask = values == no_data_value
left_index = values < location
at_index = values == location
right_index = values > location
# perform mapping
values[left_index] = left
values[at_index] = at
values[right_index] = right
# put no data values back
values[mask] = no_data_value
return {"values": values, "no_data_value": no_data_value}
class Classify(BaseSingle):
"""
Classify raster data into binned categories
Takes a RasterBlock and classifies its values based on bins. The bins are
supplied as a list of increasing bin edges.
For each raster cell this operation returns the index of the bin to which
the raster cell belongs. The lowest possible output cell value is 0, which
means that the input value was lower than the lowest bin edge. The highest
possible output value is equal to the number of supplied bin edges.
Args:
store (RasterBlock): The raster whose cell values are to be classified
bins (list): An increasing list of bin edges
right (boolean): Whether the intervals include the right or the left bin
edge, defaults to False.
Returns:
RasterBlock with classified values
"""
def __init__(self, store, bins, right=False):
if not isinstance(store, RasterBlock):
raise TypeError("'{}' object is not allowed".format(type(store)))
if not hasattr(bins, "__iter__"):
raise TypeError("'{}' object is not allowed".format(type(bins)))
bins_arr = np.asarray(bins)
if bins_arr.ndim != 1:
raise TypeError("'bins' should be one-dimensional")
if not np.issubdtype(bins_arr.dtype, np.number):
raise TypeError("'bins' should be numeric")
bins_diff = np.diff(bins)
if not (np.all(bins_diff > 0) or np.all(bins_diff < 0)):
raise TypeError("'bins' should be monotonic")
super(Classify, self).__init__(store, bins_arr.tolist(), right)
@property
def bins(self):
return self.args[1]
@property
def right(self):
return self.args[2]
@property
def dtype(self):
# with 254 bin edges, we have 255 bins, and we need 256 possible values
# to include no_data
return utils.get_uint_dtype(len(self.bins) + 2)
@property
def fillvalue(self):
return utils.get_dtype_max(self.dtype)
@staticmethod
def process(data, bins, right):
if data is None or "values" not in data:
return data
values = data["values"]
dtype = utils.get_uint_dtype(len(bins) + 2)
fillvalue = utils.get_dtype_max(dtype)
result_values = np.digitize(values, bins, right).astype(dtype)
result_values[values == data["no_data_value"]] = fillvalue
return {"values": result_values, "no_data_value": fillvalue}
class Reclassify(BaseSingle):
"""
Reclassify a raster of integer values.
This operation can be used to reclassify a classified raster into desired
values. Reclassification is done by supplying a list of [from, to] pairs.
Args:
store (RasterBlock): The raster whose cell values are to be reclassified
data (list): A list of [from, to] pairs defining the reclassification.
The from values can be of bool or int datatype; the to values can be of
int or float datatype
select (boolean): Whether to set all non-reclassified cells to 'no data',
defaults to False.
Returns:
RasterBlock with reclassified values
"""
def __init__(self, store, data, select=False):
dtype = store.dtype
if dtype != np.bool and not np.issubdtype(dtype, np.integer):
raise TypeError("The store must be of boolean or integer datatype")
# validate "data"
if not hasattr(data, "__iter__"):
raise TypeError("'{}' object is not allowed".format(type(data)))
try:
source, target = map(np.asarray, zip(*data))
except ValueError:
raise ValueError("Please supply a list of [from, to] values")
# "from" can have bool or int dtype, "to" can also be float
if source.dtype != np.bool and not np.issubdtype(source.dtype, np.integer):
raise TypeError(
"Cannot reclassify from value with type '{}'".format(source.dtype)
)
if len(np.unique(source)) != len(source):
raise ValueError("There are duplicates in the reclassify values")
if not np.issubdtype(target.dtype, np.number):
raise TypeError(
"Cannot reclassify to value with type '{}'".format(target.dtype)
)
# put 'data' into a list with consistent dtypes
data = [list(x) for x in zip(source.tolist(), target.tolist())]
if select is not True and select is not False:
raise TypeError("'{}' object is not allowed".format(type(select)))
super().__init__(store, data, select)
@property
def data(self):
return self.args[1]
@property
def select(self):
return self.args[2]
@property
def dtype(self):
_, target = map(np.asarray, zip(*self.data))
return target.dtype
@property
def fillvalue(self):
return utils.get_dtype_max(self.dtype)
def get_sources_and_requests(self, **request):
process_kwargs = {
"dtype": self.dtype.str,
"fillvalue": self.fillvalue,
"data": self.data,
"select": self.select,
}
return [(self.store, request), (process_kwargs, None)]
@staticmethod
def process(store_data, process_kwargs):
if store_data is None or "values" not in store_data:
return store_data
no_data_value = store_data["no_data_value"]
values = store_data["values"]
source, target = map(np.asarray, zip(*process_kwargs["data"]))
dtype = np.dtype(process_kwargs["dtype"])
fillvalue = process_kwargs["fillvalue"]
# add the nodata value to the source array and map it to the target
# nodata
if no_data_value is not None and no_data_value not in source:
source = np.append(source, no_data_value)
target = np.append(target, fillvalue)
# sort the source and target values
inds = np.argsort(source)
source = source[inds]
target = target[inds]
# create the result array
if process_kwargs["select"]: # select = True: initialize with nodata
result = np.full(values.shape, fillvalue, dtype=dtype)
else: # select = False: initialize with the existing data
result = values.astype(dtype) # makes a copy
# find all values in the source data that are to be mapped
mask = np.in1d(values.ravel(), source)
mask.shape = values.shape
# place the target values (this also maps nodata values)
result[mask] = target[np.searchsorted(source, values[mask])]
return {"values": result, "no_data_value": fillvalue}
class Rasterize(RasterBlock):
"""
Converts geometry source to raster
This operation is used to transform GeometryBlocks into RasterBlocks. Here
geometries (from for example a shapefile) are converted to a raster, using
the values from one of the columns.
Note that to rasterize floating point values, it is necessary to pass
``dtype="float"``.
Args:
source (GeometryBlock): The geometry source to be rasterized
column_name (string): The name of the column whose values will be
returned in the raster. If column_name is not provided, a boolean
raster will be generated indicating where there are geometries.
dtype (string): A numpy datatype specification to return the array.
Defaults to 'int32' if column_name is provided, or to 'bool' otherwise.
limit (int): The maximum number of geometries to rasterize per request;
defaults to the global "geomodeling.geometry-limit" setting.
Returns:
RasterBlock with values from 'column_name' or a boolean raster.
See also:
https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
The global geometry-limit setting can be adapted as follows:
>>> from dask import config
>>> config.set({"geomodeling.geometry-limit": 100000})
"""
def __init__(self, source, column_name=None, dtype=None, limit=None):
if not isinstance(source, GeometryBlock):
raise TypeError("'{}' object is not allowed".format(type(source)))
if column_name is not None and not isinstance(column_name, str):
raise TypeError("'{}' object is not allowed".format(type(column_name)))
if dtype is None: # set default values
dtype = "bool" if column_name is None else "int32"
else: # parse to numpy dtype and back to string
dtype = str(np.dtype(dtype))
if limit and not isinstance(limit, int):
raise TypeError("'{}' object is not allowed".format(type(limit)))
if limit and limit < 1:
raise ValueError("Limit should be greater than 1")
super(Rasterize, self).__init__(source, column_name, dtype, limit)
@property
def source(self):
return self.args[0]
@property
def column_name(self):
return self.args[1]
@property
def limit(self):
return self.args[3]
@property
def dtype(self):
return np.dtype(self.args[2])
@property
def fillvalue(self):
return None if self.dtype == np.bool else utils.get_dtype_max(self.dtype)
@property
def period(self):
return (self.DEFAULT_ORIGIN,) * 2
@property
def extent(self):
return None
@property
def timedelta(self):
return None
@property
def geometry(self):
return None
@property
def projection(self):
return None
@property
def geo_transform(self):
return None
def get_sources_and_requests(self, **request):
# first handle the 'time' and 'meta' requests
mode = request["mode"]
if mode == "time":
return [(self.period[-1], None), ({"mode": "time"}, None)]
elif mode == "meta":
return [(None, None), ({"mode": "meta"}, None)]
elif mode != "vals":
raise ValueError("Unknown mode '{}'".format(mode))
# build the request to be sent to the geometry source
x1, y1, x2, y2 = request["bbox"]
width, height = request["width"], request["height"]
# be strict about the bbox; an invalid one may lead to segfaults
if x2 == x1 and y2 == y1: # point
min_size = None
elif x1 < x2 and y1 < y2:
min_size = min((x2 - x1) / width, (y2 - y1) / height)
else:
raise ValueError("Invalid bbox ({})".format(request["bbox"]))
limit = self.limit
if self.limit is None:
limit = config.get("geomodeling.geometry-limit")
geom_request = {
"mode": "intersects",
"geometry": box(*request["bbox"]),
"projection": request["projection"],
"min_size": min_size,
"limit": limit,
"start": request.get("start"),
"stop": request.get("stop"),
}
# keep some variables for use in process()
process_kwargs = {
"mode": "vals",
"column_name": self.column_name,
"dtype": self.dtype,
"no_data_value": self.fillvalue,
"width": width,
"height": height,
"bbox": request["bbox"],
}
return [(self.source, geom_request), (process_kwargs, None)]
@staticmethod
def process(data, process_kwargs):
# first handle the time and meta requests
mode = process_kwargs["mode"]
if mode == "time":
return {"time": [data]}
elif mode == "meta":
return {"meta": [None]}
column_name = process_kwargs["column_name"]
height = process_kwargs["height"]
width = process_kwargs["width"]
no_data_value = process_kwargs["no_data_value"]
dtype = process_kwargs["dtype"]
f = data["features"]
# get the value column to rasterize
if column_name is None:
values = None
else:
try:
values = f[column_name]
except KeyError:
if f.index.name == column_name:
values = f.index.to_series()
else:
values = False
if len(f) == 0 or values is False: # there is no data to rasterize
values = np.full((1, height, width), no_data_value, dtype=dtype)
return {"values": values, "no_data_value": no_data_value}
result = utils.rasterize_geoseries(
geoseries=f["geometry"] if "geometry" in f else None,
values=values,
bbox=process_kwargs["bbox"],
projection=data["projection"],
height=height,
width=width,
)
values = result["values"]
# cast to the expected dtype if necessary
cast_values = values.astype(process_kwargs["dtype"])
# replace the nodata value if necessary
if result["no_data_value"] != no_data_value:
cast_values[values == result["no_data_value"]] = no_data_value
return {"values": cast_values, "no_data_value": no_data_value}
class RasterizeWKT(RasterBlock):
"""Converts a single geometry to a raster mask
Args:
wkt (string): the WKT representation of a geometry
projection (string): the projection of the geometry
Returns:
RasterBlock with True for cells that are inside the geometry.
"""
def __init__(self, wkt, projection):
if not isinstance(wkt, str):
raise TypeError("'{}' object is not allowed".format(type(wkt)))
if not isinstance(projection, str):
raise TypeError("'{}' object is not allowed".format(type(projection)))
try:
load_wkt(wkt)
except WKTReadingError:
raise ValueError("The provided geometry is not a valid WKT")
try:
utils.get_sr(projection)
except TypeError:
raise ValueError("The provided projection is not a valid WKT")
super().__init__(wkt, projection)
@property
def wkt(self):
return self.args[0]
@property
def projection(self):
return self.args[1]
@property
def dtype(self):
return np.dtype("bool")
@property
def fillvalue(self):
return None
@property
def period(self):
return (self.DEFAULT_ORIGIN,) * 2
@property
def extent(self):
return tuple(
utils.shapely_transform(
load_wkt(self.wkt), self.projection, "EPSG:4326"
).bounds
)
@property
def timedelta(self):
return None
@property
def geometry(self):
return ogr.CreateGeometryFromWkt(self.wkt, utils.get_sr(self.projection))
@property
def geo_transform(self):
return None
def get_sources_and_requests(self, **request):
# first handle the 'time' and 'meta' requests
mode = request["mode"]
if mode == "time":
data = self.period[-1]
elif mode == "meta":
data = None
elif mode == "vals":
data = {"wkt": self.wkt, "projection": self.projection}
else:
raise ValueError("Unknown mode '{}'".format(mode))
return [(data, None), (request, None)]
@staticmethod
def process(data, request):
mode = request["mode"]
if mode == "time":
return {"time": [data]}
elif mode == "meta":
return {"meta": [None]}
# load the geometry and transform it into the requested projection
geometry = load_wkt(data["wkt"])
if data["projection"] != request["projection"]:
geometry = utils.shapely_transform(
geometry, data["projection"], request["projection"]
)
# take a shortcut when the geometry does not intersect the bbox
if not geometry.intersects(box(*request["bbox"])):
return {
"values": np.full(
(1, request["height"], request["width"]), False, dtype=np.bool
),
"no_data_value": None,
}
return utils.rasterize_geoseries(
geoseries=GeoSeries([geometry]) if not geometry.is_empty else None,
bbox=request["bbox"],
projection=request["projection"],
height=request["height"],
width=request["width"],
) | en | 0.742685 | Module containing miscellaneous raster blocks. Clip one raster to the extent of another raster. Takes two raster inputs, one raster ('store') whose values are returned in the output and one raster ('source') that is used as the extent. Cells of the 'store' raster are replaced with 'no data' if there is no data in the 'source' raster. If the 'source' raster is a boolean raster, False will result in 'no data'. Args: store (RasterBlock): Raster whose values are clipped source (RasterBlock): Raster that is used as the clipping mask Returns: RasterBlock with clipped values. Mask store_data where source_data has no data # check if values contain data # make the boolean mask # adjust values Intersection of bounding boxes of 'store' and 'source'. # return the overlapping box # no overlap Intersection of geometries of 'store' and 'source'. Replace values in a raster with a single constant value. 'no data' values are preserved. Args: store (RasterBlock): The raster whose values are to be converted. value (number): The constant value to be given to 'data' values. Returns: RasterBlock containing a single value Converts raster cells above the supplied value to 'no data'. Raster cells with values lower than or equal to the supplied value are returned unchanged. Args: store (RasterBlock): The raster whose values are to be masked. value (number): The constant value above which values are masked. Returns: RasterBlock with cells below the input value converted to 'no data'. Converts raster cells below the supplied value to 'no data'. Raster cells with values greater than or equal to the supplied value are returned unchanged. Args: store (RasterBlock): The raster whose values are to be masked. value (number): The constant value below which values are masked. Returns: RasterBlock with cells below the input value converted to 'no data'. Replace values in a raster with a random number between 0 and 1. 'no data' values are preserved. Args: store (RasterBlock): The raster whose values are to be converted. Returns: RasterBlock containing a single value Apply a step function to a raster. This operation classifies the elements of a raster into three categories: less than, equal to, and greater than a value. The step function is defined as follows, with x being the value of a raster cell: - 'left' if *x < value* - 'at' if *x == value* - 'right' if *x > value* Args: store (RasterBlock): The input raster left (number): Value given to cells lower than the input value, defaults to 0 right (number): Value given to cells higher than the input value, defaults to 1 value (number): The constant value which raster cells are compared to, defaults to 0 at (number): Value given to cells equal to the input value, defaults to the average of left and right Returns: RasterBlock containing three values; left, right and at. # determine boolean index arrays # perform mapping # put no data values back Classify raster data into binned categories Takes a RasterBlock and classifies its values based on bins. The bins are supplied as a list of increasing bin edges. For each raster cell this operation returns the index of the bin to which the raster cell belongs. The lowest possible output cell value is 0, which means that the input value was lower than the lowest bin edge. The highest possible output value is equal to the number of supplied bin edges. 
Args: store (RasterBlock): The raster whose cell values are to be classified bins (list): An increasing list of bin edges right (boolean): Whether the intervals include the right or the left bin edge, defaults to False. Returns: RasterBlock with classified values # with 254 bin edges, we have 255 bins, and we need 256 possible values # to include no_data Reclassify a raster of integer values. This operation can be used to reclassify a classified raster into desired values. Reclassification is done by supplying a list of [from, to] pairs. Args: store (RasterBlock): The raster whose cell values are to be reclassified bins (list): A list of [from, to] pairs defining the reclassification. The from values can be of bool or int datatype; the to values can be of int or float datatype select (boolean): Whether to set all non-reclassified cells to 'no data', defaults to False. Returns: RasterBlock with reclassified values # validate "data" # "from" can have bool or int dtype, "to" can also be float # put 'data' into a list with consistent dtypes # add the nodata value to the source array and map it to the target # nodata # sort the source and target values # create the result array # select = True: initialize with nodata # select = True: initialize with existing data # makes a copy # find all values in the source data that are to be mapped # place the target values (this also maps nodata values) Converts geometry source to raster This operation is used to transform GeometryBlocks into RasterBlocks. Here geometries (from for example a shapefile) are converted to a raster, using the values from one of the columns. Note that to rasterize floating point values, it is necessary to pass ``dtype="float"``. Args: source (GeometryBlock): The geometry source to be rasterized column_name (string): The name of the column whose values will be returned in the raster. If column_name is not provided, a boolean raster will be generated indicating where there are geometries. dtype (string): A numpy datatype specification to return the array. Defaults to 'int32' if column_name is provided, or to 'bool' otherwise. Returns: RasterBlock with values from 'column_name' or a boolean raster. See also: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html The global geometry-limit setting can be adapted as follows: >>> from dask import config >>> config.set({"geomodeling.geometry-limit": 100000}) # set default values # parse to numpy dtype and back to string # first handle the 'time' and 'meta' requests # build the request to be sent to the geometry source # be strict about the bbox, it may lead to segfaults else # point # keep some variables for use in process() # first handle the time and meta requests # get the value column to rasterize # there is no data to rasterize # cast to the expected dtype if necessary # replace the nodata value if necessary Converts a single geometry to a raster mask Args: wkt (string): the WKT representation of a geometry projection (string): the projection of the geometry Returns: RasterBlock with True for cells that are inside the geometry. # first handle the 'time' and 'meta' requests # load the geometry and transform it into the requested projection # take a shortcut when the geometry does not intersect the bbox | 2.584952 | 3 |
Model1.py | WalterJohnson0/DeepSpeech-KerasRebuild | 0 | 6631433 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 19:21:25 2020
@author: <NAME>
DeepSpeech model
"""
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Bidirectional, LSTM, Softmax, TimeDistributed, Masking
from keras.utils import to_categorical
import tensorflow.compat.v1 as tf
import numpy as np
from util.Flags import FLAGS
############
optimizer = keras.optimizers.Adam(beta_1=0.9, beta_2=0.999, amsgrad=False)
def ctc_loss(y_true, y_pred):
# print(y_true)
# print(y_pred)
y_true = tf.reshape(y_true, (FLAGS.batch_size, FLAGS.time_step_length))
y_pred = tf.reshape(y_pred, (FLAGS.batch_size, FLAGS.time_step_length, FLAGS.n_character+1))
input_length = np.ones((FLAGS.batch_size, 1))*FLAGS.time_step_length
label_length = np.ones((FLAGS.batch_size, 1))*FLAGS.time_step_length
loss = keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
return loss
def create_model():
# network parameters
n_hidden = FLAGS.n_hidden
rate_dropout = FLAGS.dropout
time_step_len = FLAGS.time_step_length
window_len = FLAGS.window_length
n_mfcc = FLAGS.n_mfcc
n_class = FLAGS.n_character
# build model
model = Sequential()
model.add(Masking(mask_value= float(0.) , input_shape=(time_step_len, window_len*n_mfcc)))
model.add(TimeDistributed(Dense(n_hidden, activation='relu', input_dim=(window_len* n_mfcc), )))
model.add(TimeDistributed(Dropout(rate_dropout)))
model.add(TimeDistributed(Dense(n_hidden, activation='relu', input_dim=(window_len* n_mfcc), )))
model.add(TimeDistributed(Dropout(rate_dropout)))
model.add(TimeDistributed(Dense(n_hidden, activation='relu', input_dim=(window_len* n_mfcc), )))
model.add(TimeDistributed(Dropout(rate_dropout)))
model.add(Bidirectional(LSTM(n_hidden, return_sequences=True)))
model.add(TimeDistributed(Dropout(rate_dropout)))
# predict the null label of ctc loss
model.add(TimeDistributed(Dense(n_class+1)))
model.add(TimeDistributed(Softmax(axis=-1)))
return model
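# Hypothetical usage sketch (not part of the original file): wiring together the model,
# the Adam optimizer and the CTC loss defined above. FLAGS values come from util.Flags;
# nothing here is trained.
def example_build_and_compile():
    model = create_model()
    model.compile(optimizer=optimizer, loss=ctc_loss)
    model.summary()
    return model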
| # -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 19:21:25 2020
@author: <NAME>
DeepSpeech model
"""
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Bidirectional, LSTM, Softmax, TimeDistributed, Masking
from keras.utils import to_categorical
import tensorflow.compat.v1 as tf
import numpy as np
from util.Flags import FLAGS
############
optimizer = keras.optimizers.Adam(beta_1=0.9, beta_2=0.999, amsgrad=False)
def ctc_loss(y_true, y_pred):
# print(y_true)
# print(y_pred)
y_true = tf.reshape(y_true, (FLAGS.batch_size, FLAGS.time_step_length))
y_pred = tf.reshape(y_pred, (FLAGS.batch_size, FLAGS.time_step_length, FLAGS.n_character+1))
input_length = np.ones((FLAGS.batch_size, 1))*FLAGS.time_step_length
label_length = np.ones((FLAGS.batch_size, 1))*FLAGS.time_step_length
loss = keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
return loss
def create_model():
# network parameters
n_hidden = FLAGS.n_hidden
rate_dropout = FLAGS.dropout
time_step_len = FLAGS.time_step_length
window_len = FLAGS.window_length
n_mfcc = FLAGS.n_mfcc
n_class = FLAGS.n_character
# build model
model = Sequential()
model.add(Masking(mask_value= float(0.) , input_shape=(time_step_len, window_len*n_mfcc)))
model.add(TimeDistributed(Dense(n_hidden, activation='relu', input_dim=(window_len* n_mfcc), )))
model.add(TimeDistributed(Dropout(rate_dropout)))
model.add(TimeDistributed(Dense(n_hidden, activation='relu', input_dim=(window_len* n_mfcc), )))
model.add(TimeDistributed(Dropout(rate_dropout)))
model.add(TimeDistributed(Dense(n_hidden, activation='relu', input_dim=(window_len* n_mfcc), )))
model.add(TimeDistributed(Dropout(rate_dropout)))
model.add(Bidirectional(LSTM(n_hidden, return_sequences=True)))
model.add(TimeDistributed(Dropout(rate_dropout)))
# predict the null label of ctc loss
model.add(TimeDistributed(Dense(n_class+1)))
model.add(TimeDistributed(Softmax(axis=-1)))
return model
| en | 0.539468 | # -*- coding: utf-8 -*- Created on Thu Feb 20 19:21:25 2020 @author: <NAME> DeepSpeech model ############ # print(y_true) # print(y_pred) # network parameters # build model # predict the null label of ctc loss | 2.486022 | 2 |
compare.py | MarkEEaton/open-journal-matcher | 15 | 6631434 | """ run the comparisons using asyncio """
import asyncio
import asks
import regex
import settingsmay2021 as settings
import aiohttp
import langdetect
import os
import schedule
from time import sleep
from flask_bootstrap import Bootstrap
from collections import OrderedDict
from flask_wtf import FlaskForm
from wtforms import TextAreaField, SubmitField
from wtforms.validators import Length, ValidationError
from flask import Flask, render_template, request, url_for, Response, abort
from datetime import datetime
from redislite import StrictRedis
app = Flask(__name__, static_url_path="/static")
Bootstrap(app)
app.config["SECRET_KEY"] = settings.csrf
REDIS = os.path.join("/tmp/redis.db")
r = StrictRedis(REDIS, charset="utf-8", decode_responses=True)
r.hset("counter", "increment", 0)
def reset_redis():
r.hset("counter", "increment", 0)
schedule.every().hour.do(reset_redis)
class WebForm(FlaskForm):
""" for validation """
webabstract = TextAreaField(
validators=[
Length(
min=150,
max=10000,
message="Your abstract must be between 150 and 10,000 characters.",
)
]
)
def validate_webabstract(form, field):
try:
language = langdetect.detect(field.data)
except langdetect.lang_detect_exception.LangDetectException:
raise ValidationError(
"Your abstract must be between 150 and 10,000 characters."
)
print(language)
if language != "en":
raise ValidationError(
"The Open Journal Matcher only works with abstracts written in English."
)
submit = SubmitField("Search")
@app.route("/", methods=["GET", "POST"])
def index():
""" display index page """
form = WebForm()
valid = form.validate_on_submit()
schedule.run_pending()
if request.method == "POST" and valid:
# check to ensure not over rate limit
counter = int(r.hget("counter", "increment"))
counter += 1
print("counter:", counter)
if counter >= 10:
rate_error = {
"webabstract": [
"The application is experiencing peak load. Please try again later."
]
}
print("Turnaway due to load")
return render_template(
"index.html", form=form, errors=rate_error, output=""
)
r.hset("counter", "increment", counter)
# lay the groundwork
comp = {}
unordered_scores = {}
inp = form.webabstract.data
t0 = datetime.now()
# do the work
asyncio.run(parent1(inp, comp))
asyncio.run(parent2(comp, unordered_scores))
# sort the results
scores = OrderedDict(
sorted(unordered_scores.items(), key=lambda t: t[0], reverse=True)
)
# calculate running time
t1 = datetime.now()
print(t1 - t0)
return render_template("index.html", form=form, errors={}, output=scores)
elif request.method == "POST" and not valid:
return render_template("index.html", form=form, errors=form.errors, output="")
else:
return render_template("index.html", form=form, errors={}, output="")
@app.after_request
def add_security_headers(resp):
resp.headers["X-Content-Type-Options"] = "nosniff"
resp.headers["X-Frame-Options"] = "SAMEORIGIN"
resp.headers["X-XSS-Protection"] = "1; mode=block"
resp.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
resp.headers[
"Content-Security-Policy"
] = "script-src 'self'; style-src 'self'; default-src 'none'"
return resp
async def parent1(inp, comp):
""" manage the async calls to GCP """
await asyncio.gather(
*[cloud_work(blob, inp, comp, 0) for blob in settings.bucket_list]
)
return
async def cloud_work(blob, inp, comp, count):
""" interact with google cloud function """
max_out = 0
try:
async with aiohttp.ClientSession() as session:
while max_out < 6:
async with session.post(
settings.cloud_function,
json={"d": inp, "f": blob, "t": settings.token},
) as resp:
if max_out >= 5:
raise Exception("Max out")
if resp.status == 200:
comp[blob] = await resp.text()
break
elif resp.status == 500:
max_out += 1
elif resp.status == 429:
await asyncio.sleep(0.01)  # back off briefly without blocking the event loop
else:
raise Exception(str(resp.status))
except (
aiohttp.client_exceptions.ClientConnectorError,
aiohttp.client_exceptions.ServerDisconnectedError,
asyncio.TimeoutError,
) as e:
# print(type(e), e, str(count))
if count < 5:
await cloud_work(blob, inp, comp, count + 1)
except Exception as e:
print(type(e), e)
return
async def parent2(comp, unordered_scores):
""" manage the async calls to the DOAJ api """
# test for validity
to_sort = [(k, v) for k, v in comp.items() if test_response(v)]
print("Journals checked:" + str(len(to_sort)))
# this sort is needed to reduce API calls to doaj.org
top = sorted(to_sort, key=lambda x: x[1], reverse=True)[:5]
# make calls to the doaj API asynchronously
await asyncio.gather(
*[titles(idx, item, unordered_scores) for idx, item in enumerate(top)]
)
return
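# Hypothetical helper (not part of the app): running the two async stages directly,
# outside Flask, assuming valid settings and credentials are configured. This mirrors
# what index() does for a POST request.
def _example_run_pipeline(abstract_text):
    comp, unordered_scores = {}, {}
    asyncio.run(parent1(abstract_text, comp))
    asyncio.run(parent2(comp, unordered_scores))
    return OrderedDict(
        sorted(unordered_scores.items(), key=lambda t: t[0], reverse=True)
    )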
def test_response(resp):
""" some abstract collections raise ValueErrors. Ignore these """
try:
return float(resp) # will evaluate as false if float == 0.0
except ValueError:
return False
async def titles(idx, item, unordered_scores):
if regex.match(r"^[0-9]{4}-[0-9]{3}[0-9Xx]$", item[0]):
issn = item[0]
else:
raise Exception("ISSN does not match regex")
journal_data = await asks.get(
"https://doaj.org/api/v2/search/journals/issn%3A" + issn
)
journal_json = journal_data.json()
try:
title = journal_json["results"][0]["bibjson"]["title"]
if title[-1:] == " ":
title = title[:-1]
url = "https://doaj.org/toc/" + issn
except:
title = "Title lookup failed. Try finding this item by ISSN instead.."
url = ""
score = float(item[1]) * 100
unordered_scores[score] = (title, issn, url)
return
if __name__ == "__main__":
app.run()
| """ run the comparisons using asyncio """
import asyncio
import asks
import regex
import settingsmay2021 as settings
import aiohttp
import langdetect
import os
import schedule
from time import sleep
from flask_bootstrap import Bootstrap
from collections import OrderedDict
from flask_wtf import FlaskForm
from wtforms import TextAreaField, SubmitField
from wtforms.validators import Length, ValidationError
from flask import Flask, render_template, request, url_for, Response, abort
from datetime import datetime
from redislite import StrictRedis
app = Flask(__name__, static_url_path="/static")
Bootstrap(app)
app.config["SECRET_KEY"] = settings.csrf
REDIS = os.path.join("/tmp/redis.db")
r = StrictRedis(REDIS, charset="utf-8", decode_responses=True)
r.hset("counter", "increment", 0)
def reset_redis():
r.hset("counter", "increment", 0)
schedule.every().hour.do(reset_redis)
class WebForm(FlaskForm):
""" for validation """
webabstract = TextAreaField(
validators=[
Length(
min=150,
max=10000,
message="Your abstract must be between 150 and 10,000 characters.",
)
]
)
def validate_webabstract(form, field):
try:
language = langdetect.detect(field.data)
except langdetect.lang_detect_exception.LangDetectException:
raise ValidationError(
"Your abstract must be between 150 and 10,000 characters."
)
print(language)
if language != "en":
raise ValidationError(
"The Open Journal Matcher only works with abstracts written in English."
)
submit = SubmitField("Search")
@app.route("/", methods=["GET", "POST"])
def index():
""" display index page """
form = WebForm()
valid = form.validate_on_submit()
schedule.run_pending()
if request.method == "POST" and valid:
# check to ensure not over rate limit
counter = int(r.hget("counter", "increment"))
counter += 1
print("counter:", counter)
if counter >= 10:
rate_error = {
"webabstract": [
"The application is experiencing peak load. Please try again later."
]
}
print("Turnaway due to load")
return render_template(
"index.html", form=form, errors=rate_error, output=""
)
r.hset("counter", "increment", counter)
# lay the groundwork
comp = {}
unordered_scores = {}
inp = form.webabstract.data
t0 = datetime.now()
# do the work
asyncio.run(parent1(inp, comp))
asyncio.run(parent2(comp, unordered_scores))
# sort the results
scores = OrderedDict(
sorted(unordered_scores.items(), key=lambda t: t[0], reverse=True)
)
# calculate running time
t1 = datetime.now()
print(t1 - t0)
return render_template("index.html", form=form, errors={}, output=scores)
elif request.method == "POST" and not valid:
return render_template("index.html", form=form, errors=form.errors, output="")
else:
return render_template("index.html", form=form, errors={}, output="")
@app.after_request
def add_security_headers(resp):
resp.headers["X-Content-Type-Options"] = "nosniff"
resp.headers["X-Frame-Options"] = "SAMEORIGIN"
resp.headers["X-XSS-Protection"] = "1; mode=block"
resp.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
resp.headers[
"Content-Security-Policy"
] = "script-src 'self'; style-src 'self'; default-src 'none'"
return resp
async def parent1(inp, comp):
""" manage the async calls to GCP """
await asyncio.gather(
*[cloud_work(blob, inp, comp, 0) for blob in settings.bucket_list]
)
return
async def cloud_work(blob, inp, comp, count):
""" interact with google cloud function """
max_out = 0
try:
async with aiohttp.ClientSession() as session:
while max_out < 6:
async with session.post(
settings.cloud_function,
json={"d": inp, "f": blob, "t": settings.token},
) as resp:
if max_out >= 5:
raise Exception("Max out")
if resp.status == 200:
comp[blob] = await resp.text()
break
elif resp.status == 500:
max_out += 1
elif resp.status == 429:
await asyncio.sleep(0.01)  # back off briefly without blocking the event loop
else:
raise Exception(str(resp.status))
except (
aiohttp.client_exceptions.ClientConnectorError,
aiohttp.client_exceptions.ServerDisconnectedError,
asyncio.TimeoutError,
) as e:
# print(type(e), e, str(count))
if count < 5:
await cloud_work(blob, inp, comp, count + 1)
except Exception as e:
print(type(e), e)
return
async def parent2(comp, unordered_scores):
""" manage the async calls to the DOAJ api """
# test for validity
to_sort = [(k, v) for k, v in comp.items() if test_response(v)]
print("Journals checked:" + str(len(to_sort)))
# this sort is needed to reduce API calls to doaj.org
top = sorted(to_sort, key=lambda x: x[1], reverse=True)[:5]
# make calls to the doaj API asynchronously
await asyncio.gather(
*[titles(idx, item, unordered_scores) for idx, item in enumerate(top)]
)
return
def test_response(resp):
""" some abstract collections raise ValueErrors. Ignore these """
try:
return float(resp) # will evaluate as false if float == 0.0
except ValueError:
return False
async def titles(idx, item, unordered_scores):
if regex.match(r"^[0-9]{4}-[0-9]{3}[0-9Xx]$", item[0]):
issn = item[0]
else:
raise Exception("ISSN does not match regex")
journal_data = await asks.get(
"https://doaj.org/api/v2/search/journals/issn%3A" + issn
)
journal_json = journal_data.json()
try:
title = journal_json["results"][0]["bibjson"]["title"]
if title[-1:] == " ":
title = title[:-1]
url = "https://doaj.org/toc/" + issn
except:
title = "Title lookup failed. Try finding this item by ISSN instead.."
url = ""
score = float(item[1]) * 100
unordered_scores[score] = (title, issn, url)
return
if __name__ == "__main__":
app.run()
| en | 0.76576 | run the comparisons using asyncio for validation display index page # check to ensure not over rate limit # lay the groundwork # do the work # sort the results # calculate running time manage the async calls to GCP interact with google cloud function # print(type(e), e, str(count)) manage the async calls to the DOAJ api # test for validity # this sort is needed to reduce API calls to doaj.org # make calls to the doaj API asynchronously some abstract collections raise ValueErrors. Ignore these # will evaluate as false if float == 0.0 | 2.374335 | 2 |
ht3_solver_run_script.py | hjabird/XFEM_Boundary_Cooling_Solver | 0 | 6631435 | <filename>ht3_solver_run_script.py
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@copyright Copyright 2017, <NAME>
@license: MIT
@status: alpha
"""
import ht3_solver as ht3s
import ElemMesh as em
import Elements as Elements
import numpy as np
import pickle
from ScriptTools import *
# Convenience...
run_id = "TEST_ID"
print("Run id is "+str(run_id))
## MESH INPUTS
mesh = em.ElemMesh()
# WE CAN BUILD A MESH FROM SCRATCH:
# 1x3 Mesh:
# ---------------
# | | | |
# | | | |
# ---------------
#mesh.nodes[0] = np.array([0.0, 0.0, 0.0])
#mesh.nodes[1] = np.array([1.0, 0.0, 0.0])
#mesh.nodes[2] = np.array([2.0, 0.0, 0.0])
#mesh.nodes[3] = np.array([3.0, 0.0, 0.0])
#mesh.nodes[4] = np.array([0.0, 1.0, 0.0])
#mesh.nodes[5] = np.array([1.0, 1.0, 0.0])
#mesh.nodes[6] = np.array([2.0, 1.0, 0.0])
#mesh.nodes[7] = np.array([3.0, 1.0, 0.0])
#
#mesh.elems[0] = Elements.ElemQuad4(mesh.nodes, (0,1,5,4))
#mesh.elems[1] = Elements.ElemQuad4(mesh.nodes, (1,2,6,5))
#mesh.elems[2] = Elements.ElemQuad4(mesh.nodes, (2,3,7,6))
#
#mesh.nodes_in_physical_groups = {}
#mesh.nodes_in_physical_groups[0] = [0,4]
#mesh.nodes_in_physical_groups[1] = [3,7]
#mesh.nodes_in_physical_groups[2] = [1,2,3,4,5,6,7]
#mesh.nodes_in_physical_groups[3] = [0,4,3,7]
#mesh.phys_group_names = {0:"Left",
# 1:"Right",
# 2:"Volume",
# 3:"Boundary"}
# OR IMPORT OUR MESH FROM A .msh FILE:
mesh.build_from_gmsh("./RMesh/MESH_FILE.msh")
mesh.print_elem_counts()
mesh.remove_line_elems() # Remove line elements on boundary
mesh.print_elem_counts()
mesh.calc_elems_in_physgrps() # [Boilerplate]
mesh.print_group_elem_counts()
mesh.elem_quad9_to_quad8() # Currently, quad9s don't work. This converts to quad8.
# ARE WE USING ENRICHMENT? IF YES:
# We need a mesh to project results onto:
outmesh = em.ElemMesh() # Mesh object
outmesh.build_from_gmsh("./RMesh/MESH_FILE_2.msh") # Import mesh from .msh
outmesh.print_elem_counts() #(Boilerplate)
outmesh.remove_line_elems() # Again, remove line elements.
outmesh.print_elem_counts()
outmesh.calc_elems_in_physgrps()
outmesh.print_group_elem_counts()
# DEFINE OUR ENRICHMENT:
# Enrichment needs to define an enrichment function and its partial derivatives.
# We can make a function generate these functions for similar enrichments.
def gen_tanhkx2d(k, dim, scalar):
offset = 1.0 - np.tanh(2*k)
f = lambda x:np.tanh(scalar*k*x[dim]+k) + offset - 1.0
f_prime0 = lambda x: scalar * k * (1.0/np.cosh(scalar*k*x[dim] + k))**2
f_prime1 = lambda x: 0
if dim == 0:
f_prime = lambda x: np.array((f_prime0(x), f_prime1(x)))
if dim == 1:
f_prime = lambda x: np.array((f_prime1(x), f_prime0(x)))
return (f, f_prime)
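# Hypothetical check (not part of the original script): evaluate one generated enrichment
# and compare its analytic derivative against a central finite difference; the two values
# should agree to roughly 1e-6.
def _example_check_enrichment():
    f, f_prime = gen_tanhkx2d(k=2.0, dim=0, scalar=1.0)
    x = np.array([0.25, 0.0])
    h = 1e-6
    finite_diff = (f(np.array([x[0] + h, x[1]])) - f(np.array([x[0] - h, x[1]]))) / (2 * h)
    return f_prime(x)[0], finite_diff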
# We can also write a function to apply multiple enrichments to a single element:
def enrich_me(group, dim, pm, k_list, ids_start):
if dim == 0:
quadrature = (70, 1) # The quadrature is not symmetric.
else:
quadrature = (1, 70)
for k in k_list:
print("SCRIPT:\tAdding enrichment to quad with id "+ str(ids_start))
enr = gen_tanhkx2d(k, dim, pm)
mesh.enrich_elems(group, enr[0],
enr[1],
quadrature,
Elements.ElemQuadBase,
ids_start)
ids_start += 1
# Enrichment IDs - Enrichments on the same node that share the same id will
# share a degree of freedom.
k_list = [2, 3, 6, 12, 24]
enrich_me("Bottom", 1, 1, k_list, 100)
enrich_me("Right", 1, -1, k_list, 100)
enrich_me("Arc", 0, -1, k_list, 200)
# END DEFINE ENRICHEMENT
## CREATE A NEW SOLVER OBJECT
solver = ht3s.ht3_solver(mesh)
# solver.norm_path = "./ROut/ht3_"+run_id+"_norm.csv" # If norm output is desired, this must be defined.
# solver.export_mesh = outmesh # If XFEM, an output mesh must be defined.
solver.save_path = "./ROut/ht3_"+run_id+ "_" # A path to save the solution .vtus must be defined.
mesh.export_to_vtk(solver.save_path+"mesh") # It is useful to save the input mesh as a VTU. Good for debugging.
# We can specify that saving and norm calculation is only done on specific steps:
# def norm_reporting_rule(step, dt):
# if step % np.floor(5e-6 / dt) == 0:
# return True
# else:
# return False
# solver.norm_saving_rule = norm_reporting_rule
def saving_rule(step, dt): return False
solver.save_rule = saving_rule
# We can use a predefined solution:
#f(x,y,t) = exp(- x^c kt) + exp(- y^c kt)
# c = 1
# k = 1
# solution = lambda x, t: np.exp(-1 * x[0]**c *k*t) + np.exp(-1 * x[1]**c *k*t) # The solution
# oneD1 = lambda x, t: -1 * c * k * t * x**(c-1) * np.exp(-1 * x**c *k *t) # Partial deriv 1
# def oneD2(x, t): # Partial deriv 2
# a = c*k*t*np.exp(-x**c * k*t)
# b = c*k*t*x**(2*c-2)
# d = (c - 1) * x**(c-2)
# return a * ( b - d)
# laplacian = lambda x, t: oneD2(x[0], t) + oneD2(x[1], t) # Laplacian
# def norm_grad(x, t, n): # Grad in given dir.
# dfdx = np.array((oneD1(x[0], t), oneD1(x[1], t)))
# return np.dot(n, dfdx)
# dTdt = lambda x, t: -k * (x[0]**c * np.exp(-k * t*x[0]**c) + \ # DT / Dt
# x[1]**c * np.exp(-k * t*x[1]**c))
# solver.redef_F_laplacian = lambda x, y, t: laplacian((x,y), t)
# solver.redef_f_norm_grad = lambda x, y, t, n: norm_grad((x,y), t, n)
# solver.redef_dTdt = lambda x, y, t: dTdt((x, y), t)
# solver.expected_solution = solution
# SIMULATION CONSTANTS
# Some of the constants below are optical properties for the SP1 radiation approximation included in the code.
# If len(fq_list) == 0, no radiation will be modelled. Radiation constants like diff_scale
# must still be defined, however - assertions will (should) fire otherwise.
#mesh
#time
solver.zero_timings()
solver.d_T = 1e-7
solver.max_T = 1.01e-5
# simulation setup optical
solver.v0_frequency = 2.933e13
solver.fq_list = []#[3.422, 3.733, 4.563, 5.133, 5.866, 6.844, 102.671, 10e6]
# simulation setup temperatures
solver.background_temperature = 300.00
solver.initial_temperature = 1000.0
solver.diff_scale = 0.5
#material properties
#optical
solver.absorb_coeffs = []#[7136.00, 576.32, 276.98, 27.98, 15.45, 7.70, 0.50, 0.40]
solver.alpha = 0.92  # (Hemispheric emissivity)
solver.refr_idx_vol = 1.46
solver.refr_idx_background = 1.00
solver.r1 = 0.0
solver.r2 = 0.0
#conductive
solver.density = 2514.8
solver.heat_capacity = 1239.6
solver.thermal_conductivity = 1.672
solver.convect_coeff = 1.0
# Set solver running.
# Solver can be called with initial solution for FEM problems.
# solver.run(initial= lambda x,y: solution(np.array((x,y)),solver.current_T))
solver.run()
# Solver runs till it ends.
# FEM: a solution can be saved (i.e. the mesh plus its degrees of freedom). This is not currently possible with XFEM.
# f = open("ROut/SOLUTION.pkl", 'wb')
# ts = ht3s.saved_solver(solver)
# pickle.dump(ts, f)
# f.close()
# FEM OR XFEM: a reference solution can be opened and compared against (i.e. relative-error L2 norms are calculated)
f = open("../v0.6_FEM/ROut/SOLUTION.pkl", 'rb')
fem_ref = pickle.load(f).return_solver()
f.close()
mapping = solver.compare_solutions(fem_ref, 1e-7)
solver.compare_solutions(fem_ref, 2e-7, mesh_mapping = mapping)
solver.compare_solutions(fem_ref, 4e-7, mesh_mapping = mapping)
print("DONE!")
web_scraper/__init__.py | vvaezian/Web-Scraper | 0 | 6631436 | <gh_stars>0
from .get_links_directly import get_links_directly
from .get_links_using_Google_search import get_links_using_Google_search
from .find_links_by_extension import find_links_by_extension
ipb_homework_checker/tools.py | PRBonn/ipb_homework_checker | 11 | 6631437 | <filename>ipb_homework_checker/tools.py
"""Handle various utility tasks."""
from os import path
from os import makedirs
from os import environ
import tempfile
import subprocess
import logging
import datetime
from .schema_tags import OutputTags
PKG_NAME = "ipb_homework_checker"
PROJECT_ROOT_FOLDER = path.abspath(path.dirname(path.dirname(__file__)))
DATE_PATTERN = "%Y-%m-%d %H:%M:%S"
MAX_DATE_STR = datetime.datetime.max.strftime(DATE_PATTERN)
EXPIRED_TAG = "expired"
log = logging.getLogger("GHC")
def get_temp_dir():
"""Create a temporary folder if needed and return it."""
tempdir = path.join(tempfile.gettempdir(), PKG_NAME)
if not path.exists(tempdir):
makedirs(tempdir)
return tempdir
def create_folder_if_needed(directory):
"""Create a folder if it does not exist."""
if not path.exists(directory):
makedirs(directory)
def expand_if_needed(input_path):
"""Expand the path if it is not absolute."""
if path.isabs(input_path):
return input_path
new_path = path.expanduser(input_path)
if path.isabs(new_path):
# This path needed user expansion. Now that the user home directory is
# expanded this is a full absolute path.
return new_path
# The user could not be expanded, so we assume it is just another relative
# path to the project directory. Mostly used for testing purposes here.
return path.join(PROJECT_ROOT_FOLDER, new_path)
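# Illustrative sketch (not part of the original module), kept as comments:
# expand_if_needed('/tmp/results')  -> '/tmp/results'                        (already absolute)
# expand_if_needed('~/results')     -> '/home/<user>/results'                (user expansion)
# expand_if_needed('test/results')  -> PROJECT_ROOT_FOLDER + '/test/results' (relative fallback)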
def convert_to(output_type, value):
"""Convert the value to a specified type."""
if not value:
return None, "No value. Cannot convert to '{}'.".format(output_type)
try:
if output_type == OutputTags.STRING:
result = str(value).strip()
if output_type == OutputTags.NUMBER:
result = float(value)
except ValueError as e:
log.error('Exception: %s.', e)
return None, str(e)
return result, "OK"
def parse_git_url(git_url):
"""Parse the git url.
Args:
git_url (str): url of a git repository (https or ssh)
Returns:
        (str, str, str): tuple of domain, user and project name parsed from the url
"""
import re
regex = re.compile(r'(?:git@|https:\/\/)' # Prefix
r'([\w\-_\.]+)' # Domain
r'[:\/]' # Separator : or /
r'([\w\-_\.\/]+)' # User or folders
r'[\/]' # Separator /
r'([\w\-_]+)' # Project name
r'(?:.git)*$') # .git or nothing
domain, user, project = regex.search(git_url).groups()
return domain, user, project
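# Illustrative sketch (not part of the original module), kept as comments;
# the URLs below are made-up examples:
# parse_git_url('[email protected]:user/some_project.git')
#   -> ('github.com', 'user', 'some_project')
# parse_git_url('https://gitlab.com/group/subgroup/some_project')
#   -> ('gitlab.com', 'group/subgroup', 'some_project')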
class CmdResult:
"""A small container for command result."""
SUCCESS = 0
FAILURE = 13
def __init__(self, returncode=None, stdout=None, stderr=None):
"""Initialize either stdout of stderr."""
self._returncode = returncode
self._stdout = stdout
self._stderr = stderr
def succeeded(self):
"""Check if the command succeeded."""
if self.returncode is not None:
return self.returncode == CmdResult.SUCCESS
if self.stderr:
return False
return True
@property
def returncode(self):
"""Get returncode."""
return self._returncode
@property
def stdout(self):
"""Get stdout."""
return self._stdout
@property
def stderr(self):
"""Get stderr."""
return self._stderr
@stderr.setter
def stderr(self, value):
self._returncode = None # We can't rely on returncode anymore
self._stderr = value
@staticmethod
def success():
"""Return a cmd result that is a success."""
return CmdResult(stdout="Success!")
def __repr__(self):
"""Representatin of command result."""
stdout = self.stdout
if not stdout:
stdout = ""
if self.stderr:
return "stdout: {}, stderr: {}".format(stdout.strip(),
self.stderr.strip())
return stdout.strip()
def run_command(command, shell=True, cwd=path.curdir, env=environ, timeout=20):
"""Run a generic command in a subprocess.
Args:
command (str): command to run
Returns:
str: raw command output
"""
try:
startupinfo = None
if shell and isinstance(command, list):
command = subprocess.list2cmdline(command)
log.debug("running command: \n%s", command)
process = __run_subprocess(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell,
cwd=cwd,
env=env,
startupinfo=startupinfo,
timeout=timeout)
return CmdResult(returncode=process.returncode,
stdout=process.stdout.decode('utf-8'),
stderr=process.stderr.decode('utf-8'))
except subprocess.CalledProcessError as e:
output_text = e.output.decode("utf-8")
log.error("command '%s' finished with code: %s", e.cmd, e.returncode)
log.debug("command output: \n%s", output_text)
return CmdResult(returncode=e.returncode, stderr=output_text)
except subprocess.TimeoutExpired as e:
output_text = "Timeout: command '{}' ran longer than {} seconds".format(
e.cmd.strip(), e.timeout)
log.error(output_text)
return CmdResult(returncode=1, stderr=output_text)
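# Illustrative sketch (not part of the original module), kept as comments:
# result = run_command('echo hello', timeout=5)
# if result.succeeded():
#     print(result.stdout)   # -> 'hello\n'
# else:
#     print(result.stderr)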
def __run_subprocess(command,
input=None,
timeout=None,
check=False,
**kwargs):
"""Run a command as a subprocess.
Using the guide from StackOverflow:
https://stackoverflow.com/a/36955420/1763680
This command has been adapted from:
https://github.com/python/cpython/blob/3.5/Lib/subprocess.py#L352-L399
This code does essentially the same as subprocess.run(...) but makes sure to
kill the whole process tree which allows to use the timeout even when using
shell=True. The reason I don't want to stop using shell=True here is the
convenience of piping arguments from one function to another.
"""
if input is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = subprocess.PIPE
import os
import signal
from subprocess import Popen, TimeoutExpired, CalledProcessError
from subprocess import CompletedProcess
with Popen(command, preexec_fn=os.setsid, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired:
# Kill the whole group of processes.
os.killpg(process.pid, signal.SIGINT)
stdout, stderr = process.communicate()
raise TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
retcode = process.poll()
if check and retcode:
raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
return CompletedProcess(process.args, retcode, stdout, stderr)
test/test_web.py | asnramos/asv | 0 | 6631438 | <filename>test/test_web.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import shutil
import time
import urllib.parse
from os.path import join, abspath, dirname
import pytest
from asv import config, util
try:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
except ImportError:
pass
from . import tools
from .tools import get_with_retry, WAIT_TIME, WIN
def _rebuild_basic_html(basedir):
local = abspath(dirname(__file__))
cwd = os.getcwd()
if os.path.isdir(basedir):
html_dir = join(basedir, 'html')
dvcs = tools.Git(join(basedir, 'repo'))
return html_dir, dvcs
os.makedirs(basedir)
os.chdir(basedir)
try:
machine_file = join(basedir, 'asv-machine.json')
shutil.copyfile(join(local, 'asv-machine.json'),
machine_file)
values = [[x] * 2 for x in [0, 0, 0, 0, 0,
1, 1, 1, 1, 1,
3, 3, 3, 3, 3,
2, 2, 2, 2, 2]]
dvcs = tools.generate_test_repo(basedir, values)
first_tested_commit_hash = dvcs.get_hash('master~14')
repo_path = dvcs.path
shutil.move(repo_path, join(basedir, 'repo'))
dvcs = tools.Git(join(basedir, 'repo'))
conf = config.Config.from_json({
'env_dir': join(basedir, 'env'),
'benchmark_dir': join(local, 'benchmark'),
'results_dir': join(basedir, 'results_workflow'),
'html_dir': join(basedir, 'html'),
'repo': join(basedir, 'repo'),
'dvcs': 'git',
'project': 'asv',
'matrix': {"env": {"SOME_TEST_VAR": ["1"]}},
'regressions_first_commits': {
'.*': first_tested_commit_hash
},
})
if WIN:
# Tell conda to not use hardlinks: on Windows it's not possible
            # to delete hard links to files in use, which causes problems when
            # trying to clean up environments during this test (since the
# same cache directory may get reused).
conf.matrix["env"]["CONDA_ALWAYS_COPY"] = ["True"]
tools.run_asv_with_conf(conf, 'run', 'ALL',
'--show-stderr', '--quick',
'--bench=params_examples[a-z0-9_.]*track_',
_machine_file=machine_file)
# Swap CPU info and obtain some results
info = util.load_json(machine_file, api_version=1)
# Put in parameter values that need quoting in file names
info['orangutan']['cpu'] = 'Not /really/ <fast>'
info['orangutan']['ram'] = '?'
info['orangutan']['NUL'] = ''
util.write_json(machine_file, info, api_version=1)
tools.run_asv_with_conf(conf, 'run', 'master~10..', '--steps=3',
'--show-stderr', '--quick',
'--bench=params_examples[a-z0-9_.]*track_',
_machine_file=machine_file)
# Output
tools.run_asv_with_conf(conf, 'publish')
shutil.rmtree(join(basedir, 'env'))
finally:
os.chdir(cwd)
return conf.html_dir, dvcs
@pytest.mark.flaky(reruns=1, reruns_delay=5)
def test_web_summarygrid(browser, basic_html):
html_dir, dvcs = basic_html
ignore_exc = (NoSuchElementException, StaleElementReferenceException)
with tools.preview(html_dir) as base_url:
get_with_retry(browser, base_url)
WebDriverWait(browser, WAIT_TIME).until(EC.title_is(
'airspeed velocity of an unladen asv'))
# Verify benchmark names are displayed as expected
for href, expected in (
('#subdir.time_subdir.time_foo', u'time_subdir.time_foo'),
('#params_examples.ParamSuite.track_value', u'ParamSuite.track_value'),
('#custom.time_function', u'My Custom Function'),
('#named.track_custom_pretty_name', u'this.is/the.answer'),
):
item = browser.find_element_by_xpath(
"//a[@href='{}']/div[@class='benchmark-text']".format(href))
assert item.text == expected
# Open a graph display, scroll to item and click
item = browser.find_element_by_link_text('track_param')
y = item.location['y']
browser.execute_script('window.scrollTo(0, {0})'.format(y - 200))
item.click()
# Verify there's a plot of some sort
browser.find_element_by_css_selector('canvas.flot-base')
# Click a parameterized test button, which should toggle the button
param_button = browser.find_element_by_link_text('benchmark.params_examples.ClassOne')
assert 'active' in param_button.get_attribute('class').split()
param_button.click()
def check(*args):
param_button = browser.find_element_by_link_text('benchmark.params_examples.ClassOne')
return 'active' not in param_button.get_attribute('class').split()
WebDriverWait(browser, WAIT_TIME, ignored_exceptions=ignore_exc).until(check)
# Check there's no error popup; needs an explicit wait because
# there is no event that occurs on successful load that
# doesn't also occur on a failed load
time.sleep(1.0)
error_box = browser.find_element_by_id('error-message')
assert not error_box.is_displayed()
@pytest.mark.flaky(reruns=1, reruns_delay=5)
def test_web_regressions(browser, basic_html):
html_dir, dvcs = basic_html
bad_commit_hash = dvcs.get_hash('master~9')
ignore_exc = (NoSuchElementException, StaleElementReferenceException)
browser.set_window_size(1200, 900)
with tools.preview(html_dir) as base_url:
get_with_retry(browser, base_url)
regressions_btn = browser.find_element_by_link_text('Regressions')
regressions_btn.click()
# Wait for element to appear in the table
WebDriverWait(browser, WAIT_TIME).until(EC.text_to_be_present_in_element(
('xpath', '//table[1]/tbody/tr[2]/td[1]'), 'params_examples.track_find_test'
))
# Check that the expected links appear in the table
regression_1 = browser.find_element_by_link_text('params_examples.track_find_test(1)')
browser.find_element_by_link_text('params_examples.track_find_test(2)')
browser.find_element_by_link_text(bad_commit_hash[:8])
href = regression_1.get_attribute('href')
assert '/#params_examples.track_find_test?' in href
assert 'commits=' in href
        # Sort the tables by benchmark name (PhantomJS doesn't allow doing it via ActionChains)
browser.execute_script("$('thead th').eq(0).stupidsort('asc')")
WebDriverWait(browser, WAIT_TIME).until(EC.text_to_be_present_in_element(
('xpath', '//table[1]/tbody/tr[1]/td[1]'), 'params_examples.track_find_test(1)'
))
# Check the contents of the table
table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
assert len(table_rows) == 2
cols1 = [td.text for td in table_rows[0].find_elements_by_xpath('td')]
cols2 = [td.text for td in table_rows[1].find_elements_by_xpath('td')]
assert cols1[0] == 'params_examples.track_find_test(1)'
assert cols2[0] == 'params_examples.track_find_test(2)'
assert re.match(r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d$', cols1[1])
assert re.match(r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d$', cols2[1])
assert cols1[2:] == [bad_commit_hash[:8], '2.00x', '1.00', '2.00', 'Ignore']
assert cols2[2:] == [bad_commit_hash[:8], '2.00x', '1.00', '2.00', 'Ignore']
# Check that the ignore buttons work as expected
buttons = [button for button in browser.find_elements_by_xpath('//button')
if button.text == 'Ignore']
buttons[0].click()
# The button should disappear, together with the link
WebDriverWait(browser, WAIT_TIME).until_not(EC.visibility_of(buttons[0]))
WebDriverWait(browser, WAIT_TIME).until_not(EC.visibility_of(regression_1))
table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
assert len(table_rows) == 1
# There's a second button for showing the links, clicking
# which makes the elements reappear
show_button = [button for button in browser.find_elements_by_xpath('//button')
if button.text == 'Show ignored regressions...'][0]
show_button.click()
regression_1 = browser.find_element_by_link_text('params_examples.track_find_test(1)')
WebDriverWait(browser, WAIT_TIME).until(EC.visibility_of(regression_1))
table_rows = browser.find_elements_by_xpath('//table[2]/tbody/tr')
assert len(table_rows) == 1
# There's a config sample element
pre_div = browser.find_element_by_xpath('//pre')
assert "params_examples\\\\.track_find_test\\\\(1\\\\)" in pre_div.text
# There's an unignore button that moves the element back to the main table
unignore_button = [button for button in browser.find_elements_by_xpath('//button')
if button.text == 'Unignore'][0]
unignore_button.click()
# wait until the table has two rows
browser.find_elements_by_xpath('//table[1]/tbody/tr[2]')
table_rows = browser.find_elements_by_xpath('//table[1]/tbody/tr')
assert len(table_rows) == 2
# Check that a plot of some sort appears on mouseover. The
# page needs to be scrolled first so that the mouseover popup
# has enough space to appear.
regression_1 = browser.find_element_by_link_text('params_examples.track_find_test(1)')
y = regression_1.location['y']
browser.execute_script('window.scrollTo(0, {0})'.format(y - 200))
chain = ActionChains(browser)
chain.move_to_element(regression_1)
chain.perform()
browser.find_element_by_css_selector('div.popover-content')
browser.find_element_by_css_selector('canvas.flot-base')
# Check group/ungroup button functionality
group_button, = [button for button in browser.find_elements_by_xpath('//button')
if button.text == "Group regressions"]
group_button.click()
def check(*args):
columns = browser.find_element_by_xpath('//table/thead/tr[1]').text
return columns == 'Benchmark Last date Commits Factor Best Current'
WebDriverWait(browser, WAIT_TIME, ignored_exceptions=ignore_exc).until(check)
ungroup_button, = [button for button in browser.find_elements_by_xpath('//button')
if button.text == "Ungroup regressions"]
ungroup_button.click()
def check(*args):
columns = browser.find_element_by_xpath('//table/thead/tr[1]').text
return columns == 'Benchmark Date Commit Factor Before Best after'
WebDriverWait(browser, WAIT_TIME, ignored_exceptions=ignore_exc).until(check)
@pytest.mark.flaky(reruns=1, reruns_delay=5)
def test_web_summarylist(browser, basic_html):
ignore_exc = (NoSuchElementException, StaleElementReferenceException)
html_dir, dvcs = basic_html
last_change_hash = dvcs.get_hash('master~4')
browser.set_window_size(1200, 900)
with tools.preview(html_dir) as base_url:
get_with_retry(browser, base_url)
summarylist_btn = browser.find_element_by_link_text('Benchmark list')
summarylist_btn.click()
# Check text content in the table
base_link = browser.find_element_by_link_text('params_examples.track_find_test')
cur_row = base_link.find_element_by_xpath('../..')
m = re.match('params_examples.track_find_test \\([12]\\) 2.00 \u221233.3% \\(-1.00\\).*' +
last_change_hash[:8],
cur_row.text)
assert m, cur_row.text
# Check units in row
base_link2 = browser.find_element_by_link_text('params_examples.track_bytes')
cur_row2 = base_link2.find_element_by_xpath('../..')
m = re.match(r'params_examples.track_bytes\s*1.000M', cur_row2.text)
assert m, cur_row2.text
# Check link
base_href, qs = urllib.parse.splitquery(base_link.get_attribute('href'))
base_url, tag = urllib.parse.splittag(base_href)
assert urllib.parse.parse_qs(qs) == {'ram': ['128GB'], 'cpu': ['Blazingly fast'],
'NUL': ['[none]']}
assert tag == 'params_examples.track_find_test'
# Change table sort (sorting is async, so needs waits)
sort_th = browser.find_element_by_xpath('//th[text()="Recent change"]')
sort_th.click()
WebDriverWait(browser, WAIT_TIME).until(
EC.text_to_be_present_in_element(('xpath', '//tbody/tr[1]'),
'params_examples.track_find_test'))
# Try to click cpu selector link in the panel
cpu_select = browser.find_element_by_link_text('Not /really/ <fast>')
cpu_select.click()
        # For the other CPU, there is no recent change recorded; only
        # the latest result is available
def check(*args):
links = browser.find_elements_by_link_text('params_examples.track_find_test')
visible_links = [item for item in links if item.is_displayed()]
row_texts = [link.find_element_by_xpath('../..').text
for link in visible_links]
row_texts.sort()
if len(row_texts) != 2:
return False
ok = (re.match(r'^params_examples\.track_find_test \(1\) 2\.00 .*\(-1\.00\).*$',
row_texts[0]) and
re.match(r'^params_examples\.track_find_test \(2\) 2\.00 .*\(-1\.00\).*$',
row_texts[1]))
return ok
WebDriverWait(browser, WAIT_TIME, ignored_exceptions=ignore_exc).until(check)
alerts_deduplication.py | andtheWings/alerts | 0 | 6631439 | <filename>alerts_deduplication.py
import pandas as pd
import pandas_dedupe as dd
import sqlalchemy as sa
import os
os.chdir('/home/riggins/mrc_analyses/mrc_data')
engine = sa.create_engine("mysql+pymysql://[email protected]:3306/mrc_data")
people = pd.read_sql(
'''
SELECT * FROM people
WHERE possible_dupe = 1
''', engine)
os.chdir('/home/riggins/mrc_analyses/mrc_data/people_model')
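# Note (added comment): dedupe_dataframe runs pandas_dedupe's active-learning
# deduplication on the listed columns; if no training/settings files exist yet
# it asks for interactive labelling and writes them to the current directory,
# which is presumably why the script changes into people_model first.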
deduped_people = dd.dedupe_dataframe(
people,
['person_id', 'cmrn_id', 'rin_cc']
)
deduped_people.to_parquet('deduped_people')
setup.py | sergioabadarca/python-amazon-paapi | 0 | 6631440 | <reponame>sergioabadarca/python-amazon-paapi
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='python-amazon-paapi',
version='3.3.4',
author='<NAME>',
author_email='<EMAIL>',
description='Amazon Product Advertising API 5.0 wrapper for Python',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
url='https://github.com/sergioteula/python-amazon-paapi',
packages=setuptools.find_packages(),
install_requires=['certifi',
'six',
'python_dateutil',
'setuptools',
'urllib3'],
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=2.7',
)
bibliography_plugins/traditionalBibliography.py | MPvHarmelen/MarkdownCiteCompletions | 0 | 6631441 | <reponame>MPvHarmelen/MarkdownCiteCompletions
from ..external import latex_chars
from ..latextools_utils import bibcache
import codecs
import re
import sublime
import traceback
kp = re.compile(r'@[^\{]+\{\s*(.+)\s*,', re.UNICODE)
# new and improved regex
# we must have "title" then "=", possibly with spaces
# then either {, maybe repeated twice, or "
# then spaces and finally the title
# # We capture till the end of the line as maybe entry is broken over several lines
# # and in the end we MAY but need not have }'s and "s
# tp = re.compile(r'\btitle\s*=\s*(?:\{+|")\s*(.+)', re.IGNORECASE) # note no comma!
# # Tentatively do the same for author
# # Note: match ending } or " (surely safe for author names!)
# ap = re.compile(r'\bauthor\s*=\s*(?:\{|")\s*(.+)(?:\}|"),?', re.IGNORECASE)
# # Editors
# ep = re.compile(r'\beditor\s*=\s*(?:\{|")\s*(.+)(?:\}|"),?', re.IGNORECASE)
# # kp2 = re.compile(r'([^\t]+)\t*')
# # and year...
# # Note: year can be provided without quotes or braces (yes, I know...)
# yp = re.compile(r'\byear\s*=\s*(?:\{+|"|\b)\s*(\d+)[\}"]?,?', re.IGNORECASE)
# This may speed things up
# So far this captures: the tag, and the THREE possible groups
multip = re.compile(
r'\b(author|title|year|editor|journal|eprint)\s*=\s*'
r'(?:\{|"|\b)(.+?)(?:\}+|"|\b)\s*,?\s*\Z',
re.IGNORECASE | re.UNICODE
)
# LaTeX -> Unicode decoder
latex_chars.register()
class TraditionalBibliographyPlugin:
def get_entries(self, *bib_files):
entries = []
for bibfname in bib_files:
bib_cache = bibcache.BibCache("trad", bibfname)
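            # try to reuse a previously cached parse of this bib file; fall back to re-parsing it below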
try:
cached_entries = bib_cache.get()
entries.extend(cached_entries)
continue
except:
pass
try:
bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore') # 'ignore' to be safe
except IOError:
print("Cannot open bibliography file %s !" % (bibfname,))
sublime.status_message("Cannot open bibliography file %s !" % (bibfname,))
continue
else:
bib_data = bibf.readlines()
bib_entries = []
entry = {}
for line in bib_data:
line = line.strip()
# Let's get rid of irrelevant lines first
if line == "" or line[0] == '%':
continue
if line.lower()[0:8] == "@comment":
continue
if line.lower()[0:7] == "@string":
continue
if line.lower()[0:9] == "@preamble":
continue
if line[0] == "@":
if 'keyword' in entry:
bib_entries.append(entry)
entry = {}
kp_match = kp.search(line)
if kp_match:
entry['keyword'] = kp_match.group(1)
else:
print(u"Cannot process this @ line: " + line)
print(
u"Previous keyword (if any): " +
entry.get('keyword', '')
)
continue
# Now test for title, author, etc.
# Note: we capture only the first line, but that's OK for our purposes
multip_match = multip.search(line)
if multip_match:
key = multip_match.group(1).lower()
value = codecs.decode(multip_match.group(2), 'latex')
if key == 'title':
value = value.replace(
'{\\textquoteright}', ''
).replace('{', '').replace('}', '')
entry[key] = value
continue
# at the end, we have a single record
if 'keyword' in entry:
bib_entries.append(entry)
print ('Loaded %d bibitems' % (len(bib_entries)))
try:
bib_cache.set(bib_entries)
fmt_entries = bib_cache.get()
entries.extend(fmt_entries)
except:
traceback.print_exc()
print("Using bibliography without caching it")
entries.extend(bib_entries)
finally:
try:
bibf.close()
except:
pass
print("Found %d total bib entries" % (len(entries),))
return entries
| from ..external import latex_chars
from ..latextools_utils import bibcache
import codecs
import re
import sublime
import traceback
kp = re.compile(r'@[^\{]+\{\s*(.+)\s*,', re.UNICODE)
# new and improved regex
# we must have "title" then "=", possibly with spaces
# then either {, maybe repeated twice, or "
# then spaces and finally the title
# # We capture till the end of the line as maybe entry is broken over several lines
# # and in the end we MAY but need not have }'s and "s
# tp = re.compile(r'\btitle\s*=\s*(?:\{+|")\s*(.+)', re.IGNORECASE) # note no comma!
# # Tentatively do the same for author
# # Note: match ending } or " (surely safe for author names!)
# ap = re.compile(r'\bauthor\s*=\s*(?:\{|")\s*(.+)(?:\}|"),?', re.IGNORECASE)
# # Editors
# ep = re.compile(r'\beditor\s*=\s*(?:\{|")\s*(.+)(?:\}|"),?', re.IGNORECASE)
# # kp2 = re.compile(r'([^\t]+)\t*')
# # and year...
# # Note: year can be provided without quotes or braces (yes, I know...)
# yp = re.compile(r'\byear\s*=\s*(?:\{+|"|\b)\s*(\d+)[\}"]?,?', re.IGNORECASE)
# This may speed things up
# So far this captures: the tag, and the THREE possible groups
multip = re.compile(
r'\b(author|title|year|editor|journal|eprint)\s*=\s*'
r'(?:\{|"|\b)(.+?)(?:\}+|"|\b)\s*,?\s*\Z',
re.IGNORECASE | re.UNICODE
)
# LaTeX -> Unicode decoder
latex_chars.register()
class TraditionalBibliographyPlugin:
def get_entries(self, *bib_files):
entries = []
for bibfname in bib_files:
bib_cache = bibcache.BibCache("trad", bibfname)
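            # try to reuse a previously cached parse of this bib file; fall back to re-parsing it below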
try:
cached_entries = bib_cache.get()
entries.extend(cached_entries)
continue
except:
pass
try:
bibf = codecs.open(bibfname, 'r', 'UTF-8', 'ignore') # 'ignore' to be safe
except IOError:
print("Cannot open bibliography file %s !" % (bibfname,))
sublime.status_message("Cannot open bibliography file %s !" % (bibfname,))
continue
else:
bib_data = bibf.readlines()
bib_entries = []
entry = {}
for line in bib_data:
line = line.strip()
# Let's get rid of irrelevant lines first
if line == "" or line[0] == '%':
continue
if line.lower()[0:8] == "@comment":
continue
if line.lower()[0:7] == "@string":
continue
if line.lower()[0:9] == "@preamble":
continue
if line[0] == "@":
if 'keyword' in entry:
bib_entries.append(entry)
entry = {}
kp_match = kp.search(line)
if kp_match:
entry['keyword'] = kp_match.group(1)
else:
print(u"Cannot process this @ line: " + line)
print(
u"Previous keyword (if any): " +
entry.get('keyword', '')
)
continue
# Now test for title, author, etc.
# Note: we capture only the first line, but that's OK for our purposes
multip_match = multip.search(line)
if multip_match:
key = multip_match.group(1).lower()
value = codecs.decode(multip_match.group(2), 'latex')
if key == 'title':
value = value.replace(
'{\\textquoteright}', ''
).replace('{', '').replace('}', '')
entry[key] = value
continue
# at the end, we have a single record
if 'keyword' in entry:
bib_entries.append(entry)
print ('Loaded %d bibitems' % (len(bib_entries)))
try:
bib_cache.set(bib_entries)
fmt_entries = bib_cache.get()
entries.extend(fmt_entries)
except:
traceback.print_exc()
print("Using bibliography without caching it")
entries.extend(bib_entries)
finally:
try:
bibf.close()
except:
pass
print("Found %d total bib entries" % (len(entries),))
return entries | en | 0.767156 | # new and improved regex # we must have "title" then "=", possibly with spaces # then either {, maybe repeated twice, or " # then spaces and finally the title # # We capture till the end of the line as maybe entry is broken over several lines # # and in the end we MAY but need not have }'s and "s # tp = re.compile(r'\btitle\s*=\s*(?:\{+|")\s*(.+)', re.IGNORECASE) # note no comma! # # Tentatively do the same for author # # Note: match ending } or " (surely safe for author names!) # ap = re.compile(r'\bauthor\s*=\s*(?:\{|")\s*(.+)(?:\}|"),?', re.IGNORECASE) # # Editors # ep = re.compile(r'\beditor\s*=\s*(?:\{|")\s*(.+)(?:\}|"),?', re.IGNORECASE) # # kp2 = re.compile(r'([^\t]+)\t*') # # and year... # # Note: year can be provided without quotes or braces (yes, I know...) # yp = re.compile(r'\byear\s*=\s*(?:\{+|"|\b)\s*(\d+)[\}"]?,?', re.IGNORECASE) # This may speed things up # So far this captures: the tag, and the THREE possible groups # LaTeX -> Unicode decoder # 'ignore' to be safe # Let's get rid of irrelevant lines first # Now test for title, author, etc. # Note: we capture only the first line, but that's OK for our purposes # at the end, we have a single record | 2.584172 | 3 |
python/cuml/test/test_kmeans.py | kkraus14/cuml | 2 | 6631442 | <gh_stars>1-10
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
import cuml
from sklearn import cluster
from sklearn.preprocessing import StandardScaler
from cuml.test.utils import fit_predict, get_pattern, clusters_equal
dataset_names = ['noisy_moons', 'varied', 'aniso', 'blobs', 'noisy_circles']
@pytest.mark.parametrize('name', dataset_names)
def test_kmeans_sklearn_comparison(name):
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
pat = get_pattern(name, 5000)
params = default_base.copy()
params.update(pat[1])
kmeans = cluster.KMeans(n_clusters=params['n_clusters'])
cuml_kmeans = cuml.KMeans(n_clusters=params['n_clusters'])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
clustering_algorithms = (
('sk_Kmeans', kmeans),
('cuml_Kmeans', cuml_kmeans),
)
sk_y_pred, _ = fit_predict(clustering_algorithms[0][1],
clustering_algorithms[0][0], X)
cu_y_pred, _ = fit_predict(clustering_algorithms[1][1],
clustering_algorithms[1][0], X)
# Noisy circles clusters are rotated in the results,
# since we are comparing 2 we just need to compare that both clusters
# have approximately the same number of points.
if name == 'noisy_circles':
assert (np.sum(sk_y_pred) - np.sum(cu_y_pred))/len(sk_y_pred) < 1e-10
else:
clusters_equal(sk_y_pred, cu_y_pred, params['n_clusters'])
| # Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
import cuml
from sklearn import cluster
from sklearn.preprocessing import StandardScaler
from cuml.test.utils import fit_predict, get_pattern, clusters_equal
dataset_names = ['noisy_moons', 'varied', 'aniso', 'blobs', 'noisy_circles']
@pytest.mark.parametrize('name', dataset_names)
def test_kmeans_sklearn_comparison(name):
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
pat = get_pattern(name, 5000)
params = default_base.copy()
params.update(pat[1])
kmeans = cluster.KMeans(n_clusters=params['n_clusters'])
cuml_kmeans = cuml.KMeans(n_clusters=params['n_clusters'])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
clustering_algorithms = (
('sk_Kmeans', kmeans),
('cuml_Kmeans', cuml_kmeans),
)
sk_y_pred, _ = fit_predict(clustering_algorithms[0][1],
clustering_algorithms[0][0], X)
cu_y_pred, _ = fit_predict(clustering_algorithms[1][1],
clustering_algorithms[1][0], X)
# Noisy circles clusters are rotated in the results,
# since we are comparing 2 we just need to compare that both clusters
# have approximately the same number of points.
if name == 'noisy_circles':
assert (np.sum(sk_y_pred) - np.sum(cu_y_pred))/len(sk_y_pred) < 1e-10
else:
clusters_equal(sk_y_pred, cu_y_pred, params['n_clusters']) | en | 0.900382 | # Copyright (c) 2018, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Noisy circles clusters are rotated in the results, # since we are comparing 2 we just need to compare that both clusters # have approximately the same number of points. | 2.0768 | 2 |
ddi_search_engine/Bio/expressions/swissprot/sprot40.py | dbmi-pitt/DIKB-Evidence-analytics | 3 | 6631443 | <gh_stars>1-10
import warnings
warnings.warn("Bio.expressions was deprecated, as it does not work with recent versions of mxTextTools. If you want to continue to use this module, please get in contact with the Biopython developers at <EMAIL> to avoid permanent removal of this module from Biopython", DeprecationWarning)
import Martel
from Martel import Time
import sprot38
# HAS2_CHICK has a DT line like this
# DT 30-MAY-2000 (REL. 39, Created)
# ^^^ Note the upper-case "REL" instead of "Rel" !
DT_created_exp = (Martel.Str("DT ") +
Time.make_expression("%(DD)-%(Jan)-%(YYYY)") + \
Martel.Re(" \(R[Ee][Ll]. (?P<release>\d\d), Created\)\R"))
OX_start = (Martel.Str("OX NCBI_TaxID=") +
Martel.Rep1(Martel.Digits("ncbi_taxid") +
Martel.Re("[,; ]+")) +
Martel.AnyEol())
OX_cont = (Martel.Str("OX ") +
Martel.Rep1(Martel.Digits("ncbi_taxid") +
Martel.Re("[,; ]+")) +
Martel.AnyEol())
OX_exp = OX_start + Martel.Rep(OX_cont)
# 0 or 1
# in 40 the line changed to look like this
# RX MEDLINE=93305731; PubMed=7916637;
# RX PubMed=11001938;
bib = (Martel.Word("bibliographic_database_name") + Martel.Str("=") +
Martel.ToSep("bibliographic_identifier", ";")
)
RX_exp = (Martel.Str("RX ") + bib +
Martel.Opt(Martel.Str(" ") + bib) +
Martel.AnyEol())
# Here's the neq SQ line format -- uses a CRC64
# SQ SEQUENCE 889 AA; 100368 MW; ABD7E3CD53961B78 CRC64;
SQ_exp = Martel.Re("SQ SEQUENCE +(?P<sequence_length>\d+) AA;" \
" +(?P<molecular_weight>\d+) MW;" \
" +(?P<crc?type=64>\w+) CRC64;\R")
replacements = [
("DT_created", DT_created_exp),
("OX_block", OX_exp),
("RX", RX_exp),
("SQ", SQ_exp),
]
record = Martel.replace_groups(sprot38.record, replacements)
format_expression = Martel.replace_groups(
sprot38.format_expression, replacements)
format = Martel.replace_groups(sprot38.format, replacements)
if __name__ == "__main__":
parser = format.make_parser()
filename = "/home/dalke/ftps/databases/swiss-prot/release_compressed/sprot40.dat"
## import os
## infile = os.popen("zcat " + filename)
infile = open(filename)
infile.seek(107976062)
parser.parseFile(infile)
| import warnings
warnings.warn("Bio.expressions was deprecated, as it does not work with recent versions of mxTextTools. If you want to continue to use this module, please get in contact with the Biopython developers at <EMAIL> to avoid permanent removal of this module from Biopython", DeprecationWarning)
import Martel
from Martel import Time
import sprot38
# HAS2_CHICK has a DT line like this
# DT 30-MAY-2000 (REL. 39, Created)
# ^^^ Note the upper-case "REL" instead of "Rel" !
DT_created_exp = (Martel.Str("DT ") +
Time.make_expression("%(DD)-%(Jan)-%(YYYY)") + \
Martel.Re(" \(R[Ee][Ll]. (?P<release>\d\d), Created\)\R"))
OX_start = (Martel.Str("OX NCBI_TaxID=") +
Martel.Rep1(Martel.Digits("ncbi_taxid") +
Martel.Re("[,; ]+")) +
Martel.AnyEol())
OX_cont = (Martel.Str("OX ") +
Martel.Rep1(Martel.Digits("ncbi_taxid") +
Martel.Re("[,; ]+")) +
Martel.AnyEol())
OX_exp = OX_start + Martel.Rep(OX_cont)
# 0 or 1
# in 40 the line changed to look like this
# RX MEDLINE=93305731; PubMed=7916637;
# RX PubMed=11001938;
bib = (Martel.Word("bibliographic_database_name") + Martel.Str("=") +
Martel.ToSep("bibliographic_identifier", ";")
)
RX_exp = (Martel.Str("RX ") + bib +
Martel.Opt(Martel.Str(" ") + bib) +
Martel.AnyEol())
# Here's the neq SQ line format -- uses a CRC64
# SQ SEQUENCE 889 AA; 100368 MW; ABD7E3CD53961B78 CRC64;
SQ_exp = Martel.Re("SQ SEQUENCE +(?P<sequence_length>\d+) AA;" \
" +(?P<molecular_weight>\d+) MW;" \
" +(?P<crc?type=64>\w+) CRC64;\R")
replacements = [
("DT_created", DT_created_exp),
("OX_block", OX_exp),
("RX", RX_exp),
("SQ", SQ_exp),
]
record = Martel.replace_groups(sprot38.record, replacements)
format_expression = Martel.replace_groups(
sprot38.format_expression, replacements)
format = Martel.replace_groups(sprot38.format, replacements)
if __name__ == "__main__":
parser = format.make_parser()
filename = "/home/dalke/ftps/databases/swiss-prot/release_compressed/sprot40.dat"
## import os
## infile = os.popen("zcat " + filename)
infile = open(filename)
infile.seek(107976062)
parser.parseFile(infile) | en | 0.641903 | # HAS2_CHICK has a DT line like this # DT 30-MAY-2000 (REL. 39, Created) # ^^^ Note the upper-case "REL" instead of "Rel" ! # 0 or 1 # in 40 the line changed to look like this # RX MEDLINE=93305731; PubMed=7916637; # RX PubMed=11001938; # Here's the neq SQ line format -- uses a CRC64 # SQ SEQUENCE 889 AA; 100368 MW; ABD7E3CD53961B78 CRC64; ## import os ## infile = os.popen("zcat " + filename) | 2.080144 | 2 |
main.py | studewan/01-Interactive-Fiction | 0 | 6631444 | {
"uuid": "A095F919-661C-4B7F-9467-4368B345AFD9",
"name": "The Mystery",
"creator": "Twine",
"creatorVersion": "2.3.14",
"schemaName": "Harlowe 3 to JSON",
"schemaVersion": "0.0.6",
"createdAtMs": 1631371628710,
"passages": [
{
"name": "Starting ",
"tags": "",
"id": "1",
"text": "You are about to go through an experience which would change your mindset forever. But first up... a little about the game! In this migrant trail, your taking up the role of an escaping migrant. You will be faced with a few choices which would lead to how the game carries on. \n\n[[Start -> Start your migrant journey!]]",
"links": [
{
"linkText": "Start",
"passageName": "Start your migrant journey!",
"original": "[[Start -> Start your migrant journey!]]"
}
],
"hooks": [],
"cleanText": "You are about to go through an experience which would change your mindset forever. But first up... a little about the game! In this migrant trail, your taking up the role of an escaping migrant. You will be faced with a few choices which would lead to how the game carries on."
},
{
"name": " Start your migrant journey!",
"tags": "",
"id": "2",
"text": "Your city is under attack by a terrorist group. Many people you know have been murdered and you are trying to escape whilst keeping a low profile. You need to leave immediately. What is your profession( each profession leads to a different story)?\n\n[[You are a medical student who works at a make shift clinic -> Medical Student]]\n[[You are currently in between jobs which makes you a little skilled -> Undecided]]",
"links": [
{
"linkText": "You are a medical student who works at a make shift clinic",
"passageName": "Medical Student",
"original": "[[You are a medical student who works at a make shift clinic -> Medical Student]]"
},
{
"linkText": "You are currently in between jobs which makes you a little skilled",
"passageName": "Undecided",
"original": "[[You are currently in between jobs which makes you a little skilled -> Undecided]]"
}
],
"hooks": [],
"cleanText": "Your city is under attack by a terrorist group. Many people you know have been murdered and you are trying to escape whilst keeping a low profile. You need to leave immediately. What is your profession( each profession leads to a different story)?"
},
{
"name": " <NAME>",
"tags": "",
"id": "3",
"text": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed...\n\n[[Pay a smuggler to help you escape through the sea. -> Sea escape]]\n[[You decide to escape through the land -> Land escape]]\n[[Help out the people around you to escape (Since you are a medical student you are given a bonus option!!) -> Saviour]]",
"links": [
{
"linkText": "Pay a smuggler to help you escape through the sea.",
"passageName": "Sea escape",
"original": "[[Pay a smuggler to help you escape through the sea. -> Sea escape]]"
},
{
"linkText": "You decide to escape through the land",
"passageName": "Land escape",
"original": "[[You decide to escape through the land -> Land escape]]"
},
{
"linkText": "Help out the people around you to escape (Since you are a medical student you are given a bonus option!!)",
"passageName": "Saviour",
"original": "[[Help out the people around you to escape (Since you are a medical student you are given a bonus option!!) -> Saviour]]"
}
],
"hooks": [],
"cleanText": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed..."
},
{
"name": " Undecided",
"tags": "",
"id": "4",
"text": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed...\n\n[[Pay a smuggler to help you escape through the sea. -> Sea escape]]\n[[You decide to escape through the land -> Land escape]]\n[[Look for useful items around you to ensure your journey is good (Since your profession is makeshift you are given a bonus option!!) -> Scavenger]]",
"links": [
{
"linkText": "Pay a smuggler to help you escape through the sea.",
"passageName": "Sea escape",
"original": "[[Pay a smuggler to help you escape through the sea. -> Sea escape]]"
},
{
"linkText": "You decide to escape through the land",
"passageName": "Land escape",
"original": "[[You decide to escape through the land -> Land escape]]"
},
{
"linkText": "Look for useful items around you to ensure your journey is good (Since your profession is makeshift you are given a bonus option!!)",
"passageName": "Scavenger",
"original": "[[Look for useful items around you to ensure your journey is good (Since your profession is makeshift you are given a bonus option!!) -> Scavenger]]"
}
],
"hooks": [],
"cleanText": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed..."
},
{
"name": " Sea escape",
"tags": "",
"id": "5",
"text": "You have decided to smuggle yourself out of your city. You make your way towards the sea and reach the boat to find out that it looks mostly filled to the brim. After a long wait you finally get to have a seat on the ground as there was not enough seats in the seating area. You make your way through the ocean while facing many challenges. There are storms on the way which makes everyone on the boat really cold. Since the boat had to leave in a hurry there was not enough food available for everyone to eat and the whole boat stunk because of how packed everyone was. \n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught...\n\n[[You pay the smuggler even more money to make you fake documents -> Illegal]]\n[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]",
"links": [
{
"linkText": "You pay the smuggler even more money to make you fake documents",
"passageName": "Illegal",
"original": "[[You pay the smuggler even more money to make you fake documents -> Illegal]]"
},
{
"linkText": "You make your way to the nearest embassy of your country and seek shelter",
"passageName": "Legal",
"original": "[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]"
}
],
"hooks": [],
"cleanText": "You have decided to smuggle yourself out of your city. You make your way towards the sea and reach the boat to find out that it looks mostly filled to the brim. After a long wait you finally get to have a seat on the ground as there was not enough seats in the seating area. You make your way through the ocean while facing many challenges. There are storms on the way which makes everyone on the boat really cold. Since the boat had to leave in a hurry there was not enough food available for everyone to eat and the whole boat stunk because of how packed everyone was. \n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught..."
},
{
"name": " <NAME>",
"tags": "",
"id": "6",
"text": "Travel through land has always been hard for everyone. There is always lack of food, water, shelter, and everyone always ends up getting tired. You were lucky enough to have been able to hitch hike your way through most of the journey. But unfortunately you are really exhausted and have no clue where to go.\n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught...\n\n[[You came across a smuggler who would make you fake documents -> Illegal]]\n[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]",
"links": [
{
"linkText": "You came across a smuggler who would make you fake documents",
"passageName": "Illegal",
"original": "[[You came across a smuggler who would make you fake documents -> Illegal]]"
},
{
"linkText": "You make your way to the nearest embassy of your country and seek shelter",
"passageName": "Legal",
"original": "[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]"
}
],
"hooks": [],
"cleanText": "Travel through land has always been hard for everyone. There is always lack of food, water, shelter, and everyone always ends up getting tired. You were lucky enough to have been able to hitch hike your way through most of the journey. But unfortunately you are really exhausted and have no clue where to go.\n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught..."
},
{
"name": " Saviour",
"tags": "",
"id": "7",
"text": "You chose the option to make sure the people around you are safe. You were successfully able to make your way to the make shift clinic to get a few supplies to help the injured people around you. Whislt helping everyone out, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family. \n\n[[Winner!!]]",
"links": [
{
"linkText": "Winner!!",
"passageName": "Winner!!",
"original": "[[Winner!!]]"
}
],
"hooks": [],
"cleanText": "You chose the option to make sure the people around you are safe. You were successfully able to make your way to the make shift clinic to get a few supplies to help the injured people around you. Whislt helping everyone out, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family."
},
{
"name": " Scavenger",
"tags": "",
"id": "8",
"text": "You chose the option to make sure that you and the people around you are safe and stocked with a few necessities. You were successfully able to make your way through the city to get a few supplies to help the injured people around you. Whislt helping everyone out and gathering some items, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family. \n\n[[Winner!!]]",
"links": [
{
"linkText": "Winner!!",
"passageName": "Winner!!",
"original": "[[Winner!!]]"
}
],
"hooks": [],
"cleanText": "You chose the option to make sure that you and the people around you are safe and stocked with a few necessities. You were successfully able to make your way through the city to get a few supplies to help the injured people around you. Whislt helping everyone out and gathering some items, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family."
},
{
"name": " Illegal",
"tags": "",
"id": "9",
"text": "Unfortunately, the choice that you have chosen has led the authorities to be very suspicious of you and ask around about you. They found out that you are in USA illegally which is why you are being deported back. \n\nWould you like to play another round?\n\n[[Starting ]]",
"links": [
{
"linkText": "Starting",
"passageName": "Starting",
"original": "[[Starting ]]"
}
],
"hooks": [],
"cleanText": "Unfortunately, the choice that you have chosen has led the authorities to be very suspicious of you and ask around about you. They found out that you are in USA illegally which is why you are being deported back. \n\nWould you like to play another round?"
},
{
"name": " Legal",
"tags": "",
"id": "10",
"text": "You chose the correct options which has led you to safe gound!!\n\n[[Winner!!]]",
"links": [
{
"linkText": "Winner!!",
"passageName": "Winner!!",
"original": "[[Winner!!]]"
}
],
"hooks": [],
"cleanText": "You chose the correct options which has led you to safe gound!!"
},
{
"name": "Winner!!",
"tags": "",
"id": "11",
"text": "Congradulations!! You were sucessfully able to make it and you saved many people on the way!! \n\nWould you like to play again?\n\n[[Starting ]]",
"links": [
{
"linkText": "Starting",
"passageName": "Starting",
"original": "[[Starting ]]"
}
],
"hooks": [],
"cleanText": "Congradulations!! You were sucessfully able to make it and you saved many people on the way!! \n\nWould you like to play again?"
}
]
} | {
"uuid": "A095F919-661C-4B7F-9467-4368B345AFD9",
"name": "The Mystery",
"creator": "Twine",
"creatorVersion": "2.3.14",
"schemaName": "Harlowe 3 to JSON",
"schemaVersion": "0.0.6",
"createdAtMs": 1631371628710,
"passages": [
{
"name": "Starting ",
"tags": "",
"id": "1",
"text": "You are about to go through an experience which would change your mindset forever. But first up... a little about the game! In this migrant trail, your taking up the role of an escaping migrant. You will be faced with a few choices which would lead to how the game carries on. \n\n[[Start -> Start your migrant journey!]]",
"links": [
{
"linkText": "Start",
"passageName": "Start your migrant journey!",
"original": "[[Start -> Start your migrant journey!]]"
}
],
"hooks": [],
"cleanText": "You are about to go through an experience which would change your mindset forever. But first up... a little about the game! In this migrant trail, your taking up the role of an escaping migrant. You will be faced with a few choices which would lead to how the game carries on."
},
{
"name": " Start your migrant journey!",
"tags": "",
"id": "2",
"text": "Your city is under attack by a terrorist group. Many people you know have been murdered and you are trying to escape whilst keeping a low profile. You need to leave immediately. What is your profession( each profession leads to a different story)?\n\n[[You are a medical student who works at a make shift clinic -> Medical Student]]\n[[You are currently in between jobs which makes you a little skilled -> Undecided]]",
"links": [
{
"linkText": "You are a medical student who works at a make shift clinic",
"passageName": "Medical Student",
"original": "[[You are a medical student who works at a make shift clinic -> Medical Student]]"
},
{
"linkText": "You are currently in between jobs which makes you a little skilled",
"passageName": "Undecided",
"original": "[[You are currently in between jobs which makes you a little skilled -> Undecided]]"
}
],
"hooks": [],
"cleanText": "Your city is under attack by a terrorist group. Many people you know have been murdered and you are trying to escape whilst keeping a low profile. You need to leave immediately. What is your profession( each profession leads to a different story)?"
},
{
"name": " <NAME>",
"tags": "",
"id": "3",
"text": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed...\n\n[[Pay a smuggler to help you escape through the sea. -> Sea escape]]\n[[You decide to escape through the land -> Land escape]]\n[[Help out the people around you to escape (Since you are a medical student you are given a bonus option!!) -> Saviour]]",
"links": [
{
"linkText": "Pay a smuggler to help you escape through the sea.",
"passageName": "Sea escape",
"original": "[[Pay a smuggler to help you escape through the sea. -> Sea escape]]"
},
{
"linkText": "You decide to escape through the land",
"passageName": "Land escape",
"original": "[[You decide to escape through the land -> Land escape]]"
},
{
"linkText": "Help out the people around you to escape (Since you are a medical student you are given a bonus option!!)",
"passageName": "Saviour",
"original": "[[Help out the people around you to escape (Since you are a medical student you are given a bonus option!!) -> Saviour]]"
}
],
"hooks": [],
"cleanText": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed..."
},
{
"name": " Undecided",
"tags": "",
"id": "4",
"text": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed...\n\n[[Pay a smuggler to help you escape through the sea. -> Sea escape]]\n[[You decide to escape through the land -> Land escape]]\n[[Look for useful items around you to ensure your journey is good (Since your profession is makeshift you are given a bonus option!!) -> Scavenger]]",
"links": [
{
"linkText": "Pay a smuggler to help you escape through the sea.",
"passageName": "Sea escape",
"original": "[[Pay a smuggler to help you escape through the sea. -> Sea escape]]"
},
{
"linkText": "You decide to escape through the land",
"passageName": "Land escape",
"original": "[[You decide to escape through the land -> Land escape]]"
},
{
"linkText": "Look for useful items around you to ensure your journey is good (Since your profession is makeshift you are given a bonus option!!)",
"passageName": "Scavenger",
"original": "[[Look for useful items around you to ensure your journey is good (Since your profession is makeshift you are given a bonus option!!) -> Scavenger]]"
}
],
"hooks": [],
"cleanText": "As the minutes pass the situation gets more and more intense. There are bombs dropping everywhere and the terrorists are stealing around from everyone's house. You need an escape route to find your way out without getting killed..."
},
{
"name": " Sea escape",
"tags": "",
"id": "5",
"text": "You have decided to smuggle yourself out of your city. You make your way towards the sea and reach the boat to find out that it looks mostly filled to the brim. After a long wait you finally get to have a seat on the ground as there was not enough seats in the seating area. You make your way through the ocean while facing many challenges. There are storms on the way which makes everyone on the boat really cold. Since the boat had to leave in a hurry there was not enough food available for everyone to eat and the whole boat stunk because of how packed everyone was. \n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught...\n\n[[You pay the smuggler even more money to make you fake documents -> Illegal]]\n[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]",
"links": [
{
"linkText": "You pay the smuggler even more money to make you fake documents",
"passageName": "Illegal",
"original": "[[You pay the smuggler even more money to make you fake documents -> Illegal]]"
},
{
"linkText": "You make your way to the nearest embassy of your country and seek shelter",
"passageName": "Legal",
"original": "[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]"
}
],
"hooks": [],
"cleanText": "You have decided to smuggle yourself out of your city. You make your way towards the sea and reach the boat to find out that it looks mostly filled to the brim. After a long wait you finally get to have a seat on the ground as there was not enough seats in the seating area. You make your way through the ocean while facing many challenges. There are storms on the way which makes everyone on the boat really cold. Since the boat had to leave in a hurry there was not enough food available for everyone to eat and the whole boat stunk because of how packed everyone was. \n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught..."
},
{
"name": " <NAME>",
"tags": "",
"id": "6",
"text": "Travel through land has always been hard for everyone. There is always lack of food, water, shelter, and everyone always ends up getting tired. You were lucky enough to have been able to hitch hike your way through most of the journey. But unfortunately you are really exhausted and have no clue where to go.\n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught...\n\n[[You came across a smuggler who would make you fake documents -> Illegal]]\n[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]",
"links": [
{
"linkText": "You came across a smuggler who would make you fake documents",
"passageName": "Illegal",
"original": "[[You came across a smuggler who would make you fake documents -> Illegal]]"
},
{
"linkText": "You make your way to the nearest embassy of your country and seek shelter",
"passageName": "Legal",
"original": "[[You make your way to the nearest embassy of your country and seek shelter -> Legal]]"
}
],
"hooks": [],
"cleanText": "Travel through land has always been hard for everyone. There is always lack of food, water, shelter, and everyone always ends up getting tired. You were lucky enough to have been able to hitch hike your way through most of the journey. But unfortunately you are really exhausted and have no clue where to go.\n\nFinally after what feels like forever you were able to make it back to the shores of USA. Since you are not documented you have the following options to make sure you do not get caught..."
},
{
"name": " Saviour",
"tags": "",
"id": "7",
"text": "You chose the option to make sure the people around you are safe. You were successfully able to make your way to the make shift clinic to get a few supplies to help the injured people around you. Whislt helping everyone out, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family. \n\n[[Winner!!]]",
"links": [
{
"linkText": "Winner!!",
"passageName": "Winner!!",
"original": "[[Winner!!]]"
}
],
"hooks": [],
"cleanText": "You chose the option to make sure the people around you are safe. You were successfully able to make your way to the make shift clinic to get a few supplies to help the injured people around you. Whislt helping everyone out, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family."
},
{
"name": " Scavenger",
"tags": "",
"id": "8",
"text": "You chose the option to make sure that you and the people around you are safe and stocked with a few necessities. You were successfully able to make your way through the city to get a few supplies to help the injured people around you. Whislt helping everyone out and gathering some items, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family. \n\n[[Winner!!]]",
"links": [
{
"linkText": "Winner!!",
"passageName": "Winner!!",
"original": "[[Winner!!]]"
}
],
"hooks": [],
"cleanText": "You chose the option to make sure that you and the people around you are safe and stocked with a few necessities. You were successfully able to make your way through the city to get a few supplies to help the injured people around you. Whislt helping everyone out and gathering some items, you were able to save the son of a wealthy man who was making his way to the shores of USA to escape. He offers to help you out and takes you along with his family."
},
{
"name": " Illegal",
"tags": "",
"id": "9",
"text": "Unfortunately, the choice that you have chosen has led the authorities to be very suspicious of you and ask around about you. They found out that you are in USA illegally which is why you are being deported back. \n\nWould you like to play another round?\n\n[[Starting ]]",
"links": [
{
"linkText": "Starting",
"passageName": "Starting",
"original": "[[Starting ]]"
}
],
"hooks": [],
"cleanText": "Unfortunately, the choice that you have chosen has led the authorities to be very suspicious of you and ask around about you. They found out that you are in USA illegally which is why you are being deported back. \n\nWould you like to play another round?"
},
{
"name": " Legal",
"tags": "",
"id": "10",
"text": "You chose the correct options which has led you to safe gound!!\n\n[[Winner!!]]",
"links": [
{
"linkText": "Winner!!",
"passageName": "Winner!!",
"original": "[[Winner!!]]"
}
],
"hooks": [],
"cleanText": "You chose the correct options which has led you to safe gound!!"
},
{
"name": "Winner!!",
"tags": "",
"id": "11",
"text": "Congradulations!! You were sucessfully able to make it and you saved many people on the way!! \n\nWould you like to play again?\n\n[[Starting ]]",
"links": [
{
"linkText": "Starting",
"passageName": "Starting",
"original": "[[Starting ]]"
}
],
"hooks": [],
"cleanText": "Congradulations!! You were sucessfully able to make it and you saved many people on the way!! \n\nWould you like to play again?"
}
]
} | none | 1 | 1.803473 | 2 |
|
nnunet/dataset_conversion/Task_253_make_splits_pickle.py | hasukmin12/nnUNet_MDD_UNet_with_Semi_Supervised | 3 | 6631445 | <filename>nnunet/dataset_conversion/Task_253_make_splits_pickle.py<gh_stars>1-10
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
from sklearn.model_selection import KFold
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
import numpy as np
if __name__ == "__main__":
"""
This is the Bladder dataset from <NAME>
"""
base = "/data5/sukmin/_has_Task252_Ureter"
task_id = 254
task_name = "Ureter"
foldername = "Task%03.0d_%s" % (task_id, task_name)
nnUNet_raw_data = '/data5/sukmin/nnUNet_raw_data_base/nnUNet_raw_data'
out_base = join(nnUNet_raw_data, foldername)
# out_base = join(base, foldername)
imagestr = join(out_base, "imagesTr")
imagests = join(out_base, "imagesTs")
labelstr = join(out_base, "labelsTr")
labelsts = join(out_base, "labelsTs")
maybe_mkdir_p(imagestr)
maybe_mkdir_p(imagests)
maybe_mkdir_p(labelstr)
maybe_mkdir_p(labelsts)
train_patient_names = []
test_patient_names = []
all_cases = subfolders(base, join=False)
train_patients = all_cases[:182] + all_cases[251:]
test_patients = all_cases[182:251]
# train_patients = all_cases[:210] + all_cases[300:540] + all_cases[600:]
# test_patients = all_cases[210:300] + all_cases[540:600]
for p in train_patients:
curr = join(base, p)
label_file = join(curr, "segmentation.nii.gz")
image_file = join(curr, "imaging.nii.gz")
        if os.path.isfile(label_file):
shutil.copy(label_file, join(labelstr, p + ".nii.gz"))
# shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz"))
train_patient_names.append(p)
for p in test_patients:
curr = join(base, p)
image_file = join(curr, "imaging.nii.gz")
# shutil.copy(image_file, join(imagests, p + "_0000.nii.gz"))
test_patient_names.append(p)
    # create the folders now so they are ready for test inference later
for p in test_patients:
curr = join(base, p)
label_file = join(curr, "segmentation.nii.gz")
# shutil.copy(label_file, join(labelsts, p + ".nii.gz"))
json_dict = {}
json_dict['name'] = "Ureter"
json_dict['description'] = "Ureter segmentation"
json_dict['tensorImageSize'] = "4D"
json_dict['reference'] = "Ureter data for nnunet"
json_dict['licence'] = ""
json_dict['release'] = "0.0"
json_dict['modality'] = {
"0": "CT",
}
json_dict['labels'] = {
"0": "background",
"1": "Ureter"
}
json_dict['numTraining'] = len(train_patient_names)
json_dict['numTest'] = len(test_patient_names)
json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
train_patient_names]
json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
# json_dict['test'] = [{'image': "./imagesTs/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTs/%s.nii.gz" % i.split("/")[-1]} for i in
# test_patient_names]
# save_json(json_dict, os.path.join(out_base, "dataset.json"))
# create a dummy split (patients need to be separated)
splits = []
patient_label = all_cases[:182] + all_cases[251:361]
patient_no_label = all_cases[361:]
patients = np.unique([i for i in patient_label])
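    # 5-fold split over the labelled cases; the unlabelled cases are appended to every training fold (semi-supervised setup)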
    kf = KFold(n_splits=5, shuffle=True, random_state=12345)
for tr, val in kf.split(patient_label):
splits.append(OrderedDict())
tr_patients = patients[tr]
splits[-1]['train'] = [i for i in tr_patients] + [i for i in patient_no_label]
val_patients = patients[val]
splits[-1]['val'] = [i for i in val_patients]
save_pickle(splits, "/data5/sukmin/nnunet_process_out/Task257_Ureter/splits_final.pkl") | <filename>nnunet/dataset_conversion/Task_253_make_splits_pickle.py<gh_stars>1-10
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
from sklearn.model_selection import KFold
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
import numpy as np
if __name__ == "__main__":
"""
This is the Bladder dataset from <NAME>
"""
base = "/data5/sukmin/_has_Task252_Ureter"
task_id = 254
task_name = "Ureter"
foldername = "Task%03.0d_%s" % (task_id, task_name)
nnUNet_raw_data = '/data5/sukmin/nnUNet_raw_data_base/nnUNet_raw_data'
out_base = join(nnUNet_raw_data, foldername)
# out_base = join(base, foldername)
imagestr = join(out_base, "imagesTr")
imagests = join(out_base, "imagesTs")
labelstr = join(out_base, "labelsTr")
labelsts = join(out_base, "labelsTs")
maybe_mkdir_p(imagestr)
maybe_mkdir_p(imagests)
maybe_mkdir_p(labelstr)
maybe_mkdir_p(labelsts)
train_patient_names = []
test_patient_names = []
all_cases = subfolders(base, join=False)
train_patients = all_cases[:182] + all_cases[251:]
test_patients = all_cases[182:251]
# train_patients = all_cases[:210] + all_cases[300:540] + all_cases[600:]
# test_patients = all_cases[210:300] + all_cases[540:600]
for p in train_patients:
curr = join(base, p)
label_file = join(curr, "segmentation.nii.gz")
image_file = join(curr, "imaging.nii.gz")
        if os.path.isfile(label_file):
shutil.copy(label_file, join(labelstr, p + ".nii.gz"))
# shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz"))
train_patient_names.append(p)
for p in test_patients:
curr = join(base, p)
image_file = join(curr, "imaging.nii.gz")
# shutil.copy(image_file, join(imagests, p + "_0000.nii.gz"))
test_patient_names.append(p)
    # create the folders now so they are ready for test inference later
for p in test_patients:
curr = join(base, p)
label_file = join(curr, "segmentation.nii.gz")
# shutil.copy(label_file, join(labelsts, p + ".nii.gz"))
json_dict = {}
json_dict['name'] = "Ureter"
json_dict['description'] = "Ureter segmentation"
json_dict['tensorImageSize'] = "4D"
json_dict['reference'] = "Ureter data for nnunet"
json_dict['licence'] = ""
json_dict['release'] = "0.0"
json_dict['modality'] = {
"0": "CT",
}
json_dict['labels'] = {
"0": "background",
"1": "Ureter"
}
json_dict['numTraining'] = len(train_patient_names)
json_dict['numTest'] = len(test_patient_names)
json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTr/%s.nii.gz" % i.split("/")[-1]} for i in
train_patient_names]
json_dict['test'] = ["./imagesTs/%s.nii.gz" % i.split("/")[-1] for i in test_patient_names]
# json_dict['test'] = [{'image': "./imagesTs/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTs/%s.nii.gz" % i.split("/")[-1]} for i in
# test_patient_names]
# save_json(json_dict, os.path.join(out_base, "dataset.json"))
# create a dummy split (patients need to be separated)
splits = []
patient_label = all_cases[:182] + all_cases[251:361]
patient_no_label = all_cases[361:]
patients = np.unique([i for i in patient_label])
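    # 5-fold split over the labelled cases; the unlabelled cases are appended to every training fold (semi-supervised setup)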
    kf = KFold(n_splits=5, shuffle=True, random_state=12345)
for tr, val in kf.split(patient_label):
splits.append(OrderedDict())
tr_patients = patients[tr]
splits[-1]['train'] = [i for i in tr_patients] + [i for i in patient_no_label]
val_patients = patients[val]
splits[-1]['val'] = [i for i in val_patients]
save_pickle(splits, "/data5/sukmin/nnunet_process_out/Task257_Ureter/splits_final.pkl") | en | 0.716194 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This is the Bladder dataset from <NAME> # out_base = join(base, foldername) # train_patients = all_cases[:210] + all_cases[300:540] + all_cases[600:] # test_patients = all_cases[210:300] + all_cases[540:600] # shutil.copy(image_file, join(imagestr, p + "_0000.nii.gz")) # shutil.copy(image_file, join(imagests, p + "_0000.nii.gz")) # 나중에 test inference를 위해 폴더는 만들어놓 # shutil.copy(label_file, join(labelsts, p + ".nii.gz")) # json_dict['test'] = [{'image': "./imagesTs/%s.nii.gz" % i.split("/")[-1], "label": "./labelsTs/%s.nii.gz" % i.split("/")[-1]} for i in # test_patient_names] # save_json(json_dict, os.path.join(out_base, "dataset.json")) # create a dummy split (patients need to be separated) | 2.260432 | 2 |
source/beamer/Display.py | mkroehn/gesina | 0 | 6631446 | import numpy as np
import cv2
class Display:
# internal
view_reduction = 0
border_thickness = 5
border_color = (200, 0, 0)
padding = 5
fullscreen = False
def __init__(self, conf):
self.vid_h = conf.vid_h
self.vid_w = conf.vid_w
self.img_h = conf.img_h
self.img_w = conf.img_w
self.insitu_img = np.zeros((conf.vid_h, conf.vid_w, 3), np.uint8)
self.compression = conf.sampling_reduction
self.view_reduction = conf.view_reduction
self.fullscreen = conf.fullscreen
def clear(self):
cv2.rectangle(self.insitu_img,
pt1=(0, 0),
pt2=(self.vid_w, self.vid_h),
color=(0, 0, 0),
thickness=-1)
def add_button(self, cx, cy, r, col):
cv2.circle(self.insitu_img, center=(cx, cy), radius=r, color=col, thickness=-1)
def add_border(self, cx, cy, w, h):
cv2.rectangle(self.insitu_img,
pt1=(cx, cy),
pt2=(int((cx+w)/self.view_reduction) + 2*self.padding, int((cy+h)/self.view_reduction) + 2*self.padding),
color=self.border_color,
thickness=self.border_thickness)
def update_streams(self, depth_img, color_img):
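        # downsample colour and depth by view_reduction and place them side by side inside the output canvas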
reduced_color = color_img[0:color_img.shape[0]:self.view_reduction, 0:color_img.shape[1]:self.view_reduction, :]
reduced_depth = depth_img[0:depth_img.shape[0]:self.view_reduction, 0:depth_img.shape[1]:self.view_reduction, :]
images = np.hstack((reduced_color, reduced_depth))
self.insitu_img[self.padding:images.shape[0]+self.padding, self.padding:images.shape[1]+self.padding, :] = images
def update_info(self, info):
img_info = np.zeros((20, 400, 3))
cv2.putText(img_info, text=info, org=(0, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(200, 0, 0), thickness=1, lineType=cv2.LINE_AA)
self.insitu_img[140:160, 10:410, :] = img_info
def add_static_text(self, txt, xpos, ypos, color, scale):
cv2.putText(self.insitu_img, text=txt, org=(xpos, ypos), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=scale, color=color, thickness=1, lineType=cv2.LINE_AA)
def show(self):
if self.fullscreen:
cv2.namedWindow('RealSense', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('RealSense', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
else:
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', self.insitu_img)
return cv2.waitKey(1)
def color_depth_from_frame(self, depth_frame):
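        # subsample the depth frame by the sampling reduction and colour-map it to an 8-bit image for display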
depth_image = np.asanyarray(depth_frame.get_data())[0:self.img_h:self.compression, 0:self.img_w:self.compression]
return cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_PINK)
def color_depth_from_image(self, depth_image):
return cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_PINK)
def start(self):
self.show()
def stop(self):
cv2.destroyAllWindows() | import numpy as np
import cv2
class Display:
# internal
view_reduction = 0
border_thickness = 5
border_color = (200, 0, 0)
padding = 5
fullscreen = False
def __init__(self, conf):
self.vid_h = conf.vid_h
self.vid_w = conf.vid_w
self.img_h = conf.img_h
self.img_w = conf.img_w
self.insitu_img = np.zeros((conf.vid_h, conf.vid_w, 3), np.uint8)
self.compression = conf.sampling_reduction
self.view_reduction = conf.view_reduction
self.fullscreen = conf.fullscreen
def clear(self):
cv2.rectangle(self.insitu_img,
pt1=(0, 0),
pt2=(self.vid_w, self.vid_h),
color=(0, 0, 0),
thickness=-1)
def add_button(self, cx, cy, r, col):
cv2.circle(self.insitu_img, center=(cx, cy), radius=r, color=col, thickness=-1)
def add_border(self, cx, cy, w, h):
cv2.rectangle(self.insitu_img,
pt1=(cx, cy),
pt2=(int((cx+w)/self.view_reduction) + 2*self.padding, int((cy+h)/self.view_reduction) + 2*self.padding),
color=self.border_color,
thickness=self.border_thickness)
def update_streams(self, depth_img, color_img):
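        # downsample colour and depth by view_reduction and place them side by side inside the output canvas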
reduced_color = color_img[0:color_img.shape[0]:self.view_reduction, 0:color_img.shape[1]:self.view_reduction, :]
reduced_depth = depth_img[0:depth_img.shape[0]:self.view_reduction, 0:depth_img.shape[1]:self.view_reduction, :]
images = np.hstack((reduced_color, reduced_depth))
self.insitu_img[self.padding:images.shape[0]+self.padding, self.padding:images.shape[1]+self.padding, :] = images
def update_info(self, info):
img_info = np.zeros((20, 400, 3))
cv2.putText(img_info, text=info, org=(0, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(200, 0, 0), thickness=1, lineType=cv2.LINE_AA)
self.insitu_img[140:160, 10:410, :] = img_info
def add_static_text(self, txt, xpos, ypos, color, scale):
cv2.putText(self.insitu_img, text=txt, org=(xpos, ypos), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=scale, color=color, thickness=1, lineType=cv2.LINE_AA)
def show(self):
if self.fullscreen:
cv2.namedWindow('RealSense', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('RealSense', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
else:
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', self.insitu_img)
return cv2.waitKey(1)
def color_depth_from_frame(self, depth_frame):
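        # subsample the depth frame by the sampling reduction and colour-map it to an 8-bit image for display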
depth_image = np.asanyarray(depth_frame.get_data())[0:self.img_h:self.compression, 0:self.img_w:self.compression]
return cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_PINK)
def color_depth_from_image(self, depth_image):
return cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_PINK)
def start(self):
self.show()
def stop(self):
cv2.destroyAllWindows() | en | 0.501499 | # internal | 2.631163 | 3 |
detection/core/anchor/anchor_generator.py | pskurochkin/tf-eager-fasterrcnn | 106 | 6631447 | <reponame>pskurochkin/tf-eager-fasterrcnn
import tensorflow as tf
from detection.utils.misc import *
class AnchorGenerator(object):
def __init__(self,
scales=(32, 64, 128, 256, 512),
ratios=(0.5, 1, 2),
feature_strides=(4, 8, 16, 32, 64)):
'''Anchor Generator
Attributes
---
scales: 1D array of anchor sizes in pixels.
ratios: 1D array of anchor ratios of width/height.
feature_strides: Stride of the feature map relative to the image in pixels.
'''
self.scales = scales
self.ratios = ratios
self.feature_strides = feature_strides
def generate_pyramid_anchors(self, img_metas):
'''Generate the multi-level anchors for Region Proposal Network
Args
---
img_metas: [batch_size, 11]
Returns
---
anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates.
valid_flags: [batch_size, num_anchors]
'''
# generate anchors
pad_shape = calc_batch_padded_shape(img_metas)
feature_shapes = [(pad_shape[0] // stride, pad_shape[1] // stride)
for stride in self.feature_strides]
anchors = [
self._generate_level_anchors(level, feature_shape)
for level, feature_shape in enumerate(feature_shapes)
]
anchors = tf.concat(anchors, axis=0)
# generate valid flags
img_shapes = calc_img_shapes(img_metas)
valid_flags = [
self._generate_valid_flags(anchors, img_shapes[i])
for i in range(img_shapes.shape[0])
]
valid_flags = tf.stack(valid_flags, axis=0)
return anchors, valid_flags
def _generate_valid_flags(self, anchors, img_shape):
'''
Args
---
anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates.
img_shape: Tuple. (height, width, channels)
Returns
---
valid_flags: [num_anchors]
'''
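# An anchor is flagged valid (1) only if its center lies within the image height and width; others are zeroed.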
y_center = (anchors[:, 2] + anchors[:, 0]) / 2
x_center = (anchors[:, 3] + anchors[:, 1]) / 2
valid_flags = tf.ones(anchors.shape[0], dtype=tf.int32)
zeros = tf.zeros(anchors.shape[0], dtype=tf.int32)
valid_flags = tf.where(y_center <= img_shape[0], valid_flags, zeros)
valid_flags = tf.where(x_center <= img_shape[1], valid_flags, zeros)
return valid_flags
def _generate_level_anchors(self, level, feature_shape):
'''Generate the anchors given the spatial shape of feature map.
Args
---
feature_shape: (height, width)
Returns
---
numpy.ndarray [anchors_num, (y1, x1, y2, x2)]
'''
scale = self.scales[level]
ratios = self.ratios
feature_stride = self.feature_strides[level]
# Get all combinations of scales and ratios
scales, ratios = tf.meshgrid([float(scale)], ratios)
scales = tf.reshape(scales, [-1])
ratios = tf.reshape(ratios, [-1])
# Enumerate heights and widths from scales and ratios
heights = scales / tf.sqrt(ratios)
widths = scales * tf.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = tf.multiply(tf.range(feature_shape[0]), feature_stride)
shifts_x = tf.multiply(tf.range(feature_shape[1]), feature_stride)
shifts_x, shifts_y = tf.cast(shifts_x, tf.float32), tf.cast(shifts_y, tf.float32)
shifts_x, shifts_y = tf.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = tf.meshgrid(widths, shifts_x)
box_heights, box_centers_y = tf.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = tf.reshape(tf.stack([box_centers_y, box_centers_x], axis=2), (-1, 2))
box_sizes = tf.reshape(tf.stack([box_heights, box_widths], axis=2), (-1, 2))
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = tf.concat([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
return boxes
| import tensorflow as tf
from detection.utils.misc import *
class AnchorGenerator(object):
def __init__(self,
scales=(32, 64, 128, 256, 512),
ratios=(0.5, 1, 2),
feature_strides=(4, 8, 16, 32, 64)):
'''Anchor Generator
Attributes
---
scales: 1D array of anchor sizes in pixels.
ratios: 1D array of anchor ratios of width/height.
feature_strides: Stride of the feature map relative to the image in pixels.
'''
self.scales = scales
self.ratios = ratios
self.feature_strides = feature_strides
def generate_pyramid_anchors(self, img_metas):
'''Generate the multi-level anchors for Region Proposal Network
Args
---
img_metas: [batch_size, 11]
Returns
---
anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates.
valid_flags: [batch_size, num_anchors]
'''
# generate anchors
pad_shape = calc_batch_padded_shape(img_metas)
feature_shapes = [(pad_shape[0] // stride, pad_shape[1] // stride)
for stride in self.feature_strides]
anchors = [
self._generate_level_anchors(level, feature_shape)
for level, feature_shape in enumerate(feature_shapes)
]
anchors = tf.concat(anchors, axis=0)
# generate valid flags
img_shapes = calc_img_shapes(img_metas)
valid_flags = [
self._generate_valid_flags(anchors, img_shapes[i])
for i in range(img_shapes.shape[0])
]
valid_flags = tf.stack(valid_flags, axis=0)
return anchors, valid_flags
def _generate_valid_flags(self, anchors, img_shape):
'''
Args
---
anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates.
img_shape: Tuple. (height, width, channels)
Returns
---
valid_flags: [num_anchors]
'''
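# An anchor is flagged valid (1) only if its center lies within the image height and width; others are zeroed.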
y_center = (anchors[:, 2] + anchors[:, 0]) / 2
x_center = (anchors[:, 3] + anchors[:, 1]) / 2
valid_flags = tf.ones(anchors.shape[0], dtype=tf.int32)
zeros = tf.zeros(anchors.shape[0], dtype=tf.int32)
valid_flags = tf.where(y_center <= img_shape[0], valid_flags, zeros)
valid_flags = tf.where(x_center <= img_shape[1], valid_flags, zeros)
return valid_flags
def _generate_level_anchors(self, level, feature_shape):
'''Generate the anchors given the spatial shape of feature map.
Args
---
feature_shape: (height, width)
Returns
---
numpy.ndarray [anchors_num, (y1, x1, y2, x2)]
'''
scale = self.scales[level]
ratios = self.ratios
feature_stride = self.feature_strides[level]
# Get all combinations of scales and ratios
scales, ratios = tf.meshgrid([float(scale)], ratios)
scales = tf.reshape(scales, [-1])
ratios = tf.reshape(ratios, [-1])
# Enumerate heights and widths from scales and ratios
heights = scales / tf.sqrt(ratios)
widths = scales * tf.sqrt(ratios)
# Enumerate shifts in feature space
shifts_y = tf.multiply(tf.range(feature_shape[0]), feature_stride)
shifts_x = tf.multiply(tf.range(feature_shape[1]), feature_stride)
shifts_x, shifts_y = tf.cast(shifts_x, tf.float32), tf.cast(shifts_y, tf.float32)
shifts_x, shifts_y = tf.meshgrid(shifts_x, shifts_y)
# Enumerate combinations of shifts, widths, and heights
box_widths, box_centers_x = tf.meshgrid(widths, shifts_x)
box_heights, box_centers_y = tf.meshgrid(heights, shifts_y)
# Reshape to get a list of (y, x) and a list of (h, w)
box_centers = tf.reshape(tf.stack([box_centers_y, box_centers_x], axis=2), (-1, 2))
box_sizes = tf.reshape(tf.stack([box_heights, box_widths], axis=2), (-1, 2))
# Convert to corner coordinates (y1, x1, y2, x2)
boxes = tf.concat([box_centers - 0.5 * box_sizes,
box_centers + 0.5 * box_sizes], axis=1)
return boxes | en | 0.728956 | Anchor Generator Attributes --- scales: 1D array of anchor sizes in pixels. ratios: 1D array of anchor ratios of width/height. feature_strides: Stride of the feature map relative to the image in pixels. Generate the multi-level anchors for Region Proposal Network Args --- img_metas: [batch_size, 11] Returns --- anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates. valid_flags: [batch_size, num_anchors] # generate anchors # generate valid flags Args --- anchors: [num_anchors, (y1, x1, y2, x2)] in image coordinates. img_shape: Tuple. (height, width, channels) Returns --- valid_flags: [num_anchors] Generate the anchors given the spatial shape of feature map. Args --- feature_shape: (height, width) Returns --- numpy.ndarray [anchors_num, (y1, x1, y2, x2)] # Get all combinations of scales and ratios # Enumerate heights and widths from scales and ratios # Enumerate shifts in feature space # Enumerate combinations of shifts, widths, and heights # Reshape to get a list of (y, x) and a list of (h, w) # Convert to corner coordinates (y1, x1, y2, x2) | 2.286622 | 2 |
server/auvsi_suas/views/teams_test.py | RMMichael/interop | 175 | 6631448 | """Tests for the teams module."""
import dateutil.parser
import functools
import json
from auvsi_suas.models.aerial_position import AerialPosition
from auvsi_suas.models.gps_position import GpsPosition
from auvsi_suas.models.mission_config import MissionConfig
from auvsi_suas.models.takeoff_or_landing_event import TakeoffOrLandingEvent
from auvsi_suas.models.uas_telemetry import UasTelemetry
from auvsi_suas.models.waypoint import Waypoint
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
teams_url = reverse('auvsi_suas:teams')
team_url = functools.partial(reverse, 'auvsi_suas:team')
class TestTeamsViewLoggedOut(TestCase):
def test_not_authenticated(self):
"""Tests requests that have not yet been authenticated."""
response = self.client.get(teams_url)
self.assertEqual(403, response.status_code)
class TestTeamsView(TestCase):
"""Tests the teams view."""
def setUp(self):
self.superuser = User.objects.create_superuser('superuser',
'<EMAIL>',
'<PASSWORD>')
self.superuser.save()
self.client.force_login(self.superuser)
def create_data(self):
"""Create a basic sample dataset."""
self.user1 = User.objects.create_user('user1', '<EMAIL>',
'<PASSWORD>')
self.user1.save()
self.user2 = User.objects.create_user('user2', '<EMAIL>',
'<PASSWORD>')
self.user2.save()
# Mission
pos = GpsPosition()
pos.latitude = 10
pos.longitude = 100
pos.save()
wpt = Waypoint()
wpt.order = 10
wpt.latitude = 10
wpt.longitude = 100
wpt.altitude_msl = 1000
wpt.save()
self.mission = MissionConfig()
self.mission.home_pos = pos
self.mission.lost_comms_pos = pos
self.mission.emergent_last_known_pos = pos
self.mission.off_axis_odlc_pos = pos
self.mission.map_center_pos = pos
self.mission.map_height_ft = 1
self.mission.air_drop_pos = pos
self.mission.ugv_drive_pos = pos
self.mission.save()
self.mission.mission_waypoints.add(wpt)
self.mission.search_grid_points.add(wpt)
self.mission.save()
# user1 is flying
event = TakeoffOrLandingEvent(user=self.user1,
mission=self.mission,
uas_in_air=True)
event.save()
# user2 has landed
event = TakeoffOrLandingEvent(user=self.user2,
mission=self.mission,
uas_in_air=True)
event.save()
event = TakeoffOrLandingEvent(user=self.user2,
mission=self.mission,
uas_in_air=False)
event.save()
# user2 is active
self.timestamp = timezone.now()
self.telem = UasTelemetry(user=self.user2,
latitude=38.6462,
longitude=-76.2452,
altitude_msl=0,
uas_heading=90)
self.telem.save()
self.telem.timestamp = dateutil.parser.parse(
u'2016-10-01T00:00:00.0+00:00')
self.telem.save()
def test_normal_user(self):
"""Normal users allowed access."""
user = User.objects.create_user('testuser', '<EMAIL>',
'<PASSWORD>')
user.save()
self.client.force_login(user)
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
def test_no_users(self):
"""No users results in empty list, no superusers."""
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
self.assertEqual([], json.loads(response.content))
def test_post(self):
"""POST not allowed"""
response = self.client.post(teams_url)
self.assertEqual(405, response.status_code)
def test_correct_json(self):
"""Response JSON is properly formatted."""
self.create_data()
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(2, len(data))
for user in data:
self.assertIn('team', user)
self.assertIn('id', user['team'])
self.assertIn('username', user['team'])
self.assertIn('inAir', user)
if 'telemetry' in user:
self.assertIn('telemetryId', user)
self.assertIn('telemetryAgeSec', user)
self.assertIn('telemetryTimestamp', user)
def test_users_correct(self):
"""User names and status correct."""
self.create_data()
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
names = [d['team']['username'] for d in data]
self.assertIn('user1', names)
self.assertIn('user2', names)
user1 = data[names.index('user1')]
self.assertEqual(True, user1['inAir'])
self.assertNotIn('telemetry', user1)
user2 = data[names.index('user2')]
self.assertEqual(False, user2['inAir'])
self.assertEqual(
{
u'latitude': 38.6462,
u'longitude': -76.2452,
u'altitude': 0.0,
u'heading': 90.0,
}, user2['telemetry'])
self.assertEqual(int(user2['telemetryId']), self.telem.pk)
self.assertGreater(user2['telemetryAgeSec'], 0)
self.assertEqual(user2['telemetryTimestamp'],
u'2016-10-01T00:00:00+00:00')
class TestTeamViewLoggedOut(TestCase):
def test_not_authenticated(self):
"""Tests requests that have not yet been authenticated."""
response = self.client.get(team_url(args=[1]))
self.assertEqual(403, response.status_code)
class TestTeamView(TestCase):
"""Tests the teams-by-id view."""
def setUp(self):
self.user1 = User.objects.create_user('user1', '<EMAIL>',
'<PASSWORD>')
self.user1.save()
self.superuser = User.objects.create_superuser('superuser',
'<EMAIL>',
'<PASSWORD>')
self.superuser.save()
self.client.force_login(self.superuser)
def test_bad_id(self):
"""Invalid user id rejected"""
response = self.client.get(team_url(args=[999]))
self.assertGreaterEqual(400, response.status_code)
def test_correct_user(self):
"""User requested is correct"""
response = self.client.get(team_url(args=[self.user1.username]))
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertIn('team', data)
self.assertIn('username', data['team'])
self.assertEqual('user1', data['team']['username'])
self.assertIn('inAir', data)
self.assertEqual(False, data['inAir'])
self.assertNotIn('telemetry', data)
def test_post(self):
"""POST not allowed"""
response = self.client.post(team_url(args=[self.user1.username]))
self.assertEqual(405, response.status_code)
| """Tests for the teams module."""
import dateutil.parser
import functools
import json
from auvsi_suas.models.aerial_position import AerialPosition
from auvsi_suas.models.gps_position import GpsPosition
from auvsi_suas.models.mission_config import MissionConfig
from auvsi_suas.models.takeoff_or_landing_event import TakeoffOrLandingEvent
from auvsi_suas.models.uas_telemetry import UasTelemetry
from auvsi_suas.models.waypoint import Waypoint
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
teams_url = reverse('auvsi_suas:teams')
team_url = functools.partial(reverse, 'auvsi_suas:team')
class TestTeamsViewLoggedOut(TestCase):
def test_not_authenticated(self):
"""Tests requests that have not yet been authenticated."""
response = self.client.get(teams_url)
self.assertEqual(403, response.status_code)
class TestTeamsView(TestCase):
"""Tests the teams view."""
def setUp(self):
self.superuser = User.objects.create_superuser('superuser',
'<EMAIL>',
'<PASSWORD>')
self.superuser.save()
self.client.force_login(self.superuser)
def create_data(self):
"""Create a basic sample dataset."""
self.user1 = User.objects.create_user('user1', '<EMAIL>',
'<PASSWORD>')
self.user1.save()
self.user2 = User.objects.create_user('user2', '<EMAIL>',
'<PASSWORD>')
self.user2.save()
# Mission
pos = GpsPosition()
pos.latitude = 10
pos.longitude = 100
pos.save()
wpt = Waypoint()
wpt.order = 10
wpt.latitude = 10
wpt.longitude = 100
wpt.altitude_msl = 1000
wpt.save()
self.mission = MissionConfig()
self.mission.home_pos = pos
self.mission.lost_comms_pos = pos
self.mission.emergent_last_known_pos = pos
self.mission.off_axis_odlc_pos = pos
self.mission.map_center_pos = pos
self.mission.map_height_ft = 1
self.mission.air_drop_pos = pos
self.mission.ugv_drive_pos = pos
self.mission.save()
self.mission.mission_waypoints.add(wpt)
self.mission.search_grid_points.add(wpt)
self.mission.save()
# user1 is flying
event = TakeoffOrLandingEvent(user=self.user1,
mission=self.mission,
uas_in_air=True)
event.save()
# user2 has landed
event = TakeoffOrLandingEvent(user=self.user2,
mission=self.mission,
uas_in_air=True)
event.save()
event = TakeoffOrLandingEvent(user=self.user2,
mission=self.mission,
uas_in_air=False)
event.save()
# user2 is active
self.timestamp = timezone.now()
self.telem = UasTelemetry(user=self.user2,
latitude=38.6462,
longitude=-76.2452,
altitude_msl=0,
uas_heading=90)
self.telem.save()
self.telem.timestamp = dateutil.parser.parse(
u'2016-10-01T00:00:00.0+00:00')
self.telem.save()
def test_normal_user(self):
"""Normal users allowed access."""
user = User.objects.create_user('testuser', '<EMAIL>',
'<PASSWORD>')
user.save()
self.client.force_login(user)
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
def test_no_users(self):
"""No users results in empty list, no superusers."""
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
self.assertEqual([], json.loads(response.content))
def test_post(self):
"""POST not allowed"""
response = self.client.post(teams_url)
self.assertEqual(405, response.status_code)
def test_correct_json(self):
"""Response JSON is properly formatted."""
self.create_data()
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertEqual(2, len(data))
for user in data:
self.assertIn('team', user)
self.assertIn('id', user['team'])
self.assertIn('username', user['team'])
self.assertIn('inAir', user)
if 'telemetry' in user:
self.assertIn('telemetryId', user)
self.assertIn('telemetryAgeSec', user)
self.assertIn('telemetryTimestamp', user)
def test_users_correct(self):
"""User names and status correct."""
self.create_data()
response = self.client.get(teams_url)
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
names = [d['team']['username'] for d in data]
self.assertIn('user1', names)
self.assertIn('user2', names)
user1 = data[names.index('user1')]
self.assertEqual(True, user1['inAir'])
self.assertNotIn('telemetry', user1)
user2 = data[names.index('user2')]
self.assertEqual(False, user2['inAir'])
self.assertEqual(
{
u'latitude': 38.6462,
u'longitude': -76.2452,
u'altitude': 0.0,
u'heading': 90.0,
}, user2['telemetry'])
self.assertEqual(int(user2['telemetryId']), self.telem.pk)
self.assertGreater(user2['telemetryAgeSec'], 0)
self.assertEqual(user2['telemetryTimestamp'],
u'2016-10-01T00:00:00+00:00')
class TestTeamViewLoggedOut(TestCase):
def test_not_authenticated(self):
"""Tests requests that have not yet been authenticated."""
response = self.client.get(team_url(args=[1]))
self.assertEqual(403, response.status_code)
class TestTeamView(TestCase):
"""Tests the teams-by-id view."""
def setUp(self):
self.user1 = User.objects.create_user('user1', '<EMAIL>',
'<PASSWORD>')
self.user1.save()
self.superuser = User.objects.create_superuser('superuser',
'<EMAIL>',
'<PASSWORD>')
self.superuser.save()
self.client.force_login(self.superuser)
def test_bad_id(self):
"""Invalid user id rejected"""
response = self.client.get(team_url(args=[999]))
self.assertGreaterEqual(400, response.status_code)
def test_correct_user(self):
"""User requested is correct"""
response = self.client.get(team_url(args=[self.user1.username]))
self.assertEqual(200, response.status_code)
data = json.loads(response.content)
self.assertIn('team', data)
self.assertIn('username', data['team'])
self.assertEqual('user1', data['team']['username'])
self.assertIn('inAir', data)
self.assertEqual(False, data['inAir'])
self.assertNotIn('telemetry', data)
def test_post(self):
"""POST not allowed"""
response = self.client.post(team_url(args=[self.user1.username]))
self.assertEqual(405, response.status_code)
| en | 0.89597 | Tests for the teams module. Tests requests that have not yet been authenticated. Tests the teams view. Create a basic sample dataset. # Mission # user1 is flying # user2 has landed # user2 is active Normal users allowed access. No users results in empty list, no superusers. POST not allowed Response JSON is properly formatted. User names and status correct. Tests requests that have not yet been authenticated. Tests the teams-by-id view. Invalid user id rejected User requested is correct POST not allowed | 2.318065 | 2 |
exercises/migrations/0005_auto_20200512_0840.py | rattletat/homework-server | 1 | 6631449 | # Generated by Django 3.0.5 on 2020-05-12 06:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exercises', '0004_auto_20200512_0501'),
]
operations = [
migrations.AlterField(
model_name='testresult',
name='first_error',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='testresult',
name='first_failure',
field=models.TextField(blank=True, null=True),
),
]
| # Generated by Django 3.0.5 on 2020-05-12 06:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exercises', '0004_auto_20200512_0501'),
]
operations = [
migrations.AlterField(
model_name='testresult',
name='first_error',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='testresult',
name='first_failure',
field=models.TextField(blank=True, null=True),
),
]
| en | 0.782814 | # Generated by Django 3.0.5 on 2020-05-12 06:40 | 1.472727 | 1 |
uploader/uploader.py | KfirBernstein/technion-iem-ds_lab | 0 | 6631450 |
"""
Upload homework TAR.GZ file from zip file created by Moodle to the Automatic Checker
Student's homework submissions can be downloaded from Moodle in one zip file.
We assume here that ALL the submissions are in a TAR.gz format (one file for each submission)
This script will open the ZIP file, and upload all the files in it to the checker.
If the server is busy, it will wait.
When all files are uploaded, the script exits.
Usage:
upload_to_checker.py [--host=server_name] zip_file_name exercise_number
"""
import logging
import os
import tempfile
import threading
import time
import shutil
import zipfile
from http import HTTPStatus
import requests
MAX_JOBS = 2 # TODO remove this value and rely on the server's 503 code .
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class Uploader():
POLLING_INTERVAL_sec = 4
def __init__(self, host_server, upload_url):
"""
:param host_server: host name of the server where the file will be uploaded to. e.g. "homework.com"
:param upload_url: path to the upload : e.g. "/submit/hw/3/"
"""
self.input_queue = []
self.server_url = upload_url
self.host_server = host_server
self.http_scheme = "http://"
self.num_uploaded = 0
self.total_num_enqueued = 0
def enqueue_for_upload(self, file_name):
"""
enqueue a file name to be uploaded.
return immediately
:param file_name:
"""
self.input_queue.append(file_name) # careful - should it be thread safe?
self.total_num_enqueued += 1
def start_uploading(self):
""" create a worker thread,
start uploading from the input queue, do not overwhelm the server
:return immediately.
"""
self.worker_thread = threading.Thread(target=self._work)
self.worker_thread.start()
def _upload(self, file_name):
# full path is needed for opening the file, but for clarity,
# the server should get only the basename
files = {'file': (os.path.basename(file_name), open(file_name, 'rb'), 'application/gzip', {'Expires': '0'})}
r = requests.post(self.http_scheme + self.host_server + self.server_url, files=files)
if r.status_code != HTTPStatus.OK:
logging.error("Server returned " + str(r))
if r.status_code == HTTPStatus.SERVICE_UNAVAILABLE:
logging.fatal("oops. Server is asked to work when busy. This should not happen.")
raise RuntimeError()
self.num_uploaded += 1
logging.info(
"Uploaded {} files. {} to go.".format(self.num_uploaded, self.total_num_enqueued - self.num_uploaded))
def _check_server_status(self):
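# Query the checker's /status endpoint; the reply is expected to be a JSON object that includes 'num_jobs'.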
import json
r = requests.get(self.http_scheme + self.host_server + "/status")
j = None
try:
j = r.json()
except json.decoder.JSONDecodeError:
logging.fatal("The server does not cooperate. Check server version.")
return j
def _work(self):
"""worker thread proc"""
try:
while len(self.input_queue) > 0:
reply = self._check_server_status()
if reply['num_jobs'] >= MAX_JOBS:
logging.info("Sleeping until the server is not busy...")
while reply['num_jobs'] >= MAX_JOBS:
time.sleep(self.POLLING_INTERVAL_sec)
reply = self._check_server_status()
self._upload(self.input_queue.pop(0))
logging.info("worker finished")
except requests.Timeout as ex:
logging.fatal("Server not timed out! " + str(ex))
except requests.ConnectionError as ex:
logging.error('Connection to server failed. Check if the server is running.\n' + str(ex))
def wait(self):
self.worker_thread.join()
if __name__ == "__main__":
import argparse
# TODO: connect to the "source copy detector" script
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="hostname of the server")
parser.add_argument("file", help="input file name (ZIP)")
parser.add_argument("ex_num", help="exercise number (e.g. 3)")
args = parser.parse_args()
path_to_zip_file = args.file
ex_num = args.ex_num
server = args.host
if server is None:
server = "homework-tester.westeurope.cloudapp.azure.com"
upload_url = "/submit/hw/" + str(ex_num)
directory_to_extract_to = tempfile.mkdtemp(dir='.')
print("using up to %d concurrent uploads" % MAX_JOBS)
try:
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
uploader = Uploader(server, upload_url)
for root, dirs, files in os.walk(directory_to_extract_to, topdown=False):
for name in files:
uploader.enqueue_for_upload(os.path.join(root, name))
uploader.start_uploading()
uploader.wait()
finally:
try:
shutil.rmtree(directory_to_extract_to)
except PermissionError:
logging.warning("Could not remove {}. Please remove it manually".format(directory_to_extract_to))
| """
Upload homework TAR.GZ file from zip file created by Moodle to the Automatic Checker
Student's homework submissions can be downloaded from Moodle in one zip file.
We assume here that ALL the submissions are in a TAR.gz format (one file for each submission)
This script will open the ZIP file, and upload all the files in it to the checker.
If the server is busy, it will wait.
When all files are uploaded, the script exits.
Usage:
upload_to_checker.py [--host=server_name] zip_file_name exercise_number
"""
import logging
import os
import tempfile
import threading
import time
import shutil
import zipfile
from http import HTTPStatus
import requests
MAX_JOBS = 2 # TODO remove this value and rely on the server's 503 code .
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class Uploader():
POLLING_INTERVAL_sec = 4
def __init__(self, host_server, upload_url):
"""
:param host_server: host name of the server where the file will be uploaded to. e.g. "homework.com"
:param upload_url: path to the upload : e.g. "/submit/hw/3/"
"""
self.input_queue = []
self.server_url = upload_url
self.host_server = host_server
self.http_scheme = "http://"
self.num_uploaded = 0
self.total_num_enqueued = 0
def enqueue_for_upload(self, file_name):
"""
enqueue a file name to be uploaded.
return immediately
:param file_name:
"""
self.input_queue.append(file_name) # careful - should it be thread safe?
self.total_num_enqueued += 1
def start_uploading(self):
""" create a worker thread,
start uploading from the input queue, do not overwhelm the server
:return immediately.
"""
self.worker_thread = threading.Thread(target=self._work)
self.worker_thread.start()
def _upload(self, file_name):
# full path is needed for opening the file, but for clarity,
# the server should get only the basename
files = {'file': (os.path.basename(file_name), open(file_name, 'rb'), 'application/gzip', {'Expires': '0'})}
r = requests.post(self.http_scheme + self.host_server + self.server_url, files=files)
if r.status_code != HTTPStatus.OK:
logging.error("Server returned " + str(r))
if r.status_code == HTTPStatus.SERVICE_UNAVAILABLE:
logging.fatal("oops. Server is asked to work when busy. This should not happen.")
raise RuntimeError()
self.num_uploaded += 1
logging.info(
"Uploaded {} files. {} to go.".format(self.num_uploaded, self.total_num_enqueued - self.num_uploaded))
def _check_server_status(self):
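# Query the checker's /status endpoint; the reply is expected to be a JSON object that includes 'num_jobs'.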
import json
r = requests.get(self.http_scheme + self.host_server + "/status")
j = None
try:
j = r.json()
except json.decoder.JSONDecodeError:
logging.fatal("The server does not cooperate. Check server version.")
return j
def _work(self):
"""worker thread proc"""
try:
while len(self.input_queue) > 0:
reply = self._check_server_status()
if reply['num_jobs'] >= MAX_JOBS:
logging.info("Sleeping until the server is not busy...")
while reply['num_jobs'] >= MAX_JOBS:
time.sleep(self.POLLING_INTERVAL_sec)
reply = self._check_server_status()
self._upload(self.input_queue.pop(0))
logging.info("worker finished")
except requests.Timeout as ex:
logging.fatal("Server not timed out! " + str(ex))
except requests.ConnectionError as ex:
logging.error('Connection to server failed. Check if the server is running.\n' + str(ex))
def wait(self):
self.worker_thread.join()
if __name__ == "__main__":
import argparse
# TODO: connect to the "source copy detector" script
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="hostname of the server")
parser.add_argument("file", help="input file name (ZIP)")
parser.add_argument("ex_num", help="exercise number (e.g. 3)")
args = parser.parse_args()
path_to_zip_file = args.file
ex_num = args.ex_num
server = args.host
if server is None:
server = "homework-tester.westeurope.cloudapp.azure.com"
upload_url = "/submit/hw/" + str(ex_num)
directory_to_extract_to = tempfile.mkdtemp(dir='.')
print("using up to %d concurrent uploads" % MAX_JOBS)
try:
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(directory_to_extract_to)
uploader = Uploader(server, upload_url)
for root, dirs, files in os.walk(directory_to_extract_to, topdown=False):
for name in files:
uploader.enqueue_for_upload(os.path.join(root, name))
uploader.start_uploading()
uploader.wait()
finally:
try:
shutil.rmtree(directory_to_extract_to)
except PermissionError:
logging.warning("Could not remove {}. Please remove it manually".format(directory_to_extract_to)) | en | 0.893058 | Upload homework TAR.GZ file from zip file created by Moodle to the Automatic Checker Student's homework submissions can be downloaded from Moodle in one zip file. We assume here that ALL the submissions are in a TAR.gz format (one file for each submission) This script will open the ZIP file, and upload all the files in it to the checker. If the server is busy, it will wait. When all files are uploaded, the script exits. Usage: upload_to_checker.py [--host=server_name] zip_file_name exercise_number # TODO remove this value and rely on the server's 503 code . :param host_server: host name of the server where the file will be uploaded to. e.g. "homework.com" :param upload_url: path to the upload : e.g. "/submit/hw/3/" enqueue a file name to be uploaded. return immediately :param file_name: # careful - should it be thread safe? create a worker thread, start uploading from the input queue, do not overwhelm the server :return immediatley. # full path is needed for opening the file, but for clarity, # the server should get only the basename worker thread proc # TODO: connect to the "source copy detector" script | 3.525169 | 4 |