from sqlalchemy import Column, Integer, String, Float
from app.models.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(64), unique=False)
picture = Column(String(64), unique=False)
company = Column(String(64), unique=False)
email = Column(String(64), unique=True)
phone = Column(String(64), unique=True)
country = Column(String(64), unique=False)
longitude = Column(Float, unique=False)
latitude = Column(Float, unique=False)
def __init__(self, user=None):
self.name = user['name']
self.picture = user['picture']
self.company = user['company']
self.email = user['email']
self.phone = user['phone']
self.longitude = user['longitude']
self.latitude = user['latitude']
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
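
# Illustrative usage sketch (not part of the original module); it assumes the model can be
# instantiated without an active database session. Note that __init__ does not populate
# the `country` column, so it stays None here. All values below are made up.
if __name__ == "__main__":
    demo_user = User({
        'name': 'Ada Lovelace', 'picture': 'ada.png', 'company': 'Analytical Engines',
        'email': '[email protected]', 'phone': '+44 555 0100',
        'longitude': -0.1276, 'latitude': 51.5072,
    })
    print(demo_user.as_dict())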
|
from PySide2 import QtCore, QtGui, QtWidgets
from qtwidgets import PasswordEdit
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
password = PasswordEdit()
self.setCentralWidget(password)
app = QtWidgets.QApplication([])
w = Window()
w.show()
app.exec_()
|
from .load_dicom import load_scan_from_dicom, get_pixels_hu
__all__ = ["load_scan_from_dicom", "get_pixels_hu"] |
import curses
import platform
import terraindata as td
import fielddata as field
import variables as var
import uidata as UI
import colors
import functions as func
import dialogue as dial
import cardfunc as cf
def display():
var.window.border('|', '|', '-', '-', '#', '#', '#', '#')
var.window.addstr(UI.Main.title_text[0], UI.Main.title_text[1], 'Location : ' + var.Field.location)
for i in range(len(field.field[var.Field.location])):
for j in range(len(field.field[var.Field.location][0])):
r = UI.Field.cell_interval[0] * i - var.Camera.position[0]
c = UI.Field.cell_interval[1] * j - var.Camera.position[1]
if r > 0 and r < 26 and c > 0 and c < 90:
draw_terrain(r, c, field.field[var.Field.location][i][j])
for i in range(len(field.objects[var.Field.location])):
r = UI.Field.cell_interval[0] * field.objects[var.Field.location][i][0] - var.Camera.position[0]
c = UI.Field.cell_interval[1] * field.objects[var.Field.location][i][1] - var.Camera.position[1]
if r > 0 and r < 26 and c > 0 and c < 90:
draw_objects(r, c, field.objects[var.Field.location][i][2])
draw_player(UI.Field.cell_interval[0] * var.Field.player_position[0] - var.Camera.position[0], UI.Field.cell_interval[1] * var.Field.player_position[1] - var.Camera.position[1])
if var.Game.state == 'dialogue':
func.draw_rect(UI.Main.dialogue[0], UI.Main.dialogue[1], UI.Main.dialogue[2], UI.Main.dialogue[3], colors.fg_white)
var.window.addstr(UI.Main.dialogue_text[0], UI.Main.dialogue_text[1], dial.data[var.Game.interaction]['contents'][var.Game.interaction_level])
def draw_terrain(r, c, terrain):
func.draw_rect(r, c, UI.Field.cell_size[0], UI.Field.cell_size[1], colors.fg_white)
for i in range(3):
for j in range(4):
if platform.system() == 'Windows':
var.window.addstr(r + i + 1, c + j + 1, td.character[terrain][i][j], curses.color_pair(td.color_code['windows'][td.color[terrain][i][j]]))
else:
var.window.addstr(r + i + 1, c + j + 1, td.character[terrain][i][j], curses.color_pair(td.color_code['other'][td.color[terrain][i][j]]))
def draw_objects(r, c, ID):
for i in range(3):
for j in range(4):
if platform.system() == 'Windows':
var.window.addstr(r + i + 1, c + j + 1, td.objects[ID][i][j], curses.color_pair(td.color_code['windows'][td.objects_color[ID][i][j]]))
else:
var.window.addstr(r + i + 1, c + j + 1, td.objects[ID][i][j], curses.color_pair(td.color_code['other'][td.objects_color[ID][i][j]]))
def draw_player(r, c):
var.window.addstr(r + 1, c + 2, '||')
var.window.addstr(r + 3, c + 2, '--')
def input_handle(key):
if var.Game.state == '':
        if key == 96 + 23:  # 'w' (ord 119): move up
if field.walls[var.Field.location][var.Field.player_position[0] - 1][var.Field.player_position[1]] == 0:
var.Field.player_position[0] -= 1
var.Camera.position[0] -= UI.Field.cell_interval[0]
var.Field.player_face = 'U'
        elif key == 96 + 19:  # 's': move down
if field.walls[var.Field.location][var.Field.player_position[0] + 1][var.Field.player_position[1]] == 0:
var.Field.player_position[0] += 1
var.Camera.position[0] += UI.Field.cell_interval[0]
var.Field.player_face = 'D'
        elif key == 96 + 1:  # 'a': move left
if field.walls[var.Field.location][var.Field.player_position[0]][var.Field.player_position[1] - 1] == 0:
var.Field.player_position[1] -= 1
var.Camera.position[1] -= UI.Field.cell_interval[1]
var.Field.player_face = 'L'
        elif key == 96 + 4:  # 'd': move right
if field.walls[var.Field.location][var.Field.player_position[0]][var.Field.player_position[1] + 1] == 0:
var.Field.player_position[1] += 1
var.Camera.position[1] += UI.Field.cell_interval[1]
var.Field.player_face = 'R'
        elif key == 96 + 5:  # 'e': use a connection (door/exit) on the current tile
for i in range(len(field.connection[var.Field.location])):
if var.Field.player_position[0] == field.connection[var.Field.location][i][0][0] and var.Field.player_position[1] == field.connection[var.Field.location][i][0][1]:
var.Field.player_position = [field.connection[var.Field.location][i][2][0], field.connection[var.Field.location][i][2][1]]
var.Camera.position = [field.connection[var.Field.location][i][3][0], field.connection[var.Field.location][i][3][1]]
var.Field.location = field.connection[var.Field.location][i][1]
        elif key == 96 + 18:  # 'r': interact with whatever the player is facing
for i in range(len(field.interaction[var.Field.location])):
for j in range(4):
if var.Field.player_position[0] == field.interaction[var.Field.location][i][j][0] and var.Field.player_position[1] == field.interaction[var.Field.location][i][j][1] and var.Field.player_face == field.interaction[var.Field.location][i][j][2]:
var.Game.state = 'dialogue'
var.Game.interaction = field.interaction[var.Field.location][i][4]
var.Game.interaction_level = 0
elif var.Game.state == 'dialogue':
        if key == 96 + 5:  # 'e': advance the dialogue
if var.Game.interaction_level < len(dial.data[var.Game.interaction]['contents']) - 1:
var.Game.interaction_level += 1
        elif key == 96 + 25:  # 'y': accept the closing option (may start a battle)
if var.Game.interaction_level == len(dial.data[var.Game.interaction]['contents']) - 1:
if dial.data[var.Game.interaction]['end_option'][121] == 'battle':
var.Game.scene = 'battle'
func.battle_start()
cf.deck_generate_battle('adventure1')
        elif key == 96 + 14:  # 'n': close the dialogue
var.Game.state = ''
var.Game.interaction = -1
var.Game.interaction_level = -1
def neighbor(pos1, pos2):
if abs(pos1[0] - pos2[0]) == 0 and abs(pos1[1] - pos2[1]) == 1:
return True
if abs(pos1[0] - pos2[0]) == 1 and abs(pos1[1] - pos2[1]) == 0:
return True
return False
|
# Copyright (c) 2013, [email protected] and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
columns = [
"Posting Date:Date:100",
"Employee:Link/Employee:100",
"Employee Name:Data:150",
"Advance Amount:Currency:120",
"Paid Amount:Currency:120",
"Retired Amount:Currency:130",
"Refunded Amount:Currency:130",
"Variance:Currency:120",
]
conditions = ""
if filters.get("from_date"):
conditions += "d.posting_date >= DATE('{from_date}')"
if filters.get("to_date"):
conditions += " AND d.posting_date <= DATE('{to_date}')"
if filters.get("status") and filters.get('status') != "Retired":
conditions += " AND d.status = '{status}'"
else:
conditions += " AND d.claimed_amount = d.advance_amount or AND d.refunded = d.advance_amount "
data = frappe.db.sql("SELECT d.posting_date, d.employee, d.employee_name , d.advance_amount, d.paid_amount, "
"d.claimed_amount, d.refund_amount, (d.refund_amount + d.claimed_amount - d.paid_amount) FROM "
"`tabEmployee Advance` d WHERE {0} ".format(conditions.format(**filters)), as_list=1)
return columns, data
|
import os
import pickle
import numpy as np
from tqdm import tqdm
import phns
DATA_PATH = os.path.dirname(os.path.realpath(__file__)) + "/../data/"
print(f"Loading data from {DATA_PATH}")
with open(DATA_PATH + "timit_bench.pkl", "rb") as f:
data = pickle.load(f)
# missing words:
# exceptions = [
# "motorists'",
# "somebody'll",
# "andrei's",
#
# "morphophonemic",
# "nihilistic",
# "radiosterilization",
# "exhusband",
# "smolderingly",
# "geocentricism",
# "unmagnified",
# "stirrin",
# "utopianism",
# "infuriation",
# "preprepared",
# "understandingly",
# "eventualities",
# "micrometeorites",
# "herdin'",
# "responsively",
# "demineralization",
# "unwaveringly",
# "cap'n",
# "mournfully",
# "autofluorescence",
# "fasciculations",
# "weatherstrip",
# "nonsystematic",
# "traditionalism",
# "chorused",
# "micrometeorite",
# "reupholstering",
# "castorbeans"
# ]
cers = []
cers_no_contractions = []
skipped = 0
missing = set()
for item in tqdm(data):
# Preprocessing data
_phns = phns.utils.timit_to_cmu(item["phns"])
_phns = [phns.Phn(phn) for phn in _phns if phn != "sil"]
graph = phns.from_text(
item["text"],
missing_handler=lambda word: missing.add(word),
apply_confusion=True,
)
if graph:
result = phns.closest(_phns, graph)
cers.append(result["cmu_cer"])
if result["cmu_cer"] > 0.3 and False:
print(item["text"])
print("phns", item["phns"])
print("_phns", [phn.val for phn in _phns])
print("targt", [phn.val for phn in result["target"]])
print("match", [graph.nodes[m].value.val for m in result["match"]])
print(result)
import ipdb
ipdb.set_trace()
else:
skipped += 1
print("skipped: ", skipped)
print("missing: ", len(missing))
print(missing)
print(
{
"25%": np.percentile(cers, 25),
"50%": np.percentile(cers, 50),
"75%": np.percentile(cers, 75),
"95%": np.percentile(cers, 95),
"mean": np.mean(cers),
}
)
|
from http import HTTPStatus
from unittest.mock import patch
from pytest_cases import parametrize_with_cases
from infobip_channels.core.models import ResponseBase
from infobip_channels.email.channel import EmailChannel
from tests.conftest import get_response_object
def set_up_mock_server_and_send_request(
httpserver,
status_code,
response_content,
endpoint,
http_method,
expected_headers,
expected_query_parameters,
expected_data,
request_data,
method_name,
):
httpserver.expect_request(
endpoint,
method=http_method,
query_string=expected_query_parameters,
headers=expected_headers,
data=expected_data,
).respond_with_response(get_response_object(status_code, response_content))
email_channel = EmailChannel.from_auth_params(
{"base_url": httpserver.url_for("/"), "api_key": "secret"}
)
return getattr(email_channel, method_name)(request_data)
@patch("urllib3.filepost.choose_boundary", return_value="mockBoundary")
@parametrize_with_cases(
"status_code, response_content, endpoint, http_method, expected_headers, "
"expected_query_parameters, expected_data, request_data, method_name",
prefix="case__supported_status",
)
def test_mms_endpoints__supported_status(
mock_boundary,
httpserver,
status_code,
response_content,
endpoint,
http_method,
expected_headers,
expected_query_parameters,
expected_data,
request_data,
method_name,
):
response = set_up_mock_server_and_send_request(
httpserver,
status_code,
response_content,
endpoint,
http_method,
expected_headers,
expected_query_parameters,
expected_data,
request_data,
method_name,
)
if method_name == "delete_existing_domain" or method_name == "verify_domain":
assert response.status_code == status_code
else:
response_dict = EmailChannel.convert_model_to_dict(response)
raw_response = response_dict.pop("rawResponse")
expected_response_dict = {
**response_content,
"statusCode": HTTPStatus(status_code),
}
assert isinstance(response, ResponseBase) is True
assert response.status_code == status_code
assert response_dict == expected_response_dict
assert raw_response is not None
@patch("urllib3.filepost.choose_boundary", return_value="mockBoundary")
@parametrize_with_cases(
"status_code, response_content, endpoint, http_method, expected_headers, "
"expected_query_parameters, expected_data, request_data, method_name",
prefix="case__unsupported_status",
)
def test_mms_endpoints__unsupported_status(
mock_boundary,
httpserver,
status_code,
response_content,
endpoint,
http_method,
expected_headers,
expected_query_parameters,
expected_data,
request_data,
method_name,
):
response = set_up_mock_server_and_send_request(
httpserver,
status_code,
response_content,
endpoint,
http_method,
expected_headers,
expected_query_parameters,
expected_data,
request_data,
method_name,
)
assert isinstance(response, ResponseBase) is False
assert response is not None
assert response.status_code == status_code
assert response.json() == response_content
|
'''
Test for generate_indexes module.
'''
from dmrg_helpers.extract.generate_indexes import SiteFilter
def test_constant():
f = SiteFilter('1')
assert f.a == '1'
assert f.i == None
assert f.pm == None
assert f.b == None
assert f.is_constant() == True
assert f.build_index(5) == 1
def test_odd():
f = SiteFilter('2*i+1')
assert f.a == '2'
assert f.i == 'i'
assert f.pm == '+'
assert f.b == '1'
assert f.is_constant() == False
assert f.build_index(5) == 11
def test_even():
f = SiteFilter('2*i')
assert f.a == '2'
assert f.i == 'i'
assert f.pm == None
assert f.b == None
assert f.is_constant() == False
assert f.build_index(5) == 10
def test_identity():
f = SiteFilter('i')
assert f.a == None
assert f.i == 'i'
assert f.pm == None
assert f.b == None
assert f.is_constant() == False
assert f.build_index(5) == 5
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo, ValidationError
from flask_babel import _, lazy_gettext as _l
from blog_app.models import User
class LoginForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember_me = BooleanField(_l('Remember me'))
log_in = SubmitField(_l('Log In'))
class RegistrationForm(FlaskForm):
username = StringField(_l('Username'), validators=[DataRequired()])
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
repeat_password = PasswordField(_l('Repeat Password'), validators=[DataRequired(), EqualTo('password')])
sign_up = SubmitField(_l('Registration'))
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(_('Please use a different username'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(_('Please use a different email address.'))
class ResetPasswordRequestForm(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
submit = SubmitField(_l('Request Password Reset'))
class ResetPasswordForm(FlaskForm):
new_password = PasswordField(_l('Password'), validators=[DataRequired()])
repeat_password = PasswordField(_l('Repeat Password'), validators=[DataRequired(), EqualTo('new_password')])
submit = SubmitField(_l('Request Password Reset')) |
import time
from datetime import datetime
import subprocess32 as subprocess
import select
from .logging_ext import logger
class RunResult(object):
def __init__(self, text, status):
self.text = text
self.status = status
def run_with_io_timeout(cmd, timeout_sec=120):
command = ' '.join(cmd)
logger.info('run_with_io_timeout: running {}'.format(command))
start_time = datetime.now()
p = subprocess.Popen(
cmd, bufsize=0, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
process_id = p.pid
logger.info('run_with_io_timeout: spawned process pid={}'.format(process_id))
def read_line():
_ln = ''
while True:
_rlist, _wlist, _xlist = select.select([p.stdout], [], [p.stdout], timeout_sec)
if _xlist:
return _ln, 'closed'
if not _rlist:
return _ln, 'timeout'
_c = p.stdout.read(1)
if not _c:
return _ln, 'closed'
if _c == '\n':
return _ln, ''
_ln += _c
last_line_time = time.time()
lines = []
while True:
ln, status = read_line()
lines.append(ln)
print(ln)
if status == 'closed':
break
        ## if the read timed out, kill the hung process
if status == 'timeout':
p.kill()
break
        ## intended hook: save output to the db at 5 second intervals (currently only resets the timer)
if time.time() - last_line_time > 5:
last_line_time = time.time()
p.stdout.close()
p.wait()
end_time = datetime.now()
duration_sec = (end_time - start_time).total_seconds()
if p.returncode == 0:
logger.info('run_with_io_timeout: return_code={} duration={}sec'.format(p.returncode, duration_sec))
else:
if status == 'timeout':
state = 'killed hung job'
else:
state = 'failed'
logger.error('run_with_io_timeout: return_code={} duration={}sec {}'.format(p.returncode, duration_sec, state))
return RunResult(u''.join(lines), p.returncode)
|
from btmonitor.pubsub import Hub
from btmonitor.pubsub import Subscription
import pytest
@pytest.mark.asyncio
async def test_pubsub_single():
hub: Hub[int] = Hub()
hub.publish(1)
with Subscription(hub) as queue:
assert queue.empty()
hub.publish(2)
result = await queue.get()
assert result == 2
assert queue.empty()
hub.suspend()
hub.publish(3)
assert queue.empty()
assert hub.is_suspended()
hub.resume()
hub.publish(3)
assert not queue.empty()
assert not hub.is_suspended()
@pytest.mark.asyncio
async def test_pubsub_multi():
hub: Hub[int] = Hub()
hub.publish(1)
with Subscription(hub) as queue:
assert queue.empty()
hub.publish_all([3, 4])
result = await queue.get()
assert result == 3
assert not queue.empty()
result = await queue.get()
assert result == 4
assert queue.empty()
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-function-docstring
"""These examples should be handle by the classicalfunction compiler"""
from qiskit.circuit import Int1
def identity(a: Int1) -> Int1:
return a
def bit_and(a: Int1, b: Int1) -> Int1:
return a & b
def bit_or(a: Int1, b: Int1) -> Int1:
return a | b
def bool_or(a: Int1, b: Int1) -> Int1:
return a or b
def bool_not(a: Int1) -> Int1:
return not a
def and_and(a: Int1, b: Int1, c: Int1) -> Int1:
return a and b and c
def multiple_binop(a: Int1, b: Int1) -> Int1:
return (a or b) | (b & a) and (a & b)
def id_assing(a: Int1) -> Int1:
b = a
return b
def example1(a: Int1, b: Int1) -> Int1:
c = a & b
d = b | a
return c ^ a | d
def grover_oracle(a: Int1, b: Int1, c: Int1, d: Int1) -> Int1:
return not a and b and not c and d
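
# Illustrative sketch (an assumption, not part of the original file): one of the functions
# above can be compiled into a quantum circuit with Qiskit's classicalfunction compiler.
# The exact import path and API may differ between Qiskit versions.
if __name__ == "__main__":
    from qiskit.circuit.classicalfunction import classical_function

    oracle = classical_function(grover_oracle)  # parse the Python function into an oracle
    print(oracle.synth())                       # synthesize and print the QuantumCircuit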
|
import re
import os
import gzip
import argparse
import numpy as np
import scipy.stats as stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from Bio import SeqIO
def plot_lengths(lengths, name):
mean_len, std_len, median_len = np.mean(lengths), np.std(lengths), np.median(lengths)
min_len, max_len = lengths.min(), lengths.max()
fig, ax = plt.subplots()
n, bins, patches = plt.hist(lengths, 50, rwidth=0.8, align='mid', density=True, alpha=0.75)
x = np.linspace(min(bins), max(bins), 1000)
lengths_log = np.log(lengths)
mean_log, std_log = lengths_log.mean(), lengths_log.std()
pdf = (np.exp(-(np.log(x) - mean_log)**2 / (2 * std_log**2)) / (x * std_log * np.sqrt(2 * np.pi)))
plt.plot(x, pdf)
plt.title('Read length histogram')
plt.grid(True)
plt.savefig(f'plots/{name}_length.png')
with open(f'logs/{name}_length.log', 'w') as f:
f.write(f'----- LENGTH INFO -----\n')
f.write(f'Num seq\t\t=\t{len(lengths)}\n')
f.write(f'Min len\t\t=\t{min_len}\n')
f.write(f'Median len\t=\t{median_len}\n')
f.write(f'Mean len\t=\t{mean_len}\n')
f.write(f'Std len\t\t=\t{std_len}\n')
        f.write(f'Max len\t\t=\t{max_len}\n')
f.write(f'-------------------------\n')
def plot_qscores(qualities, name):
mean_q, std_q, median_q = np.mean(qualities), np.std(qualities), np.median(qualities)
min_q, max_q = qualities.min(), qualities.max()
fig, ax = plt.subplots()
n, bins, patches = plt.hist(qualities, 50, rwidth=0.8, align='mid', density=True, alpha=0.75)
x = np.linspace(min(bins), max(bins), 1000)
plt.plot(x, stats.norm.pdf(x, mean_q, std_q))
plt.title('Q-score histogram')
plt.grid(True)
plt.savefig(f'plots/{name}_qscore.png')
with open(f'logs/{name}_qscores.log', 'w') as f:
f.write(f'----- Q-SCORE INFO -----\n')
f.write(f'Num seq\t\t=\t{len(qualities)}\n')
f.write(f'Min Q\t\t=\t{min_q}\n')
f.write(f'Median Q\t=\t{median_q}\n')
f.write(f'Mean Q\t\t=\t{mean_q}\n')
f.write(f'Std Q\t\t=\t{std_q}\n')
f.write(f'Max Q\t\t=\t{max_q}\n')
f.write(f'--------------------------\n')
def plot_accuracies(accuracies, name):
mean_acc, std_acc, median_acc = np.mean(accuracies), np.std(accuracies), np.median(accuracies)
min_acc, max_acc = accuracies.min(), accuracies.max()
fig, ax = plt.subplots()
n, bins, patches = plt.hist(accuracies, 50, rwidth=0.8, align='mid', density=True, alpha=0.75)
x = np.linspace(min(bins), max(bins), 1000)
plt.plot(x, stats.norm.pdf(x, mean_acc, std_acc))
plt.title('Accuracy histogram')
plt.grid(True)
plt.savefig(f'plots/{name}_acc.png')
with open(f'logs/{name}_acc.log', 'w') as f:
f.write(f'----- ACCURACY INFO -----\n')
f.write(f'Num seq\t\t=\t{len(accuracies)}\n')
f.write(f'Min acc\t\t=\t{min_acc}\n')
f.write(f'Median acc\t=\t{median_acc}\n')
f.write(f'Mean acc\t=\t{mean_acc}\n')
f.write(f'Std acc\t\t=\t{std_acc}\n')
f.write(f'Max acc\t\t=\t{max_acc}\n')
f.write(f'-------------------------\n')
def get_acc(q):
return 1.0 - 10 ** -(q / 10)
def main(args):
filename = args.filename
find_l_dist = args.length
find_q_dist = args.qscore
find_acc_dist = args.accuracy
try:
if filename[-2:] == 'gz':
f = gzip.open(filename, 'rt')
else:
f = open(filename)
if find_l_dist:
lengths = np.array([len(s) for s in SeqIO.parse(f, 'fastq')])
f.seek(0)
if find_q_dist:
qualities = np.array([np.array(r.letter_annotations['phred_quality']).mean() \
for r in SeqIO.parse(f, 'fastq')])
f.seek(0)
if find_acc_dist:
accuracies = np.array([np.array(list(map(get_acc, r.letter_annotations['phred_quality']))).mean() \
for r in SeqIO.parse(f, 'fastq')])
f.seek(0)
finally:
f.close()
pattern = r'.*/(.*).fast.*'
name = re.findall(pattern, filename)[0]
if find_l_dist:
print('Processing lengths...')
plot_lengths(lengths, name)
if find_q_dist:
print('Processing q-scores...')
plot_qscores(qualities, name)
if find_acc_dist:
print('Processing accuracies...')
plot_accuracies(accuracies, name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot distributions')
parser.add_argument('filename', type=str)
parser.add_argument('--length', action='store_true', default=False)
parser.add_argument('--qscore', action='store_true', default=False)
parser.add_argument('--accuracy', action='store_true', default=False)
args = parser.parse_args()
main(args)
|
default_app_config = 'marketing.apps.MarketingConfig'
|
# 020 - Drawing a random order from a list
from random import shuffle
n1 = str(input('First student: '))
n2 = str(input('Second student: '))
n3 = str(input('Third student: '))
n4 = str(input('Fourth student: '))
lista = [n1, n2, n3, n4]
shuffle(lista)
print('The presentation order will be: ')
print(lista)
|
import cv2
import numpy as np
#---------------------------------------------------#
#   Resize the input image (letterbox to the target size)
#---------------------------------------------------#
def letterbox_image(image, size):
ih, iw, _ = np.shape(image)
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw, nh))
new_image = np.ones([size[1], size[0], 3]) * 128
new_image[(h-nh)//2:nh+(h-nh)//2, (w-nw)//2:nw+(w-nw)//2] = image
return new_image
#---------------------------------------------------#
#   Get the learning rate
#---------------------------------------------------#
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def preprocess_input(image):
image -= np.array((104, 117, 123),np.float32)
return image
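
# Illustrative usage sketch (not part of the original utilities): letterbox a dummy frame to
# 416x416 (an assumed detector input size) and apply the mean subtraction preprocessing.
if __name__ == "__main__":
    dummy = np.zeros((480, 640, 3), dtype=np.uint8)
    boxed = letterbox_image(dummy, (416, 416))
    net_input = preprocess_input(boxed.astype(np.float32))
    print(boxed.shape, net_input.shape)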
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.core.cache.backends.memcached import BaseMemcachedCache
class PymemcacheCache(BaseMemcachedCache):
"""
Implementation of a pymemcache binding for Django 2.x.
"""
def __init__(self, server, params):
import pymemcache
super().__init__(server, params, library=pymemcache,
value_not_found_exception=KeyError)
@property
def _cache(self):
if getattr(self, "_client", None) is None:
kwargs = {
"allow_unicode_keys": True,
"default_noreply": False,
"serde": self._lib.serde.pickle_serde,
}
kwargs.update(self._options)
self._client = self._lib.HashClient(self._servers, **kwargs)
return self._client
def close(self, **kwargs):
# Don't call disconnect_all() if connection pooling is enabled,
# as it resets the failover state and creates unnecessary reconnects.
if not self._cache.use_pooling:
self._cache.disconnect_all()
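
# Usage sketch (an assumption, not part of this module): in Django settings, point CACHES at
# this backend class; the dotted path below is hypothetical and depends on the package layout.
#
#   CACHES = {
#       "default": {
#           "BACKEND": "path.to.this_module.PymemcacheCache",
#           "LOCATION": "127.0.0.1:11211",
#       },
#   }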
|
##
# Main Module: display.py
##
import configparser
import tm1637
from time import sleep
config = configparser.ConfigParser()
config.read('config/appconfig.ini')
CLK = int(config['GPIO']['clk'])
DIO = int(config['GPIO']['dio'])
tm = tm1637.TM1637(clk=CLK, dio=DIO)
tm.brightness(7)
while True:
tm.scroll('HELLO LISA')
tm.numbers(69, 69)
sleep(3)
|
"""
PyENT: Python version of FourmiLab's ENT: Benchmarking suite for pseudorandom number sequence.
(c) 2020 by Minh-Hai Nguyen
"""
import sys
import numpy as np
from scipy.stats import chi2 as spChi2
def Chi2(data,bins,min_value=None,max_value=None):
""" Compute chi-square
bins: int or sequence of bins
If min_value or max_value is None, use the bounds of the data
"""
if min_value is None:
min_value = min(data)
if max_value is None:
max_value = max(data)
Os, bs = np.histogram(data,bins=bins,range=(min_value,max_value),density=False)
    E = len(data)/(len(bs)-1)  # np.histogram returns bin edges, so the number of bins is len(bs)-1
return np.sum((Os-E)**2)/E
def Chi2Q(data,bins,min_value=None,max_value=None):
""" Compute accumunative distribution of chi-square
bins: int or sequence of bins
If min_value or max_value is None, use the bounds of the data
"""
if min_value is None:
min_value = min(data)
if max_value is None:
max_value = max(data)
Os, bs = np.histogram(data,bins=bins,range=(min_value,max_value),density=False)
E = len(data)/(len(bs)-1)
c2 = np.sum((Os-E)**2)/E
return 1-spChi2.cdf(c2,len(bs)-2)
def Pi(data,min_value=None,max_value=None):
""" Estimate the value of pi from the data by Monte-Carlo method
by converting data into a series of (x,y) coordinates in a square
and count the number of points fall within a circle bounded by that square
If min_value or max_value is None, use the upper and lower bounds of the data
"""
if min_value is None:
min_value = min(data)
if max_value is None:
max_value = max(data)
R = (max_value - min_value)/2
Rloc = (max_value + min_value)/2
R2 = R**2
xs = data[:-1] - Rloc
ys = data[1:] - Rloc
ds = xs**2 + ys**2
hits = np.sum(ds<R2)
return 4*hits/len(xs)
def Entropy(data,bins,min_value=None,max_value=None):
""" Compute Shannon Entropy of the data
bins: int or sequence of bins
If min_value or max_value is None, use the bounds of the data
"""
if min_value is None:
min_value = min(data)
if max_value is None:
max_value = max(data)
Os,_ = np.histogram(data,bins=bins,range=(min_value,max_value),density=False)
Ps = Os/len(data)
Ps = Ps[Ps>0]
E = -np.sum(Ps*np.log2(Ps))
return E
def Corr(data):
""" Serial Correlation Coefficient
"""
result = np.corrcoef(data[:-1],data[1:])
return abs(result[0,1]/result[0,0])
def ENT(data,bins,min_value=None,max_value=None,display=False):
""" Comprehensive randomness Benchmarking
bins: int or sequence of bins
If min_value or max_value is None, use the bounds of the data
display: print out results or not
"""
if min_value is None:
min_value = min(data)
if max_value is None:
max_value = max(data)
# Number of bins
if isinstance(bins,list):
num_bins = len(bins)
else:
num_bins = bins
# Shannon Entropy
entropy = Entropy(data,bins,min_value,max_value)
max_ent = np.log2(num_bins)
# Chi-square Test
chi2 = Chi2(data,bins,min_value,max_value)
chi2Q = 1-spChi2.cdf(chi2,num_bins-1)
# Mean
mean = np.mean(data)
median = (min_value + max_value)/2
# Monte-Carlo value for Pi
pi = Pi(data,min_value,max_value)
# Serial Correlation
corr = Corr(data)
# Print out
if display:
print("Entropy = ", entropy, "bits per character.")
print("Optimum compression would reduce the size of this data by %.0f percent."
%((max_ent-entropy)*100/max_ent))
print()
print("Chi square distribution is %.2e, and randomly would exceed this value %.1f percent of the times."
%(chi2,chi2Q*100))
print()
print("Arithmetic mean value is %.2e" %mean)
print("Median value is %.2e" %median)
print()
print("Monte-Carlo value for Pi is ", pi, " (error %.3f percent)" %(abs(np.pi-pi)*100/np.pi))
print()
print("Serial correlation coefficient is ", corr)
return entropy, chi2, pi, corr
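
def demo_uniform(n=100000, seed=0):
    """ Illustrative helper, not part of the original PyENT module (an added sketch):
    run ENT on uniform pseudorandom bytes from numpy. A good generator should score
    close to 8 bits of entropy per byte and a Monte-Carlo pi estimate near 3.14.
    """
    rng = np.random.default_rng(seed)
    data = rng.integers(0, 256, size=n)
    return ENT(data, 1 << 8, min_value=0, max_value=(1 << 8) - 1, display=True)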
def ent_file(fname, display=False):
""" Run ENT analysis on binary file fname
Data is read as bytes
"""
with open(fname,'rb') as f:
data = f.read()
data = np.array(bytearray(data))
    return ENT(data,1<<8,min_value=0,max_value=(1<<8)-1,display=display)
if __name__=="__main__":
args = sys.argv
if len(args)<2:
fname = input("File name: ")
fnames = [fname]
else:
fnames = args[1:]
for fname in fnames:
print()
print("Test results for file: %s" %fname)
_ = ent_file(fname,True)
print("============")
|
import os
from model.generic_loss_functions import PFNL_generic_loss_functions
from model.generic_loss_functions import NAME as PROPOSED_LOSS
from model.control import PFNL_control
from model.control import NAME as CONTROL
from model.alternative import PFNL_alternative
from model.alternative import NAME as ALTERNATIVE
from model.null import PFNL_null
from model.null import NAME as NULL
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if __name__ == '__main__':
# Choose model
model = PFNL_control()
# model = PFNL_null()
# model = PFNL_alternative()
# model = PFNL_generic_loss_functions()
print('Model loaded!')
# Training the model
model.train()
print('Training finished')
# Testing the specified model
if isinstance(model, PFNL_control):
NAME = CONTROL
elif isinstance(model, PFNL_null):
NAME = NULL
elif isinstance(model, PFNL_generic_loss_functions):
NAME = PROPOSED_LOSS
else:
NAME = ALTERNATIVE
model.testvideos('test/vid4', name='{}'.format(NAME))
model.testvideos('test/udm10', name='{}'.format(NAME))
print('Runtime finished')
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Prefix DAG permissions.
Revision ID: 849da589634d
Revises: 45ba3f1493b9
Create Date: 2020-10-01 17:25:10.006322
"""
from airflow.security import permissions
from airflow.www.app import cached_app
# revision identifiers, used by Alembic.
revision = '849da589634d'
down_revision = '45ba3f1493b9'
branch_labels = None
depends_on = None
def upgrade():  # noqa: D103
    perms = ['can_dag_read', 'can_dag_edit']
    view_menus = cached_app().appbuilder.sm.get_all_view_menu()
    convert_permissions(perms, view_menus, upgrade_action, upgrade_dag_id)
def downgrade():  # noqa: D103
    # use a local name that does not shadow the imported `permissions` module used below
    perms = ['can_read', 'can_edit']
    vms = cached_app().appbuilder.sm.get_all_view_menu()
    view_menus = [vm for vm in vms if (vm.name == permissions.RESOURCE_DAG or vm.name.startswith('DAG:'))]
    convert_permissions(perms, view_menus, downgrade_action, downgrade_dag_id)
def upgrade_dag_id(dag_id):
"""Adds the 'DAG:' prefix to a DAG view if appropriate."""
if dag_id == 'all_dags':
return permissions.RESOURCE_DAG
if dag_id.startswith("DAG:"):
return dag_id
return f"DAG:{dag_id}"
def downgrade_dag_id(dag_id):
"""Removes the 'DAG:' prefix from a DAG view name to return the DAG id."""
if dag_id == permissions.RESOURCE_DAG:
return 'all_dags'
if dag_id.startswith("DAG:"):
return dag_id[len("DAG:") :]
return dag_id
def upgrade_action(action):
"""Converts the a DAG permission name from the old style to the new style."""
if action == 'can_dag_read':
return 'can_read'
return 'can_edit'
def downgrade_action(action):
"""Converts the a DAG permission name from the old style to the new style."""
if action == 'can_read':
return 'can_dag_read'
return 'can_dag_edit'
def convert_permissions(permissions, view_menus, convert_action, convert_dag_id):
"""Creates new empty role in DB"""
appbuilder = cached_app().appbuilder # pylint: disable=no-member
roles = appbuilder.sm.get_all_roles()
views_to_remove = set()
for permission_name in permissions: # pylint: disable=too-many-nested-blocks
for view_menu in view_menus:
view_name = view_menu.name
old_pvm = appbuilder.sm.find_permission_view_menu(permission_name, view_name)
if not old_pvm:
continue
views_to_remove.add(view_name)
new_permission_name = convert_action(permission_name)
new_pvm = appbuilder.sm.add_permission_view_menu(new_permission_name, convert_dag_id(view_name))
for role in roles:
if appbuilder.sm.exist_permission_on_roles(view_name, permission_name, [role.id]):
appbuilder.sm.add_permission_role(role, new_pvm)
appbuilder.sm.del_permission_role(role, old_pvm)
print(f"DELETING: {role.name} ----> {view_name}.{permission_name}")
appbuilder.sm.del_permission_view_menu(permission_name, view_name)
print(f"DELETING: perm_view ----> {view_name}.{permission_name}")
for view_name in views_to_remove:
if appbuilder.sm.find_view_menu(view_name):
appbuilder.sm.del_view_menu(view_name)
print(f"DELETING: view_menu ----> {view_name}")
if 'can_dag_read' in permissions:
for permission_name in permissions:
if appbuilder.sm.find_permission(permission_name):
appbuilder.sm.del_permission(permission_name)
print(f"DELETING: permission ----> {permission_name}")
|
import warnings

import torch
import random
from torch.distributions.multivariate_normal import MultivariateNormal
from utils import cov
class EstimationMaximisation(object):
'''Gaussian Estimation Maximisation Algorithm
The following models the Gaussian Estimation Maximisation algorithm.
Given a set of points, we do a clustering if them into Gaussian models,
given that the number of clusters is selected before.
Args:
points: List of Pytorch tensors on which the algorithm
is to be run
no_of_iterations: The number of iterations the algorithm
is to be run on.
no_of_gaussians: The number of clusters the data is to be
divided into
        parametric: whether the clustering is soft or hard,
            where soft clustering assigns a weight to a point
            for each cluster it may belong to, while hard
            clustering assigns each point to a single cluster.
'''
def __init__(self, points, no_of_iterations, no_of_gaussians, parametric="Yes"):
'''Initialise variables for EM Model
        Sets the means, weights, gamma and the covariance
matrices to None.
'''
self.points = points #list of pytorch tensors
self.no_of_points = len(self.points)
self.no_of_iterations = no_of_iterations
self.parametric = parametric
self.no_of_gaussians = no_of_gaussians
self.dimension = points[0].shape[0]
self.means = None
self.cov_matrices = None
self.weights = None
self.gamma = None
        if parametric == 'No':
            warnings.warn('Non-parametric version is not implemented yet; shifting to the parametric version...')
            self.parametric = 'Yes'
def initilize_means(self):
'''Initialises Means for the Gaussians
        Initialises a random Pytorch Tensor of the dimensions
equal to that of the points dimensions, for
each Gaussian.
Returns:
list of mean Tensors
'''
means = list()
        for i in range(self.no_of_gaussians):
means.append(torch.rand(self.dimension, ))
self.means = means
return means
def initilize_cov_matrices(self):
'''Initialise Covariance Matrices for the Gaussians
        Initialise covariance matrices for the Gaussians
        by first initialising a set of points and then
        computing the covariance matrix for it.
Returns:
List of Tensor Covariance matrices
'''
cov_matrices = list()
        for i in range(self.no_of_gaussians):
cov_matrices.append(cov(torch.rand(self.dimension+100, self.dimension)))
self.cov_matrices = cov_matrices
assert(cov_matrices[0].shape[0] == self.dimension)
assert(cov_matrices[0].shape[1] == self.dimension)
return cov_matrices
def initialize_parameters(self):
'''Initialise weights for the Gaussians
Initialise weights for the Gaussians, given
the Estimation Maximisation Algorithm
is "parametric"
Returns:
List of Tensor weights
'''
params = list()
        for i in range(self.no_of_gaussians):
            params.append(torch.rand(1))
        l = sum(params)
        for i in range(self.no_of_gaussians):
            params[i] = params[i]/l
# assert(sum(params) == 1.0)
self.weights = params
return params
def initialize_gamma(self):
'''Initialise Gamma
        Gamma is a matrix of size n x k (points x Gaussians) and
        can be defined as the probability of each point belonging
        to each Gaussian
Returns:
Gamma Tensor matrix
'''
k = torch.rand(self.no_of_points, self.no_of_gaussians)
sums = torch.sum(k, dim=0)
        for i in range(self.no_of_gaussians):
k[:, i] = k[:, i]/sums[i]
self.gamma = k
return k
def update_parametric(self):
        '''Estimation Maximisation update function
        First the E-step is done - update the gamma
        matrix by finding the probabilities of each
        point under each Gaussian. Using this partition, the
        centers, weights and covariance matrices of the Gaussians
        are updated in the M-step.
        '''
gamma = torch.ones((self.no_of_points, self.no_of_gaussians))
# Finding the gamma matrix for the iteration, go through
# all points for all Gaussians
        for i in range(self.no_of_points):
            l = 0.0
            for j in range(self.no_of_gaussians):
                # Define the multivariate normal for Gaussian j
                normal_function = MultivariateNormal(self.means[j], self.cov_matrices[j])
                # Find the probability for the point under Gaussian j
                prob = torch.exp(normal_function.log_prob(self.points[i]))*self.weights[j]
                # Update the gamma matrix
                gamma[i, j] = prob
                l += prob
            for j in range(self.no_of_gaussians):
                # Normalise the gamma row over the Gaussians
                gamma[i, j] = gamma[i, j]/l
        self.gamma = gamma
        # row wise sum of gamma matrix
        s = torch.sum(gamma, dim=0)
        for i in range(self.no_of_gaussians):
            # updating weights using the weight calculation formula
            self.weights[i] = s[i]/self.no_of_points
            # define a mean tensor
            mean = torch.zeros((self.dimension, ))
            for j in range(self.no_of_points):
                # Find the weighted mean of the points ("parametric" EM)
                mean += self.points[j]*gamma[j, i]
            self.means[i] = mean/s[i]  # updating means
        for i in range(self.no_of_gaussians):
            # update covariance matrices
            g = torch.stack(self.points).view(self.no_of_points, self.dimension) - self.means[i].view(1, self.dimension)
            self.cov_matrices[i] = torch.mm(g.t(), self.gamma[:, i].view(-1, 1)*g)  # updating covariance matrices
# print self.means
def update_inverse_parametric(self):
        '''Estimation Maximisation update function, inverted order
        In order to tackle the low probability of points
        in each Gaussian, the M-step is done first: gamma is taken
        at random and the means, covariance matrices and
        weights are updated, then gamma is updated.
        The first update method is recommended; however, if it
        does not converge, this method should help make the
        algorithm converge.
        '''
s = torch.sum(self.gamma, dim=0)
        # Initialise weights, means and covariance matrices
self.weights = list()
self.means = list()
self.cov_matrices = list()
        for i in range(self.no_of_gaussians):
            self.weights.append(0)
            self.means.append(0)
            self.cov_matrices.append(0)
        # update the means and weights of the Gaussians
        for i in range(self.no_of_gaussians):
            self.weights[i] = s[i]/self.no_of_points
            mean = torch.zeros((self.dimension, ))
            for j in range(self.no_of_points):
                mean += self.points[j] * self.gamma[j, i]
            self.means[i] = mean/s[i]
        # update the covariance matrices for the Gaussians
        for i in range(self.no_of_gaussians):
            g = torch.stack(self.points).view(self.no_of_points, self.dimension) - self.means[i].view(1, self.dimension)
            self.cov_matrices[i] = torch.mm(g.t(), self.gamma[:, i].view(self.gamma.shape[0], 1)*g)
        gamma = torch.ones((self.no_of_points, self.no_of_gaussians))
        # Using means, covariance matrices and weights, update gamma
        for i in range(self.no_of_points):
            l = 0.0
            for j in range(self.no_of_gaussians):
                # Same probability computation as in update_parametric
                normal_function = MultivariateNormal(self.means[j], self.cov_matrices[j])
                prob = torch.exp(normal_function.log_prob(self.points[i]))*self.weights[j]
                gamma[i, j] = prob
                l += prob
            for j in range(self.no_of_gaussians):
                gamma[i, j] = gamma[i, j]/l
        self.gamma = gamma
    def update_NonParametric(self):
        raise NotImplementedError
    def update_inverse_NonParametric(self):
        raise NotImplementedError
def iterate(self):
        for i in range(self.no_of_iterations):
            if i == 0:
                if self.means is None:
                    self.initilize_means()
                if self.cov_matrices is None:
                    self.initilize_cov_matrices()
                if self.weights is None:
                    self.initialize_parameters()
print('iteration - '+str(i+1))
if self.parametric == 'Yes':
self.update_parametric()
else:
self.update_NonParametric()
print('')
print('iteration complete')
def iterate_inverse(self):
        for i in range(self.no_of_iterations):
            if i == 0:
                if self.gamma is None:
                    self.initialize_gamma()
print('iteration - '+str(i+1))
if self.parametric == 'Yes':
self.update_inverse_parametric()
else:
self.update_inverse_NonParametric()
print('')
print('#####Iterations complete#######')
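
# Minimal usage sketch (not part of the original module); it assumes `utils.cov` is importable
# and that a small cloud of 2-D points is enough to exercise the parametric EM loop.
if __name__ == '__main__':
    torch.manual_seed(0)
    demo_points = [torch.randn(2) + (0.0 if k % 2 else 4.0) for k in range(60)]
    em = EstimationMaximisation(demo_points, no_of_iterations=5, no_of_gaussians=2)
    em.iterate()
    print('means:', em.means)
    print('weights:', em.weights)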
|
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint, DispatchFunction
from starlette.requests import Request
from starlette.responses import Response
from starlette.types import ASGIApp
class BrowserCachingMiddleware(BaseHTTPMiddleware):
"""
Enabling the caching of assets by the browser can defeat the auto-fetching of resources
to cache them in the user's computer.
    In the end, we rely solely on py-youwol's own mechanism to handle caching of resources.
"""
cache = {}
def __init__(self, app: ASGIApp,
dispatch: DispatchFunction = None,
**_) -> None:
super().__init__(app, dispatch)
async def dispatch(
self, request: Request, call_next: RequestResponseEndpoint
) -> Response:
response = await call_next(request)
if '/api/assets-gateway/raw/' in request.url.path and request.method == "GET":
response.headers["Cache-Control"] = "no-cache, no-store"
return response
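
# Illustrative wiring sketch (an assumption, not from this module): attach the middleware to a
# Starlette application; FastAPI apps expose the same add_middleware interface.
if __name__ == "__main__":
    from starlette.applications import Starlette

    demo_app = Starlette()
    demo_app.add_middleware(BrowserCachingMiddleware)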
|
from django.urls import include, path
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(r"invoices", views.InvoicesViewSet)
urlpatterns = [
path("", include(router.urls)),
path(
"api-auth/", include("rest_framework.urls", namespace="rest_framework")
),
]
|
"""Add dependency matcher pipe to the pipeline."""
from array import array
from collections import defaultdict, namedtuple
from typing import Union
import spacy
from spacy.language import Language
from spacy.matcher import DependencyMatcher
from spacy.tokens import Span, Token
from traiter.util import as_list, sign
DEPENDENCY = 'traiter.dependency.v1'
LINK_NEAREST = 'traiter.link_nearest.v1'
NEVER = 9999
PENALTY = {
',': 2,
';': 5,
'.': NEVER,
}
DependencyPatterns = Union[dict, list[dict]]
Link = namedtuple('Link', 'trait start_char end_char')
def add_extensions():
"""Add extensions for spans and tokens used by entity linker pipes."""
if not Span.has_extension('links'):
Span.set_extension('links', default=[])
Token.set_extension('links', default=[])
@Language.factory(DEPENDENCY)
class Dependency:
"""Matchers that walk the parse tree of a sentence or doc."""
def __init__(self, nlp: Language, name: str, patterns: DependencyPatterns):
self.nlp = nlp
self.name = name
self.matcher = DependencyMatcher(nlp.vocab)
patterns = as_list(patterns)
self.dispatch = self.build_dispatch_table(patterns)
self.build_matchers(patterns)
add_extensions()
def build_matchers(self, patterns: DependencyPatterns):
"""Setup matchers."""
for matcher in patterns:
label = matcher['label']
self.matcher.add(label, matcher['patterns'])
def build_dispatch_table(self, patterns: DependencyPatterns):
"""Setup after match actions."""
dispatch = {}
for matcher in patterns:
label = matcher['label']
label = self.nlp.vocab.strings[label]
if on_match := matcher.get('on_match'):
func = spacy.registry.misc.get(on_match['func'])
dispatch[label] = (func, on_match.get('kwargs', {}))
return dispatch
def __call__(self, doc):
matches = self.matcher(doc)
if not self.dispatch:
return doc
matches_by_id = defaultdict(list)
for match in matches:
matches_by_id[match[0]].append(match)
for match_id, match_list in matches_by_id.items():
if after := self.dispatch.get(match_id):
after[0](doc, match_list, **after[1])
return doc
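
# Illustrative example (an assumption, not taken from this module) of the pattern structure the
# Dependency factory consumes: a label, spaCy DependencyMatcher patterns, and an optional
# on_match entry naming a registered function such as LINK_NEAREST with its kwargs.
EXAMPLE_PATTERNS = [
    {
        'label': 'body_part_linker',
        'patterns': [[
            {'RIGHT_ID': 'anchor', 'RIGHT_ATTRS': {'ENT_TYPE': 'body_part'}},
            {'LEFT_ID': 'anchor', 'REL_OP': '>', 'RIGHT_ID': 'size',
             'RIGHT_ATTRS': {'ENT_TYPE': 'size'}},
        ]],
        'on_match': {'func': LINK_NEAREST, 'kwargs': {'anchor': 'body_part'}},
    },
]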
@spacy.registry.misc(LINK_NEAREST)
def link_nearest(doc, matches, **kwargs):
"""Link traits."""
anchor = kwargs.get('anchor')
e_matches = tokens2entities(doc, matches)
# Group indices by anchor index (instance) and the other entity's label
groups = defaultdict(list)
for _, m_idx in e_matches:
anchor_i = [i for i in m_idx if doc.ents[i].label_ == anchor][0]
for entity_i in [i for i in m_idx if doc.ents[i].label_ != anchor]:
e_label = doc.ents[entity_i].label_
dist = weighted_distance(anchor_i, entity_i, doc)
groups[(anchor_i, e_label)].append((dist, entity_i))
# Then sort the groups by weighted distance & grab the closest entity
groups = {k: sorted(v)[0][1] for k, v in groups.items()}
# Update the anchor entity with data from the closest entity
for (anchor_i, e_label), nearest in groups.items():
n_ent = doc.ents[nearest]
doc.ents[anchor_i]._.data[e_label] = n_ent._.data[e_label]
doc.ents[anchor_i]._.links.append(Link(
e_label, n_ent.start_char, n_ent.end_char))
def weighted_distance(anchor_i, entity_i, doc):
"""Calculate the token offset from the anchor to the entity, penalize punct."""
lo, hi = (entity_i, anchor_i) if entity_i < anchor_i else (anchor_i, entity_i)
lo, hi = doc.ents[lo][-1].i, doc.ents[hi][0].i
dist = hi - lo
penalty = sum(PENALTY.get(doc[i].text, 0) for i in range(lo + 1, hi))
return dist + penalty, sign(anchor_i - entity_i)
def tokens2entities(doc, matches):
"""Map tokens in the matches to entities.
The dependency tree is built by a neural net before running the linker rules.
The dependency tree links tokens, not spans/entities to tokens. Therefore a
tree arc may point to any token in an entity/span and many arcs may point to
the same entity. We want to add data to the entity not to the tokens. So we
need to map tokens in the matches to entities. This function turns match token
indices into entity indices.
"""
token2ent = array('i', [-1] * len(doc))
# This creates an array of tokens indices and the entity indices they map to
for e, ent in enumerate(doc.ents):
token2ent[ent.start:ent.end] = array('i', [e] * len(ent))
# Map the matched tokens to entities, remove duplicates and remove non-entities
mapped = {(m_id, tuple(e for i in t_idx if (e := token2ent[i]) >= 0))
for m_id, t_idx in matches}
return sorted(mapped)
# ####################################################################################
# TODO: Remove the code below when other traiters migrate to use link_nearest()
NEAREST_ANCHOR = 'traiter.nearest_anchor.v1'
@spacy.registry.misc(NEAREST_ANCHOR)
def nearest_anchor(doc, matches, **kwargs):
"""Link traits to the nearest anchor trait.
In this case the "superior" trait (body_part, sex, etc.) is the anchor.
This uses a simple algorithm for linking traits.
    1) Create a set of matched entities from matches of tokens.
    2) Find all entities.
    3) Link entities to the closest anchor entity. There are different distance metrics.
"""
anchor, anchor_idx, ent_idx = map_tokens2entities(doc, matches, kwargs)
# Find the closest anchor entity to the target entity
for e in ent_idx:
if not doc.ents[e]._.data.get(anchor):
nearest = [(token_penalty(a, e, doc), a) for a in anchor_idx]
nearest = [n for n in nearest if n[0][0] < NEVER]
if nearest:
nearest_idx = sorted(nearest)[0][1]
                doc.ents[e]._.data[anchor] = doc.ents[nearest_idx]._.data[anchor]
nearest = doc.ents[nearest_idx]
doc.ents[e]._.links.append(Link(
anchor, nearest.start_char, nearest.end_char))
def map_tokens2entities(doc, matches, kwargs):
"""Map tokens in the matches to entities.
The dependency tree is built by a neural net before the linker rules are run.
The dependency tree links tokens, not spans/entities to tokens. Therefore a
tree arc may point to any token in an entity/span and many arcs may point to
the same entity. We want to add data to the entity not to the tokens. So we
need to map tokens in the matches to entities.
"""
# print(kwargs)
# print(matches)
anchor = kwargs.get('anchor')
exclude = kwargs.get('exclude')
anchor_2_ent = array('i', [-1] * len(doc))
token_2_ent = array('i', [-1] * len(doc))
for e, ent in enumerate(doc.ents):
if ent.label_ == exclude:
continue
elif ent.label_ == anchor:
anchor_2_ent[ent.start:ent.end] = array('i', [e] * len(ent))
else:
token_2_ent[ent.start:ent.end] = array('i', [e] * len(ent))
# From the matches with token indexes get the entity index
anchor_idx, ent_idx = set(), set()
for _, token_ids in matches:
ent_idx |= {e for t in token_ids if (e := token_2_ent[t]) > -1}
anchor_idx |= {e for t in token_ids if (e := anchor_2_ent[t]) > -1}
return anchor, anchor_idx, ent_idx
def token_penalty(anchor_i, entity_i, doc):
"""Calculate the token offset from the anchor to the entity, penalize punct."""
lo, hi = (entity_i, anchor_i) if entity_i < anchor_i else (anchor_i, entity_i)
lo, hi = doc.ents[lo][-1].i, doc.ents[hi][0].i
dist = hi - lo
penalty = sum(PENALTY.get(doc[i].text, 0) for i in range(lo + 1, hi))
return dist + penalty, sign(anchor_i - entity_i)
def entity_distance(anchor_i, entity_i, _):
"""Calculate the distance in token offset from the anchor to the entity."""
dist = anchor_i - entity_i
return abs(dist), sign(dist)
def token_distance(anchor_i, entity_i, doc):
"""Calculate token offset from the anchor to the entity."""
hi, lo = (anchor_i, entity_i) if anchor_i > entity_i else (entity_i, anchor_i)
dist = doc.ents[hi][0].i - doc.ents[lo][-1].i
return dist, sign(anchor_i - entity_i), entity_i
|
from .core import contents, where
__all__ = ["contents", "where"]
__version__ = "2022.05.18.1"
|
import json
if __name__ == '__main__':
students_list = []
while True:
command = input("Add, list, exit, load <>, save <>: ").lower()
if command == 'exit':
break
elif command == 'add':
            last_name = input('Your last name: ')
class_name = input('Class ')
grades = []
n = 0
while n < 5:
grades.append(int(input('Your grades ')))
n += 1
student = {
'Last name': last_name,
'Class': class_name,
'Grades': grades,
}
students_list.append(student)
if len(students_list) > 1:
students_list.sort(key=lambda item: item.get('Last name', ''))
print(students_list)
elif command == 'list':
count = 0
for student in students_list:
for i in student['Grades']:
if i == 2:
count += 1
print('{:>4}: {}'.format('This student got an F', student.get('Last name', '')))
elif command.startswith('load '):
parts = command.split(' ', maxsplit=1)
with open(parts[1], 'r') as f:
data = json.load(f)
print(data)
elif command.startswith('save '):
parts = command.split(' ', maxsplit=1)
with open(parts[1], 'w') as f:
json.dump(students_list, f)
|
def main():
info('Waiting for minibone access')
wait('FelixMiniboneFlag', 0)
info('Minibone free')
acquire('JanMiniboneFlag', clear=True)
info('Minibone acquired')
wait('MinibonePumpTimeFlag', 0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Setup for filament_watch'''
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
with open('README.md') as readme_file:
README = readme_file.read()
setup(
name="filament_watch",
version="1.0",
author="Richard L. Lynch",
author_email="[email protected]",
description=("Monitors filament motion and pauses/cancels OctoPrint if the filament stops feeding."),
long_description=README,
license="MIT",
keywords="3d_printer 3d printer filament watch monitor jam safety",
url="https://github.com/rllynch/filament_watch",
packages=['filament_watch'],
include_package_data=True,
entry_points={
'console_scripts': ['filament_watch = filament_watch.filament_watch:main'],
},
install_requires=[
'requests',
'pyserial',
'cherrypy>=3.1',
'pyyaml'
]
)
|
#!/usr/bin/env python
'''
Created on Feb 21, 2020
@author: gsnyder
Retrieve BOM computed notifications
Note: The user account you run this under will determine the scope (i.e. projects, versions) of
the notifications that can be received.
'''
# TODO: Use startDate filter on /api/notifications to limit the notifications retrieved when using -n
import argparse
from datetime import datetime
import json
import logging
import pytz
import sys
import timestring
from terminaltables import AsciiTable
from blackduck.HubRestApi import HubInstance, object_id
parser = argparse.ArgumentParser("Retreive BOM computed notifications")
parser.add_argument("project", help="The name of the project")
parser.add_argument("version", help="The name of the version")
parser.add_argument("-n", "--newer_than",
default=None,
type=str,
help="Set this option to see all vulnerability notifications published since the given date/time.")
parser.add_argument("-d", "--save_dt",
action='store_true',
help="If set, the date/time will be saved to a file named '.last_run' in the current directory which can be used later with the -n option to see vulnerabilities published since the last run.")
parser.add_argument("-l", "--limit", default=100000, help="To change the limit on the number of notifications to retrieve")
parser.add_argument("-s", "--system", action='store_true', help="Pull notifications from the system as opposed to the user's account")
args = parser.parse_args()
if args.newer_than:
newer_than = timestring.Date(args.newer_than).date
# adjust to UTC so the comparison is normalized
newer_than = newer_than.astimezone(pytz.utc)
else:
newer_than = None
if args.save_dt:
with open(".last_run", "w") as f:
f.write(datetime.now().isoformat())
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', stream=sys.stderr, level=logging.DEBUG)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
hub = HubInstance()
current_user = hub.get_current_user()
version = hub.get_project_version_by_name(args.project, args.version)
version_url = version['_meta']['href']
# Construct the URL to either pull from the system or user account scope,
# and then narrow the search to only include BOM computed notifications
if args.system:
notifications_url = "{}/api/notifications".format(hub.get_urlbase())
else:
notifications_url = hub.get_link(current_user, "notifications")
notifications_url = "{}?limit={}&filter=notificationType:VERSION_BOM_CODE_LOCATION_BOM_COMPUTED".format(
notifications_url, args.limit)
if newer_than:
# add to the URL to include startDate
start_date = newer_than.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
notifications_url += "&startDate=" + start_date
logging.debug(f"Retrieving BOM computed notifications using {notifications_url}")
bom_computed_notifications = hub.execute_get(notifications_url).json().get('items', [])
# filter to include only those notification pertaining to the specified project, version
bom_computed_notifications = list(
filter(lambda n: version_url == n['content']['projectVersion'], bom_computed_notifications))
print(json.dumps(bom_computed_notifications))
|
#!/usr/bin/env python
"""Exercise answer 8.2 for chapter 8."""
def ui():
"""The program of UI."""
while True:
tip = """
        Input three numbers, using ',' to separate them.
These are from,to,and increment. We'll generate
a sequence for you. Using 'q' to quit this:
"""
inp = raw_input(tip).strip()
if inp == 'q':
break
else:
nums = inp.split(',')
if len(nums) != 3:
print "Numbers not right."
continue
seq = xrange(int(nums[0]), int(nums[1]), int(nums[2]))
for x in seq:
print x,
if __name__ == '__main__':
ui()
|
#!/usr/bin/env python
import rospy
import time
import cv2
import sys
import numpy as np
import message_filters
from sensor_msgs.msg import Image, CameraInfo
from std_msgs.msg import Header
from cv_bridge import CvBridge, CvBridgeError
from openpose_ros_msgs.msg import OpenPoseHumanList
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point
from human_pose_multiview.msg import CustomMarkerArray
CAMERA = str(sys.argv[1]) # Read camera from parameters
# Subscribers
HUMAN_POSE_TOPIC = "/human_pose_estimation/human_list/cam_" + CAMERA
COLOR_IMAGE_TOPIC = "/cam_" + CAMERA + "/color/image_raw"
DEPTH_IMAGE_TOPIC = "/cam_" + CAMERA + "/depth_registered/image_rect"
CAMERA_PARAMETERS = "/cam_" + CAMERA + "/color/camera_info"
# Publisher
HUMAN_POSE_DRAWER_TOPIC = "/human_pose/pose3D/cam_" + CAMERA
class PoseGenerator:
def __init__(self, pub, K):
self.bridge = CvBridge()
#self.pose_sub = rospy.Subscriber(HUMAN_POSE_TOPIC, MarkerArray, self.pose_callback, queue_size=10)
#self.image_sub = rospy.Subscriber(COLOR_IMAGE_TOPIC, Image, self.image_callback, queue_size=10)
self.pose_sub = message_filters.Subscriber(HUMAN_POSE_TOPIC, OpenPoseHumanList)
self.image_sub = message_filters.Subscriber(COLOR_IMAGE_TOPIC, Image)
self.depth_sub = message_filters.Subscriber(DEPTH_IMAGE_TOPIC, Image)
self.sync = message_filters.ApproximateTimeSynchronizer([self.pose_sub, self.image_sub, self.depth_sub], 10, 0.1)
self.sync.registerCallback(self.callback)
self.pub = pub
self.K = K
self.fx = K[0]
self.fy = K[4]
self.cx = K[2]
self.cy = K[5]
self.depth_env_delta = 2
self.min_depth = 1000
self.max_depth = 2500
self.threshold = 0.25
self.g_depth_scale = 1000.0
def callback(self, data_pose, data_image, data_depth):
#print("message \t - \t", data_image.header.seq, data_pose.header.seq)
cv_color = self.bridge.imgmsg_to_cv2(data_image, "rgb8")
cv_depth = self.bridge.imgmsg_to_cv2(data_depth, "16UC1")
ma = CustomMarkerArray()
h = Header(frame_id=data_image.header.frame_id, seq=data_image.header.seq)
ma.header = h
for kp_idx, point in enumerate(data_pose.human_list[0].body_key_points_with_prob):
if point.prob >= self.threshold:
u, v = int(round(point.x)), int(round(point.y))
depth_candidates = []
for uu in range(u-self.depth_env_delta, u+self.depth_env_delta):
for vv in range(v-self.depth_env_delta, v+self.depth_env_delta):
if uu<0 or vv<0 or uu>=cv_color.shape[1] or vv>=cv_color.shape[0]:
break
if cv_depth[vv,uu] > self.min_depth and cv_depth[vv,uu] < self.max_depth:
depth_candidates.append(cv_depth[vv,uu])
if not depth_candidates:
break
depth_candidates.sort()
#z = 0
#if kp_idx==11 or kp_idx==12:
#z = depth_candidates[-1] / self.g_depth_scale
#else:
                z = depth_candidates[len(depth_candidates) // 2] / self.g_depth_scale
x = (u - self.cx) * z / self.fx
y = (v - self.cy) * z / self.fy
marker = Marker()
marker.header = h
marker.pose.position.x = x
marker.pose.position.y = y
marker.pose.position.z = z
marker.pose.orientation.w = 1
marker.pose.orientation.x = 0
marker.pose.orientation.y = 0
marker.pose.orientation.z = 0
marker.scale.x = 0.05
marker.scale.y = 0.05
marker.scale.z = 0.05
marker.ns = "joints"
marker.id = kp_idx
marker.color.r = 1.0
marker.color.a = 1.0
ma.confidences.append(point.prob)
ma.markers.append(marker)
self.pub.publish(ma)
class CameraCalibSubscriber():
def __init__(self):
self.subscriber = rospy.Subscriber(CAMERA_PARAMETERS,
CameraInfo, self.camera_callback, queue_size=1)
self.stop = False
self.K = None
self.camera_frame_id = None
self.camera_seq = None
def camera_callback(self, data):
self.K = data.K
self.camera_frame_id = data.header.frame_id
self.camera_seq = data.header.seq
self.stop = True
def wait_for_calib(self):
try:
while not self.stop:
time.sleep(1.0)
except KeyboardInterrupt:
print("Shutting down")
return self.K, self.camera_frame_id, self.camera_seq
if __name__ == '__main__':
rospy.init_node('pose_drawer_3d', anonymous=True)
#read calib from ros topic
camera_calib = CameraCalibSubscriber()
K, camera_frame_id, camera_seq = camera_calib.wait_for_calib()
pub = rospy.Publisher(HUMAN_POSE_DRAWER_TOPIC, CustomMarkerArray, queue_size=10)
human_pose_drawer = PoseGenerator(pub, K)
rospy.spin()
|
import json
import unittest
from unittest.mock import patch
import requests_mock
from frontstage import app
from frontstage.common.eq_payload import EqPayload
from frontstage.controllers import collection_exercise_controller
from frontstage.exceptions.exceptions import ApiError, InvalidEqPayLoad
from tests.integration.mocked_services import (
business_party,
case,
categories,
collection_exercise,
collection_exercise_events,
collection_instrument_eq,
collection_instrument_seft,
completed_case,
respondent_party,
survey,
survey_eq,
url_banner_api,
url_get_business_party,
url_get_case,
url_get_case_categories,
url_get_ci,
url_get_collection_exercise,
url_get_collection_exercise_events,
url_get_respondent_party,
url_get_survey,
url_get_survey_by_short_name_eq,
url_post_case_event_uuid,
)
encoded_jwt_token = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJwYXJ0eV9pZCI6ImY5NTZlOGFlLTZ"
"lMGYtNDQxNC1iMGNmLWEwN2MxYWEzZTM3YiIsImV4cGlyZXNfYXQiOiIxMDAxMjM0NTY"
"3ODkiLCJyb2xlIjoicmVzcG9uZGVudCIsInVucmVhZF9tZXNzYWdlX2NvdW50Ijp7InZh"
"bHVlIjowLCJyZWZyZXNoX2luIjozMjUyNzY3NDAwMC4wfSwiZXhwaXJlc19pbiI6MzI1M"
"jc2NzQwMDAuMH0.m94R50EPIKTJmE6gf6PvCmCq8ZpYwwV8PHSqsJh5fnI"
)
class TestGenerateEqURL(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.app.set_cookie("localhost", "authorization", "session_key")
self.headers = {
"Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoicmluZ3JhbUBub3d3aGVyZS5jb20iLCJ1c2VyX3Njb3BlcyI6WyJjaS5yZWFkIiwiY2kud3JpdGUiXX0.se0BJtNksVtk14aqjp7SvnXzRbEKoqXb8Q5U9VVdy54" # NOQA
}
self.patcher = patch("redis.StrictRedis.get", return_value=encoded_jwt_token)
self.patcher.start()
def tearDown(self):
self.patcher.stop()
@requests_mock.mock()
def test_generate_eq_url(self, mock_request):
# Given all external services are mocked and we have an EQ collection instrument
mock_request.get(url_get_case, json=case)
mock_request.get(url_get_collection_exercise, json=collection_exercise)
mock_request.get(url_get_collection_exercise_events, json=collection_exercise_events)
mock_request.get(url_get_business_party, json=business_party)
mock_request.get(url_get_survey_by_short_name_eq, json=survey_eq)
mock_request.get(url_get_ci, json=collection_instrument_eq)
mock_request.get(url_get_case_categories, json=categories)
mock_request.post(url_post_case_event_uuid, status_code=201)
mock_request.get(url_get_respondent_party, status_code=200, json=respondent_party)
mock_request.get(url_banner_api, status_code=404)
# When the generate-eq-url is called
response = self.app.get(
f"/surveys/access-survey?case_id={case['id']}&business_party_id={business_party['id']}"
f"&survey_short_name={survey_eq['shortName']}&ci_type=EQ",
headers=self.headers,
)
# An eq url is generated
self.assertEqual(response.status_code, 302)
self.assertIn("https://eq-test/session?token=", response.location)
@requests_mock.mock()
@patch("frontstage.controllers.party_controller.is_respondent_enrolled")
def test_generate_eq_url_complete_case(self, mock_request, _):
# Given a mocked case has its caseGroup status as complete
mock_request.get(
f"{app.config['COLLECTION_EXERCISE_URL']}" f"/collectionexercises/14fb3e68-4dca-46db-bf49-04b84e07e77c",
json=collection_exercise,
)
mock_request.get(f"{app.config['CASE_URL']}/cases/8cdc01f9-656a-4715-a148-ffed0dbe1b04", json=completed_case)
mock_request.get(url_get_collection_exercise_events, json=collection_exercise_events)
mock_request.get(url_get_business_party, json=business_party)
mock_request.get(url_get_survey_by_short_name_eq, json=survey_eq)
mock_request.get(url_get_ci, json=collection_instrument_eq)
mock_request.get(url_get_case_categories, json=categories)
mock_request.post(url_post_case_event_uuid, status_code=201)
mock_request.get(url_get_respondent_party, status_code=200, json=respondent_party)
mock_request.get(url_banner_api, status_code=404)
# When the generate-eq-url is called
response = self.app.get(
f"/surveys/access-survey?case_id={completed_case['id']}&business_party_id={business_party['id']}"
f"&survey_short_name={survey_eq['shortName']}&ci_type=EQ",
headers=self.headers,
follow_redirects=True,
)
# A 403 is returned
self.assertEqual(response.status_code, 403)
@requests_mock.mock()
def test_generate_eq_url_seft(self, mock_request):
        # Given all external services are mocked and we have a SEFT collection instrument
mock_request.get(url_get_collection_exercise, json=collection_exercise)
mock_request.get(url_get_collection_exercise_events, json=collection_exercise_events)
mock_request.get(url_get_business_party, json=business_party)
mock_request.get(url_get_survey, json=survey)
mock_request.get(url_get_ci, json=collection_instrument_seft)
mock_request.get(url_banner_api, status_code=404)
# When create_payload is called
# Then an InvalidEqPayLoad is raised
with app.app_context():
with self.assertRaises(InvalidEqPayLoad) as e:
EqPayload().create_payload(
case, party_id=respondent_party["id"], business_party_id=business_party["id"], survey=survey_eq
)
self.assertEqual(
e.exception.message, "Collection instrument 68ad4018-2ddd-4894-89e7-33f0135887a2 type is not EQ"
)
@requests_mock.mock()
def test_generate_eq_url_no_eq_id(self, mock_request):
# Given all external services are mocked and we have an EQ collection instrument without an EQ ID
with open("tests/test_data/collection_instrument/collection_instrument_eq_no_eq_id.json") as json_data:
collection_instrument_eq_no_eq_id = json.load(json_data)
mock_request.get(url_get_ci, json=collection_instrument_eq_no_eq_id)
mock_request.get(url_banner_api, status_code=404)
# When create_payload is called
# Then an InvalidEqPayLoad is raised
with app.app_context():
with self.assertRaises(InvalidEqPayLoad) as e:
EqPayload().create_payload(
case, party_id=respondent_party["id"], business_party_id=business_party["id"], survey=survey_eq
)
self.assertEqual(
e.exception.message,
"Collection instrument 68ad4018-2ddd-4894-89e7-33f0135887a2 " "classifiers are incorrect or missing",
)
@requests_mock.mock()
def test_generate_eq_url_no_form_type(self, mock_request):
# Given all external services are mocked and we have an EQ collection instrument without a Form_type
with open("tests/test_data/collection_instrument/collection_instrument_eq_no_form_type.json") as json_data:
collection_instrument_eq_no_form_type = json.load(json_data)
mock_request.get(url_get_ci, json=collection_instrument_eq_no_form_type)
mock_request.get(url_banner_api, status_code=404)
# When create_payload is called
# Then an InvalidEqPayLoad is raised
with app.app_context():
with self.assertRaises(InvalidEqPayLoad) as e:
EqPayload().create_payload(
case, party_id=respondent_party["id"], business_party_id=business_party["id"], survey=survey_eq
)
self.assertEqual(
e.exception.message,
"Collection instrument 68ad4018-2ddd-4894-89e7-33f0135887a2 " "classifiers are incorrect or missing",
)
@requests_mock.mock()
def test_access_collection_exercise_events_fail(self, mock_request):
# Given a failing collection exercise events service
mock_request.get(url_get_collection_exercise_events, status_code=500)
mock_request.get(url_banner_api, status_code=404)
# When get collection exercise events is called
# Then an ApiError is raised
with app.app_context():
with self.assertRaises(ApiError):
collection_exercise_controller.get_collection_exercise_events(collection_exercise["id"])
def test_generate_eq_url_incorrect_date_format(self):
# Given an invalid date
date = "invalid"
# When format_string_long_date_time_to_short_date is called
# Then an InvalidEqPayLoad is raised
with self.assertRaises(InvalidEqPayLoad) as e:
EqPayload()._format_string_long_date_time_to_short_date(date)
self.assertEqual(e.exception.message, "Unable to format invalid")
def test_generate_eq_url_iso8601_date_format(self):
# Given a valid date
date = "2007-01-25T12:00:00Z"
# When format_string_long_date_time_to_short_date is called
# Then the correct date is returned
result = EqPayload()._format_string_long_date_time_to_short_date(date)
self.assertEqual(result, "2007-01-25")
def test_iso8601_adjusts_to_local_time(self):
# Given a valid date in tz -1hr before midnight
date = "2007-01-25T23:59:59-0100"
# When format_date is called
result = EqPayload()._format_string_long_date_time_to_short_date(date)
# Then the date is localised to the next day
self.assertEqual(result, "2007-01-26")
def test_generate_eq_url_missing_mandatory_event_date(self):
# Given a mandatory event date does not exist
collex_events_dates = [
{
"id": "e82e7ec9-b14e-412c-813e-edfd2e03e773",
"collectionExerciseId": "8d926ae3-fb3c-4c25-9f0f-356ded7d1ac0",
"tag": "return_by",
"timestamp": "2018-03-27T01:00:00.000+01:00",
},
{
"id": "8a24731e-3d79-4f3c-b6eb-3b199f53694f",
"collectionExerciseId": "8d926ae3-fb3c-4c25-9f0f-356ded7d1ac0",
"tag": "reminder",
"timestamp": "2018-04-03T01:00:00.000+01:00",
},
]
# When find_event_date_by_tag is called with a search param
# Then an InvalidEqPayLoad is raised
with self.assertRaises(InvalidEqPayLoad) as e:
EqPayload()._find_event_date_by_tag("return by", collex_events_dates, "123", True)
self.assertEqual(e.exception.message, "Mandatory event not found for collection 123 for search param return by")
def test_generate_eq_url_non_mandatory_event_date_is_none(self):
# Given a non mandatory event date does not exist
collex_events_dates = []
# When find_event_date_by_tag is called with a search param
# Then a None response is returned and no exception is raised
response = EqPayload()._find_event_date_by_tag("employment", collex_events_dates, "123", False)
self.assertEqual(response, None)
def test_generate_eq_url_non_mandatory_event_date_is_returned(self):
# Given a non mandatory event date exists
collex_events_dates = [
{
"id": "e82e7ec9-b14e-412c-813e-edfd2e03e773",
"collectionExerciseId": "8d926ae3-fb3c-4c25-9f0f-356ded7d1ac0",
"tag": "return_by",
"timestamp": "2018-03-27T01:00:00.000+01:00",
},
{
"id": "8a24731e-3d79-4f3c-b6eb-3b199f53694f",
"collectionExerciseId": "8d926ae3-fb3c-4c25-9f0f-356ded7d1ac0",
"tag": "employment",
"timestamp": "2018-04-03T01:00:00.000+01:00",
},
]
# When find_event_date_by_tag is called with a search param
# Then the formatted date is returned
response = EqPayload()._find_event_date_by_tag("employment", collex_events_dates, "123", False)
self.assertEqual(response, "2018-04-03")
|
import requests.exceptions
import google.auth.exceptions
class HttpError(Exception):
"""Holds the message and code from cloud errors."""
def __init__(self, error_response=None):
if error_response:
self.message = error_response.get("message", "")
self.code = error_response.get("code", None)
else:
self.message = ""
self.code = None
# Call the base class constructor with the parameters it needs
super(HttpError, self).__init__(self.message)
class ChecksumError(Exception):
"""Raised when the md5 hash of the content does not match the header."""
pass
RETRIABLE_EXCEPTIONS = (
requests.exceptions.ChunkedEncodingError,
requests.exceptions.ConnectionError,
requests.exceptions.ReadTimeout,
requests.exceptions.Timeout,
requests.exceptions.ProxyError,
requests.exceptions.SSLError,
requests.exceptions.ContentDecodingError,
google.auth.exceptions.RefreshError,
ChecksumError,
)
def is_retriable(exception):
"""Returns True if this exception is retriable."""
errs = list(range(500, 505)) + [
# Request Timeout
408,
# Too Many Requests
429,
]
errs += [str(e) for e in errs]
if isinstance(exception, HttpError):
return exception.code in errs
return isinstance(exception, RETRIABLE_EXCEPTIONS)
class FileSender:
def __init__(self, consistency="none"):
self.consistency = consistency
if consistency == "size":
self.sent = 0
elif consistency == "md5":
from hashlib import md5
self.md5 = md5()
async def send(self, pre, f, post):
yield pre
chunk = f.read(64 * 1024)
while chunk:
yield chunk
if self.consistency == "size":
self.sent += len(chunk)
elif self.consistency == "md5":
self.md5.update(chunk)
chunk = f.read(64 * 1024)
yield post
def __len__(self):
return self.sent
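# Editor's usage sketch (not part of the original module): one way the
# is_retriable() predicate above might drive a retry loop. The `do_request`
# callable, retry count, and backoff values are illustrative assumptions.
def retry_request(do_request, retries=5, backoff=0.5):
    """Call ``do_request`` until it succeeds or a non-retriable error is raised."""
    import time

    for attempt in range(retries):
        try:
            return do_request()
        except Exception as exc:
            # Give up on the last attempt, or on anything we know is not transient.
            if attempt == retries - 1 or not is_retriable(exc):
                raise
            time.sleep(backoff * (2 ** attempt))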
|
"""Implementação de Pilha em Python."""
class Nodo:
"""Elemento de uma pilha, guardando um valor e referenciando outro
Nodo.
"""
def __init__(self, valor):
"""Constrói um nodo com o valor indicado.
Por padrão, não há um próximo elemento.
"""
self.valor = valor
self.debaixo = None
def __repr__(self) -> str:
"""Representa a estrutura de uma maneira amigável para imprimir no terminal."""
return f"<{self.valor}>"
class Pilha:
"""Estrutura de dados dinâmica composta de nodos empilhados um em cima do outro."""
def __init__(self):
"""Constrói uma pilha vazia."""
self._topo = None
@property
def topo(self) -> Nodo:
"""Retorna o nodo inicial da pilha, de forma que só possa ser lido."""
return self._topo
def adicionar(self, valor) -> Nodo:
"""Inclui um nodo no topo da pilha com o valor indicado.
O novo nodo aponta para o antigo "topo".
"""
novo_nodo = Nodo(valor)
novo_nodo.debaixo = self._topo
self._topo = novo_nodo
return novo_nodo
def consumir(self) -> Nodo:
"""Retira o nodo do topo da pilha e promove o debaixo."""
nodo_removido = self._topo
if self._topo is not None:
self._topo = nodo_removido.debaixo
return nodo_removido
def imprimir(self):
"""Imprime todos os nodos da pilha."""
texto = "<<< "
nodo = self._topo
while nodo:
texto += f"{nodo} |"
nodo = nodo.debaixo
return texto
def __repr__(self) -> str:
"""Representa a estrutura de uma maneira amigável para imprimir no terminal."""
conteudo = f"Topo: {self._topo} " if self._topo else "VAZIA"
return f"[{self.__class__.__name__} | {conteudo}]"
|
import topi
import tvm
import numpy as np
import torch
dim0 = 8
dim1 = 3
dim2 = 4
shape_size1 = [dim0, dim1]
shape_size2 = [dim0, dim2]
dtype = "float32"
A = tvm.te.placeholder(shape_size1, dtype=dtype, name="A")
B = tvm.te.placeholder(shape_size2, dtype=dtype, name="B")
C = topi.concatenate([A, B], axis=1)
dC = tvm.te.placeholder(C.shape, dtype=dtype, name="dC")
dA, dB = tvm.te.mygradient(C, [A, B], dC)
s = tvm.te.create_schedule([C.op, dA.op, dB.op])
print(tvm.lower(s, [A, B, dC, dA, dB], simple_mode=True))
func = tvm.build(s, [A, B, dC, dA, dB], target="llvm")
A_np = np.random.uniform(-10, 10, shape_size1).astype("float32")
B_np = np.random.uniform(-10, 10, shape_size2).astype("float32")
dC_np = np.ones([dim0, dim1+dim2]).astype("float32")
dA_np = np.zeros(shape_size1).astype("float32")
dB_np = np.zeros(shape_size2).astype("float32")
ctx = tvm.context("llvm", 0)
A_tvm = tvm.nd.array(A_np, ctx)
B_tvm = tvm.nd.array(B_np, ctx)
dC_tvm = tvm.nd.array(dC_np, ctx)
dA_tvm = tvm.nd.array(dA_np, ctx)
dB_tvm = tvm.nd.array(dB_np, ctx)
func(A_tvm, B_tvm, dC_tvm, dA_tvm, dB_tvm)
print("dA_tvm", dA_tvm)
# =======>
# compare the results with pytorch
A_torch = torch.tensor(A_np, requires_grad=True)
B_torch = torch.tensor(B_np, requires_grad=True)
C_torch = torch.cat([A_torch, B_torch], dim=1)
loss = C_torch.sum()
loss.backward()
print("Pytorch gradient:\n", A_torch.grad.numpy(), B_torch.grad.numpy())
tvm.testing.assert_allclose(dA_tvm.asnumpy(), A_torch.grad.numpy(), atol=1e-30, rtol=1e-30)
tvm.testing.assert_allclose(dB_tvm.asnumpy(), B_torch.grad.numpy(), atol=1e-30, rtol=1e-30)
print("Compare with PyTorch success!")
|
input = """
a(1,2,0).
a(1,3,0).
a(2,3,0).
b(X) :- a(X,_,_).
c(X) :- a(_,X,_).
"""
output = """
a(1,2,0).
a(1,3,0).
a(2,3,0).
b(X) :- a(X,_,_).
c(X) :- a(_,X,_).
"""
|
#!/usr/bin/env python3
from json import dump, load
from os.path import isfile
from subprocess import run
import sys
class colors:
BOLD = "\033[1m"
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def save(obj, path):
    # Serialise the translations to disk; the context manager guarantees the
    # file is flushed and closed.
    with open(path, "w") as f:
        dump(obj, f, sort_keys=True, indent=4)
def translate(translations, key, move):
temp_file = "/tmp/" + key
temp = open(temp_file, "w")
print("# Übersetzung oben eingeben.",
"# Ohne Eingabe wird die Übersetzung abgebrochen.",
"# ",
"# ================================================================================",
"# ID: %s" % key,
"# TITLE: %s" % move["name"],
"# CLASSES: %s" % ", ".join(move["classes"]) if "classes" in move else "All classes",
"# -- Zu Übersetzen ---------------------------------------------------------------",
"# %s" % move["description"].replace("\n", "\n# "),
"# --------------------------------------------------------------------------------",
"# ================================================================================",
sep="\n",
file=temp)
temp.flush()
temp.close()
proc = run(["$VISUAL %s" % temp_file], shell=True)
if proc.returncode == 0:
lines = []
temp = open(temp_file)
for line in temp.readlines():
if not line.startswith("#"):
line = line.strip()
print(line)
lines.append(line)
temp.close()
if len(lines) > 0 and lines[0] != "":
translations[key] = "\n".join(lines)
return True
return False
data_file = open("data.json", "r")
data = load(data_file)
translations = {}
if isfile("description_translations.json"):
in_file = open("description_translations.json", "r")
translations = load(in_file)
out_file = "description_translations.json"
# Translate basic moves
moves = data["basic_moves"]
for move in moves:
if not move["key"] in translations:
success = translate(translations, move["key"], move)
save(translations, out_file)
if not success:
sys.exit()
# Translate special moves
moves = data["special_moves"]
for move in moves:
if not move["key"] in translations:
success = translate(translations, move["key"], move)
save(translations, out_file)
if not success:
sys.exit()
save(translations, out_file)
# Reload translations to fix the duplication bug
translations = {}
if isfile("description_translations.json"):
in_file = open("description_translations.json", "r")
translations = load(in_file)
# Translate all remaining moves
moves = data["moves"]
for key, move in moves.items():
    if key not in translations:
success = translate(translations, key, move)
save(translations, out_file)
if not success:
sys.exit()
|
def sortedSquaredArray(array):
smallest = 0
largest = len(array) - 1
result = []
while smallest <= largest:
if abs(array[smallest]) > abs(array[largest]):
val = array[smallest]
smallest += 1
else:
val = array[largest]
largest -= 1
result.append(val*val)
return list(reversed(result))
print(sortedSquaredArray([1, 2, 3, 5, 6, 8, 9]))
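# Editor's note: an input with negative values, where the two-pointer scan is
# what keeps the squared output sorted.
print(sortedSquaredArray([-7, -3, 1, 4, 8]))  # [1, 9, 16, 49, 64]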
|
'''
Given the dictionary d = {"chaoqian":87, "caoxu":90, "caohuan":98, "wuhan":82, "zhijia":89}:
1) rank the dictionary entries by score
'''
d = {"chaoqian":87, "caoxu":90, "caohuan":98, "wuhan":82, "zhijia":89}
print(sorted(d.items(), key=lambda item: item[1]))
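# Editor's note (illustrative variation, not part of the original exercise):
# for a highest-to-lowest ranking, pass reverse=True.
print(sorted(d.items(), key=lambda item: item[1], reverse=True))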
|
import datetime
import pandas as pd
import plotly.graph_objects as go
from dateutil.relativedelta import relativedelta
from plotly.subplots import make_subplots
def get_date_list(start_day) -> list:
"""시작일부터 오늘까지를 1달 간격으로 나눈 것을 리스트로 만듦"""
date_list = []
start = datetime.datetime.strptime(start_day, "%Y%m%d")
today = datetime.datetime.now()
first_day = datetime.datetime(today.year, today.month, 1)
date_list.append(today.strftime("%Y%m%d"))
date = first_day
delta = relativedelta(months=1)
while date > start:
date_list.append(date.strftime("%Y%m%d"))
date = date - delta
date_list.append(start.strftime("%Y%m%d"))
return date_list
def make_ohlcv_graph(
df: pd.DataFrame, open="시가", high="고가", low="저가", close="종가", volume="거래량"
) -> None:
    # Build the hover text
ohlc_candle_hovertext = []
volume_bar_hovertext = []
for i in range(len(df[open])):
ohlc_candle_hovertext.append(
f"일자: {df.index[i].date()}<br>시가: {df[open][i]}<br>고가: {df[high][i]}<br>저가: {df[low][i]}<br>종가: {df[close][i]}"
)
volume_bar_hovertext.append(f"일자: {df.index[i].date()}<br>거래량: {df[volume][i]}")
    # Create the OHLC candlestick chart
ohlc_candle = go.Candlestick(
x=df.index,
open=df[open],
high=df[high],
low=df[low],
close=df[close],
text=ohlc_candle_hovertext,
hoverinfo="text",
increasing_line_color="red",
decreasing_line_color="blue",
)
    # Create the volume bar chart
volume_bar = go.Bar(
x=df.index,
y=df[volume],
text=volume_bar_hovertext,
hoverinfo="text",
)
    # Draw the combined figure
fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.02)
fig.add_trace(ohlc_candle, row=1, col=1)
fig.add_trace(volume_bar, row=2, col=1)
fig.update_layout(
yaxis1_title="가격",
yaxis2_title="거래량",
xaxis2_title="기간",
xaxis1_rangeslider_visible=False,
xaxis2_rangeslider_visible=True,
showlegend=False,
yaxis1=dict(domain=[0.25, 1]),
yaxis2=dict(domain=[0, 0.2]),
)
fig.show()
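# Editor's usage sketch (not part of the original module): a tiny OHLCV frame
# using the Korean column names the function defaults to. Real callers would
# pass data fetched from a market-data source instead.
if __name__ == "__main__":
    sample = pd.DataFrame(
        {
            "시가": [100, 103, 101],
            "고가": [105, 104, 106],
            "저가": [99, 100, 100],
            "종가": [103, 101, 105],
            "거래량": [1500, 1200, 1800],
        },
        index=pd.date_range("2023-01-02", periods=3, freq="D"),
    )
    print(get_date_list("20221001"))  # month boundaries from the start day to today
    make_ohlcv_graph(sample)          # opens the candlestick + volume chart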
|
import csv
from datetime import datetime
import pytz
import os
from ledger.settings_base import TIME_ZONE
from mooringlicensing.settings import BASE_DIR
from mooringlicensing.components.approvals.models import (
Approval, WaitingListAllocation, AnnualAdmissionPermit,
AuthorisedUserPermit, MooringLicence
)
date_now = datetime.now(pytz.timezone(TIME_ZONE)).date()
date_now_str = date_now.strftime('%Y%m%d')
def write_wla():
approval_type = WaitingListAllocation.code
approvals = WaitingListAllocation.objects.filter(status='current')
header_row = ['Lodgement Number', 'Start Date', 'Expiry Date', 'Issue Date', 'WLA Queue Date', 'WLA Order',
'First Name', 'Last Name', 'Address 1', 'Suburb', 'State', 'Country', 'Postcode', 'Postal Address',
'Phone', 'Mobile', 'EMAIL', 'Vessel Rego', 'Company',
]
rows = [[wla.lodgement_number, wla.start_date, wla.expiry_date, wla.issue_date.strftime('%Y-%m-%d'),
wla.wla_queue_date.strftime('%Y-%m-%d') if wla.wla_queue_date else '', wla.wla_order,
wla.submitter.first_name, wla.submitter.last_name, wla.submitter.residential_address.line1,
wla.submitter.residential_address.locality, wla.submitter.residential_address.state,
wla.submitter.residential_address.country, wla.submitter.residential_address.postcode, '',
wla.submitter.phone_number, wla.submitter.mobile_number, wla.submitter.email,
wla.current_proposal.vessel_ownership.vessel.rego_no if wla.current_proposal.vessel_ownership else '', '',
            ] for wla in approvals]
write_file(approval_type, approvals, header_row, rows)
def write_aap():
approval_type = AnnualAdmissionPermit.code
approvals = AnnualAdmissionPermit.objects.filter(status='current')
header_row = ['Lodgement Number', 'Start Date', 'Expiry Date', 'Issue Date', 'First Name',
'Last Name', 'Address 1', 'Suburb', 'State', 'Country', 'Postcode', 'Postal Address',
'Phone', 'Mobile', 'EMAIL', 'Vessel Rego', 'Company',
]
rows = [[aap.lodgement_number, aap.start_date, aap.expiry_date, aap.issue_date.strftime('%Y-%m-%d'),
aap.submitter.first_name, aap.submitter.last_name, aap.submitter.residential_address.line1,
aap.submitter.residential_address.locality, aap.submitter.residential_address.state,
aap.submitter.residential_address.country, aap.submitter.residential_address.postcode, '',
aap.submitter.phone_number, aap.submitter.mobile_number, aap.submitter.email,
aap.current_proposal.vessel_ownership.vessel.rego_no if aap.current_proposal.vessel_ownership else '', '',
            ] for aap in approvals]
write_file(approval_type, approvals, header_row, rows)
def write_aup():
approval_type = AuthorisedUserPermit.code
approvals = AuthorisedUserPermit.objects.filter(status='current')
header_row = ['Lodgement Number', 'Start Date', 'Expiry Date', 'Issue Date', 'Moorings', 'First Name',
'Last Name', 'Address 1', 'Suburb', 'State', 'Country', 'Postcode', 'Postal Address',
'Phone', 'Mobile', 'EMAIL', 'Vessel Rego', 'Company',
]
rows = [[aup.lodgement_number, aup.start_date, aup.expiry_date, aup.issue_date.strftime('%Y-%m-%d'),
','.join(str(moa.mooring) for moa in aup.mooringonapproval_set.filter(mooring__mooring_licence__status='current')),
aup.submitter.first_name, aup.submitter.last_name, aup.submitter.residential_address.line1,
aup.submitter.residential_address.locality, aup.submitter.residential_address.state,
aup.submitter.residential_address.country, aup.submitter.residential_address.postcode, '',
aup.submitter.phone_number, aup.submitter.mobile_number, aup.submitter.email,
aup.current_proposal.vessel_ownership.vessel.rego_no if aup.current_proposal.vessel_ownership else '', '',
            ] for aup in approvals]
write_file(approval_type, approvals, header_row, rows)
def write_ml():
approval_type = MooringLicence.code
approvals = MooringLicence.objects.filter(status='current')
header_row = ['Lodgement Number', 'Start Date', 'Expiry Date', 'Issue Date', 'Mooring', 'First Name',
'Last Name', 'Address 1', 'Suburb', 'State', 'Country', 'Postcode', 'Postal Address',
'Phone', 'Mobile', 'EMAIL', 'Vessels',
]
rows = [[ml.lodgement_number, ml.start_date, ml.expiry_date, ml.issue_date.strftime('%Y-%m-%d'),
ml.mooring if hasattr(ml, 'mooring') else '',
ml.submitter.first_name, ml.submitter.last_name, ml.submitter.residential_address.line1,
ml.submitter.residential_address.locality, ml.submitter.residential_address.state,
ml.submitter.residential_address.country, ml.submitter.residential_address.postcode, '',
ml.submitter.phone_number, ml.submitter.mobile_number, ml.submitter.email,
','.join(vessel.rego_no for vessel in ml.vessel_list),
            ] for ml in approvals]
write_file(approval_type, approvals, header_row, rows)
def write_file(approval_type, approvals, header_row, rows):
if not os.path.isdir(os.path.join(BASE_DIR, 'mooringlicensing', 'utils', 'csv')):
os.mkdir(os.path.join(BASE_DIR, 'mooringlicensing', 'utils', 'csv'))
filename = os.path.join(BASE_DIR, 'mooringlicensing', 'utils', 'csv', '{}_{}.csv'.format(approval_type, date_now_str))
with open(filename, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(header_row)
for row in rows:
csvwriter.writerow(row)
write_wla()
write_aap()
write_aup()
write_ml()
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Optional
from sparseml.base import check_version
try:
import onnx
onnx_err = None
except Exception as err:
onnx = object() # TODO: populate with fake object for necessary imports
onnx_err = err
try:
import onnxruntime
onnxruntime_err = None
except Exception as err:
onnxruntime = object() # TODO: populate with fake object for necessary imports
onnxruntime_err = err
__all__ = [
"onnx",
"onnx_err",
"onnxruntime",
"onnxruntime_err",
"check_onnx_install",
"check_onnxruntime_install",
"require_onnx",
"require_onnxruntime",
]
_ONNX_MIN_VERSION = "1.5.0"
_ORT_MIN_VERSION = "1.0.0"
def check_onnx_install(
min_version: Optional[str] = _ONNX_MIN_VERSION,
max_version: Optional[str] = None,
raise_on_error: bool = True,
) -> bool:
"""
Check that the onnx package is installed.
If raise_on_error, will raise an ImportError if it is not installed or
the required version range, if set, is not installed.
If not raise_on_error, will return True if installed with required version
and False otherwise.
:param min_version: The minimum version for onnx that it must be greater than
or equal to, if unset will require no minimum version
:type min_version: str
:param max_version: The maximum version for onnx that it must be less than
or equal to, if unset will require no maximum version.
:type max_version: str
:param raise_on_error: True to raise any issues such as not installed,
minimum version, or maximum version as ImportError. False to return the result.
:type raise_on_error: bool
:return: If raise_on_error, will return False if onnx is not installed
or the version is outside the accepted bounds and True if everything is correct.
:rtype: bool
"""
if onnx_err is not None:
if raise_on_error:
raise onnx_err
return False
return check_version("onnx", min_version, max_version, raise_on_error)
def check_onnxruntime_install(
min_version: Optional[str] = _ORT_MIN_VERSION,
max_version: Optional[str] = None,
raise_on_error: bool = True,
) -> bool:
"""
Check that the onnxruntime package is installed.
If raise_on_error, will raise an ImportError if it is not installed or
the required version range, if set, is not installed.
If not raise_on_error, will return True if installed with required version
and False otherwise.
:param min_version: The minimum version for onnxruntime that it must be greater than
or equal to, if unset will require no minimum version
:type min_version: str
:param max_version: The maximum version for onnxruntime that it must be less than
or equal to, if unset will require no maximum version.
:type max_version: str
:param raise_on_error: True to raise any issues such as not installed,
minimum version, or maximum version as ImportError. False to return the result.
:type raise_on_error: bool
:return: If raise_on_error, will return False if onnxruntime is not installed
or the version is outside the accepted bounds and True if everything is correct.
:rtype: bool
"""
if onnxruntime_err is not None:
if raise_on_error:
raise onnxruntime_err
return False
return check_version("onnxruntime", min_version, max_version, raise_on_error)
def require_onnx(
min_version: Optional[str] = _ONNX_MIN_VERSION, max_version: Optional[str] = None
):
"""
Decorator function to require use of onnx.
Will check that onnx package is installed and within the bounding
ranges of min_version and max_version if they are set before calling
the wrapped function.
See :func:`check_onnx_install` for more info.
    :param min_version: The minimum version for onnx that it must be greater than
or equal to, if unset will require no minimum version
:type min_version: str
:param max_version: The maximum version for onnx that it must be less than
or equal to, if unset will require no maximum version.
:type max_version: str
"""
def _decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kwargs):
check_onnx_install(min_version, max_version)
return func(*args, **kwargs)
return _wrapper
return _decorator
def require_onnxruntime(
min_version: Optional[str] = _ORT_MIN_VERSION, max_version: Optional[str] = None
):
"""
Decorator function to require use of onnxruntime.
Will check that onnxruntime package is installed and within the bounding
ranges of min_version and max_version if they are set before calling
the wrapped function.
See :func:`check_onnxruntime_install` for more info.
    :param min_version: The minimum version for onnxruntime that it must be greater than
or equal to, if unset will require no minimum version
:type min_version: str
:param max_version: The maximum version for onnxruntime that it must be less than
or equal to, if unset will require no maximum version.
:type max_version: str
"""
def _decorator(func):
@functools.wraps(func)
def _wrapper(*args, **kwargs):
check_onnxruntime_install(min_version, max_version)
return func(*args, **kwargs)
return _wrapper
return _decorator
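# Editor's usage sketch (not part of the original module): guarding a helper
# with the decorator defined above. The `load_model` name and its body are
# assumptions made for illustration only.
@require_onnx(min_version=_ONNX_MIN_VERSION)
def load_model(model_path: str):
    """Load an ONNX model, raising ImportError first if onnx is unavailable."""
    return onnx.load(model_path)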
|
from respa.settings import *
# Insert WhiteNoise middleware for serving static files (just before SecurityMiddleware when present)
try:
place = MIDDLEWARE.index('django.middleware.security.SecurityMiddleware')
except ValueError:
place = 0
MIDDLEWARE.insert(place, 'whitenoise.middleware.WhiteNoiseMiddleware')
import environ
deploy_env = environ.Env(
USE_X_FORWARDED_HOST = (bool, False),
SECURE_PROXY = (bool, False)
)
USE_X_FORWARDED_HOST = deploy_env('USE_X_FORWARDED_HOST')
if deploy_env('SECURE_PROXY'):
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
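# Editor's note (illustrative values only): the settings read above would be
# supplied through the environment, e.g.
#   USE_X_FORWARDED_HOST=True   # trust the X-Forwarded-Host header from the proxy
#   SECURE_PROXY=True           # treat requests with X-Forwarded-Proto: https as secure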
|
# -*- coding: utf-8; -*-
#
# @file __init__.py
# @brief
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2016-09-01
# @copyright Copyright (c) 2016 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details
"""
coll-gate application initialisation
"""
# Application Config for startup and more...
default_app_config = __name__ + '.apps.CollGateMain'
|
import arcade
def draw_snowflake(x,y):
#90 degree up
arcade.draw_line(x, y,x, y+50, arcade.color.WHITE, 3)
arcade.draw_line(x, y+30, x+10, y + 40, arcade.color.WHITE, 3)
arcade.draw_line(x, y+30, x-10, y + 40, arcade.color.WHITE, 3)
#0 degree - right
arcade.draw_line(x, y, x+50, y, arcade.color.WHITE, 3)
arcade.draw_line(x+30, y, x+40, y + 10, arcade.color.WHITE, 3)
arcade.draw_line(x+30, y, x+40, y - 10, arcade.color.WHITE, 3)
#45 degree
arcade.draw_line(x, y, x+30, y + 30, arcade.color.WHITE, 3)
arcade.draw_line(x+20, y+20, x+30, y + 20, arcade.color.WHITE, 3)
arcade.draw_line(x+20, y+20, x+20, y + 30, arcade.color.WHITE, 3)
#180 degree
arcade.draw_line(x, y, x-50, y, arcade.color.WHITE, 3)
arcade.draw_line(x-30, y, x-40, y - 10, arcade.color.WHITE, 3)
arcade.draw_line(x-30, y, x-40, y + 10, arcade.color.WHITE, 3)
#270 degree
arcade.draw_line(x, y,x, y-50, arcade.color.WHITE, 3)
arcade.draw_line(x, y-30, x-10, y - 40, arcade.color.WHITE, 3)
arcade.draw_line(x, y-30, x+10, y - 40, arcade.color.WHITE, 3)
#135 degree
arcade.draw_line(x, y, x-30, y + 30, arcade.color.WHITE, 3)
arcade.draw_line(x-20, y+20, x-30, y + 20, arcade.color.WHITE, 3)
arcade.draw_line(x-20, y+20, x-20, y + 30, arcade.color.WHITE, 3)
#225 degree
arcade.draw_line(x, y, x-30, y - 30, arcade.color.WHITE, 3)
arcade.draw_line(x-20, y-20, x-30, y - 20, arcade.color.WHITE, 3)
arcade.draw_line(x-20, y-20, x-20, y - 30, arcade.color.WHITE, 3)
#315 degree
arcade.draw_line(x, y, x+30, y - 30, arcade.color.WHITE, 3)
arcade.draw_line(x+20, y-20, x+30, y - 20, arcade.color.WHITE, 3)
arcade.draw_line(x+20, y-20, x+20, y - 30, arcade.color.WHITE, 3)
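# Editor's usage sketch (not part of the original module), assuming the
# arcade 2.x procedural drawing API (open_window/start_render/finish_render/run):
def main():
    arcade.open_window(600, 600, "Snowflakes")
    arcade.set_background_color(arcade.color.BLACK)
    arcade.start_render()
    # Draw a few snowflakes at hand-picked positions.
    for x, y in [(150, 150), (300, 400), (450, 250)]:
        draw_snowflake(x, y)
    arcade.finish_render()
    arcade.run()


if __name__ == "__main__":
    main()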
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
from sklearn.svm import SVC
import pickle
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import collections
from sklearn import tree
from sklearn.neural_network import MLPClassifier
import time
import os
def _get_int_feature(dictionary, key, counter):
if key in dictionary:
return dictionary[key], counter
else: # key not in dictionary
dictionary[key] = counter
return dictionary[key], counter+1
# In[2]:
def calculate_macro_f1_score(predictions, true_labels):
true_positives = [0 for i in range(11)]
false_positives = [0 for i in range(11)]
false_negatives = [0 for i in range(11)]
if len(predictions) != len(true_labels):
print("bug in code, length of predictions should match length of true_labels")
return None
for i in range(len(predictions)):
if predictions[i] == true_labels[i]:
true_positives[predictions[i]] += 1
else:
false_positives[predictions[i]] += 1
false_negatives[true_labels[i]] += 1
total_classes = 0
total_f1 = 0
for i in range(11):
if true_positives[i]==0 and false_positives[i]==0:
continue
elif true_positives[i]==0 and false_negatives[i]==0:
continue
prec = true_positives[i]*1.0/(true_positives[i] + false_positives[i])
recall = true_positives[i]*1.0/(true_positives[i]+false_negatives[i])
f1=0
if prec+recall != 0:
f1 = 2*prec*recall/(prec+recall)
total_classes += 1
total_f1 += f1
return total_f1*100.0/total_classes
def calculate_micro_f1_score(predictions, true_labels):
true_positives = 0
false_positives = 0
false_negatives = 0
if len(predictions) != len(true_labels):
print("bug in code, length of predictions should match length of true_labels")
return None
for i in range(len(predictions)):
if predictions[i] == true_labels[i]:
true_positives += 1
else:
false_positives += 1
false_negatives += 1
prec = true_positives*1.0/(true_positives + false_positives)
recall = true_positives*1.0/(true_positives+false_negatives)
return 2*prec*recall*100.0/(prec+recall)
# In[3]:
dos = ['back','land','neptune','pod','smurf','teardrop']
u2r = ['buffer_overflow','loadmodule','perl','rootkit']
r2l = ['ftp_write','guess_passwd','imap','multihop','phf','spy','warezclient','warezmaster']
probing = ['ipsweep','nmap','portsweep','satan']
normal = ['normal']
ifile = open('../kddcup.data','r') # loading data
raw_data = ifile.readlines()
ifile.close()
## cleaning ##
cleanedData = []
dict_tcp,tcpCount = {},0
dict_http,httpCount = {},0
dict_sf,sfCount = {},0
nDOS,nU2R,nR2L,nProb,nNormal,nOthers = 0,0,0,0,0,0
for info in raw_data:
info = info.replace('\n','').replace('.','').split(',')
info[1], tcpCount = _get_int_feature(dict_tcp, info[1], tcpCount)
info[2], httpCount = _get_int_feature(dict_http, info[2], httpCount)
info[3], sfCount = _get_int_feature(dict_sf, info[3], sfCount)
# print("info is ", info)
if info[-1] in dos:
info[-1] = 1 #'DOS' label
nDOS += 1
# cleanedData.append(info)
elif info[-1] in u2r:
info[-1] = 2 #'U2R'
nU2R += 1
elif info[-1] in r2l:
info[-1] = 3 #'R2L'
nR2L += 1
elif info[-1] in probing:
info[-1] = 4 #'PROBING'
nProb += 1
elif info[-1] in normal: # label is normal
nNormal += 1
info[-1] = 0 #'NORMAL' label
else: # unspecified label
nOthers += 1
continue
cleanedData.append(info)
# with open('cleaned_data', 'wb') as fp:
# pickle.dump(cleanedData, fp)
# with open ('cleaned_data', 'rb') as fp:
# cleanedData = pickle.load(fp)
examples_matrix = np.array(cleanedData)
np.random.shuffle(examples_matrix)
# In[4]:
print(nDOS, nU2R, nR2L, nProb, nNormal, nOthers)
# In[5]:
def _run_svm(train_feature_matrix, train_label_matrix, test_feature_matrix):
clf = SVC(gamma='auto')
clf.fit(train_feature_matrix, train_label_matrix)
predicted_labels = clf.predict(test_feature_matrix)
return predicted_labels
# In[6]:
def _run_dtree(train_feature_matrix, train_label_matrix, test_feature_matrix):
dt_clf = tree.DecisionTreeClassifier()
dt_clf = dt_clf.fit(train_feature_matrix, train_label_matrix)
dt_predictions = dt_clf.predict(test_feature_matrix)
return dt_predictions
# In[7]:
def _run_nn(train_feature_matrix, train_label_matrix, test_feature_matrix):
nn_clf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(50, 30), random_state=1)
nn_train_feature_matrix = train_feature_matrix.astype(np.float64)
nn_test_feature_matrix = test_feature_matrix.astype(np.float64)
nn_clf.fit(nn_train_feature_matrix, train_label_matrix)
nn_predictions = nn_clf.predict(nn_test_feature_matrix)
return nn_predictions
# In[ ]:
# print("example is ", examples_matrix[1])
result_file = open('results.txt','w')
result_file.write("dataset size, svm macroF1Score, svm accuracy, svm time, dtree macroF1Score, dtree accuracy, dtree time, nn macroF1Score, nn accuracy, nn time\n")
for data_size in range(250000,len(cleanedData)+1,5000):
train_size = int(data_size * 0.7)
test_size = data_size - train_size
# train_size = 70000
# test_size = 30000
train_feature_matrix = examples_matrix[:train_size,:-1]
train_label_matrix = examples_matrix[:train_size,-1]
test_feature_matrix = examples_matrix[train_size+1:train_size+test_size,:-1]
test_label_matrix = examples_matrix[train_size+1:train_size+test_size,-1]
#print(collections.Counter(train_label_matrix))
#print(collections.Counter(test_label_matrix))
print(data_size)
#run svm
print('SVM')
start_time = time.time()
predicted_labels = _run_svm(train_feature_matrix, train_label_matrix, test_feature_matrix)
end_time = time.time() - start_time
macro_f1_score = f1_score(test_label_matrix, predicted_labels, average='macro')
accuracy = accuracy_score(test_label_matrix, predicted_labels)
result_file.write(str(data_size) + ", ")
result_file.write(str(macro_f1_score) + ", " + str(accuracy) + ", " + str(end_time) + ", ")
result_file.flush()
os.fsync(result_file.fileno())
#run decision tree
print('DT')
start_time = time.time()
predicted_labels = _run_dtree(train_feature_matrix, train_label_matrix, test_feature_matrix)
end_time = time.time() - start_time
macro_f1_score = f1_score(test_label_matrix, predicted_labels, average='macro')
accuracy = accuracy_score(test_label_matrix, predicted_labels)
result_file.write(str(data_size) + ", ")
result_file.write(str(macro_f1_score) + ", " + str(accuracy) + ", " + str(end_time) + ", ")
result_file.flush()
os.fsync(result_file.fileno())
#run neural network
print('ANN')
start_time = time.time()
predicted_labels = _run_nn(train_feature_matrix, train_label_matrix, test_feature_matrix)
end_time = time.time() - start_time
macro_f1_score = f1_score(test_label_matrix, predicted_labels, average='macro')
accuracy = accuracy_score(test_label_matrix, predicted_labels)
result_file.write(str(macro_f1_score) + ", " + str(accuracy) + ", " + str(end_time) + "\n")
result_file.flush()
os.fsync(result_file.fileno())
result_file.close()
# In[ ]:
|
#!/usr/bin/env python
"""
obsgen.py
State Estimation and Analysis for PYthon
Module to process observations:
obsgen : class to convert from raw to ROMS observations using
specific subclasses
Written by Brian Powell on 08/15/15
Copyright (c)2010--2021 University of Hawaii under the MIT-License.
"""
import numpy as np
import netCDF4
import h5py
import seapy
import datetime
from warnings import warn
from rich.progress import track
def error_profile(obs, depth, error, provenance=None):
"""
Apply a vertical error profile to a given observation structure.
This allows for error minimums to vary by depth and observation
type.
Parameters
----------
obs : seapy.roms.obs.obs or string,
The observations to enforce the error profile upon.
depth : ndarray,
Array of depths for the errors provided
error : dict,
Dictionary of the errors, where the key is the type of observation
(as defined by seapy.roms.obs.obs_types) and the value is
an ndarray of same length as depth with the error [in squared units]
of the observation profile.
provenance : list of int or string, optional,
The provenance to apply the errors to (ignore other observations
of the same type, but different instrument)
Returns
-------
None:
        The obs structure is mutable and is changed in place
Examples
--------
>>> obs = obs('observation_file.nc')
>>> depth = [10, 30, 50, 1000, 2000]
>>> error['temp'] = [0.5, 0.2, 0.4, 0.1, 0.01]
>>> error_profile(obs, depth, error)
The resulting 'obs' class will have had its error profiles
modified.
"""
from scipy.interpolate import interp1d
obs = seapy.roms.obs.asobs(obs)
depth = np.atleast_1d(depth).flatten()
depth = np.abs(depth)
pro = seapy.roms.obs.asprovenance(provenance) if provenance else None
# Loop over all of the profiles in the error dictionary and
# apply them to the observations
for var in error:
typ = seapy.roms.obs.astype(var)
try:
fint = interp1d(depth, error[var].flatten(), copy=False)
            if pro is not None and pro.any():
l = np.where(np.logical_and(obs.type == typ,
np.in1d(obs.provenance, pro)))
else:
l = np.where(np.logical_and(obs.type == typ, obs.depth < 0))
nerr = fint(np.abs(obs.depth[l]))
obs.error[l] = np.maximum(obs.error[l], nerr)
except ValueError:
warn("Error for {:s} is the wrong size".format(var))
continue
pass
def add_ssh_tides(obs, tide_file, tide_error, tide_start=None, provenance=None,
reftime=seapy.default_epoch):
"""
Apply predicted barotropic tides to the SSH values of given observations
using the tide_file given.
Parameters
----------
obs : seapy.roms.obs.obs or string,
The observations to enforce the error profile upon.
tide_file : string,
The name of the ROMS tidal forcing file to use for predicting the
barotropic tides.
tide_error : np.masked_array
A two dimensional array of the tidal fit errors to apply to
the ssh errors when adding the tides. This should be the same size
as the rho-grid. The units of the error must be in meters. If it is
masked, the mask will be honored and obs that are in the mask will
be removed. This allows you to filter on regions of high error.
    tide_start : datetime, optional,
If given, the tide_start of the tide file. If not specified,
will read the attribute of the tidal forcing file
provenance : list of int or string, optional,
The provenance to apply the tides to (ignore other observations
of the same type, but different instrument)
reftime: datetime,
Reference time for the observation times
Returns
-------
None:
        The obs structure is mutable and is changed in place
Examples
--------
>>> obs = obs('observation_file.nc')
>>> add_ssh_tides(obs, 'tide_frc.nc', errmap)
The resulting 'obs' variable will have modified data. To save it:
>>> obs.to_netcdf()
"""
# Load tidal file data
frc = seapy.roms.tide.load_forcing(tide_file)
if not tide_start:
tide_start = frc['tide_start']
# Make sure that the sizes are the same
if frc['Eamp'].shape[1:] != tide_error.shape:
raise ValueError(
"The error array is not the same size as the tidal grid")
# Gather the observations that need tidal information
obs = seapy.roms.obs.asobs(obs)
pro = seapy.roms.obs.asprovenance(provenance) if provenance else None
    if pro is not None:
l = np.where(np.logical_and(obs.type == 1,
np.in1d(obs.provenance, pro)))
else:
l = np.where(obs.type == 1)
# If we have any, then do tidal predictions and add the signal
# and error to the observations
bad = []
if l[0].any():
ox = np.rint(obs.x[l]).astype(int)
oy = np.rint(obs.y[l]).astype(int)
idx = seapy.unique_rows((ox, oy))
for cur in track(idx):
pts = np.where(np.logical_and(ox == ox[cur], oy == oy[cur]))
# If this point is masked, remove from the observations
if not tide_error[oy[cur], ox[cur]]:
bad.append(l[0][pts].tolist())
else:
time = [reftime + datetime.timedelta(t) for t in
obs.time[l][pts]]
amppha = seapy.tide.pack_amp_phase(
frc['tides'], frc['Eamp'][:, oy[cur], ox[cur]],
frc['Ephase'][:, oy[cur], ox[cur]])
zpred = seapy.tide.predict(time, amppha,
lat=obs.lat[l][cur],
tide_start=tide_start)
# Add the information to the observations
obs.value[l[0][pts]] += zpred
obs.error[l[0][pts]] = np.maximum(
obs.error[l[0][pts]], tide_error[oy[cur], ox[cur]]**2)
# If any were bad, then remove them
if bad:
obs.delete(seapy.flatten(bad))
pass
class obsgen(object):
def __init__(self, grid, dt, reftime=seapy.default_epoch):
"""
class for abstracting the processing of raw observation files
(satellite, in situ, etc.) into ROMS observations files. All
processing has commonalities which this class encapsulates, while
leaving the loading and translation of individual data formats
to subclasses.
Parameters
----------
grid: seapy.model.grid or string,
grid to use for generating observations
dt: float,
Model time-step or greater in units of days
        reftime: datetime, optional,
Time to reference all observations from
Returns
-------
None
"""
self.grid = seapy.model.asgrid(grid)
self.dt = dt
self.epoch = reftime
def convert_file(self, file, title=None):
"""
convert a raw observation file into a ROMS observations structure.
The subclasses are responsible for the conversion, and this method
is obsgen is only a stub.
Parameters
----------
file : string,
filename of the file to process
title : string,
Title to give the new observation structure global attribute
Returns
-------
seapy.roms.obs.obs,
observation structure from raw obs
"""
pass
def datespan_file(self, file):
"""
check the given file and return the date span of data that are
covered by the file.
Parameters
----------
file : string,
filename of the file to process
Returns
-------
start : datetime
starting date and time of the data
end : datetime
ending date and time of the data
"""
return None, None
def batch_files(self, in_files, out_files, start_time=None,
end_time=None, clobber=True):
"""
Given a list of input files, process each one and save each result
into the given output file.
Parameters
----------
in_files : list of strings,
filenames of the files to process
out_files : list of strings,
filenames of the files to create for each of the input filenames.
If a single string is given, the character '#' will be replaced
by the starting time of the observation (e.g. out_files="out_#.nc"
will become out_03234.nc)
start_time : datetime, optional
starting date and time for data to process (ignore files that are
outside of the time period)
end_time : datetime, optional
ending date and time for data to process (ignore files that are
outside of the time period). If start_time is provided, and
end_time is not, then a period of one day is assumed.
clobber : bool, optional
If TRUE, overwrite any existing output files. If False, the
file is given a letter suffix.
Returns
-------
None
"""
import re
import os
datecheck = False
if start_time is not None:
datecheck = True
if end_time is None:
end_time = start_time + datetime.timedelta(1)
outtime = False
if isinstance(out_files, str):
outtime = True
            time = re.compile('#')
for n, file in enumerate(in_files):
try:
# Check the times if user requested
print(file, end="")
if datecheck:
st, en = self.datespan_file(file)
if (en is not None and en < start_time) or \
(st is not None and st > end_time):
print(": SKIPPED")
continue
# Convert the file
obs = self.convert_file(file)
if obs is None:
print(": NO OBS")
continue
# Output the obs to the correct file
if outtime:
ofile = time.sub("{:05d}".format(int(obs.time[0])),
out_files)
else:
ofile = out_files[n]
if clobber:
obs.to_netcdf(ofile, True)
else:
for i in "abcdefgh":
if os.path.isfile(ofile):
                            ofile = re.sub(r"[a-h]{0,1}\.nc", i + ".nc", ofile)
else:
break
obs.to_netcdf(ofile, False)
print(": SAVED")
except (BaseException, UserWarning) as e:
warn("WARNING: {:s} cannot be processed.\nError: {:}".format(
file, e.args))
pass
##############################################################################
#
# REMOTE-SENSING DATA
#
##############################################################################
class aquarius_sss(obsgen):
"""
class to process Aquarius SSS HDF5 files into ROMS observation
files. This is a subclass of seapy.roms.genobs.genobs, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, salt_limits=None,
salt_error=0.2):
if salt_limits is None:
self.salt_limits = (10, 36)
else:
self.salt_limits = salt_limits
self.salt_error = salt_error
super().__init__(grid, dt, reftime)
def datespan_file(self, file):
f = h5py.File(file, 'r')
try:
year = f.attrs['Period End Year']
day = f.attrs['Period End Day']
st = datetime.datetime(year, 1, 1) + datetime.timedelta(int(day))
en = st + datetime.timedelta(1)
except:
st = en = None
pass
finally:
f.close()
return st, en
def convert_file(self, file, title="AQUARIUS Obs"):
"""
Load an Aquarius file and convert into an obs structure
"""
f = h5py.File(file, 'r')
salt = np.ma.masked_equal(np.flipud(f['l3m_data'][:]),
f['l3m_data'].attrs['_FillValue'])
year = f.attrs['Period End Year']
day = f.attrs['Period End Day']
nlat = f.attrs['Northernmost Latitude'] - 0.5
slat = f.attrs['Southernmost Latitude'] + 0.5
wlon = f.attrs['Westernmost Longitude'] + 0.5
elon = f.attrs['Easternmost Longitude'] - 0.5
dlat = f.attrs['Latitude Step']
dlon = f.attrs['Longitude Step']
f.close()
[lon, lat] = np.meshgrid(np.arange(wlon, elon + dlon, dlon),
np.arange(slat, nlat + dlat, dlat))
time = (datetime.datetime(year, 1, 1) + datetime.timedelta(int(day)) -
self.epoch).days
lat = lat.flatten()
lon = lon.flatten()
if self.grid.east():
lon[lon < 0] += 360
salt = np.ma.masked_outside(salt.flatten(), self.salt_limits[0],
self.salt_limits[1])
data = [seapy.roms.obs.raw_data("SALT", "SSS_AQUARIUS",
salt, None, self.salt_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
pass
class aviso_sla_map(obsgen):
"""
class to process AVISO SLA map netcdf files into ROMS observation
files. This is a subclass of seapy.roms.genobs.genobs, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, ssh_mean=None,
ssh_error=0.05):
if ssh_mean is not None:
self.ssh_mean = seapy.convolve_mask(ssh_mean, ksize=5, copy=True)
else:
self.ssh_mean = None
self.ssh_error = ssh_error
super().__init__(grid, dt, reftime)
def datespan_file(self, file):
nc = seapy.netcdf(file)
try:
st = datetime.datetime.strptime(nc.getncattr("time_coverage_start"),
"%Y-%m-%dT%H:%M:%SZ")
en = datetime.datetime.strptime(nc.getncattr("time_coverage_end"),
"%Y-%m-%dT%H:%M:%SZ")
except:
st = en = None
pass
finally:
nc.close()
return st, en
def convert_file(self, file, title="AVISO Obs"):
"""
Load an AVISO file and convert into an obs structure
"""
# Load AVISO Data
nc = seapy.netcdf(file)
lonname = 'lon' if 'lon' in nc.variables.keys() else 'longitude'
lon = nc.variables[lonname][:]
latname = 'lat' if 'lat' in nc.variables.keys() else 'latitude'
lat = nc.variables[latname][:]
dat = np.squeeze(nc.variables["sla"][:])
err = np.squeeze(nc.variables["err"][:])
time = seapy.roms.get_time(
nc, "time", records=[0], epoch=self.epoch)[0]
nc.close()
lon, lat = np.meshgrid(lon, lat)
lat = lat.flatten()
lon = lon.flatten()
if not self.grid.east():
lon[lon > 180] -= 360
data = [seapy.roms.obs.raw_data("ZETA", "SSH_AVISO_MAP",
dat.flatten(), err.flatten(), self.ssh_error)]
# Grid it
obs = seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
# Apply the model mean ssh to the sla data
if self.ssh_mean is not None:
m, p = seapy.oasurf(self.grid.I, self.grid.J, self.ssh_mean,
obs.x, obs.y, nx=1, ny=1, weight=7)
obs.value += m
return obs
_aviso_sla_errors = {
"SSH_AVISO_ENVISAT": 0.06,
"SSH_AVISO_JASON1": 0.05,
"SSH_AVISO_JASON2": 0.05,
"SSH_AVISO_JASON3": 0.05,
"SSH_AVISO_GFO": 0.05,
"SSH_AVISO_ALTIKA": 0.07,
"SSH_AVISO_CRYOSAT2": 0.07,
"SSH_AVISO_HAIYANG": 0.07,
"SSH_AVISO_ERS1": 0.06,
"SSH_AVISO_ERS2": 0.06,
"SSH_AVISO_TOPEX_POSEIDON": 0.05,
"SSH_AVISO_SENTINEL3A": 0.05
}
class aviso_sla_track(obsgen):
"""
class to process AVISO SLA track netcdf files into ROMS observation
files. This is a subclass of seapy.roms.genobs.genobs, and handles
the loading of the data. THIS COVERS ALL SATELLITES/INSTRUMENTS FROM AVISO TRACK:
al, c2, e1, e2, en, enn, g2, h2, j1, j1g, j1n, j2, tp and tpn.
Parameters
----------
ssh_mean : ndarray,
Spatial map of rho-grid shape that contains the model mean SSH
ssh_error: dict, optional
Dictionary of the minimum errors for each satellite. The default
uses the errors defined in _aviso_sla_errors
repeat: int
Number of hours to repeat the track before and after its initial
pass
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, ssh_mean=None,
ssh_error=None, repeat=3, provenance="SSH"):
self.provenance = provenance.upper()
self.repeat = repeat
self.ssh_error = ssh_error if ssh_error else _aviso_sla_errors
if ssh_mean is not None:
self.ssh_mean = seapy.convolve_mask(ssh_mean, ksize=5, copy=True)
else:
self.ssh_mean = None
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="AVISO SLA Track Obs"):
"""
Load an AVISO file and convert into an obs structure
"""
# Load AVISO Data
nc = seapy.netcdf(file)
lon = nc.variables["longitude"][:]
lat = nc.variables["latitude"][:]
slaname = 'SLA' if 'SLA' in nc.variables.keys() else 'sla_filtered'
dat = nc.variables[slaname][:]
time = seapy.roms.num2date(nc, "time", epoch=self.epoch)
nc.close()
# make them into vectors
lat = lat.ravel()
lon = lon.ravel()
dat = dat.ravel()
err = np.ones(dat.shape) * _aviso_sla_errors.get(self.provenance, 0.1)
if not self.grid.east():
lon[lon > 180] -= 360
good = dat.nonzero()
data = [seapy.roms.obs.raw_data("ZETA", self.provenance,
dat[good], err[good], err[0])]
# Grid it
obs = seapy.roms.obs.gridder(self.grid, time, lon[good], lat[good], None,
data, self.dt, title)
# Apply the model mean ssh to the sla data
if self.ssh_mean is not None and obs is not None:
m, p = seapy.oasurf(self.grid.I, self.grid.J, self.ssh_mean,
obs.x, obs.y, nx=1, ny=1, weight=7)
obs.value += m
# Duplicate the observations before and after as per the repeat
# time unless it is zero
if self.repeat and obs:
prior = obs.copy()
after = obs.copy()
prior.time -= self.repeat / 24
after.time += self.repeat / 24
obs.add(prior)
obs.add(after)
return obs
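# Hypothetical usage sketch (the grid object, model_mean_ssh, and the file
# name are illustrative, not defined in this module): one generator is built
# per satellite and each along-track file is converted to a gridded obs
# structure.
#
#   gen = aviso_sla_track(grid, dt=2 / 24, ssh_mean=model_mean_ssh,
#                         repeat=3, provenance="SSH_AVISO_JASON2")
#   obs = gen.convert_file("dt_global_j2_phy_l3_20120101.nc")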
class ostia_sst_map(obsgen):
"""
class to process OSTIA SST map netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_error=0.4,
temp_limits=None):
self.temp_error = temp_error
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="OSTIA SST Obs"):
"""
Load an OSTIA file and convert into an obs structure
"""
# Load OSTIA Data
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(
nc.variables["analysed_sst"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(np.squeeze(
nc.variables["analysis_error"][:]), 0.01, 2.0)
dat[err.mask] = np.ma.masked
time = seapy.roms.num2date(
nc, "time", records=[0], epoch=self.epoch)[0]
nc.close()
if self.grid.east():
lon[lon < 0] += 360
lon, lat = np.meshgrid(lon, lat)
good = dat.nonzero()
lat = lat[good]
lon = lon[good]
data = [seapy.roms.obs.raw_data("TEMP", "SST_OSTIA", dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
class navo_sst_map(obsgen):
"""
class to process NAVO SST map netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, depth=None, reftime=seapy.default_epoch,
temp_error=0.25, temp_limits=None, provenance="SST_NAVO_MAP"):
self.temp_error = temp_error
self.provenance = provenance.upper()
self.temp_limits = (2, 35) if temp_limits is None else temp_limits
self.depth = 4 if depth is None else np.abs(depth)
super().__init__(grid, dt, reftime)
def datespan_file(self, file):
nc = seapy.netcdf(file)
try:
st = datetime.datetime.strptime(nc.getncattr("start_date"),
"%Y-%m-%d UTC")
en = datetime.datetime.strptime(nc.getncattr("stop_date"),
"%Y-%m-%d UTC")
except:
st = en = None
pass
finally:
nc.close()
return st, en
def convert_file(self, file, title="NAVO SST Obs"):
"""
Load a NAVO map file and convert into an obs structure
"""
import re
import sys
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(nc.variables["analysed_sst"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.array(np.squeeze(
nc.variables["analysis_error"][:]), mask=dat.mask)
        # This is an analyzed product that provides errors as a function of
        # space and time directly. The temperature is the bulk temperature
        # (i.e., at roughly 4 m depth, below the e-folding depth of sunlight
        # in the ocean), so the product has no diurnal cycle and hourly
        # variations are not a concern.
time = seapy.roms.num2date(
nc, "time", records=[0], epoch=self.epoch)[0]
nc.close()
# here we set the depth to be 4 m below the surface
if self.grid.east():
lon[lon < 0] += 360
lon, lat = np.meshgrid(lon, lat)
good = dat.nonzero()
lat = lat[good]
lon = lon[good]
data = [seapy.roms.obs.raw_data("TEMP", self.provenance, dat.compressed(),
err[good], self.temp_error)]
# Grid it
obs = seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, depth_adjust=True, title=title)
obs.z *= 0
obs.depth = -self.depth * np.ones(len(obs.depth))
return obs
class modis_sst_map(obsgen):
"""
class to process MODIS SST map netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_error=0.5,
temp_limits=None, provenance="SST_MODIS_AQUA"):
self.temp_error = temp_error
self.provenance = provenance.upper()
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="MODIS SST Obs"):
"""
        Load a MODIS file and convert into an obs structure
"""
# Load MODIS Data
import re
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(nc.variables["sst"][:],
self.temp_limits[0], self.temp_limits[1])
err = np.ones(dat.shape) * self.temp_error
time = seapy.date2day(datetime.datetime.strptime(
            re.sub(r'\.[0-9]+Z$', '', nc.time_coverage_end),
"%Y-%m-%dT%H:%M:%S"), self.epoch)
# Check the data flags
flags = np.ma.masked_not_equal(nc.variables["qual_sst"][:], 0)
dat[flags.mask] = np.ma.masked
nc.close()
if self.grid.east():
lon[lon < 0] += 360
lon, lat = np.meshgrid(lon, lat)
good = dat.nonzero()
lat = lat[good]
lon = lon[good]
data = [seapy.roms.obs.raw_data("TEMP", self.provenance, dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
class remss_swath(obsgen):
"""
class to process REMSS SST swath netcdf files into ROMS observation
files. The files may be AMSRE, TMI, etc. This is a subclass of
    seapy.roms.obsgen.obsgen, and handles the loading of the data.
"""
def __init__(self, grid, dt, check_qc_flags=True, reftime=seapy.default_epoch, temp_error=0.4,
temp_limits=None, provenance="SST_REMSS"):
self.temp_error = temp_error
self.provenance = provenance.upper()
self.check_qc_flags = check_qc_flags
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="REMSS SST Obs"):
"""
        Load a REMSS file and convert into an obs structure
"""
# Load REMSS Data
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(
nc.variables["sea_surface_temperature"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(np.squeeze(
nc.variables["sses_standard_deviation"][:]), 0.01, 2.0)
dat[err.mask] = np.ma.masked
# Check the data flags
if self.check_qc_flags:
flags = np.ma.masked_not_equal(
np.squeeze(nc.variables["quality_level"][:]), 5)
dat[flags.mask] = np.ma.masked
else:
dat = np.ma.masked_where(
np.squeeze(nc.variables["quality_level"][:]).data == 1, dat)
# Grab the observation time
time = seapy.roms.num2date(nc, "time", records=[0])[0] - self.epoch
dtime = nc.variables["sst_dtime"][:]
time = np.squeeze((time.total_seconds() + dtime) * seapy.secs2day)
nc.close()
if self.grid.east():
lon[lon < 0] += 360
good = dat.nonzero()
data = [seapy.roms.obs.raw_data("TEMP", self.provenance,
dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time[good], lon[good], lat[good],
None, data, self.dt, title)
class remss_map(obsgen):
"""
class to process REMSS SST map netcdf files into ROMS observation
files. The files may be AMSRE, TMI, etc. This is a subclass of
    seapy.roms.obsgen.obsgen, and handles the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_error=0.4,
temp_limits=None, provenance="SST_REMSS"):
self.temp_error = temp_error
self.provenance = provenance.upper()
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="REMSS SST Obs"):
"""
        Load a REMSS file and convert into an obs structure
"""
# Load REMSS Data
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(
nc.variables["sea_surface_temperature"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(np.squeeze(
nc.variables["SSES_standard_deviation_error"][:]), 0.01, 2.0)
dat[err.mask] = np.ma.masked
# Check the data flags
flags = np.ma.masked_not_equal(
np.squeeze(nc.variables["rejection_flag"][:]), 0)
dat[flags.mask] = np.ma.masked
err[flags.mask] = np.ma.masked
# Grab the observation time
time = seapy.roms.num2date(nc, "time", epoch=self.epoch)
sst_time = nc.variables["sst_dtime"][:] * seapy.secs2day
for n, i in enumerate(time):
sst_time[n, :, :] += i
sst_time[dat.mask] = np.ma.masked
# Set up the coordinate
lon, lat = np.meshgrid(lon, lat)
lon = np.ma.masked_where(dat.mask, seapy.adddim(lon, len(time)))
lat = np.ma.masked_where(dat.mask, seapy.adddim(lat, len(time)))
nc.close()
if self.grid.east():
lon[lon < 0] += 360
data = [seapy.roms.obs.raw_data("TEMP", self.provenance,
dat.compressed(),
err.compressed(), self.temp_error)]
# Grid it
        return seapy.roms.obs.gridder(self.grid, sst_time.compressed(),
                                      lon.compressed(), lat.compressed(), None,
                                      data, self.dt, title)
class viirs_swath(obsgen):
"""
class to process VIIRS SST swath netcdf files into ROMS observation
files. This is a subclass of
seapy.roms.obsgen.obsgen, and handles the loading of the data.
"""
def __init__(self, grid, dt, check_qc_flags=True, reftime=seapy.default_epoch,
temp_error=0.4, temp_limits=None, provenance="SST_VIIRS"):
self.temp_error = temp_error
self.provenance = provenance.upper()
self.check_qc_flags = check_qc_flags
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="VIIRS SST Obs"):
"""
Load a VIIRS file and convert into an obs structure
"""
# Load VIIRS Data
nc = seapy.netcdf(file, aggdim="time")
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(
nc.variables["sea_surface_temperature"][:] - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(
nc.variables["sses_standard_deviation"][:], 0.01, 2.0)
dat[err.mask] = np.ma.masked
# Check the data flags
if self.check_qc_flags:
flags = np.ma.masked_not_equal(
nc.variables["quality_level"][:], 5)
dat[flags.mask] = np.ma.masked
else:
dat = np.ma.masked_where(
nc.variables["quality_level"][:].data == 1, dat)
# Grab the observation time
time = netCDF4.num2date(nc.variables["time"][:],
nc.variables["time"].units) - self.epoch
time = np.asarray([x.total_seconds() for x in time])[
:, np.newaxis, np.newaxis]
dtime = nc.variables["sst_dtime"][:]
time = (time + dtime) * seapy.secs2day
nc.close()
# Set up the coordinate
lon = np.ma.masked_where(dat.mask, seapy.adddim(lon, len(time)))
lat = np.ma.masked_where(dat.mask, seapy.adddim(lat, len(time)))
if self.grid.east():
lon[lon < 0] += 360
good = dat.nonzero()
data = [seapy.roms.obs.raw_data("TEMP", self.provenance,
dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time[good], lon[good], lat[good],
None, data, self.dt, title)
##############################################################################
#
# IN SITU DATA
#
##############################################################################
class seaglider_profile(obsgen):
"""
class to process SeaGlider .pro files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, dtype=None, temp_limits=None,
salt_limits=None, depth_limit=-15, temp_error=0.2,
salt_error=0.05):
if temp_limits is None:
self.temp_limits = (5, 30)
else:
self.temp_limits = temp_limits
if salt_limits is None:
self.salt_limits = (31, 35.5)
else:
self.salt_limits = salt_limits
if dtype is None:
self.dtype = {'names': ('time', 'pres', 'depth', 'temp', 'cond',
'salt', 'sigma', 'lat', 'lon'),
'formats': ['f4'] * 9}
else:
self.dtype = dtype
self.depth_limit = depth_limit
self.temp_error = temp_error
self.salt_error = salt_error
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="SeaGlider Obs"):
"""
Load a SeaGlider .pro file and convert into an obs structure
"""
import re
# Load the text file. All data goes into the pro dictionary
# as defined by dtype. The header information needs to be parsed
with open(file) as myfile:
header = [myfile.readline() for i in range(19)]
pro = np.loadtxt(myfile, self.dtype, delimiter=',', comments='%')
# Parse the header information
        parser = re.compile(r'^%(\w+): (.*)$')
params = {}
for line in header:
try:
opt = parser.findall(line)
params[opt[0][0]] = opt[0][1]
except:
pass
# Determine the needed information from the headers
glider_name = "GLIDER" if params.get("glider", None) is None else \
"GLIDER_SG" + params["glider"]
provenance = seapy.roms.obs.asprovenance(glider_name)
try:
            date = [int(s) for s in re.findall(r'([\d]{2})\s', params["start"])]
start_time = datetime.datetime.strptime(params["start"].strip(),
"%m %d 1%y %H %M %S")
dtime = (start_time - self.epoch).total_seconds() / 86400
except:
raise ValueError("date format incorrect in file: " + file)
# Make sure that the GPS fix isn't screwy
if self.grid.east():
pro["lon"][pro["lon"] < 0] += 360
dist = seapy.earth_distance(pro["lon"][0], pro["lat"][0],
pro["lon"][-1], pro["lat"][-1])
velocity = dist / pro["time"][-1]
if velocity > 2:
warn("WARNING: GPS fix is incorrect for " + file)
return None
# Build the data with masked entries
temp = np.ma.masked_outside(pro["temp"], self.temp_limits[0],
self.temp_limits[1])
salt = np.ma.masked_outside(pro["salt"], self.salt_limits[0],
self.salt_limits[1])
depth = np.ma.masked_greater(-pro["depth"], self.depth_limit)
good = ~np.ma.getmaskarray(depth)
# Grid it
data = [seapy.roms.obs.raw_data("TEMP", provenance, temp[good],
None, self.temp_error),
seapy.roms.obs.raw_data("SALT", provenance, salt[good],
None, self.salt_error)]
return seapy.roms.obs.gridder(self.grid, pro["time"][good] / 86400 + dtime,
pro["lon"][good],
pro["lat"][good],
depth.compressed(),
data, self.dt, title)
class mooring(obsgen):
"""
Class to process generic moorings into ROMS observation files. This
handles temp, salt, u, and v.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_limits=None,
salt_limits=None, u_limits=None, v_limits=None,
depth_limit=0, temp_error=0.25, salt_error=0.08,
u_error=0.08, v_error=0.08, lat=None, lon=None,
provenance=None):
if temp_limits is None:
self.temp_limits = (5, 35)
else:
self.temp_limits = temp_limits
if salt_limits is None:
self.salt_limits = (31, 35.5)
else:
self.salt_limits = salt_limits
if u_limits is None:
self.u_limits = (-3, 3)
else:
self.u_limits = u_limits
if v_limits is None:
self.v_limits = (-3, 3)
else:
self.v_limits = v_limits
if provenance is None:
self.provenance = seapy.roms.obs.asprovenance("MOORING")
else:
self.provenance = provenance.upper()
self.depth_limit = depth_limit
self.temp_error = temp_error
self.salt_error = salt_error
self.u_error = u_error
self.v_error = v_error
self.lat = np.atleast_1d(lat)
self.lon = np.atleast_1d(lon)
super().__init__(grid, dt, reftime)
def convert_data(self, time, depth, data, error=None, title="Mooring Obs"):
"""
Given a set of data, process into an observation structure
Parameters
----------
time : ndarray
time of observations
depth : ndarray
depth of observations. depth is in rows, time in columns.
If depth does not change with time, it will be replicated in time.
data : dict
data to put into observations. A dictionary using seapy.roms.fields
as keys.
error : dict, optional
error of the observations (same keys and sizes as data)
title : string, optional
title for obs
Returns
-------
obs: seapy.roms.obs.obs
"""
# Check that the lat/lon is in the grid
if self.grid.east():
self.lon[self.lon <= 0] += 360
else:
self.lon[self.lon >= 180] -= 360
if not np.logical_and.reduce((
self.lon >= np.min(self.grid.lon_rho),
self.lon <= np.max(self.grid.lon_rho),
self.lat >= np.min(self.grid.lat_rho),
self.lat <= np.max(self.grid.lat_rho))):
warn("Mooring location is not in grid")
return
depth = np.atleast_1d(depth)
if not error:
error = {}
if not data:
warn("No data is provided")
return
# Process the data
obsdata = []
for field in data:
limit = getattr(self, field + '_limits')
vals = np.ma.masked_outside(data[field], limit[0], limit[1],
copy=False)
            obsdata.append(seapy.roms.obs.raw_data(
                field, self.provenance, vals,
                error.get(field, None),
                getattr(self, field + '_error')))
ndep = depth.size
nt = len(time)
lat = np.resize(self.lat, (nt, ndep))
lon = np.resize(self.lon, (nt, ndep))
depth = np.resize(depth, (nt, ndep))
time = np.resize(time, (nt, ndep))
return seapy.roms.obs.gridder(self.grid, time, lon, lat, depth,
obsdata, self.dt, title)
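# Hypothetical usage sketch (variable names are illustrative): a mooring at a
# fixed position is converted by passing time/depth vectors plus dicts of
# fields keyed by seapy.roms.fields names; values outside the *_limits ranges
# are masked before gridding.
#
#   gen = mooring(grid, dt=1 / 24, lat=21.3, lon=-158.1)
#   obs = gen.convert_data(time, depth,
#                          data={"temp": temp, "salt": salt},
#                          error={"temp": temp_err, "salt": salt_err})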
class tao_mooring(mooring):
"""
class to process TAO files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_limits=None,
salt_limits=None, u_limits=None, v_limits=None,
depth_limit=0, temp_error=0.25, salt_error=0.08,
u_error=0.08, v_error=0.08):
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="TAO Obs"):
"""
Load a TAO netcdf file and convert into an obs structure
"""
vals = {"temp": ["T_20", "QT_5020"],
"salt": ["S_41", "QS_5041"],
"u": ["U_320", "QS_5300"],
"v": ["V_321", "QS_5300"]}
nc = seapy.netcdf(file)
lat = nc.variables["lat"][:]
lon = nc.variables["lon"][:]
if not self.grid.east():
lon[lon > 180] -= 360
lat, lon = np.meshgrid(lat, lon)
time = seapy.roms.num2date(nc, "time", epoch=self.epoch)
depth = -nc.variables["depth"][:]
profile_list = np.where(np.logical_and.reduce((
lon >= np.min(self.grid.lon_rho),
lon <= np.max(self.grid.lon_rho),
lat >= np.min(self.grid.lat_rho),
lat <= np.max(self.grid.lat_rho))))
# If nothing is in the area, return nothing
if not profile_list[0].size:
return None
# Process each of the variables that are present
obsdata = []
for field in vals:
limit = getattr(self, field + '_limits')
if vals[field][0] in nc.variables:
data = nc.variables[vals[field][0]][:]
data = np.ma.masked_outside(
data[profile_list[0], profile_list[1], :, :],
limit[0], limit[1], copy=False)
qc = nc.variables[vals[field][1]][:]
qc = qc[profile_list[0], profile_list[1], :, :]
bad = np.where(np.logical_and(qc != 1, qc != 2))
data[bad] = np.ma.masked
obsdata.append(seapy.roms.obs.raw_data(field, "TAO_ARRAY",
data.compressed(), None,
getattr(self, field + '_error')))
nc.close()
# Build the time, lon, lat, and depth arrays of appropriate size
npts = profile_list[0].size
ndep = depth.size
nt = len(time)
lat = np.resize(lat[profile_list], (nt, ndep, npts))
lat = np.squeeze(np.transpose(lat, (2, 1, 0)))[~data.mask]
lon = np.resize(lon[profile_list], (nt, ndep, npts))
lon = np.squeeze(np.transpose(lon, (2, 1, 0)))[~data.mask]
depth = np.resize(depth, (npts, nt, ndep))
depth = np.squeeze(np.transpose(depth, (0, 2, 1)))[~data.mask]
time = np.squeeze(np.resize(time, (npts, ndep, nt)))[~data.mask]
return seapy.roms.obs.gridder(self.grid, time, lon, lat, depth,
obsdata, self.dt, title)
class argo_ctd(obsgen):
"""
class to process ARGO CTD netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_limits=None,
salt_limits=None, temp_error=0.25,
salt_error=0.1):
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
if salt_limits is None:
self.salt_limits = (10, 35.5)
else:
self.salt_limits = salt_limits
self.temp_error = temp_error
self.salt_error = salt_error
super().__init__(grid, dt, reftime)
def datespan_file(self, file):
"""
        Return just the day that this Argo file covers
"""
nc = seapy.netcdf(file)
try:
d = netCDF4.num2date(nc.variables['JULD'][0],
nc.variables['JULD'].units)
st = datetime.datetime(*d.timetuple()[:3])
en = datetime.datetime(*d.timetuple()[:3] + (23, 59, 59))
except:
st = en = None
pass
finally:
nc.close()
return st, en
def convert_file(self, file, title="Argo Obs"):
"""
Load an Argo file and convert into an obs structure
"""
nc = seapy.netcdf(file, aggdim="N_PROF")
# Load the position of all profiles in the file
lon = nc.variables["LONGITUDE"][:]
lat = nc.variables["LATITUDE"][:]
pro_q = nc.variables["POSITION_QC"][:].astype(int)
# Find the profiles that are in our area with known locations quality
if self.grid.east():
lon[lon < 0] += 360
profile_list = np.where(np.logical_and.reduce((
lat >= np.min(self.grid.lat_rho),
lat <= np.max(self.grid.lat_rho),
lon >= np.min(self.grid.lon_rho),
lon <= np.max(self.grid.lon_rho),
pro_q == 1)))[0]
# Check which are good profiles
profile_qc = nc.variables["PROFILE_PRES_QC"][
profile_list].astype('<U1')
profile_list = profile_list[profile_qc == 'A']
if not profile_list.size:
return None
# Load only the data from those in our area
julian_day = nc.variables["JULD_LOCATION"][profile_list]
argo_epoch = datetime.datetime.strptime(''.join(
nc.variables["REFERENCE_DATE_TIME"][:].astype('<U1')), '%Y%m%d%H%M%S')
time_delta = (self.epoch - argo_epoch).days
file_stamp = datetime.datetime.strptime(''.join(
nc.variables["DATE_CREATION"][:].astype('<U1')), '%Y%m%d%H%M%S')
# Grab data over the previous day
file_time = np.minimum((file_stamp - argo_epoch).days,
int(np.max(julian_day)))
time_list = np.where(julian_day >= file_time - 1)[0]
julian_day = julian_day[time_list]
lon = lon[profile_list[time_list]]
lat = lat[profile_list[time_list]]
profile_list = profile_list[time_list]
# Load the data in our region and time
temp = nc.variables["TEMP"][profile_list, :]
temp_qc = nc.variables["TEMP_QC"][profile_list, :]
salt = nc.variables["PSAL"][profile_list, :]
salt_qc = nc.variables["PSAL_QC"][profile_list, :]
pres = nc.variables["PRES"][profile_list, :]
pres_qc = nc.variables["PRES_QC"][profile_list, :]
nc.close()
# Ensure consistency
full_mask = np.logical_or.reduce((temp.mask, salt.mask, pres.mask))
temp[full_mask] = np.ma.masked
temp_qc[full_mask] = np.ma.masked
salt[full_mask] = np.ma.masked
salt_qc[full_mask] = np.ma.masked
pres[full_mask] = np.ma.masked
pres_qc[full_mask] = np.ma.masked
# Combine the QC codes
qc = np.mean(np.vstack((temp_qc.compressed(), salt_qc.compressed(),
pres_qc.compressed())).astype(int), axis=0)
good_data = np.where(qc == 1)
# Put everything together into individual observations
time = np.resize(julian_day - time_delta,
pres.shape[::-1]).T[~temp.mask][good_data]
lat = np.resize(lat, pres.shape[::-1]).T[~temp.mask][good_data]
lon = np.resize(lon, pres.shape[::-1]).T[~temp.mask][good_data]
depth = -seapy.seawater.depth(pres.compressed()[good_data], lat)
# Apply the limits
temp = np.ma.masked_outside(temp.compressed()[good_data],
self.temp_limits[0], self.temp_limits[1])
salt = np.ma.masked_outside(salt.compressed()[good_data],
self.salt_limits[0], self.salt_limits[1])
data = [seapy.roms.obs.raw_data("TEMP", "CTD_ARGO", temp,
None, self.temp_error),
seapy.roms.obs.raw_data("SALT", "CTD_ARGO", salt,
None, self.salt_error)]
return seapy.roms.obs.gridder(self.grid, time, lon, lat, depth,
data, self.dt, title)
|
from pydantic import BaseModel, Field, UUID4
from typing import Optional
from uuid import uuid4
from api.models import type_str, validators
class AlertTypeBase(BaseModel):
"""Represents a type of alert."""
description: Optional[type_str] = Field(description="An optional human-readable description of the alert type")
value: type_str = Field(description="The value of the alert type")
class AlertTypeCreate(AlertTypeBase):
uuid: UUID4 = Field(default_factory=uuid4, description="The UUID of the alert type")
class AlertTypeRead(AlertTypeBase):
uuid: UUID4 = Field(description="The UUID of the alert type")
class Config:
orm_mode = True
class AlertTypeUpdate(AlertTypeBase):
value: Optional[type_str] = Field(description="The value of the alert type")
_prevent_none: classmethod = validators.prevent_none("value")
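# Hypothetical usage sketch (illustrative values only): AlertTypeCreate
# generates its own UUID when none is supplied, while AlertTypeUpdate allows a
# partial payload whose "value" may be omitted but, via the prevent_none
# validator, not set explicitly to None.
#
#   alert_type = AlertTypeCreate(value="phish", description="Phishing alert")
#   assert alert_type.uuid is not None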
|
#! /usr/bin/env python
'''
This module is part of the FMLC package.
https://github.com/LBNL-ETA/FMLC
'''
import time
class triggering(object):
'''
Class to handle internal triggering of models.
'''
def __init__(self, ts, init_now=False):
'''
Input
-----
        ts (dict): Mapping of trigger names to timesteps in seconds.
init_now (bool): Initialize with current timestep. This is not
recommended as timesteps are not aligned. Good for testing and
debugging. (Default = False)
'''
self.ts = ts
self.init_now = init_now
self._initialize_all_trigger()
def _initialize_all_trigger(self):
'''
        Initialization of all triggers.
'''
now = time.time()
self.trigger = {}
mode = 'prev' if self.init_now else 'next'
for k in self.ts.keys():
self.trigger[k] = self._get_trigger(self.ts[k], now, mode=mode,
integer=(self.ts[k]%1) == 0)
    def refresh_trigger(self, name, now=None):
        '''
        Refresh the trigger.
        Input
        -----
        name (str): Name of the trigger.
        now (float): Current time as a timestamp. Defaults to time.time()
            evaluated at call time.
        '''
        if now is None:
            now = time.time()
        self.trigger[name] = self._get_trigger(self.ts[name], now, mode='next',
                                               integer=(self.ts[name] % 1) == 0)
    def _get_trigger(self, ts, now=None, mode='next', integer=False):
        '''
        Get the current trigger value.
        Input
        -----
        ts (float): The timestep of the trigger.
        now (float): Current time as a timestamp. Defaults to time.time()
            evaluated at call time.
        mode (str): Mode of calculation, either "next" or "prev".
            (Default = "next")
        integer (bool): Round seconds to full integer.
            Recommended when ts > 10 s. (Default = False)
        Return
        ------
        trigger (float): Next trigger as timestamp.
        '''
        if now is None:
            now = time.time()
        trigger = round(now / ts) * ts
if integer:
trigger = int(trigger)
if mode == 'next':
trigger = trigger + ts
elif mode == 'prev':
trigger = trigger - ts
else:
print('ERROR: "mode" must be "next" or "prev"')
return trigger
if __name__ == '__main__':
ts = {}
ts['main'] = 0.5 # seconds
ts['print'] = 1
print('"Main" should be triggered every {} s.'.format(ts['main']))
print('"Print" should be triggered every {} s.'.format(ts['print']))
trigger_test = triggering(ts)
now_init = time.time()
now = now_init
while now < now_init+3:
now = time.time()
if now >= trigger_test.trigger['main']:
print('Main triggered\t{}'.format(round(now, 2)))
trigger_test.refresh_trigger('main', now)
if now >= trigger_test.trigger['print']:
print('Print triggered\t{}'.format(round(now, 2)))
trigger_test.refresh_trigger('print', now) |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib import messages
from django.utils.translation import gettext_lazy as _
from nami import Nami, MemberNotFound
from nami.mock import Session as NamiMock
from .views import NamiSearchView
class IndexTest(TestCase):
fixtures = ["troop_130000.json"]
def setUp(self):
self.user = get_user_model().objects.get(email="user@test")
self.client.force_login(self.user)
def test_must_be_logged_in(self):
self.client.logout()
response = self.client.get(reverse("troop:index", kwargs={"troop_number": 404}))
self.assertEqual(response.status_code, 302)
def test_not_found(self):
response = self.client.get(reverse("troop:index", kwargs={"troop_number": 404}))
self.assertEqual(response.status_code, 403)
def test_not_allowed(self):
response = self.client.get(
reverse("troop:index", kwargs={"troop_number": 130100})
)
self.assertEqual(response.status_code, 403)
def test_found(self):
response = self.client.get(
reverse("troop:index", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 200)
class CreateParticipantTest(TestCase):
fixtures = ["troop_130000.json"]
valid_data = {
"troop": "1", # id of the troop
"first_name": "Trick",
"last_name": "Duck",
"gender": "male",
"birthday": "1.1.1900",
# "email": "", not required
"nami": "12",
"age_section": "cub",
# "is_leader": "", not required
"attendance": [1], # id of the day
# "diet": "", not required
# "medication": "", not required
# "comment": "", not required
}
def setUp(self):
self.user = get_user_model().objects.get(email="user@test")
self.client.force_login(self.user)
def test_get_form(self):
response = self.client.get(
reverse("troop:participant.create", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 200)
def test_post_empty_form(self):
response = self.client.post(
reverse("troop:participant.create", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 422)
def test_post_form(self):
response = self.client.post(
reverse("troop:participant.create", kwargs={"troop_number": 130000}),
self.valid_data,
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
reverse("troop:participant.index", kwargs={"troop_number": 130000}),
)
def test_post_form_addanother(self):
data = self.valid_data.copy()
data["_addanother"] = "1"
response = self.client.post(
reverse("troop:participant.create", kwargs={"troop_number": 130000}), data
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000}),
)
def test_get_form_prefilled(self):
response = self.client.get(
reverse("troop:participant.create", kwargs={"troop_number": 130000}),
data={"first_name": "Trick"},
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'value="Trick"')
class UpdateParticipantTest(TestCase):
fixtures = ["troop_130000.json"]
url = reverse("troop:participant.edit", kwargs={"troop_number": 130000, "pk": 1})
def setUp(self):
self.user = get_user_model().objects.get(email="user@test")
self.client.force_login(self.user)
def test_get_form(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_post_empty_form(self):
response = self.client.post(self.url)
self.assertEqual(response.status_code, 422)
def test_post_form(self):
response = self.client.post(
self.url,
{
"troop": "1",
"first_name": "Track", # new name
"last_name": "Duck",
"gender": "male",
"birthday": "1.1.1900",
"nami": "12",
"age_section": "", # new section (empty)
"attendance": [1],
},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
reverse("troop:participant.index", kwargs={"troop_number": 130000}),
)
class NamiSearchTest(TestCase):
fixtures = ["troop_130000.json"]
def setUp(self):
self.user = get_user_model().objects.get(email="user@test")
self.client.force_login(self.user)
self.original_nami_method = NamiSearchView.nami
self.nami_mock = Nami({}, session_cls=NamiMock)
self.mocked_nami_method = lambda s: self.nami_mock
def tearDown(self):
NamiSearchView.nami = self.original_nami_method
def test_get_form(self):
response = self.client.get(
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 200)
def test_post_empty_form(self):
response = self.client.post(
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 422)
def test_post_form(self):
self.nami_mock.session.response = [
{
"entries_nachname": "Duck",
"entries_vorname": "Trick",
"entries_mitgliedsNummer": 12345,
}
]
NamiSearchView.nami = self.mocked_nami_method
response = self.client.post(
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000}),
{"nami": "12345"},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
reverse("troop:participant.create", kwargs={"troop_number": 130000})
+ "?last_name=Duck&first_name=Trick&nami=12345",
)
m = list(messages.get_messages(response.wsgi_request))
self.assertEqual(1, len(m))
self.assertEqual(messages.SUCCESS, m[0].level)
def test_post_form_empty_nami_settings(self):
response = self.client.post(
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000}),
{"nami": "12345"},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
reverse("troop:participant.create", kwargs={"troop_number": 130000})
+ "?nami=12345",
)
m = list(messages.get_messages(response.wsgi_request))
self.assertEqual(1, len(m))
self.assertEqual(messages.WARNING, m[0].level)
def test_post_not_found(self):
self.nami_mock.session.exception = MemberNotFound
NamiSearchView.nami = self.mocked_nami_method
response = self.client.post(
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000}),
{"nami": "12345"},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
reverse("troop:participant.create", kwargs={"troop_number": 130000})
+ "?nami=12345",
)
m = list(messages.get_messages(response.wsgi_request))
self.assertEqual(1, len(m))
self.assertEqual(messages.INFO, m[0].level)
def test_post_form_already_in_db(self):
response = self.client.post(
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000}),
{"nami": "130001"},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.url,
reverse("troop:participant.edit", kwargs={"troop_number": 130000, "pk": 1}),
)
m = list(messages.get_messages(response.wsgi_request))
self.assertEqual(1, len(m))
self.assertEqual(messages.INFO, m[0].level)
def test_post_form_already_in_db_wrong_troop(self):
response = self.client.post(
reverse("troop:participant.nami-search", kwargs={"troop_number": 130000}),
{"nami": "130002"},
)
self.assertEqual(response.status_code, 409)
m = list(messages.get_messages(response.wsgi_request))
self.assertEqual(1, len(m))
self.assertEqual(messages.ERROR, m[0].level)
class IndexParticipantTest(TestCase):
fixtures = ["troop_130000.json"]
def setUp(self):
self.user = get_user_model().objects.get(email="user@test")
self.client.force_login(self.user)
def test_found(self):
response = self.client.get(
reverse("troop:participant.index", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 200)
def test_contains_creations(self):
response = self.client.get(
reverse("troop:participant.index", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Trick")
self.client.post(
reverse("troop:participant.create", kwargs={"troop_number": 130000}),
CreateParticipantTest.valid_data,
)
response = self.client.get(
reverse("troop:participant.index", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Trick")
class ParticipantExportTest(TestCase):
fixtures = ["troop_130000.json"]
def setUp(self):
self.user = get_user_model().objects.get(email="user@test")
self.client.force_login(self.user)
def test_export(self):
response = self.client.get(
reverse("troop:participant.export", kwargs={"troop_number": 130000})
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response["content-type"], "text/csv")
self.assertEqual(
response["content-disposition"],
'attachment; filename="packmas13_130000.csv"',
)
self.assertContains(response, "Vor")
self.assertContains(response, "Nach")
self.assertContains(response, _("no section"))
self.assertContains(response, "2020-02-20")
self.assertContains(response, "Vegan")
|
"""http://projecteuler.net/problem=056
Powerful digit sum
A googol (10^100) is a massive number: one followed by one-hundred zeros; 100^100 is almost unimaginably large: one followed by two-hundred zeros. Despite their size, the sum of the digits in each number is only 1.
Considering natural numbers of the form, a^b, where a, b < 100, what is the maximum digital sum?
Solution by jontsai <[email protected]>
"""
from utils import *
EXPECTED_ANSWER = 972
max_so_far = 0
for a in range(1, 100):
    for b in range(1, 100):
        value = a ** b
        max_so_far = max(sum_digits(value), max_so_far)
answer = max_so_far
print('Expected: %s, Answer: %s' % (EXPECTED_ANSWER, answer))
|
import os
import requests
from b2stage.endpoints.commons.b2access import B2accessUtilities
from restapi import decorators
from restapi.connectors import celery
from restapi.exceptions import RestApiException
from restapi.services.authentication import Role
from restapi.utilities.logs import log
from seadata.endpoints.commons.cluster import ClusterContainerEndpoint
class PidCache(ClusterContainerEndpoint, B2accessUtilities):
labels = ["helper"]
@decorators.auth.require_any(Role.ADMIN, Role.STAFF)
@decorators.endpoint(
path="/pidcache",
summary="Retrieve values from the pid cache",
responses={200: "Async job started"},
)
def get(self):
c = celery.get_instance()
task = c.celery_app.send_task("inspect_pids_cache")
log.info("Async job: {}", task.id)
return self.return_async_id(task.id)
@decorators.auth.require_any(Role.ADMIN, Role.STAFF)
@decorators.endpoint(
path="/pidcache/<batch_id>",
summary="Fill the pid cache",
responses={200: "Async job started"},
)
def post(self, batch_id):
try:
imain = self.get_main_irods_connection()
ipath = self.get_irods_production_path(imain)
collection = os.path.join(ipath, batch_id)
if not imain.exists(collection):
raise RestApiException(f"Invalid batch id {batch_id}", status_code=404)
c = celery.get_instance()
task = c.celery_app.send_task("cache_batch_pids", args=[collection])
log.info("Async job: {}", task.id)
return self.return_async_id(task.id)
except requests.exceptions.ReadTimeout:
return self.send_errors("B2SAFE is temporarily unavailable", code=503)
|
import rclpy
from rclpy.node import Node
from robot_interfaces.srv import SetMuxSource
from sensor_msgs.msg import Joy
class JoyCommands(Node):
def __init__(self):
super(JoyCommands, self).__init__('joy_commands')
self._subscription = self.create_subscription(Joy, 'joy', self.joy_cb, 10)
self._client = self.create_client(SetMuxSource, '/cmd_vel_source')
self._buttons = {
2: 'nav',
3: 'joy',
}
while not self._client.wait_for_service(timeout_sec=1.0):
self.get_logger().info('cmd_vel_source service not available, waiting...')
def joy_cb(self, msg):
for btn, src in self._buttons.items():
if msg.buttons[btn]:
req = SetMuxSource.Request(name=src)
self._client.call_async(req)
def main():
rclpy.init()
node = JoyCommands()
rclpy.spin(node)
rclpy.shutdown()
if __name__ == '__main__':
main()
|
"""
This module allows doctest to find typechecked functions.
Currently, doctest verifies functions to make sure that their
globals() dict is the __dict__ of their module. In the case of
decorated functions, the globals() dict *is* not the right one.
To enable support for doctest do:
import typecheck.doctest_support
This import must occur before any calls to doctest methods.
"""
def __DocTestFinder_from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
import inspect
if module is None:
return True
elif inspect.isfunction(object) or inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
import doctest as __doctest
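# Usage sketch (hypothetical module name): importing this module is all that
# is needed, as long as it happens before doctest is invoked, e.g.
#
#   import typecheck.doctest_support  # installs the patched _from_module
#   import doctest
#   import mymodule
#   doctest.testmod(mymodule)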
__doctest.DocTestFinder._from_module = __DocTestFinder_from_module |
import os
from glob import glob
import torch
def mkdir_ifnotexists(directory):
if not os.path.exists(directory):
os.mkdir(directory)
def get_class(kls):
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def glob_imgs(path):
imgs = []
for ext in ['*.png', '*.jpg', '*.JPEG', '*.JPG']:
imgs.extend(glob(os.path.join(path, ext)))
return imgs
def split_input(model_input, total_pixels):
'''
    Split the input into chunks so large resolutions fit in CUDA memory.
    Decrease n_pixels if a CUDA out-of-memory error occurs.
'''
    n_pixels = 10000  # NOTE: hard-coded number of pixels per chunk
split = []
for i, indx in enumerate(torch.split(torch.arange(total_pixels).cuda(), n_pixels, dim=0)):
data = model_input.copy()
data['uv'] = torch.index_select(model_input['uv'], 1, indx)
data['object_mask'] = torch.index_select(model_input['object_mask'], 1, indx)
# data['gt_depth'] = torch.index_select(model_input['gt_depth'], 1, indx)
# data['gt_normal'] = torch.index_select(model_input['gt_normal'], 1, indx)
split.append(data)
return split
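# Usage sketch (hypothetical model and shapes): split_input chunks a batch of
# rays into groups of n_pixels, each chunk is evaluated separately, and
# merge_output (defined below) stitches the per-chunk results back together:
#
#   splits = split_input(model_input, total_pixels)
#   res = [model(s) for s in splits]
#   outputs = merge_output(res, total_pixels, batch_size)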
def merge_output(res, total_pixels, batch_size):
''' Merge the split output. '''
model_outputs = {}
for entry in res[0]:
if res[0][entry] is None:
continue
if len(res[0][entry].shape) == 1:
model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, 1) for r in res],
1).reshape(batch_size * total_pixels)
else:
model_outputs[entry] = torch.cat([r[entry].reshape(batch_size, -1, r[entry].shape[-1]) for r in res],
1).reshape(batch_size * total_pixels, -1)
return model_outputs |
"""Provide Hook implementations for contingency checks."""
import logging
from nested_lookup import nested_lookup
from boardfarm.exceptions import ContingencyCheckError
from boardfarm.lib.common import (
check_prompts,
domain_ip_reach_check,
retry_on_exception,
)
from boardfarm.lib.DeviceManager import device_type
from boardfarm.lib.hooks import contingency_impl, hookimpl
from boardfarm.plugins import BFPluginManager
logger = logging.getLogger("tests_logger")
class ContingencyCheck:
"""Contingency check implementation."""
impl_type = "base"
@hookimpl(tryfirst=True)
def contingency_check(self, env_req, dev_mgr, env_helper):
"""Register service check plugins based on env_req.
Reading the key value pairs from env_req, BFPluginManager scans
for relative hook specs and implementations and loads them into a
feature PluginManager (use generate_feature_manager).
Once all plugins are registered, this functions will call the hook
initiating respective service checks.
:param env_req: ENV request provided by a test
:type env_req: dict
"""
logger.info("Executing all contingency service checks under boardfarm")
# initialize a feature Plugin Manager for Contingency check
pm = BFPluginManager("contingency")
# this will load all feature hooks for contingency
pm.load_hook_specs("feature")
all_impls = pm.fetch_impl_classes("feature")
plugins_to_register = [all_impls["boardfarm.DefaultChecks"]]
# referencing this from boardfarm-lgi
dns_env = nested_lookup("DNS", env_req.get("environment_def", {}))
if dns_env:
plugins_to_register.append(all_impls["boardfarm.DNS"])
# ACS reference from boardfarm-lgi
if "tr-069" in env_req.get("environment_def", {}):
plugins_to_register.append(all_impls["boardfarm.ACS"])
# Voice reference from boardfarm-lgi
if "voice" in env_req.get("environment_def", {}):
plugins_to_register.append(all_impls["boardfarm.Voice"])
plugins_to_register.append(all_impls["boardfarm.CheckInterface"])
# since Pluggy executes plugin in LIFO order of registration
# reverse the list so that Default check is executed first
for i in reversed(plugins_to_register):
pm.register(i)
result = pm.hook.service_check(
env_req=env_req, dev_mgr=dev_mgr, env_helper=env_helper
)
# this needs to be orchestrated by hook wrapper maybe
BFPluginManager.remove_plugin_manager("contingency")
return result
class DefaultChecks:
"""Perform these checks even if ENV req is empty."""
impl_type = "feature"
@contingency_impl
def service_check(self, env_req, dev_mgr, env_helper):
"""Implement Default Contingency Hook."""
logger.info("Executing Default service check [check_prompts] for BF")
provisioner = dev_mgr.by_type(device_type.provisioner)
wan = dev_mgr.by_type(device_type.wan)
sipserver = dev_mgr.by_type(device_type.sipcenter)
softphone = dev_mgr.by_type(device_type.softphone)
wan_devices = [wan, provisioner]
lan_devices = []
voice = "voice" in env_req.get("environment_def", {})
if voice:
lan_devices = [dev_mgr.lan, dev_mgr.lan2]
wan_devices = [wan, provisioner, sipserver, softphone]
check_prompts(wan_devices + lan_devices)
logger.info("Default service check [check_prompts] for BF executed")
class CheckInterface:
"""Perform these checks even if ENV req is empty."""
impl_type = "feature"
@contingency_impl(trylast=True)
def service_check(self, env_req, dev_mgr, env_helper):
"""Implement Default Contingency Hook."""
logger.info("Executing CheckInterface service check for BF ")
ip = {}
wan = dev_mgr.by_type(device_type.wan)
# TODO: should be driven by env_req
lan_devices = [dev_mgr.lan, dev_mgr.lan2]
def _start_lan_client(dev):
ipv4, ipv6 = retry_on_exception(dev.start_lan_client, [], retries=1)
return {"ipv4": ipv4, "ipv6": ipv6}
def _setup_as_wan_gateway():
ipv4 = wan.get_interface_ipaddr(wan.iface_dut)
ipv6 = wan.get_interface_ip6addr(wan.iface_dut)
return {"ipv4": ipv4, "ipv6": ipv6}
for dev in lan_devices:
ip[dev.name] = _start_lan_client(dev)
ip["wan"] = _setup_as_wan_gateway()
logger.info("CheckInterface service checks for BF executed")
return ip
class DNS:
"""DNS contingency checks."""
impl_type = "feature"
@contingency_impl
def service_check(self, env_req, dev_mgr):
"""Implement Contingency Hook for DNS."""
logger.info("Executing DNS service check for BF")
board = dev_mgr.by_type(device_type.DUT)
dns_env = nested_lookup("DNS", env_req["environment_def"])
if nested_lookup("ACS_SERVER", dns_env):
acs_dns = dns_env[0]["ACS_SERVER"]
if acs_dns:
output = board.dns.nslookup("acs_server.boardfarm.com")
domain_ip_reach_check(
board.arm,
acs_dns["ipv4"]["reachable"] + acs_dns["ipv6"]["reachable"],
acs_dns["ipv4"]["unreachable"] + acs_dns["ipv6"]["unreachable"],
output,
)
logger.info("DNS service checks for BF executed")
class ACS:
"""ACS contingency checks."""
impl_type = "feature"
@contingency_impl
def service_check(self, env_req, dev_mgr):
"""Implement Contingency Hook for ACS."""
logger.info("Executing ACS service check for BF")
board = dev_mgr.by_type(device_type.DUT)
acs_server = dev_mgr.by_type(device_type.acs_server)
packet_analysis = "packet_analysis" in env_req["environment_def"]["tr-069"]
def check_acs_connection():
return bool(
acs_server.get(board.get_cpeid(), "Device.DeviceInfo.SoftwareVersion")
)
acs_connection = check_acs_connection()
if not acs_connection:
raise ContingencyCheckError("ACS service check Failed.")
if packet_analysis:
def check_packet_analysis_enable():
return acs_server.session_connected
check_packet_analysis_enable()
logger.info("ACS service checks for BF executed")
class Voice:
"""VOICE contingency checks."""
impl_type = "feature"
@contingency_impl
def service_check(self, env_req, dev_mgr):
"""Implement Contingency Hook for VOICE."""
logger.info("Executing Voice service check for BF")
dev_mgr.lan.check_tty()
dev_mgr.lan2.check_tty()
dev_mgr.board.mta_prov_check()
dev_mgr.board.check_sip_endpoints_registration()
logger.info("Voice service checks for BF executed")
|
# @Author : Peizhao Li
# @Contact : [email protected]
import scipy.sparse as sp
import torch
from torch import optim
import torch.nn.functional as F
from args import parse_args
from utils import fix_seed, find_link
from dataloader import get_dataset
from model.utils import preprocess_graph, project
from model.optimizer import loss_function
from model.gae import GCNModelVAE
from eval import fair_link_eval
def main(args):
# Data preparation
G, adj, features, sensitive, test_edges_true, test_edges_false = get_dataset(args.dataset, args.scale,
args.test_ratio)
n_nodes, feat_dim = features.shape
features = torch.from_numpy(features).float().to(args.device)
sensitive_save = sensitive.copy()
adj_norm = preprocess_graph(adj).to(args.device)
adj = sp.coo_matrix(adj + sp.eye(adj.shape[0]))
adj_label = torch.FloatTensor(adj.toarray()).to(args.device)
intra_pos, inter_pos, intra_link_pos, inter_link_pos = find_link(adj, sensitive)
pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
pos_weight = torch.Tensor([pos_weight]).to(args.device)
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
# Initialization
model = GCNModelVAE(feat_dim, args.hidden1, args.hidden2, args.dropout).to(args.device)
optimizer = optim.Adam(model.get_parameters(), lr=args.lr)
# Training
model.train()
for i in range(args.outer_epochs):
for epoch in range(args.T1):
optimizer.zero_grad()
recovered, z, mu, logvar = model(features, adj_norm)
loss = loss_function(preds=recovered, labels=adj_label, mu=mu, logvar=logvar, n_nodes=n_nodes, norm=norm,
pos_weight=pos_weight)
loss.backward()
cur_loss = loss.item()
optimizer.step()
print("Epoch in T1: [{:d}/{:d}];".format((epoch + 1), args.T1), "Loss: {:.3f};".format(cur_loss))
for epoch in range(args.T2):
adj_norm = adj_norm.requires_grad_(True)
recovered = model(features, adj_norm)[0]
if args.eq:
intra_score = recovered[intra_link_pos[:, 0], intra_link_pos[:, 1]].mean()
inter_score = recovered[inter_link_pos[:, 0], inter_link_pos[:, 1]].mean()
else:
intra_score = recovered[intra_pos[:, 0], intra_pos[:, 1]].mean()
inter_score = recovered[inter_pos[:, 0], inter_pos[:, 1]].mean()
loss = F.mse_loss(intra_score, inter_score)
loss.backward()
cur_loss = loss.item()
print("Epoch in T2: [{:d}/{:d}];".format(epoch + 1, args.T2), "Loss: {:.5f};".format(cur_loss))
adj_norm = adj_norm.add(adj_norm.grad.mul(-args.eta)).detach()
adj_norm = adj_norm.to_dense()
for i in range(adj_norm.shape[0]):
adj_norm[i] = project(adj_norm[i])
adj_norm = adj_norm.to_sparse()
# Evaluation
model.eval()
with torch.no_grad():
z = model(features, adj_norm)[1]
hidden_emb = z.data.cpu().numpy()
std = fair_link_eval(hidden_emb, sensitive_save, test_edges_true, test_edges_false)
col = ["auc", "ap", "dp", "true", "false", "fnr", "tnr"]
print("Result below ------")
for term, val in zip(col, std):
print(term, ":", val)
return
if __name__ == "__main__":
args = parse_args()
args.device = torch.device(args.device)
fix_seed(args.seed)
main(args)
|
from mine_generate import mine_generate
import random
import numpy as np
class Cell:
def __init__(self, coordinate):
self.clues = set()
self.reveal = 0
self.probability = 1.0
self.neighbor = 0
self.coordinate = coordinate
self.mined = -1
class MineSweeper(object):
def __init__(self, width, length, num):
self.width = width
self.length = length
self.num = num
self.unknown_cell = []
for row in range(self.width):
for col in range(self.length):
self.unknown_cell.append([row, col])
self.clear_cell = []
self.mine_cell = []
        self.clue_cell = []  # cells whose clue information is still active
self.cells = []
for row in range(self.width):
self.cells.append([])
for col in range(self.length):
cell = Cell([row, col])
self.cells[row].append(cell)
self.cells[0][0].probability = 0.0
self.queue = [[0, 0]]
self.visit = [[0 for col in range(self.length)] for row in range(self.width)]
self.visit[0][0] = 1
self.front = 0
        self.rear = 1  # front/rear pointers of the search queue
self.chains = []
def reveal_query(self, x, y):
self.cells[x][y].reveal = int(
input('The state of Position (' + str(x) + ', ' + str(y) + '): '))
def clues_get(self, x, y):
around = [[-1, -1], [0, -1], [1, -1],
[-1, 0], [1, 0],
[-1, 1], [0, 1], [1, 1]]
clues = set()
for i in range(len(around)):
x_around = x + around[i][0]
y_around = y + around[i][1]
if x_around < 0 or y_around < 0 or x_around > self.width - 1 or y_around > self.length - 1 or \
self.cells[x_around][y_around].mined == 0:
continue
if self.cells[x_around][y_around].mined == 1:
self.cells[x][y].reveal -= 1
else:
clues.add((x_around, y_around))
if self.cells[x][y].mined == -1:
self.unknown_cell.remove([x, y])
self.clear_cell.append([x, y])
self.cells[x][y].mined = 0
self.cells[x][y].clues = clues
def renew_chains(self, x, y):
for chain in self.chains:
if (x, y) in chain.clues:
chain.clues.remove((x, y))
chain.reveal -= self.cells[x][y].mined
if len(chain.clues) == 0:
self.chains.remove(chain)
self.clue_cell.remove(chain.coordinate)
def influence_chains(self, x, y):
self.renew_chains(x, y)
if len(self.cells[x][y].clues) == 0:
return
self.chains.append(self.cells[x][y])
self.clue_cell.append([x, y])
change = True
while change:
change = False
for chain in self.chains:
if chain.reveal != 0 and chain.reveal != len(chain.clues):
for chain_t in self.chains:
if chain_t != chain:
if chain_t.clues.issubset(chain.clues):
change = True
chain.clues -= chain_t.clues
chain.reveal -= chain_t.reveal
else:
change = True
self.chains.remove(chain)
self.clue_cell.remove(chain.coordinate)
for clue in chain.clues:
self.cells[clue[0]][clue[1]].mined = chain.reveal / len(chain.clues)
self.cells[clue[0]][clue[1]].probability = self.cells[clue[0]][clue[1]].mined
if chain.reveal == 0:
self.clear_cell.append([clue[0], clue[1]])
else:
self.mine_cell.append([clue[0], clue[1]])
self.unknown_cell.remove([clue[0], clue[1]])
self.renew_chains(clue[0], clue[1])
for chain in self.chains:
probability = chain.reveal / len(chain.clues)
for clue in chain.clues:
self.cells[clue[0]][clue[1]].probability = probability
def queue_sort(self):
for i in range(self.front + 1, self.rear):
# sort queue according to probability, insertion sort for almost sorted array
key = self.queue[i]
j = i - 1
while j >= self.front and self.cells[self.queue[j][0]][self.queue[j][1]].probability > \
self.cells[key[0]][key[1]].probability:
self.queue[j + 1] = self.queue[j]
j -= 1
self.queue[j + 1] = key
# for i in range(front, rear):
# print('probability', queue[i], self.cells[queue[i][0]][queue[i][1]].probability)
def get_neighbor(self):
around = [[-1, -1], [0, -1], [1, -1],
[-1, 0], [1, 0],
[-1, 1], [0, 1], [1, 1]]
for row in range(self.width):
for col in range(self.length):
count = 0
for i in range(len(around)):
x_around = row + around[i][0]
y_around = col + around[i][1]
if x_around < 0 or y_around < 0 or x_around > self.width - 1 or y_around > self.length - 1 or \
self.cells[x_around][y_around].mined != -1:
continue
count += 1
self.cells[row][col].neighbor = count
def uncertainty(self):
q_prob = self.cells[self.queue[self.front][0]][self.queue[self.front][1]].probability
c_prob = (self.num - len(self.mine_cell)) / len(self.unknown_cell)
self.get_neighbor()
if q_prob > c_prob:
coor = []
neighbor = 9
for row in range(self.width):
for col in range(self.length):
if self.visit[self.cells[row][col].coordinate[0]][self.cells[row][col].coordinate[1]] == 1 or \
self.cells[row][col].probability != 1:
continue
if self.cells[row][col].neighbor < neighbor:
neighbor = self.cells[row][col].neighbor
coor = self.cells[row][col].coordinate
self.cells[coor[0]][coor[1]].probability = c_prob
self.visit[coor[0]][coor[1]] = 1
self.queue.append([coor[0], coor[1]])
self.rear += 1
self.queue_sort()
else:
i = self.front
while i < self.rear:
if self.cells[self.queue[i][0]][self.queue[i][1]].probability > q_prob:
break
i += 1
pos = self.front
for j in range(self.front + 1, i):
if self.cells[self.queue[j][0]][self.queue[j][1]].neighbor < \
self.cells[self.queue[pos][0]][self.queue[pos][1]].neighbor:
pos = j
t = self.queue[pos]
self.queue[pos] = self.queue[self.front]
self.queue[self.front] = t
def info_display(self):
print('Knowledge base about the board: ')
print('Unknown cells: ', self.unknown_cell)
print('Clear cells: ', self.clear_cell)
print('Mine cells: ', self.mine_cell)
print('Clue cells: ', self.clue_cell)
def step(self):
self.queue_sort()
if self.cells[self.queue[self.front][0]][self.queue[self.front][1]].probability != 0:
self.uncertainty()
x, y = self.queue[self.front][0], self.queue[self.front][1]
self.front += 1
next_step = [[-1, -1], [0, -1], [1, -1],
[-1, 0], [1, 0],
[-1, 1], [0, 1], [1, 1]]
for i in range(len(next_step)):
x_next = x + next_step[i][0]
y_next = y + next_step[i][1]
if x_next < 0 or y_next < 0 or x_next > self.width - 1 or y_next > self.length - 1 or \
self.visit[x_next][y_next] == 1:
continue
self.visit[x_next][y_next] = 1
self.queue.append([x_next, y_next])
self.rear += 1
return x, y
def mine_sweeper(self):
game_over = False
receive_prob = 0.9
while self.front < self.rear:
if len(self.unknown_cell) == 0 or len(self.mine_cell) == self.num:
break
if self.cells[self.queue[self.front][0]][self.queue[self.front][1]].probability != 0:
self.uncertainty()
x, y = self.queue[self.front][0], self.queue[self.front][1]
self.front += 1
if self.cells[x][y].mined != 1:
self.info_display()
self.reveal_query(x, y)
if self.cells[x][y].reveal == -1:
game_over = True
break
if random.random() < receive_prob:
self.clues_get(x, y)
self.influence_chains(x, y)
next_step = [[-1, -1], [0, -1], [1, -1],
[-1, 0], [1, 0],
[-1, 1], [0, 1], [1, 1]]
for i in range(len(next_step)):
x_next = x + next_step[i][0]
y_next = y + next_step[i][1]
if x_next < 0 or y_next < 0 or x_next > self.width - 1 or y_next > self.length - 1 or \
self.visit[x_next][y_next] == 1:
continue
self.visit[x_next][y_next] = 1
self.queue.append([x_next, y_next])
self.rear += 1
self.queue_sort()
if game_over:
print('Game over, computer lost!')
else:
self.info_display()
print('Computer win!')
if __name__ == '__main__':
width = 10
length = 10
num = 30
#board = mine_generate(width, length, num)
board = np.array([[0,1,0,1,0],
[0,0,1,0,0],
[0,1,0,1,0],
[0,0,0,0,0],
[0,0,0,0,0]])
print(board)
receive_prob = 1
mine_sweeper = MineSweeper(width, length, num)
# mine_sweeper.mine_sweeper()
while 1:
x, y = mine_sweeper.step()
if mine_sweeper.cells[x][y].mined != 1:
mine_sweeper.cells[x][y].reveal = int(input('The state of Position (' + str(x) + ', ' + str(y) + '): '))
if random.random() < receive_prob:
mine_sweeper.clues_get(x, y)
mine_sweeper.influence_chains(x, y)
if mine_sweeper.cells[x][y].reveal == -1:
break
if len(mine_sweeper.mine_cell) == num or not len(mine_sweeper.unknown_cell):
# mine_sweeper.info_display()
break |
# -------------------------------------------------------------------------
#
# Author: RRD
#
# Created: 24/10/2012
# Copyright: (c) rdamiani 2012
# Licence: <your licence>
# -------------------------------------------------------------------------
import numpy as np
def frustum(Db, Dt, H):
"""This function returns a frustum's volume and center of mass, CM
INPUT:
Parameters
----------
Db : float, base diameter
Dt : float, top diameter
H : float, height
OUTPUTs:
-------
vol : float, volume
cm : float, geometric centroid relative to bottom (center of mass if uniform density)
"""
vol = frustumVol(Db, Dt, H, diamFlag=True)
cm = frustumCG(Db, Dt, H, diamFlag=True)
# vol = np.pi/12*H * (Db**2 + Dt**2 + Db * Dt)
# cm = H/4 * (Db**2 + 3*Dt**2 + 2*Db*Dt) / (Db**2 + Dt**2 + Db*Dt)
return vol, cm
def frustumVol(rb, rt, h, diamFlag=False):
"""This function returns a frustum's volume with radii or diameter inputs.
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
vol : float (scalar/vector), volume
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
return np.pi * (h / 3.0) * (rb * rb + rt * rt + rb * rt)
def frustumCG(rb, rt, h, diamFlag=False):
"""This function returns a frustum's center of mass/gravity (centroid) with radii or diameter inputs.
NOTE: This is for a SOLID frustum, not a shell
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
cg : float (scalar/vector), center of mass/gravity (centroid)
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
return 0.25 * h * (rb ** 2 + 2.0 * rb * rt + 3.0 * rt ** 2) / (rb ** 2 + rb * rt + rt ** 2)
def frustumIzz(rb, rt, h, diamFlag=False):
"""This function returns a frustum's mass-moment of inertia (divided by density) about the
central (axial) z-axis with radii or diameter inputs.
NOTE: This is for a SOLID frustum, not a shell
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
Izz : float (scalar/vector), Moment of inertia about z-axis
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate 2*pi*r*r^2 dr dz from r=0 to r(z), z=0 to h
# Also equals 0.3*Vol * (rt**5.0 - rb**5.0) / (rt**3.0 - rb**3.0)
# Also equals (0.1*np.pi*h * (rt**5.0 - rb**5.0) / (rt - rb) )
return 0.1 * np.pi * h * (rt ** 4.0 + rb * rt ** 3 + rb ** 2 * rt ** 2 + rb ** 3 * rt + rb ** 4.0)
def frustumIxx(rb, rt, h, diamFlag=False):
"""This function returns a frustum's mass-moment of inertia (divided by density) about the
transverse x/y-axis passing through the center of mass with radii or diameter inputs.
NOTE: This is for a SOLID frustum, not a shell
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
    Ixx=Iyy : float (scalar/vector), Moment of inertia about x/y-axis through center of mass (principal axes)
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate pi*r(z)^4/4 + pi*r(z)^2*(z-z_cg)^2 dz from z=0 to h
A = 0.5 * frustumIzz(rb, rt, h)
B = (
np.pi
* h ** 3
/ 80.0
* (
(rb ** 4 + 4.0 * rb ** 3 * rt + 10.0 * rb ** 2 * rt ** 2 + 4.0 * rb * rt ** 3 + rt ** 4)
/ (rb ** 2 + rb * rt + rt ** 2)
)
)
return A + B
def frustumShellVol(rb, rt, t, h, diamFlag=False):
"""This function returns a frustum shell's volume (for computing mass with density) with radii or diameter inputs.
NOTE: This is for a frustum SHELL, not a solid
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
t : float (scalar/vector), thickness
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
    vol : float (scalar/vector), shell volume
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate 2*pi*r*dr*dz from r=ri(z) to ro(z), z=0 to h
rb_o = rb
rb_i = rb - t
rt_o = rt
rt_i = rt - t
# ( (np.pi*h/3.0) * ( (rb_o**2 + rb_o*rt_o + rt_o**2) - (rb_i**2 + rb_i*rt_i + rt_i**2) ) )
return frustumVol(rb_o, rt_o, h) - frustumVol(rb_i, rt_i, h)
def frustumShellCG(rb, rt, t, h, diamFlag=False):
"""This function returns a frustum's center of mass/gravity (centroid) with radii or diameter inputs.
NOTE: This is for a frustum SHELL, not a solid
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
t : float (scalar/vector), thickness
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
    cg : float (scalar/vector), center of mass/gravity (centroid)
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate 2*pi*r*z*dr*dz/V from r=ri(z) to ro(z), z=0 to h
rb_o = rb
rb_i = rb - t
rt_o = rt
rt_i = rt - t
A = (rb_o ** 2 + 2.0 * rb_o * rt_o + 3.0 * rt_o ** 2) - (rb_i ** 2 + 2.0 * rb_i * rt_i + 3.0 * rt_i ** 2)
B = (rb_o ** 2 + rb_o * rt_o + rt_o ** 2) - (rb_i ** 2 + rb_i * rt_i + rt_i ** 2)
return h * A / 4.0 / B
def frustumShellIzz(rb, rt, t, h, diamFlag=False):
"""This function returns a frustum's mass-moment of inertia (divided by density) about the
central (axial) z-axis with radii or diameter inputs.
NOTE: This is for a frustum SHELL, not a solid
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
t : float (scalar/vector), thickness
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
Izz : float (scalar/vector), Moment of inertia about z-axis
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate 2*pi*r*dr*dz from r=ri(z) to ro(z), z=0 to h
rb_o = rb
rb_i = rb - t
rt_o = rt
rt_i = rt - t
return frustumIzz(rb_o, rt_o, h) - frustumIzz(rb_i, rt_i, h)
def frustumShellIxx(rb, rt, t, h, diamFlag=False):
"""This function returns a frustum's mass-moment of inertia (divided by density) about the
transverse x/y-axis passing through the center of mass with radii or diameter inputs.
NOTE: This is for a frustum SHELL, not a solid
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
t : float (scalar/vector), thickness
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
    Ixx=Iyy : float (scalar/vector), Moment of inertia about x/y-axis through center of mass (principal axes)
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate 2*pi*r*dr*dz from r=ri(z) to ro(z), z=0 to h
rb_o = rb
rb_i = rb - t
rt_o = rt
rt_i = rt - t
return frustumIxx(rb_o, rt_o, h) - frustumIxx(rb_i, rt_i, h)
if __name__ == "__main__":
Db = 6.5
Dt = 4.0
H = 120.0
print("From commonse.Frustum: Sample Volume and CM of FRUSTUM=" + 4 * "{:8.4f}, ").format(
*frustum(Db, Dt, H)[0].flatten()
)
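    # Hedged sanity check (added example): with rb == rt the frustum degenerates to a
    # cylinder, so the volume should equal pi*r^2*h and the centroid should sit at h/2.
    r_cyl, h_cyl = 2.0, 10.0
    vol_cyl, cm_cyl = frustum(2.0 * r_cyl, 2.0 * r_cyl, h_cyl)  # frustum() expects diameters
    assert np.isclose(vol_cyl, np.pi * r_cyl ** 2 * h_cyl)
    assert np.isclose(cm_cyl, 0.5 * h_cyl)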
def main():
pass
|
from __future__ import absolute_import, print_function
import unittest
from metavariant.components import VariantComponents, findall_aminochanges_in_text, parse_components_from_aminochange
AA_synonyms = {'Leu653Arg': ['L653R', 'Leu653Arg', '(Leu653Arg)'],
'Cys344Tyr': ['(Cys344Tyr)', 'C344Y', 'Cys344Tyr'],
}
good = 0
bad = 0
total = 6
class TestAminoChangeSearch(unittest.TestCase):
def test_findall_aminochanges_in_text(self):
for a_chg in AA_synonyms.keys():
found = findall_aminochanges_in_text('%r' % AA_synonyms[a_chg])
for synonym in AA_synonyms[a_chg]:
assert synonym in found
def test_parse_components_from_aminochange(self):
for a_chg_list in AA_synonyms.values():
for a_chg in a_chg_list:
assert parse_components_from_aminochange(a_chg) is not None
def test_posedits_from_aminochange(self):
achg = 'Leu653Arg'
comp = VariantComponents(aminochange=achg)
assert comp.posedit == achg
assert 'L653R' in comp.posedit_slang
|
import logging
import time
from typing import Optional, Tuple
import numpy as np
from torch import Tensor
import puc
from puc import PUcKernelType
from apu import config
from apu.datasets.types import TensorGroup
from apu.utils import ViewTo1D
class PUcLearner:
r""" Encapsulates learning for the PUc learner"""
BASE_NAME = "PUc"
def __init__(self, prior: Optional[float] = None):
self._train_start = None
try:
self._kernel_type = PUcKernelType[config.KERNEL_TYPE.upper()]
except KeyError:
raise ValueError(f"Unknown kernel type: {config.KERNEL_TYPE.upper()}")
self._model = None
self._gamma_list = np.mgrid[.1:.9:9j]
self._lambda_list = np.logspace(-6, 1, 20)
self._prior = prior if prior is not None else config.TRAIN_PRIOR
self._flatten = ViewTo1D()
def _flatten_to_np(self, x: Tensor) -> np.ndarray:
r""" Takes a \p torch \p Tensor object and flattens it to be 1D for an SVM """
return self._flatten(x).cpu().numpy()
def name(self) -> str:
r""" Name of the learner"""
return self.BASE_NAME
def fit(self, ts_grp: TensorGroup):
r""" Fit the PUc learner """
msg = f"Training PUc with {config.KERNEL_TYPE.upper()} kernel & prior {self._prior:.2}"
logging.info(f"Starting: {msg}")
self._train_start = time.time()
# Since PUc uses an SVM, must be 1D data vector
p_x = self._flatten_to_np(ts_grp.p_x)
u_tr_x = self._flatten_to_np(ts_grp.u_tr_x)
u_te_x = self._flatten_to_np(ts_grp.u_te_x)
# self._model = puc.sq_pu(xp=p_x, xu=u_tr_x,
self._model = puc.fit(xp_tr=p_x, xu_tr=u_tr_x, xu_te=u_te_x,
gamma_list=self._gamma_list,
kertype=self._kernel_type.value,
prior=self._prior,
lambda_list=self._lambda_list)
logging.info(f"COMPLETED: {msg}")
def decision_function(self, x: Tensor) -> np.ndarray:
r""" Predicts the tensor labels """
assert self._model is not None, "Model not trained"
return puc.decision_function(self._model, self._flatten_to_np(x))
def decision_boundary(self) -> Tuple[float, float]:
r""" Gets linear decision boundary """
assert self._model is not None, "Model not trained"
assert self._kernel_type == PUcKernelType.LINEAR, "Only linear boundary supported"
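        # Assuming alpha holds [w_x, w_y, bias] for a linear score w_x*x + w_y*y + bias
        # (an assumption about the fitted model's layout), setting the score to zero
        # gives the boundary y = m*x + b computed below.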
alpha = self._model["alpha"]
m = -alpha[0] / alpha[1]
b = -alpha[2] / alpha[1]
return m, b
def predict(self, x: Tensor) -> np.ndarray:
r""" Predicts the tensor labels """
assert self._model is not None, "Model not trained"
return puc.predict(self._model, self._flatten_to_np(x))
|
from .bymean import DistanceFromMeanClassifier
from .tensorflow import *
from .tflearn import * |
from typing import List
from antlr4 import Token, DiagnosticErrorListener, FileStream, CommonTokenStream
from antlr4.error.ErrorListener import ErrorListener
from pygls.types import Diagnostic, Range, DiagnosticSeverity, Position, Location
from .antlr_build.diagnosis.SystemVerilogLexer import SystemVerilogLexer as DiagnosisLexer
from .antlr_build.diagnosis.SystemVerilogParser import SystemVerilogParser as DiagnosisParser
# Diagnosis is extremely slow at the moment;
# should use the ANTLR diagnostic tool to figure
# out why.
def parse(self, fname: str):
input_stream = FileStream(fname)
lexer = DiagnosisLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = DiagnosisParser(stream)
listener = DiagnosisListener()
parser.addErrorListener(listener)
tree = parser.system_verilog_text()
return listener.errors
class DiagnosisListener(DiagnosticErrorListener):
def __init__(self):
        self.errors: List[Diagnostic] = []
super().__init__()
def syntaxError(self, recognizer, offendingSymbol: Token, line, column, msg, e):
err = Diagnostic(
range=Range(Position(line, column), Position(line, column+len(offendingSymbol.text))),
message=msg
)
self.errors.append(err)
|
# Author of Aqsa: Yulay Musin
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls import url, include
from aqsa_apps.account.urls import before_login_urlpatterns
from django.views.generic import TemplateView
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = i18n_patterns(
url(r'^registration/', include(before_login_urlpatterns)),
url(r'^account/', include('aqsa_apps.account.urls')),
url(r'^wallet_tag_etc/', include('aqsa_apps.wallet_tag_etc.urls')),
url(r'^transaction/', include('aqsa_apps.transaction.urls')),
url(r'^export_to_file/', include('aqsa_apps.export_to_file.urls')),
url(r'^import_from_file/', include('aqsa_apps.import_from_file.urls')),
url(r'^dashboard/', include('aqsa_apps.dashboard.urls')),
url(r'^report/', include('aqsa_apps.report.urls')),
url(r'^', include('aqsa_apps.about.urls')),
)
urlpatterns += [
url(r'^robots.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
url(r'^humans.txt$', TemplateView.as_view(template_name='humans.txt', content_type='text/plain')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# Copyright (c) 2021, Mahmood Sharif, Keane Lucas, Michael K. Reiter, Lujo Bauer, and Saurabh Shintre
# This file is code used in Malware Makeover
"""
Randomize multiple binaries to test that randomization
doesn't break functionality.
"""
import random
import time
random.seed(time.time())
import sys
sys.path.append('orp')
import peLib
import copy
import func
import inp
import swap
import reorder
import equiv
import preserv
import disp
import semnops
from randtoolkit import reanalyze_functions, patch
VER="0.3"
# ALLOWED_TRANSFORMS = ['equiv', 'swap', 'preserv', \
# 'reorder', 'disp', 'semnops']
ALLOWED_TRANSFORMS = ['disp', 'semnops']
print('******* Allowed transformations: %s *******'%ALLOWED_TRANSFORMS)
def randomize(input_file, n_randomize=10):
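    # Overall flow: read the PE, build the displacement state, recover and analyze the
    # binary's functions, then for n_randomize iterations apply one randomly chosen
    # transformation to each function and patch the byte diffs into the PE; finally
    # write out the patched executable (merging relocation data when present).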
pe_file, epilog = peLib.read_pe(input_file)
# init DispState
disp_state = disp.DispState(pe_file)
# get the changed byte sets
functions = inp.get_functions(input_file)
levels = func.classify_functions(functions)
func.analyze_functions(functions, levels)
# see what happens when randomizing again and again and again...
for i_r in range(n_randomize):
global_diffs = []
changed_bytes = set()
changed_insts = set()
# transform counts
transform_counts = [0]*len(ALLOWED_TRANSFORMS)
for f in filter(lambda x: x.level != -1, functions.itervalues()):
# skip the SEH prolog and epilog functions .. they cause trouble
if "_SEH_" in f.name:
continue
selected_transform = random.choice(ALLOWED_TRANSFORMS)
transform_counts[ALLOWED_TRANSFORMS.index(selected_transform)] += 1
if selected_transform=='equiv': # equivs
diffs, c_b, c_i = equiv.do_equiv_instrs(f, p=0.5)
if diffs:
changed_bytes.update(c_b)
changed_insts.update(c_i)
global_diffs.extend(diffs)
patch(pe_file, disp_state, diffs)
elif selected_transform=='swap': # swaps
swap.liveness_analysis(f.code)
live_regs = swap.get_reg_live_subsets(f.instrs, f.code, f.igraph)
swaps = swap.get_reg_swaps(live_regs)
diffs, c_b, c_i = swap.do_multiple_swaps(f, swaps, p=0.5)
if diffs:
changed_bytes.update(c_b)
changed_insts.update(c_i)
global_diffs.extend(diffs)
patch(pe_file, disp_state, diffs)
elif selected_transform=='preserv': # preservs
preservs, avail_regs = preserv.get_reg_preservations(f)
diffs, c_b, c_i = preserv.do_reg_preservs(f, preservs, avail_regs, p=0.5)
if diffs:
changed_bytes.update(c_b)
changed_insts.update(c_i)
global_diffs.extend(diffs)
patch(pe_file, disp_state, diffs)
elif selected_transform=='reorder': # reorders
diffs, c_b = reorder.do_random_reordering(f, pe_file)
if diffs:
changed_bytes.update(c_b)
global_diffs.extend(diffs)
patch(pe_file, disp_state, diffs)
elif selected_transform=='disp': # displacements
diffs, c_b, c_i = disp.displace_block(f, disp_state)
if diffs:
changed_bytes.update(c_b)
changed_insts.update(c_i)
global_diffs.extend(diffs)
patch(pe_file, disp_state, diffs)
elif selected_transform=='semnops': # semantic nops
diffs, c_b = semnops.do_semnops(f)
if diffs:
changed_bytes.update(c_b)
global_diffs.extend(diffs)
patch(pe_file, disp_state, diffs)
else:
                raise ValueError('Unknown transform type: %s'%selected_transform)
# update
        print('[iter %d]' % i_r)
        print('changed %d bytes (and %d instructions)' % (len(changed_bytes), len(changed_insts)))
        print('transformation counts: %s' % transform_counts)
# reanalyze functions (if not the last iteration)
if i_r<n_randomize-1:
reanalyze_functions(functions, levels)
# add displacements to the pe
adj_pe = peLib.AdjustPE(pe_file)
adj_pe.update_displacement(disp_state)
# write output
output_file = input_file.replace(".exe", "") + "_patched-w-compositions.exe"
peLib.write_pe(output_file, pe_file, epilog)
pe_file.close()
# if need to merge with /tmp/reloc.data
if disp_state.peinfo.getRelocationSize()>0:
disp._merge_file(output_file)
if __name__=="__main__":
binary_paths = [\
'test/caffeine/caffeine.exe', \
'test/checksum-cygwin/cksum.exe', \
'test/diff-cygwin/diff.exe', \
'test/find-cygwin/find.exe', \
'test/grep-cygwin/grep.exe', \
'test/info-cygwin/info.exe', \
'test/less-cygwin/less.exe', \
'test/mv-cygwin/mv.exe', \
'test/pip/pip.exe', \
'test/python/python.exe'
]
# import os
# mal_dir = '../../data/virusshare-samples/samples/'
# bin_names = os.listdir(mal_dir)
# binary_paths = [os.path.join(mal_dir, bin_name, bin_name) for bin_name in bin_names]
for bin_path in binary_paths:
print('====================')
print('Randomizing "%s"...'%(bin_path))
randomize(bin_path, n_randomize=10)
|
"""
Write a Python program to append text to an existing file
"""
def append_text(file_name):
with open(file_name, "a") as file:
file.write("Django is the Python high level web framework!\n")
file.write("and flask is also a good python framework")
with open(file_name, "r") as read_file:
res = [i for i in read_file]
return res
print(append_text("append.txt")) |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from classy_vision import meters
from classy_vision.meters import VideoAccuracyMeter
from test.generic.meter_test_utils import ClassificationMeterTest
class TestVideoAccuracyMeter(ClassificationMeterTest):
def test_accuracy_meter_registry(self):
accuracy_meter = meters.build_meter(
{
"name": "video_accuracy",
"topk": [1, 2],
"clips_per_video_train": 1,
"clips_per_video_test": 2,
}
)
self.assertTrue(isinstance(accuracy_meter, VideoAccuracyMeter))
def test_single_meter_update_and_reset(self):
"""
This test verifies that the meter works as expected on a single
update + reset + same single update.
"""
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# Batchsize = 3, num classes = 3, clips_per_video is 2,
# score is a value in {1, 2, 3}
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
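        # With clips_per_video_test=2, consecutive pairs of rows above are clips of the
        # same video, so the six clip scores correspond to three videos.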
        # Class 0 is the correct class for video 1, class 1 for video 2, and
        # class 2 for video 3
target = torch.tensor([0, 0, 1, 1, 2, 2])
        # Only the first video has the top class correct; all three videos have the
        # correct class in the top 2
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_update_and_reset_test(
meter, model_output, target, expected_value, is_train=False
)
def test_double_meter_update_and_reset(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# Batchsize = 3, num classes = 3, clips_per_video is 2,
# score is a value in {1, 2, 3}.
# Data of two batch is provided
model_outputs = [
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
        # Class 0 is the correct class for video 1, class 1 for video 2, and
        # class 2 for video 3, in both batches
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
        # Each batch has top-1 accuracy of 1/3.0 and top-2 accuracy of 3/3.0,
        # giving cumulative values of 2/6.0 and 6/6.0
expected_value = {"top_1": 2 / 6.0, "top_2": 6 / 6.0}
self.meter_update_and_reset_test(
meter, model_outputs, targets, expected_value, is_train=False
)
def test_meter_invalid_model_output(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
# This model output has 3 dimensions instead of expected 2
model_output = torch.tensor(
[[[3, 2, 1], [1, 2, 3]], [[-1, -3, -4], [-10, -90, -100]]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_invalid_target(self):
meter = VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
# Target has 3 dimensions instead of expected 1 or 2
target = torch.tensor([[[0, 1, 2], [0, 1, 2]]])
self.meter_invalid_meter_input_test(meter, model_output, target)
# Target of clips from the same video is not consistent
target = torch.tensor([0, 2, 1, 1, 2, 2])
self.meter_invalid_update_test(meter, model_output, target, is_train=False)
def test_meter_invalid_topk(self):
meter = VideoAccuracyMeter(
topk=[1, 5], clips_per_video_train=1, clips_per_video_test=2
)
model_output = torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
)
target = torch.tensor([0, 1, 2])
self.meter_invalid_meter_input_test(meter, model_output, target)
def test_meter_get_set_classy_state_test(self):
# In this test we update meter0 with model_output0 & target0
# and we update meter1 with model_output1 & target1 then
# transfer the state from meter1 to meter0 and validate they
# give same expected value.
# Expected value is the expected value of meter1
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
),
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
),
]
        # Class 0 is the correct class for video 1, class 1 for video 2, and class 2
        # for video 3, in both updates
targets = [torch.tensor([0, 0, 1, 1, 2, 2]), torch.tensor([0, 0, 1, 1, 2, 2])]
# Value for second update
expected_value = {"top_1": 1 / 3.0, "top_2": 3 / 3.0}
self.meter_get_set_classy_state_test(
meters, model_outputs, targets, expected_value, is_train=False
)
def test_meter_distributed(self):
# Meter0 will execute on one process, Meter1 on the other
meters = [
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
VideoAccuracyMeter(
topk=[1, 2], clips_per_video_train=1, clips_per_video_test=2
),
]
# Batchsize = 3, num classes = 3, score is a value in {1, 2,
# 3}...3 is the highest score
model_outputs = [
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
torch.tensor(
[[1, 2, 3], [1, 1, 3], [2, 2, 1], [3, 2, 1], [2, 2, 2], [2, 3, 1]],
dtype=torch.float,
), # Meter 0
torch.tensor(
[[3, 2, 1], [3, 1, 2], [1, 2, 2], [1, 2, 3], [2, 2, 2], [1, 3, 2]],
dtype=torch.float,
), # Meter 1
]
        # For both meters, class 0 is the correct class for video 1, class 1 for
        # video 2, and class 2 for video 3
targets = [
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 0
torch.tensor([0, 0, 1, 1, 2, 2]), # Meter 1
]
        # After one update to each meter there is 1 correct top-1 and 4 correct top-2
        # predictions out of 6 videos; the second pair of updates repeats this and adds to the first
expected_values = [
{"top_1": 1 / 6.0, "top_2": 4 / 6.0}, # After one update to each meter
{"top_1": 2 / 12.0, "top_2": 8 / 12.0}, # After two updates to each meter
]
self.meter_distributed_test(
meters, model_outputs, targets, expected_values, is_train=False
)
|
# generated by datamodel-codegen:
# filename: schema/api/teams/createUser.json
# timestamp: 2021-10-01T19:50:55+00:00
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, Field
from ...entity.teams import user
from ...type import basic, profile
class RequestToCreateUserEntity(BaseModel):
name: user.UserName
displayName: Optional[str] = Field(
None, description="Name used for display purposes. Example 'FirstName LastName'"
)
email: basic.Email
timezone: Optional[str] = Field(None, description='Timezone of the user')
isBot: Optional[bool] = Field(
None,
description='When true indicates user is a bot with appropriate privileges',
)
isAdmin: Optional[bool] = Field(
False,
        description='When true indicates user is an administrator for the system with superuser privileges',
)
profile: Optional[profile.Profile] = None
teams: Optional[List[basic.Uuid]] = Field(
None, description='Teams that the user belongs to'
)
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions associated to the disparity map computation step.
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Dict, Union, Tuple
import numpy as np
import xarray as xr
from json_checker import Checker, And, Or
from scipy.ndimage.morphology import binary_dilation
import pandora.constants as cst
from pandora.img_tools import compute_std_raster
class AbstractDisparity:
"""
Abstract Disparity class
"""
__metaclass__ = ABCMeta
disparity_methods_avail: Dict = {}
cfg = None
def __new__(cls, **cfg: dict):
"""
Return the plugin associated with the validation_method given in the configuration
:param cfg: configuration {'validation_method': value}
:type cfg: dictionary
"""
if cls is AbstractDisparity:
if isinstance(cfg["disparity_method"], str):
try:
return super(AbstractDisparity, cls).__new__(cls.disparity_methods_avail[cfg["disparity_method"]])
except KeyError:
logging.error("No disparity method named % supported", cfg["disparity_method"])
raise KeyError
else:
if isinstance(cfg["disparity_method"], unicode): # type: ignore # pylint: disable=undefined-variable
# creating a plugin from registered short name given as unicode (py2 & 3 compatibility)
try:
return super(AbstractDisparity, cls).__new__(
cls.disparity_methods_avail[cfg["disparity_method"].encode("utf-8")]
)
except KeyError:
logging.error(
"No disparity method named % supported",
cfg["disparity_method"],
)
raise KeyError
else:
return super(AbstractDisparity, cls).__new__(cls)
return None
@classmethod
def register_subclass(cls, short_name: str):
"""
Allows to register the subclass with its short name
:param short_name: the subclass to be registered
:type short_name: string
"""
def decorator(subclass):
"""
Registers the subclass in the available methods
:param subclass: the subclass to be registered
:type subclass: object
"""
cls.disparity_methods_avail[short_name] = subclass
return subclass
return decorator
@abstractmethod
def desc(self):
"""
Describes the disparity method
"""
print("Disparity method description")
@abstractmethod
def to_disp(self, cv: xr.Dataset, img_left: xr.Dataset = None, img_right: xr.Dataset = None) -> xr.Dataset:
"""
Disparity computation by applying the Winner Takes All strategy
        :param cv: the cost volume dataset with the data variables:
                - cost_volume 3D xarray.DataArray (row, col, disp)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
:type cv: xarray.Dataset,
:param img_left: left Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_left: xarray.Dataset
:param img_right: right Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_right: xarray.Dataset
:return: Dataset with the disparity map and the confidence measure with the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
:rtype: xarray.Dataset
"""
@staticmethod
def coefficient_map(cv: xr.DataArray) -> xr.DataArray:
"""
Return the coefficient map
:param cv: cost volume
:type cv: xarray.Dataset, with the data variables cost_volume 3D xarray.DataArray (row, col, disp)
:return: the coefficient map
:rtype: 2D DataArray (row, col)
"""
row = cv.coords["row"].data
col = cv.coords["col"].data
# Create the coefficient map
coeff_map = xr.DataArray(
cv["cost_volume"].sel(disp=cv["disp_indices"]).data.astype(np.float32),
coords=[("row", row), ("col", col)],
)
coeff_map.name = "Coefficient Map"
coeff_map.attrs = cv.attrs
return coeff_map
@staticmethod
def approximate_right_disparity(cv: xr.Dataset, img_right: xr.Dataset, invalid_value: float = 0) -> xr.Dataset:
"""
Create the right disparity map, by a diagonal search for the minimum in the left cost volume
ERNST, Ines et HIRSCHMÜLLER, Heiko.
Mutual information based semi-global stereo matching on the GPU.
In : International Symposium on Visual Computing. Springer, Berlin, Heidelberg, 2008. p. 228-239.
:param cv: the cost volume dataset with the data variables:
- cost_volume 3D xarray.DataArray (row, col, disp)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
:type cv: xarray.Dataset
:param img_right: right Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_right: xarray.Dataset
:param invalid_value: disparity to assign to invalid pixels
:type invalid_value: float
:return: Dataset with the right disparity map, the confidence measure and the validity mask with \
the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
- validity_mask 2D xarray.DataArray (row, col)
:rtype: xarray.Dataset
"""
disp_range = cv.coords["disp"].data.astype(float)
col_range = cv.coords["col"].data
row_range = cv.coords["row"].data
# Extract integer disparity
disp_range = np.extract(np.mod(disp_range, 1) == 0, disp_range)
# Allocate the disparity map
data = np.zeros((len(row_range), len(col_range))).astype(np.float32)
disp_map = xr.Dataset(
{"disparity_map": (["row", "col"], data)},
coords={"row": cv.coords["row"], "col": cv.coords["col"]},
)
confidence_measure = compute_std_raster(img_right, cv.attrs["window_size"])
# Create the confidence measure with the original image size and fill it
confidence_measure_full = np.full((len(row_range), len(col_range), 1), np.nan, dtype=np.float32)
offset = cv.attrs["offset_row_col"]
row_off = np.arange(row_range[0] + offset, row_range[-1] - offset + 1)
col_off = np.arange(col_range[0] + offset, col_range[-1] - offset + 1)
if offset != 0:
confidence_measure_full[offset:-offset, offset:-offset, :] = confidence_measure.reshape(
(len(row_off), len(col_off), 1)
)
else:
confidence_measure_full = confidence_measure.reshape((len(row_range), len(col_range), 1))
disp_map["confidence_measure"] = xr.DataArray(
data=confidence_measure_full.astype(np.float32),
dims=["row", "col", "indicator"],
)
# Allocate the validity mask
disp_map["validity_mask"] = xr.DataArray(
np.zeros(disp_map["disparity_map"].shape, dtype=np.uint16),
dims=["row", "col"],
)
disp_map.attrs = cv.attrs
d_range = cv.coords["disp"].data
disp_map.attrs["disp_min"] = d_range[0]
disp_map.attrs["disp_max"] = d_range[-1]
offset = disp_map.attrs["offset_row_col"]
indices_nan = np.isnan(cv["cost_volume"].data)
if cv.attrs["type_measure"] == "max":
cv["cost_volume"].data[indices_nan] = -np.inf
else:
cv["cost_volume"].data[indices_nan] = np.inf
for col in col_range:
x_d = col - disp_range
valid = np.where((x_d >= col_range[0]) & (x_d <= col_range[-1]))
# The disparity interval is missing in the left image
if x_d[valid].size == 0:
disp_map["disparity_map"].loc[dict(col=col)] = invalid_value
# Invalid pixel : the disparity interval is missing in the right image
disp_map["validity_mask"].loc[
dict(col=col)
] += cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING
else:
# Diagonal search for the minimum or maximum
if cv.attrs["type_measure"] == "max":
min_ = (
cv["cost_volume"]
.sel(
col=xr.DataArray(np.flip(x_d[valid]), dims="disp_"),
disp=xr.DataArray(np.flip(disp_range[valid]), dims="disp_"),
)
.argmax(dim="disp_")
)
else:
min_ = (
cv["cost_volume"]
.sel(
col=xr.DataArray(np.flip(x_d[valid]), dims="disp_"),
disp=xr.DataArray(np.flip(disp_range[valid]), dims="disp_"),
)
.argmin(dim="disp_")
)
# Disparity interval is incomplete
if x_d[valid].size != disp_range.size:
# Information: the disparity interval is incomplete (border reached in the right image)
disp_map["validity_mask"].loc[
dict(col=col)
] += cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE
disp_map["disparity_map"].loc[dict(col=col)] = -1 * np.flip(disp_range[valid])[min_.data] # type:ignore
cv["cost_volume"].data[indices_nan] = np.nan
invalid_mc = np.min(indices_nan, axis=2)
# Pixels where the disparity interval is missing in the right image, have a disparity value invalid_value
invalid_pixel = np.where(invalid_mc)
disp_map["disparity_map"].data[invalid_pixel] = invalid_value
if offset > 0:
AbstractDisparity.mask_border(disp_map)
return disp_map
def validity_mask(
self,
disp: xr.Dataset,
img_left: xr.Dataset,
img_right: xr.Dataset,
cv: xr.Dataset,
) -> None:
"""
Create the validity mask of the disparity map
:param disp: dataset with the disparity map and the confidence measure
:type disp: xarray.Dataset with the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray(row, col, indicator)
:param img_left: left Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_left: xarray.Dataset
:param img_right: right Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_right: xarray.Dataset
:param cv: cost volume dataset with the data variables:
- cost_volume 3D xarray.DataArray (row, col, disp)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
:type cv: xarray.Dataset
:return: None
"""
# Allocate the validity mask
disp["validity_mask"] = xr.DataArray(
np.zeros(disp["disparity_map"].shape, dtype=np.uint16), dims=["row", "col"]
)
d_min = int(disp.attrs["disp_min"])
d_max = int(disp.attrs["disp_max"])
col = disp.coords["col"].data
# Since disparity map is full size (input images size)
offset = disp.attrs["offset_row_col"]
# Negative disparity range
if d_max < 0:
bit_1 = np.where((col + d_max) < (col[0] + offset))
# Information: the disparity interval is incomplete (border reached in the right image)
disp["validity_mask"].data[
:,
np.where(((col + d_max) >= (col[0] + offset)) & ((col + d_min) < (col[0] + offset))),
] += cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE
else:
# Positive disparity range
if d_min > 0:
bit_1 = np.where((col + d_min) > (col[-1] - offset))
# Information: the disparity interval is incomplete (border reached in the right image)
disp["validity_mask"].data[
:,
np.where(((col + d_min) <= (col[-1] - offset)) & ((col + d_max) > (col[-1] - offset))),
] += cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE
# Disparity range contains 0
else:
bit_1 = ([],)
# Information: the disparity interval is incomplete (border reached in the right image)
disp["validity_mask"].data[
:,
np.where(((col + d_min) < (col[0] + offset)) | (col + d_max > (col[-1]) - offset)),
] += cst.PANDORA_MSK_PIXEL_RIGHT_INCOMPLETE_DISPARITY_RANGE
# Invalid pixel : the disparity interval is missing in the right image ( disparity range
# outside the image )
disp["validity_mask"].data[:, bit_1] += cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING
if "msk" in img_left.data_vars:
self.allocate_left_mask(disp, img_left)
if "msk" in img_right.data_vars:
self.allocate_right_mask(disp, img_right, bit_1)
# The disp_min and disp_max used to search missing disparity interval are not the local disp_min and disp_max
# in case of a variable range of disparities. So there may be pixels that have missing disparity range (all
# cost are np.nan), but are not detected in the code block above. To find the pixels that have a missing
# disparity range, we search in the cost volume pixels where cost_volume(row,col, for all d) = np.nan
self.mask_invalid_variable_disparity_range(disp, cv)
if offset > 0:
self.mask_border(disp)
@staticmethod
def mask_border(disp: xr.Dataset):
"""
Mask border pixel which haven't been calculated because of the window's size
:param disp: dataset with the disparity map and the confidence measure with the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray(row, col, indicator)
:type disp: xarray.Dataset
:return: None
"""
offset = disp.attrs["offset_row_col"]
if offset > 0:
# Border pixels have invalid disparity, erase the potential previous values
disp["validity_mask"].data[:offset, :] = cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER
disp["validity_mask"].data[-offset:, :] = cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER
disp["validity_mask"].data[offset:-offset, :offset] = cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER
disp["validity_mask"].data[offset:-offset, -offset:] = cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER
@staticmethod
def mask_invalid_variable_disparity_range(disp, cv) -> None:
"""
Mask the pixels that have a missing disparity range, searching in the cost volume
the pixels where cost_volume(row,col, for all d) = np.nan
:param disp: dataset with the disparity map and the confidence measure with the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray(row, col, indicator)
:type disp: xarray.Dataset
:param cv: cost volume dataset with the data variables:
- cost_volume 3D xarray.DataArray (row, col, disp)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
:type cv: xarray.Dataset
:return: None
"""
indices_nan = np.isnan(cv["cost_volume"].data)
missing_disparity_range = np.min(indices_nan, axis=2)
missing_range_y, missing_range_x = np.where(missing_disparity_range)
        # Mask the positions which have a missing disparity range, not already taken into account
condition_to_mask = (
disp["validity_mask"].data[missing_range_y, missing_range_x]
& cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING
) == 0
masking_value = (
disp["validity_mask"].data[missing_range_y, missing_range_x]
+ cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING
)
no_masking_value = disp["validity_mask"].data[missing_range_y, missing_range_x]
disp["validity_mask"].data[missing_range_y, missing_range_x] = np.where(
condition_to_mask, masking_value, no_masking_value
)
@staticmethod
def allocate_left_mask(disp: xr.Dataset, img_left: xr.Dataset) -> None:
"""
Allocate the left image mask
:param disp: dataset with the disparity map and the confidence measure with the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray(row, col, indicator)
:type disp: xarray.Dataset
:param img_left: left Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_left: xarray.Dataset
:return: None
"""
_, r_mask = xr.align(disp["validity_mask"], img_left["msk"])
# Dilatation : pixels that contains no_data in their aggregation window become no_data
dil = binary_dilation(
img_left["msk"].data == img_left.attrs["no_data_mask"],
structure=np.ones((disp.attrs["window_size"], disp.attrs["window_size"])),
iterations=1,
)
# Invalid pixel : no_data in the left image
disp["validity_mask"] += dil.astype(np.uint16) * cst.PANDORA_MSK_PIXEL_LEFT_NODATA_OR_BORDER
# Invalid pixel : invalidated by the validity mask of the left image given as input
disp["validity_mask"] += xr.where(
(r_mask != img_left.attrs["no_data_mask"]) & (r_mask != img_left.attrs["valid_pixels"]),
cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_LEFT,
0,
).astype(np.uint16)
@staticmethod
def allocate_right_mask(disp: xr.Dataset, img_right: xr.Dataset, bit_1: Union[np.ndarray, Tuple]) -> None:
"""
Allocate the right image mask
:param disp: dataset with the disparity map and the confidence measure with the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray(row, col, indicator)
:type disp: xarray.Dataset
        :param img_right: right Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_right: xarray.Dataset
:param bit_1: where the disparity interval is missing in the right image ( disparity range outside the image )
:type: ndarray or Tuple
:return: None
"""
offset = disp.attrs["offset_row_col"]
_, r_mask = xr.align(disp["validity_mask"], img_right["msk"])
d_min = int(disp.attrs["disp_min"])
d_max = int(disp.attrs["disp_max"])
col = disp.coords["col"].data
row = disp.coords["row"].data
# Dilatation : pixels that contains no_data in their aggregation window become no_data
dil = binary_dilation(
img_right["msk"].data == img_right.attrs["no_data_mask"],
structure=np.ones((disp.attrs["window_size"], disp.attrs["window_size"])),
iterations=1,
)
r_mask = xr.where(
(r_mask != img_right.attrs["no_data_mask"]) & (r_mask != img_right.attrs["valid_pixels"]),
1,
0,
).data
# Useful to calculate the case where the disparity interval is incomplete, and all remaining right
# positions are invalidated by the right mask
b_2_7 = np.zeros((len(row), len(col)), dtype=np.uint16)
# Useful to calculate the case where no_data in the right image invalidated the disparity interval
no_data_right = np.zeros((len(row), len(col)), dtype=np.uint16)
col_range = np.arange(len(col))
for dsp in range(d_min, d_max + 1):
# Diagonal in the cost volume
col_d = col_range + dsp
valid_index = np.where((col_d >= col_range[0] + offset) & (col_d <= col_range[-1] - offset))
# No_data and masked pixels do not raise the same flag, we need to treat them differently
b_2_7[:, col_range[valid_index]] += r_mask[:, col_d[valid_index]].astype(np.uint16)
b_2_7[:, col_range[np.setdiff1d(col_range, valid_index)]] += 1
no_data_right[:, col_range[valid_index]] += dil[:, col_d[valid_index]]
no_data_right[:, col_range[np.setdiff1d(col_range, valid_index)]] += 1
# Exclusion of pixels that have flag 1 already enabled
b_2_7[:, bit_1[0]] = 0
no_data_right[:, bit_1[0]] = 0
# Invalid pixel: right positions invalidated by the mask of the right image given as input
disp["validity_mask"].data[
np.where(b_2_7 == len(range(d_min, d_max + 1)))
] += cst.PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_RIGHT
# If Invalid pixel : the disparity interval is missing in the right image (disparity interval
# is invalidated by no_data in the right image )
disp["validity_mask"].data[
np.where(no_data_right == len(range(d_min, d_max + 1)))
] += cst.PANDORA_MSK_PIXEL_RIGHT_NODATA_OR_DISPARITY_RANGE_MISSING
@AbstractDisparity.register_subclass("wta")
class WinnerTakesAll(AbstractDisparity):
"""
WinnerTakesAll class allows to perform the disparity computation step
"""
# Default configuration, do not change this value
_INVALID_DISPARITY = -9999
def __init__(self, **cfg):
"""float
:param cfg: optional configuration
:type cfg: dictionary
"""
self.cfg = self.check_conf(**cfg)
self._invalid_disparity = self.cfg["invalid_disparity"]
def check_conf(self, **cfg: Union[str, int, float, bool]) -> Dict[str, Union[str, int, float, bool]]:
"""
Add default values to the dictionary if there are missing elements and check if the dictionary is correct
:param cfg: disparity configuration
:type cfg: dict
:return cfg: disparity configuration updated
:rtype: dict
"""
# Give the default value if the required element is not in the configuration
if "invalid_disparity" not in cfg:
cfg["invalid_disparity"] = self._INVALID_DISPARITY
schema = {
"disparity_method": And(str, lambda input: "wta"),
"invalid_disparity": Or(int, float, lambda input: np.isnan(input)),
}
checker = Checker(schema)
checker.validate(cfg)
return cfg
def desc(self) -> None:
"""
Describes the disparity method
:return: None
"""
print("Winner takes all method")
def to_disp(self, cv: xr.Dataset, img_left: xr.Dataset = None, img_right: xr.Dataset = None) -> xr.Dataset:
"""
Disparity computation by applying the Winner Takes All strategy
        :param cv: the cost volume dataset with the data variables:
- cost_volume 3D xarray.DataArray (row, col, disp)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
:type cv: xarray.Dataset
:param img_left: left Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_left: xarray.Dataset
:param img_right: right Dataset image containing :
- im : 2D (row, col) xarray.DataArray
- msk : 2D (row, col) xarray.DataArray
:type img_right: xarray.Dataset
:return: Dataset with the disparity map and the confidence measure with the data variables :
- disparity_map 2D xarray.DataArray (row, col)
- confidence_measure 3D xarray.DataArray (row, col, indicator)
:rtype: xarray.Dataset
"""
indices_nan = np.isnan(cv["cost_volume"].data)
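        # NaN costs mark disparities that could not be computed; they are temporarily
        # replaced by -inf/+inf so that argmax/argmin ignore them, and restored to NaN
        # once the winning disparities have been selected.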
# Winner Takes All strategy
if cv.attrs["type_measure"] == "max":
# Disparities are computed by selecting the maximal cost at each pixel
cv["cost_volume"].data[indices_nan] = -np.inf
disp = self.argmax_split(cv)
else:
# Disparities are computed by selecting the minimal cost at each pixel
cv["cost_volume"].data[indices_nan] = np.inf
disp = self.argmin_split(cv)
cv["cost_volume"].data[indices_nan] = np.nan
row = cv.coords["row"]
col = cv.coords["col"]
# ----- Disparity map -----
disp_map = xr.Dataset({"disparity_map": (["row", "col"], disp)}, coords={"row": row, "col": col})
invalid_mc = np.min(indices_nan, axis=2)
# Pixels where the disparity interval is missing in the right image, have a disparity value invalid_value
invalid_pixel = np.where(invalid_mc)
disp_map["disparity_map"].data[invalid_pixel] = self._invalid_disparity
# Save the disparity map in the cost volume
cv["disp_indices"] = disp_map["disparity_map"].copy(deep=True)
disp_map.attrs = cv.attrs
d_range = cv.coords["disp"].data
disp_map.attrs["disp_min"] = d_range[0]
disp_map.attrs["disp_max"] = d_range[-1]
# ----- Confidence measure -----
# Allocate the confidence measure in the disparity_map dataset
if "confidence_measure" in cv.data_vars:
disp_map["confidence_measure"] = cv["confidence_measure"]
# Remove temporary values
del indices_nan
del invalid_mc
return disp_map
@staticmethod
def argmin_split(cost_volume: xr.Dataset) -> np.ndarray:
"""
Find the indices of the minimum values for a 3D DataArray, along axis 2.
Memory consumption is reduced by splitting the 3D Array.
:param cost_volume: the cost volume dataset
:type cost_volume: xarray.Dataset
:return: the disparities for which the cost volume values are the smallest
:rtype: np.ndarray
"""
ncol, nrow, ndsp = cost_volume["cost_volume"].shape # pylint: disable=unused-variable
disp = np.zeros((ncol, nrow), dtype=np.float32)
# Numpy argmin is making a copy of the cost volume.
# To reduce memory, numpy argmin is applied on a small part of the cost volume.
# The cost volume is split (along the row axis) into multiple sub-arrays with a step of 100
cv_chunked_y = np.array_split(cost_volume["cost_volume"].data, np.arange(100, ncol, 100), axis=0)
y_begin = 0
for col, cv_y in enumerate(cv_chunked_y): # pylint: disable=unused-variable
# To reduce memory, the cost volume is split (along the col axis) into
# multiple sub-arrays with a step of 100
cv_chunked_x = np.array_split(cv_y, np.arange(100, nrow, 100), axis=1)
x_begin = 0
for row, cv_x in enumerate(cv_chunked_x): # pylint: disable=unused-variable
disp[y_begin : y_begin + cv_y.shape[0], x_begin : x_begin + cv_x.shape[1]] = cost_volume.coords[
"disp"
].data[np.argmin(cv_x, axis=2)]
x_begin += cv_x.shape[1]
y_begin += cv_y.shape[0]
return disp
@staticmethod
def argmax_split(cost_volume: xr.Dataset) -> np.ndarray:
"""
Find the indices of the maximum values for a 3D DataArray, along axis 2.
Memory consumption is reduced by splitting the 3D Array.
:param cost_volume: the cost volume dataset
:type cost_volume: xarray.Dataset
:return: the disparities for which the cost volume values are the highest
:rtype: np.ndarray
"""
ncol, nrow, ndisp = cost_volume["cost_volume"].shape # pylint: disable=unused-variable
disp = np.zeros((ncol, nrow), dtype=np.float32)
# Numpy argmax is making a copy of the cost volume.
# To reduce memory, numpy argmax is applied on a small part of the cost volume.
# The cost volume is split (along the row axis) into multiple sub-arrays with a step of 100
cv_chunked_col = np.array_split(cost_volume["cost_volume"].data, np.arange(100, ncol, 100), axis=0)
col_begin = 0
for col, cv_y in enumerate(cv_chunked_col): # pylint: disable=unused-variable
# To reduce memory, the cost volume is split (along the col axis)
# into multiple sub-arrays with a step of 100
cv_chunked_row = np.array_split(cv_y, np.arange(100, nrow, 100), axis=1)
row_begin = 0
for row, cv_x in enumerate(cv_chunked_row): # pylint: disable=unused-variable
disp[
col_begin : col_begin + cv_y.shape[0],
row_begin : row_begin + cv_x.shape[1],
] = cost_volume.coords["disp"].data[np.argmax(cv_x, axis=2)]
row_begin += cv_x.shape[1]
col_begin += cv_y.shape[0]
return disp
|
__author__ = 'rcj1492'
__created__ = '2017.12'
__license__ = 'MIT'
def walk_data(input_data):
''' a generator function for retrieving data in a nested dictionary
:param input_data: dictionary or list with nested data
:return: string with dot_path, object with value of endpoint
'''
def _walk_dict(input_dict, path_to_root):
if not path_to_root:
yield '.', input_dict
for key, value in input_dict.items():
key_path = '%s.%s' % (path_to_root, key)
type_name = value.__class__.__name__
yield key_path, value
if type_name == 'dict':
for dot_path, value in _walk_dict(value, key_path):
yield dot_path, value
elif type_name == 'list':
for dot_path, value in _walk_list(value, key_path):
yield dot_path, value
def _walk_list(input_list, path_to_root):
for i in range(len(input_list)):
item_path = '%s[%s]' % (path_to_root, i)
type_name = input_list[i].__class__.__name__
yield item_path, input_list[i]
if type_name == 'dict':
for dot_path, value in _walk_dict(input_list[i], item_path):
yield dot_path, value
elif type_name == 'list':
for dot_path, value in _walk_list(input_list[i], item_path):
yield dot_path, value
if isinstance(input_data, dict):
for dot_path, value in _walk_dict(input_data, ''):
yield dot_path, value
elif isinstance(input_data, list):
for dot_path, value in _walk_list(input_data, ''):
yield dot_path, value
else:
raise ValueError('walk_data() input_data argument must be a list or dictionary.')
def segment_path(dot_path):
''' a function to separate the path segments in a dot_path key
:param dot_path: string with dot path syntax
:return: list of string segments of path
'''
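    # For example, segment_path('.him[0].her') returns ['him', '0', 'her'].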
import re
    digit_pat = re.compile(r'\[(\d+)\]')
key_list = dot_path.split('.')
segment_list = []
for key in key_list:
if key:
item_list = digit_pat.split(key)
for item in item_list:
if item:
segment_list.append(item)
return segment_list
def transform_data(function, input_data):
''' a function to apply a function to each value in a nested dictionary
:param function: callable function with a single input of any datatype
:param input_data: dictionary or list with nested data to transform
:return: dictionary or list with data transformed by function
'''
# construct copy
try:
from copy import deepcopy
output_data = deepcopy(input_data)
except:
raise ValueError('transform_data() input_data argument cannot contain module datatypes.')
# walk over data and apply function
for dot_path, value in walk_data(input_data):
current_endpoint = output_data
segment_list = segment_path(dot_path)
segment = None
if segment_list:
for i in range(len(segment_list)):
try:
segment = int(segment_list[i])
except:
segment = segment_list[i]
if i + 1 == len(segment_list):
pass
else:
current_endpoint = current_endpoint[segment]
current_endpoint[segment] = function(value)
return output_data
def clean_data(input_value):
''' a function to transform a value into a json or yaml valid datatype
:param input_value: object of any datatype
:return: object with json valid datatype
'''
# pass normal json/yaml datatypes
if input_value.__class__.__name__ in ['bool', 'str', 'float', 'int', 'NoneType']:
pass
# transform byte data to base64 encoded string
elif isinstance(input_value, bytes):
from base64 import b64encode
input_value = b64encode(input_value).decode()
# convert tuples and sets into lists
elif isinstance(input_value, tuple) or isinstance(input_value, set):
new_list = []
new_list.extend(input_value)
input_value = transform_data(clean_data, new_list)
# recurse through dictionaries and lists
elif isinstance(input_value, dict) or isinstance(input_value, list):
input_value = transform_data(clean_data, input_value)
# convert to string all python objects and callables
else:
input_value = str(input_value)
return input_value
def reconstruct_dict(dot_paths, values):
''' a method for reconstructing a dictionary from the values along dot paths '''
output_dict = {}
for i in range(len(dot_paths)):
if i + 1 <= len(values):
path_segments = segment_path(dot_paths[i])
current_nest = output_dict
for j in range(len(path_segments)):
key_name = path_segments[j]
try:
key_name = int(key_name)
except:
pass
if j + 1 == len(path_segments):
if isinstance(key_name, int):
current_nest.append(values[i])
else:
current_nest[key_name] = values[i]
else:
next_key = path_segments[j+1]
try:
next_key = int(next_key)
except:
pass
if isinstance(next_key, int):
if not key_name in current_nest.keys():
current_nest[key_name] = []
current_nest = current_nest[key_name]
else:
if isinstance(key_name, int):
current_nest.append({})
current_nest = current_nest[len(current_nest) - 1]
else:
if not key_name in current_nest.keys():
current_nest[key_name] = {}
current_nest = current_nest[key_name]
return output_dict
if __name__ == '__main__':
# test walk_data
from collections import OrderedDict
recursive_paths = []
recursive_values = []
test_dict = {
'you': {
'me': {
'us': 'them'
}
},
'him': [ { 'her': { 'their': 'our' } } ],
'here': [ { 'there': [ 'everywhere' ] } ]
}
ordered_dict = OrderedDict(test_dict)
for dot_path, value in walk_data(ordered_dict):
recursive_paths.append(dot_path)
recursive_values.append(value)
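    # Hedged illustration (added example): transform_data applies clean_data to every
    # nested value, converting the tuple to a list and the bytes to a base64 string.
    sample = {'a': (1, 2), 'b': b'xyz'}
    cleaned = transform_data(clean_data, sample)
    assert cleaned == {'a': [1, 2], 'b': 'eHl6'}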
# test segment_paths and reconstruct_dict
dot_paths = []
values = []
for number in (3,7,11):
dot_paths.append(recursive_paths[number])
values.append(recursive_values[number])
rebuilt_dict = reconstruct_dict(dot_paths, values)
assert rebuilt_dict == test_dict |
from testfixtures import StringComparison as S, compare
from testfixtures.compat import PY2
from unittest import TestCase
class Tests(TestCase):
    def test_equal_yes(self):
        self.failUnless('on 40220' == S(r'on \d+'))
    def test_equal_no(self):
        self.failIf('on xxx' == S(r'on \d+'))
    def test_not_equal_yes(self):
        self.failIf('on 40220' != S(r'on \d+'))
    def test_not_equal_no(self):
        self.failUnless('on xxx' != S(r'on \d+'))
    def test_comp_in_sequence(self):
        self.failUnless((
            1, 2, 'on 40220'
        ) == (
            1, 2, S(r'on \d+')
        ))
    def test_not_string(self):
        self.failIf(40220 == S(r'on \d+'))
    def test_repr(self):
        compare('<S:on \\d+>',
                repr(S(r'on \d+')))
    def test_str(self):
        compare('<S:on \\d+>',
                str(S(r'on \d+')))
    def test_sort(self):
        a = S('a')
        b = S('b')
        c = S('c')
        compare(sorted(('d', c, 'e', a, 'a1', b)),
                [a, 'a1', b, c, 'd', 'e'])
    if PY2:
        # cmp no longer exists in Python 3!
        def test_cmp_yes(self):
            self.failIf(cmp(S(r'on \d+'), 'on 4040'))
        def test_cmp_no(self):
            self.failUnless(cmp(S(r'on \d+'), 'on xx'))
|
from flask import Flask, request, render_template
import main.judge_ques as Jg
import main.judge_image as Img
import main.utils as Utils
import main.mock as Mock
import os,json
import main.db as db
from main.WXBizDataCrypt import WXBizDataCrypt
app = Flask(__name__)
'''
@project: Official website
@author: Laurel
'''
@app.route('/')
def admin():
return render_template('admin.html')
# Overall test route
@app.route('/mini/test')
def test():
d = db.DB()
r = d.already_in('sfdasdf', '13333333333')
if r is None:
return "None"
else:
return r['phone']
# Redirect to the image recognition test page
@app.route('/mini/recognition/front')
def recognition_front():
return render_template('drugcheck.html')
# Image recognition test module
@app.route('/mini/recognition/test', methods=['GET', 'POST'])
def recognition_test():
return Mock.recognition_test(request, render_template)
'''
@project: questionnaire
@author: Laurel
@updated_at: 20200322
'''
# Questionnaire
@app.route('/mini/questionnaire')
def questionnaire():
args = request.args.get('paper')
return Jg.judge(args)
'''
@project: drug check image recognition
@author: Laurel
@updated_at: -
'''
# Image recognition
@app.route('/mini/recognition', methods=['GET', 'POST'])
def recognition():
openid = request.form.get('openid')
image = request.files.get('file')
path_list = Utils.saveImage(image, openid)
return Img.judgeimage(path_list[0], path_list[1])
# Upload personal information
@app.route('/mini/recognition/information', methods=['POST'])
def recognitionInformation():
params = json.loads(request.get_data(as_text=True))
information = params['information']
app.logger.info(information)
return ''
'''
public
'''
# Log in and fetch openid and sessionKey
@app.route('/mini/loginstatus')
def login_status():
r, openid, sessionkey = Utils.fetchOpenIdAndSession(request)
registed = Utils.is_registed(openid)
if registed is None:
return json.dumps({'status': 'unregisted', 'openid':openid, 'session_key':sessionkey})
else:
return r
# Mini-program registration
@app.route('/mini/regist', methods=['GET', 'POST'])
def drug_regist():
params = json.loads(request.get_data(as_text=True))
appid = params['appid']
iv = params['iv']
session_key = params['session_key']
en_data = params['encryptedData']
openid = params['openid']
pc = WXBizDataCrypt(appid, session_key)
un_data = pc.decrypt(en_data, iv)
return Utils.regist(openid, un_data['phoneNumber'])
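# Hedged example of the JSON body that /mini/regist expects (field names are
# taken from the handler above; the values are placeholders, not real data):
#
#   {
#       "appid": "wx0000000000000000",
#       "iv": "...",
#       "session_key": "...",
#       "encryptedData": "...",
#       "openid": "..."
#   }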
if __name__ == '__main__':
    # Server configuration
app.run(host="0.0.0.0", port=3000, debug=True, threaded=True) |
import threading
class Adc(object):
"""Thread-safe wrapper around an ADC object."""
def __init__(self, adc):
"""Create a new Adc instance
Args:
adc: The raw ADC which this class makes thread-safe by synchronizing
its read calls.
"""
self._adc = adc
self._lock = threading.Lock()
def read_adc(self, adc_number):
"""Read a value from the ADC
Args:
adc_number: ADC channel to read.
Returns:
The value read from the given ADC channel (as a float).
"""
with self._lock:
return self._adc.read_adc(adc_number)
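
if __name__ == '__main__':
    # Minimal, hedged demo with a fake ADC; any object exposing
    # read_adc(channel) works (a real hardware ADC object is the usual case).
    class _FakeAdc(object):
        def read_adc(self, adc_number):
            return 0.5 * adc_number

    safe_adc = Adc(_FakeAdc())
    print(safe_adc.read_adc(3))  # -> 1.5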
|
from setuptools import setup
setup(name='comic-scraper',
version='0.9.0',
      description='Scrapes comics and mangas and creates cbz (/pdf) files for offline reading',
url='https://github.com/AbstractGeek/comic-scraper',
download_url='https://github.com/AbstractGeek/comic-scraper/tarball/0.9.0',
author='Dinesh Natesan',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Topic :: Games/Entertainment',
],
keywords='comics manga scraper',
packages=['comic_scraper'],
install_requires=[
'beautifulsoup4==4.6.0',
'certifi==2017.7.27.1',
'chardet==3.0.4',
'futures==3.1.1',
'idna==2.6',
'img2pdf==0.2.4',
'olefile==0.44',
'Pillow==4.3.0',
'requests==2.18.4',
'urllib3==1.22'
],
entry_points={
'console_scripts':
['comic-scraper=comic_scraper.comic_scraper:main'],
},
include_package_data=True,
zip_safe=False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 21 12:06:17 2018
@author: zoli
"""
from export.filter_volumes import filter_volumes
def get_server_name(server_id:str,instances:list) -> str:
for instance in instances:
if instance.id == server_id:
return instance.name
return ""
def volumes_prepare(volumes:list,instances:list,projects:dict) -> list:
"""
Convert volumes to dictionary and add extra parameters like server name and project name.
"""
v2 = []
for volume in volumes:
volume_dict = volume.to_dict()
volume_dict["project_name"] = projects[volume_dict["os-vol-tenant-attr:tenant_id"]]
if volume_dict["name"] == "None" or volume_dict["name"] == None:
volume_dict["name"] = ""
if volume_dict["name"] != "": #replace space to _ so its usable in the volume name, if it has volume name
volume_dict["name"] = str(volume_dict["name"]).replace(" ","_")
#check if volume is attached to an instance and act accordingly
if volume_dict["attachments"] != [] :
volume_dict["server_id"] = volume_dict["attachments"][0]["server_id"]
volume_dict["server_name"] = get_server_name(volume_dict["attachments"][0]["server_id"],instances)
volume_dict["mountpoint"] = volume_dict["attachments"][0]["device"].split('/')[-1]
if volume_dict["mountpoint"] == "vda":
volume_dict["mountpoint"] = "root"
else:
volume_dict["server_id"] = "not attached"
volume_dict["server_name"] = ""
volume_dict["mountpoint"] = ""
volume_dict["volume_migration_name"] = volume_dict["id"]+"-"+volume_dict["name"]+"-"+volume_dict["server_name"]+"-"+volume_dict["mountpoint"]
v2.append(volume_dict)
v2 = filter_volumes(v2)
return v2
|
# Copyright 2021 IBM Corp.
# SPDX-License-Identifier: Apache-2.0
import unittest
import sys
from xskipper import Registration
from xskipper.testing.utils import XskipperTestCase
class XskipperRegistrationTests(XskipperTestCase):
def setUp(self):
super(XskipperRegistrationTests, self).setUp()
def tearDown(self):
super(XskipperRegistrationTests, self).tearDown()
# tests to check the API works fine (logic is tested in Scala)
def test_registration(self):
# Add MetadataFilterFactor
Registration.addMetadataFilterFactory(self.spark, 'io.xskipper.search.filters.BaseFilterFactory')
# Add IndexFactory
Registration.addIndexFactory(self.spark, 'io.xskipper.index.BaseIndexFactory')
# Add MetaDataTranslator
Registration.addMetaDataTranslator(self.spark, 'io.xskipper.metadatastore.parquet.ParquetBaseMetaDataTranslator')
# Add ClauseTranslator
Registration.addClauseTranslator(self.spark, 'io.xskipper.metadatastore.parquet.ParquetBaseClauseTranslator')
if __name__ == "__main__":
xskipper_test = unittest.TestLoader().loadTestsFromTestCase(XskipperRegistrationTests)
result = unittest.TextTestRunner(verbosity=3).run(xskipper_test)
sys.exit(not result.wasSuccessful())
|
import example
print(f"result={example.add(2, 5)}")
|
import FWCore.ParameterSet.Config as cms
# DQM Environment
dqmEnv = cms.EDAnalyzer("DQMEventInfo",
# put your subsystem name here (this goes into the foldername)
subSystemFolder = cms.untracked.string('YourSubsystem'),
# set the window for eventrate calculation (in minutes)
eventRateWindow = cms.untracked.double(0.5),
# define folder to store event info (default: EventInfo)
eventInfoFolder = cms.untracked.string('EventInfo')
)
|
from unittest import TestCase
from mock import Mock, MagicMock, patch
from broker.service.spreadsheet_upload_service import SpreadsheetUploadService
class SpreadsheetUploadServiceTest(TestCase):
def setUp(self) -> None:
self.ingest_api = Mock('ingest_api')
self.storage_service = Mock('storage_service')
self.storage_service.store = Mock(return_value='path')
self.mock_submission = Mock('submission')
self.mock_template_mgr = Mock('template_mgr')
self.importer = MagicMock('importer')
self.importer.import_file = Mock(return_value=(self.mock_submission, self.mock_template_mgr))
self.importer.update_spreadsheet_with_uuids = Mock()
        self.spreadsheet_upload_service = SpreadsheetUploadService(self.ingest_api, self.storage_service, self.importer)
def test_upload_success(self):
# when
self.spreadsheet_upload_service.upload('url', 'path')
# then
self.importer.import_file.assert_called_with('path', 'url', project_uuid=None)
self.importer.update_spreadsheet_with_uuids.assert_called_with(self.mock_submission, self.mock_template_mgr, 'path')
def test_upload_update_success(self):
# when
self.spreadsheet_upload_service.upload_updates('url', 'path')
# then
self.importer.import_file.assert_called_with('path', 'url', is_update=True)
self.importer.update_spreadsheet_with_uuids.assert_not_called() |
from rest_framework.schemas import AutoSchema
class CustomSchema(AutoSchema):
def get_link(self, path, method, base_url):
link = super().get_link(path, method, base_url)
link._fields += self.get_core_fields()
return link
def get_core_fields(self):
return getattr(self.view, "coreapi_fields", ())
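# Hedged usage sketch (the view attribute name `coreapi_fields` matches the
# getattr() above; the coreapi.Field arguments below are illustrative
# assumptions, not part of this module):
#
#   class MyView(APIView):
#       schema = CustomSchema()
#       coreapi_fields = (
#           coreapi.Field('page', required=False, location='query'),
#       )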
|
import sys
executable = sys.executable
import mpi4py
mpi4py.rc.threads = False # no multithreading...
from mpi4py import MPI
def xtomo_reconstruct(data, theta, rot_center='None', Dopts=None, order='sino'):
if order != 'sino':
data=np.swapaxes(data,0,1)
    if Dopts is None:
        Dopts = {'algo': 'iradon', 'GPU': True, 'n_workers': 1}
if Dopts['n_workers']==1:
from xtomo.loop_sino_simple import reconstruct
tomo = reconstruct(data, theta, rot_center, Dopts)
else:
from xtomo.spawn import reconstruct_mpiv as recon
tomo=recon(data,theta,rot_center, Dopts)
return tomo
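# Hedged example of the Dopts dictionary this wrapper expects (keys are the
# ones read above; the values shown are placeholders):
#
#   Dopts = {'algo': 'iradon', 'GPU': True, 'n_workers': 4}
#   tomo = xtomo_reconstruct(sino, theta, rot_center=1024, Dopts=Dopts)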
# reconstruct from file
def reconstruct_mpi(fname, n_workers, Dopts):
Dopts['file_in']=fname
from xtomo.IO import getfilename
file_out=getfilename(Dopts['file_in'], Dopts['algo'])
#print('file_out=',file_out)
Dopts['file_out']=file_out
import xtomo
arg1=xtomo.__path__.__dict__["_path"][0]+'/mpi_worker_FIO.py'
comm = MPI.COMM_WORLD.Spawn(
executable,
args = [arg1,], #args=[sys.argv[0], start_worker],
maxprocs=n_workers)
comm_intra = comm.Merge(MPI.COMM_WORLD)
# broadcast the options
comm_intra.bcast(Dopts,root=0)
# make sure we sent the data
# doing the reconstruction
comm_intra.barrier()
comm_intra.Free()
del comm_intra
comm.Disconnect()
#comm.Free()
# reconstruct from numpy arrays to numpy array
def reconstruct_mpiv(sino, theta, rot_center, Dopts):
# Dopts['n_workers']=n_workers
#Dopts['file_in']=fname
#from xtomo.IO import getfilename
#file_out=getfilename(Dopts['file_in'], Dopts['algo'])
#print('file_out=',file_out)
#Dopts['file_out']=file_out
import xtomo
import xtomo.communicator
# arg1=xtomo.__path__.__dict__["_path"][0]+'/mpi_worker.py'
arg1=xtomo.__path__[0]+'/mpi_worker.py'
comm = MPI.COMM_WORLD.Spawn(
executable,
args = [arg1,], #args=[sys.argv[0], start_worker],
maxprocs=Dopts['n_workers'])
comm_intra = comm.Merge(MPI.COMM_WORLD)
print('spawned size',comm_intra.Get_size()-1)
# broadcast the data
sino_shape = sino.shape
theta_shape = theta.shape
num_slices =sino_shape[0]
num_rays = sino_shape[2]
tomo_shape = (num_slices, num_rays, num_rays)
comm_intra.bcast(sino_shape,root=0)
comm_intra.bcast(theta_shape,root=0)
comm_intra.bcast(rot_center,root=0)
comm_intra.bcast(Dopts,root=0)
# IO in shared memory
shared_sino = xtomo.communicator.allocate_shared(sino_shape, comm = comm_intra)
shared_theta = xtomo.communicator.allocate_shared(theta_shape, comm=comm_intra)
shared_tomo = xtomo.communicator.allocate_shared(tomo_shape, comm = comm_intra)
print('allocated shared')
shared_sino[...]=sino
shared_theta[...]=theta
# make sure we sent the data
comm_intra.barrier()
# do the reconstruction in spawned mpi calls
comm_intra.barrier()
# import numpy as np
# print("shared tomo and data",np.sum(shared_tomo),np.sum(shared_sino),np.sum(sino))
# done with the job
comm_intra.Free()
del comm_intra
comm.Disconnect()
del shared_sino, shared_theta, xtomo
return shared_tomo
#comm.Free()
import tempfile
tempdir = tempfile.gettempdir()+'/xpack/'
import os.path as path
import os
try:
    os.mkdir(tempdir)
except OSError:
    # directory already exists (or cannot be created); proceed either way
    pass
import numpy as np
# memory mapping file
def mmdata(shp, fname, mode='w+'):
filename = path.join(tempdir, fname)
fp = np.memmap(filename, dtype='float32', mode=mode, shape=shp)
return fp
#shared_sino = xtomo.communicator.allocate_shared(sino_shape, comm = comm_intra)
def reconstruct_mpimm(sino, theta, rot_center, n_workers, Dopts, order='sino'):
import xtomo
import xtomo.communicator
import numpy as np
if order == 'proj':
sino=np.swapaxes(sino,0,1)
arg1=xtomo.__path__.__dict__["_path"][0]+'/mpi_worker_mm.py'
comm = MPI.COMM_WORLD.Spawn(
executable,
args = [arg1,], #args=[sys.argv[0], start_worker],
maxprocs=n_workers)
comm_intra = comm.Merge(MPI.COMM_WORLD)
print('spawned size',comm_intra.Get_size())
# broadcast the data
sino_shape = sino.shape
theta_shape = theta.shape
num_slices =sino_shape[0]
num_rays = sino_shape[2]
tomo_shape = (num_slices, num_rays, num_rays)
comm_intra.bcast(sino_shape,root=0)
comm_intra.bcast(theta_shape,root=0)
comm_intra.bcast(rot_center,root=0)
comm_intra.bcast(Dopts,root=0)
#print('broadcasted')
# sinogram in shared memory, which is memory mapped
shared_sino = mmdata(sino_shape, 'data', mode='w+')
#print('sinogram on file')
shared_theta = mmdata(theta_shape, 'theta', mode = 'w+')
shared_tomo =mmdata(tomo_shape, 'tomo', mode='w+')
shared_sino[...]=sino
shared_theta[...]=theta
#print('tomo on file')
# make sure the files exist
comm_intra.barrier()
# doing the reconstruction
comm_intra.barrier()
comm_intra.barrier()
comm_intra.Free()
del comm_intra
comm.Disconnect()
return shared_tomo
#comm.Free()
|
import requests
theEnum = {'place': '位置', 'kind': '類型', 'rentprice': '租金', 'area': '坪數'}
region = {}
region[0] = [
{'id': 0, 'txt': '北部'},
{'id': 1, 'txt': '台北市'},
{'id': 3, 'txt': '新北市'},
{'id': 6, 'txt': '桃園市'},
{'id': 4, 'txt': '新竹市'},
{'id': 5, 'txt': '新竹縣'},
{'id': 21, 'txt': '宜蘭縣'},
{'id': 2, 'txt': '基隆市'}
];
region[1] = [
{'id': 0, 'txt': '中部'},
{'id': 8, 'txt': '台中市'},
{'id': 10, 'txt': '彰化縣'},
{'id': 14, 'txt': '雲林縣'},
{'id': 7, 'txt': '苗栗縣'},
{'id': 11, 'txt': '南投縣'}
];
region[2] = [
{'id': 0, 'txt': '南部'},
{'id': 17, 'txt': '高雄市'},
{'id': 15, 'txt': '台南市'},
{'id': 12, 'txt': '嘉義市'},
{'id': 13, 'txt': '嘉義縣'},
{'id': 19, 'txt': '屏東縣'}
]
region[3] = [
{'id': 0, 'txt': '東部'},
{'id': 22, 'txt': '台東縣'},
{'id': 23, 'txt': '花蓮縣'},
{'id': 24, 'txt': '澎湖縣'},
{'id': 25, 'txt': '金門縣'},
{'id': 26, 'txt': '連江縣'}
]
section = {}
section[1] = {1: "中正區", 2: "大同區", 3: "中山區", 4: "松山區", 5: "大安區", 6: "萬華區", 7: "信義區", 8: "士林區", 9: "北投區", 10: "內湖區",
11: "南港區", 12: "文山區"};
section[2] = {13: "仁愛區", 14: "信義區", 15: "中正區", 16: "中山區", 17: "安樂區", 18: "暖暖區", 19: "七堵區"};
section[3] = {20: "萬里區", 21: "金山區", 26: "板橋區", 27: "汐止區", 28: "深坑區", 29: "石碇區", 30: "瑞芳區", 31: "平溪區", 32: "雙溪區",
33: "貢寮區", 34: "新店區", 35: "坪林區", 36: "烏來區", 37: "永和區", 38: "中和區", 39: "土城區", 40: "三峽區", 41: "樹林區",
42: "鶯歌區", 43: "三重區", 44: "新莊區", 45: "泰山區", 46: "林口區", 47: "蘆洲區", 48: "五股區", 49: "八里區", 50: "淡水區",
51: "三芝區", 52: "石門區"};
section[4] = {370: "香山區", 371: "東區", 372: "北區"};
section[5] = {54: "竹北市", 55: "湖口鄉", 56: "新豐鄉", 57: "新埔鎮", 58: "關西鎮", 59: "芎林鄉", 60: "寶山鄉", 61: "竹東鎮", 62: "五峰鄉",
63: "橫山鄉", 64: "尖石鄉", 65: "北埔鄉", 66: "峨嵋鄉"};
section[6] = {73: "桃園區", 67: "中壢區", 68: "平鎮區", 69: "龍潭區", 70: "楊梅區", 71: "新屋區", 72: "觀音區", 74: "龜山區", 75: "八德區",
76: "大溪區", 77: "復興區", 78: "大園區", 79: "蘆竹區"};
section[7] = {88: "苗栗市", 80: "竹南鎮", 81: "頭份市", 82: "三灣鄉", 83: "南庄鄉", 84: "獅潭鄉", 85: "後龍鎮", 86: "通霄鎮", 87: "苑裡鎮",
89: "造橋鄉", 90: "頭屋鄉", 91: "公館鄉", 92: "大湖鄉", 93: "泰安鄉", 94: "銅鑼鄉", 95: "三義鄉", 96: "西湖鄉", 97: "卓蘭鎮"};
section[8] = {98: "中區", 99: "東區", 100: "南區", 101: "西區", 102: "北區", 103: "北屯區", 104: "西屯區", 105: "南屯區", 106: "太平區",
107: "大里區", 108: "霧峰區", 109: "烏日區", 110: "豐原區", 111: "后里區", 112: "石岡區", 113: "東勢區", 114: "和平區",
115: "新社區", 116: "潭子區", 117: "大雅區", 118: "神岡區", 119: "大肚區", 120: "沙鹿區", 121: "龍井區", 122: "梧棲區",
123: "清水區", 124: "大甲區", 125: "外埔區", 126: "大安區"};
section[10] = {127: "彰化市", 128: "芬園鄉", 129: "花壇鄉", 130: "秀水鄉", 131: "鹿港鎮", 132: "福興鄉", 133: "線西鄉", 134: "和美鎮",
135: "伸港鄉", 136: "員林市", 137: "社頭鄉", 138: "永靖鄉", 139: "埔心鄉", 140: "溪湖鎮", 141: "大村鄉", 142: "埔鹽鄉",
143: "田中鎮", 144: "北斗鎮", 145: "田尾鄉", 146: "埤頭鄉", 147: "溪州鄉", 148: "竹塘鄉", 149: "二林鎮", 150: "大城鄉",
151: "芳苑鄉", 152: "二水鄉"};
section[11] = {153: "南投市", 154: "中寮鄉", 155: "草屯鎮", 156: "國姓鄉", 157: "埔里鎮", 158: "仁愛鄉", 159: "名間鄉", 160: "集集鎮",
161: "水里鄉", 162: "魚池鄉", 163: "信義鄉", 164: "竹山鎮", 165: "鹿谷鄉"};
section[12] = {373: "西區", 374: "東區"};
section[13] = {167: "番路鄉", 168: "梅山鄉", 169: "竹崎鄉", 170: "阿里山鄉", 171: "中埔鄉", 172: "大埔鄉", 173: "水上鄉", 174: "鹿草鄉",
175: "太保市", 176: "朴子市", 177: "東石鄉", 178: "六腳鄉", 179: "新港鄉", 180: "民雄鄉", 181: "大林鎮", 182: "溪口鄉",
183: "義竹鄉", 184: "布袋鎮"};
section[14] = {185: "斗南鎮", 186: "大埤鄉", 187: "虎尾鎮", 188: "土庫鎮", 189: "褒忠鄉", 190: "東勢鄉", 191: "臺西鄉", 192: "崙背鄉",
193: "麥寮鄉", 194: "斗六市", 195: "林內鄉", 196: "古坑鄉", 197: "莿桐鄉", 198: "西螺鎮", 199: "二崙鄉", 200: "北港鎮",
201: "水林鄉", 202: "口湖鄉", 203: "四湖鄉", 204: "元長鄉"};
section[15] = {206: "東區", 207: "南區", 208: "中西區", 209: "北區", 210: "安平區", 211: "安南區", 212: "永康區", 213: "歸仁區", 214: "新化區",
215: "左鎮區", 216: "玉井區", 217: "楠西區", 218: "南化區", 219: "仁德區", 220: "關廟區", 221: "龍崎區", 222: "官田區",
223: "麻豆區", 224: "佳里區", 225: "西港區", 226: "七股區", 227: "將軍區", 228: "學甲區", 229: "北門區", 230: "新營區",
231: "後壁區", 232: "白河區", 233: "東山區", 234: "六甲區", 235: "下營區", 236: "柳營區", 237: "鹽水區", 238: "善化區",
239: "大內區", 240: "山上區", 241: "新市區", 242: "安定區"};
section[17] = {243: "新興區", 244: "前金區", 245: "苓雅區", 246: "鹽埕區", 247: "鼓山區", 248: "旗津區", 249: "前鎮區", 250: "三民區",
251: "楠梓區", 252: "小港區", 253: "左營區", 254: "仁武區", 255: "大社區", 258: "岡山區", 259: "路竹區", 260: "阿蓮區",
261: "田寮區", 262: "燕巢區", 263: "橋頭區", 264: "梓官區", 265: "彌陀區", 266: "永安區", 267: "湖內區", 268: "鳳山區",
269: "大寮區", 270: "林園區", 271: "鳥松區", 272: "大樹區", 273: "旗山區", 274: "美濃區", 275: "六龜區", 276: "內門區",
277: "杉林區", 278: "甲仙區", 279: "桃源區", 280: "那瑪夏區", 281: "茂林區", 282: "茄萣區"};
section[19] = {295: "屏東市", 296: "三地門鄉", 297: "霧臺鄉", 298: "瑪家鄉", 299: "九如鄉", 300: "里港鄉", 301: "高樹鄉", 302: "鹽埔鄉",
303: "長治鄉", 304: "麟洛鄉", 305: "竹田鄉", 306: "內埔鄉", 307: "萬丹鄉", 308: "潮州鎮", 309: "泰武鄉", 310: "來義鄉",
311: "萬巒鄉", 312: "崁頂鄉", 313: "新埤鄉", 314: "南州鄉", 315: "林邊鄉", 316: "東港鎮", 317: "琉球鄉", 318: "佳冬鄉",
319: "新園鄉", 320: "枋寮鄉", 321: "枋山鄉", 322: "春日鄉", 323: "獅子鄉", 324: "車城鄉", 325: "牡丹鄉", 326: "恆春鎮",
327: "滿州鄉"};
section[21] = {328: "宜蘭市", 329: "頭城鎮", 330: "礁溪鄉", 331: "壯圍鄉", 332: "員山鄉", 333: "羅東鎮", 334: "三星鄉", 335: "大同鄉",
336: "五結鄉", 337: "冬山鄉", 338: "蘇澳鎮", 339: "南澳鄉"};
section[22] = {341: "台東市", 342: "綠島鄉", 343: "蘭嶼鄉", 344: "延平鄉", 345: "卑南鄉", 346: "鹿野鄉", 347: "關山鎮", 348: "海端鄉",
349: "池上鄉", 350: "東河鄉", 351: "成功鎮", 352: "長濱鄉", 353: "太麻里鄉", 354: "金峰鄉", 355: "大武鄉", 356: "達仁鄉"};
section[23] = {357: "花蓮市", 358: "新城鄉", 359: "秀林鄉", 360: "吉安鄉", 361: "壽豐鄉", 362: "鳳林鎮", 363: "光復鄉", 364: "豐濱鄉",
365: "瑞穗鄉", 366: "萬榮鄉", 367: "玉里鎮", 368: "卓溪鄉", 369: "富里鄉"};
section[24] = {283: "馬公市", 284: "西嶼鄉", 285: "望安鄉", 286: "七美鄉", 287: "白沙鄉", 288: "湖西鄉"};
section[25] = {289: "金沙鎮", 290: "金湖鎮", 291: "金寧鄉", 292: "金城鎮", 293: "烈嶼鄉", 294: "烏坵鄉"};
section[26] = {22: "南竿鄉", 23: "北竿鄉", 24: "莒光鄉", 25: "東引鄉", 256: "東沙", 257: "南沙"};
kind = {1: '整層住家', 2: '獨立套房', 3: '分租套房', 4: '雅房', 8: '車位', 24: '其他'}
def get_region_name(input_region_name):
for i in region:
for re in region[i]:
if re['txt'] == input_region_name:
return re['id']
return -1
def get_section_name(input_section_name):
for i in section:
for number, sec in section[i].items():
if sec == input_section_name:
return i, number
return -1, -1
def get_place_arg(input_place_name):
region_number = get_region_name(input_place_name)
if region_number != -1:
return 'region=' + str(region_number)
else:
the_region_number, section_number = get_section_name(input_place_name)
if the_region_number != -1 and section_number != -1:
            return 'region=' + str(the_region_number) + '&section=' + str(section_number)
return ''
def get_kind_arg(input_kind_name):
for k, v in kind.items():
if v == input_kind_name:
return 'kind=' + str(k)
return ''
def get_arguments_content(lists):
content = ''
for n in lists:
m = n.split('=')
if len(m) <= 1:
continue
input_key = m[0]
input_value = m[1]
for k, v in theEnum.items():
if v == input_key:
if k == 'place':
place_arg = get_place_arg(input_value)
if place_arg != '':
content += place_arg + '&'
break
if k == 'kind':
kind_arg = get_kind_arg(input_value)
if kind_arg != '':
content += kind_arg + '&'
break
content += k + '=' + input_value + '&'
break
return content[:-1] if len(content) > 0 and content[-1] == '&' else content
def rent_591_object_list(argu):
print(argu)
argu_content = get_arguments_content(argu)
if argu_content == '':
return []
print(argu_content)
target_url = 'https://rent.591.com.tw/home/search/rsList?is_new_list=1&' + argu_content
print(target_url)
header = {
'Host': 'rent.591.com.tw',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/59.0.3071.115 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-TW,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-CN;q=0.2',
'Cookie': 'PHPSESSID=sdsini6kdld2gnblp9h9nuur20; userLoginHttpReferer=https%253A%252F%252Fwww.591.com.tw%252Fuser-login.html; 591equipment=08826710014998500681545450; ResolutionSort=1; index_keyword_search_analysis=%7B%22role%22%3A%221%22%2C%22type%22%3A2%2C%22keyword%22%3A%22%22%2C%22selectKeyword%22%3A%22%E4%B8%89%E9%87%8D%E5%8D%80%22%2C%22menu%22%3A%22400-800%E8%90%AC%22%2C%22hasHistory%22%3A1%2C%22hasPrompt%22%3A0%2C%22history%22%3A0%7D; detail-guide=1; last_search_type=1; localTime=2; imgClick=5407932; __utma=82835026.1593737612.1499849967.1500696632.1500867495.2; __utmc=82835026; __utmz=82835026.1500696632.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); user_index_role=1; user_browse_recent=a%3A5%3A%7Bi%3A0%3Ba%3A2%3A%7Bs%3A4%3A%22type%22%3Bi%3A1%3Bs%3A7%3A%22post_id%22%3Bs%3A7%3A%225424210%22%3B%7Di%3A1%3Ba%3A2%3A%7Bs%3A4%3A%22type%22%3Bi%3A1%3Bs%3A7%3A%22post_id%22%3Bs%3A7%3A%225372536%22%3B%7Di%3A2%3Ba%3A2%3A%7Bs%3A4%3A%22type%22%3Bi%3A1%3Bs%3A7%3A%22post_id%22%3Bs%3A7%3A%225429867%22%3B%7Di%3A3%3Ba%3A2%3A%7Bs%3A4%3A%22type%22%3Bi%3A1%3Bs%3A7%3A%22post_id%22%3Bs%3A7%3A%225413919%22%3B%7Di%3A4%3Ba%3A2%3A%7Bs%3A4%3A%22type%22%3Bi%3A1%3Bs%3A7%3A%22post_id%22%3Bs%3A7%3A%225422226%22%3B%7D%7D; ba_cid=a%3A5%3A%7Bs%3A6%3A%22ba_cid%22%3Bs%3A32%3A%225f91a10df47762976645e67b67ee4864%22%3Bs%3A7%3A%22page_ex%22%3Bs%3A48%3A%22https%3A%2F%2Frent.591.com.tw%2Frent-detail-5372536.html%22%3Bs%3A4%3A%22page%22%3Bs%3A48%3A%22https%3A%2F%2Frent.591.com.tw%2Frent-detail-5424210.html%22%3Bs%3A7%3A%22time_ex%22%3Bi%3A1501484991%3Bs%3A4%3A%22time%22%3Bi%3A1501485062%3B%7D; __auc=e1db4f0b15d3607a4d9066c79d8; c10f3143a018a0513ebe1e8d27b5391c=1; client:unique:pc=eyJpdiI6Ink3RHRFU01hYnVEZVwvSkVPWThuVjBRPT0iLCJ2YWx1ZSI6ImVtRW5ZWkNZeEtlOWxQYTMrZlFuS0E9PSIsIm1hYyI6ImZkZTlmMjBmMTFjZThkYjgwY2VlMWZiNjhlNjdjOWQ2NjUyMWE0ZmEzMzRlNjEyYWMwYjQxNzIzMDM2MmEyZDUifQ%3D%3D; is_new_index=1; is_new_index_redirect=1; loginNoticeStatus=1; loginNoticeNumber=3; new_rent_list_kind_test=0; _ga=GA1.3.1593737612.1499849967; _gid=GA1.3.1595742426.1501650062; _ga=GA1.4.1593737612.1499849967; _gid=GA1.4.1595742426.1501650062; urlJumpIp=3; urlJumpIpByTxt=%E6%96%B0%E5%8C%97%E5%B8%82;'
}
res = requests.Session()
req = res.get(target_url, headers=header)
data = req.json()
return data['data']['data']
def rent_591_object_list_tostring(items):
limit = 5
cnt = 0
content = ''
for item in items:
title = item['address_img']
layout = item['layout']
kind_name = item['kind_name']
full_address = item['section_name'] + item['street_name'] + item['addr_number_name']
price = item['price']
link = "https://rent.591.com.tw/rent-detail-{}.html".format(item['id'])
content += "{}\n{}\n{}\n{}\n{}\n{}\n\n".format(title, layout, kind_name, full_address, price, link)
cnt += 1
if cnt >= limit:
break
return content
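# Hedged usage sketch (the argument strings use the Chinese keys from theEnum
# above; the request itself requires rent.591.com.tw to be reachable and the
# hard-coded cookie/header to still be accepted):
#
#   items = rent_591_object_list(['位置=台北市', '類型=整層住家'])
#   print(rent_591_object_list_tostring(items))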
|
import os, json, time, gc, copy, shutil, random, pickle, sys, pdb
from datetime import datetime
import numpy as np
from allennlp.common.tee_logger import TeeLogger
from allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from pytz import timezone
import faiss
import torch
import torch.nn as nn
from tqdm import tqdm
def cuda_device_parser(str_ids):
return [int(stridx) for stridx in str_ids.strip().split(',')]
def from_original_sentence2left_mention_right_tokens_before_berttokenized(sentence):
mention_start = '<target>'
mention_end = '</target>'
original_tokens = sentence.split(' ')
mention_start_idx = int(original_tokens.index(mention_start))
mention_end_idx = int(original_tokens.index(mention_end))
    if mention_end_idx == len(original_tokens) - 1:
return original_tokens[:mention_start_idx], original_tokens[mention_start_idx+1:mention_end_idx], []
else:
return original_tokens[:mention_start_idx], original_tokens[mention_start_idx+1:mention_end_idx], original_tokens[mention_end_idx+1:]
def parse_cuidx2encoded_emb_for_debugging(cuidx2encoded_emb, original_cui2idx):
print('/////Some entities embs are randomized for debugging./////')
for cuidx in tqdm(original_cui2idx.values()):
if cuidx not in cuidx2encoded_emb:
cuidx2encoded_emb.update({cuidx:np.random.randn(*cuidx2encoded_emb[0].shape)})
return cuidx2encoded_emb
def parse_cuidx2encoded_emb_2_cui2emb(cuidx2encoded_emb, original_cui2idx):
cui2emb = {}
for cui, idx in original_cui2idx.items():
cui2emb.update({cui:cuidx2encoded_emb[idx]})
return cui2emb
def experiment_logger(args):
'''
    :param args: parsed arguments from biencoder_parameters
    :return: directory created for this experiment's log files
'''
experimet_logdir = args.experiment_logdir # / is included
timestamp = datetime.now(timezone('Asia/Tokyo'))
str_timestamp = '{0:%Y%m%d_%H%M%S}'.format(timestamp)[2:]
dir_for_each_experiment = experimet_logdir + str_timestamp
if os.path.exists(dir_for_each_experiment):
dir_for_each_experiment += '_d'
dir_for_each_experiment += '/'
logger_path = dir_for_each_experiment + 'teelog.log'
os.mkdir(dir_for_each_experiment)
if not args.debug:
sys.stdout = TeeLogger(logger_path, sys.stdout, False) # default: False
sys.stderr = TeeLogger(logger_path, sys.stderr, False) # default: False
return dir_for_each_experiment
def from_jsonpath_2_str2idx(json_path):
str2intidx = {}
with open(json_path, 'r') as f:
tmp_str2stridx = json.load(f)
for str_key, str_idx in tmp_str2stridx.items():
str2intidx.update({str_key:int(str_idx)})
return str2intidx
def from_jsonpath_2_idx2str(json_path):
intidx2str = {}
with open(json_path, 'r') as f:
tmp_stridx2str = json.load(f)
for str_idx, value_str in tmp_stridx2str.items():
intidx2str.update({int(str_idx):value_str})
return intidx2str
def from_jsonpath_2_str2strorlist(json_path):
with open(json_path, 'r') as f:
raw_json = json.load(f)
return raw_json
def pklloader(pkl_path):
with open(pkl_path, 'rb') as p:
loaded = pickle.load(p)
return loaded
class EmbLoader:
def __init__(self, args):
self.args = args
def emb_returner(self):
if self.args.bert_name == 'bert-base-uncased':
huggingface_model = 'bert-base-uncased'
elif self.args.bert_name == 'biobert':
assert self.args.ifbert_use_whichmodel == 'biobert'
huggingface_model = './biobert_transformers/'
else:
huggingface_model = 'dummy'
print(self.args.bert_name,'are not supported')
exit()
bert_embedder = PretrainedTransformerEmbedder(model_name=huggingface_model)
return bert_embedder, bert_embedder.get_output_dim(), BasicTextFieldEmbedder({'tokens': bert_embedder},
allow_unmatched_keys=True)
class OnlyFixedDatasetLoader:
'''
    Before running this, we assume that preprocessing has already been done.
'''
def __init__(self, args):
self.args = args
self.dataset = self.dataset_name_returner()
self.dataset_dir = self.dataset_dir_returner()
def dataset_name_returner(self):
assert self.args.dataset in ['xxx', 'yyy', 'zzz']
return self.args.dataset
def dataset_dir_returner(self):
return self.args.mention_dump_dir + self.dataset + '/'
def fixed_idxnized_datapath_returner(self):
id2line_json_path = self.dataset_dir + 'id2line.json'
# pmid2int_mention_path = self.dataset_dir + 'pmid2ment.json'
train_mentionidpath = self.dataset_dir + 'train_mentionid.pkl'
dev_mentionidpath = self.dataset_dir + 'dev_mentionid.pkl'
test_mentionidpath = self.dataset_dir + 'test_mentionid.pkl'
return id2line_json_path, train_mentionidpath, dev_mentionidpath, test_mentionidpath
def id2line_path_2_intid2line(self, id2line_json_path):
with open(id2line_json_path, 'r') as id2l:
tmp_id2l = json.load(id2l)
intid2line = {}
for str_idx, line_mention in tmp_id2l.items():
intid2line.update({int(str_idx): line_mention})
return intid2line
def train_dev_test_mentionid_returner(self, train_mentionidpath, dev_mentionidpath, test_mentionidpath):
with open(train_mentionidpath, 'rb') as trp:
train_mentionid = pickle.load(trp)
with open(dev_mentionidpath, 'rb') as drp:
dev_mentionid = pickle.load(drp)
with open(test_mentionidpath, 'rb') as terp:
test_mentionid = pickle.load(terp)
if self.args.debug:
return train_mentionid[:300], dev_mentionid[:200], test_mentionid[:400]
else:
return train_mentionid, dev_mentionid, test_mentionid
def id2line_trn_dev_test_loader(self):
id2line_json_path, train_mentionidpath, dev_mentionidpath, test_mentionidpath = self.fixed_idxnized_datapath_returner()
id2line = self.id2line_path_2_intid2line(id2line_json_path=id2line_json_path)
train_mentionid, dev_mentionid, test_mentionid = self.train_dev_test_mentionid_returner(
train_mentionidpath=train_mentionidpath,
dev_mentionidpath=dev_mentionidpath,
test_mentionidpath=test_mentionidpath)
return id2line, train_mentionid, dev_mentionid, test_mentionid
class KBConstructor_fromKGemb:
def __init__(self, args):
self.args = args
self.kbemb_dim = self.args.kbemb_dim
self.original_kbloader_to_memory()
def original_kbloader_to_memory(self):
cui2idx_path, idx2cui_path, cui2emb_path, cui2cano_path, cui2def_path = self.from_datasetname_return_related_dicts_paths()
print('set value and load original KB')
self.original_cui2idx = from_jsonpath_2_str2idx(cui2idx_path)
self.original_idx2cui = from_jsonpath_2_idx2str(idx2cui_path)
self.original_cui2emb = pklloader(cui2emb_path)
self.original_cui2cano = from_jsonpath_2_str2strorlist(cui2cano_path)
self.original_cui2def = from_jsonpath_2_str2strorlist(cui2def_path)
def return_original_KB(self):
return self.original_cui2idx, self.original_idx2cui, self.original_cui2emb, self.original_cui2cano, self.original_cui2def
def from_datasetname_return_related_dicts_paths(self):
assert self.args.dataset in ['xxx','yyy','zzz']
if self.args.dataset in ['xxx', 'yyy']:
cui2idx_path = './dataset/cui2idx.json'
idx2cui_path = './dataset/idx2cui.json'
cui2emb_path = './dataset/cui2emb.pkl'
cui2cano_path = './dataset/cui2cano.json'
cui2def_path = './dataset/cui2def.json'
else:
cui2idx_path, idx2cui_path, cui2emb_path, cui2cano_path, cui2def_path = ['dummy' for i in range(5)]
print(self.args.dataset, 'are currently not supported')
exit()
return cui2idx_path, idx2cui_path, cui2emb_path, cui2cano_path, cui2def_path
def load_original_KBmatrix_alignedwith_idx2cui(self):
KBemb = np.random.randn(len(self.original_cui2emb.keys()), self.kbemb_dim).astype('float32')
for idx, cui in self.original_idx2cui.items():
KBemb[idx] = self.original_cui2emb[cui]
return KBemb
def indexed_faiss_loader_for_constructing_smallKB(self):
if self.args.search_method_for_faiss_during_construct_smallKBfortrain == 'indexflatl2': # L2
self.indexed_faiss = faiss.IndexFlatL2(self.kbemb_dim)
elif self.args.search_method_for_faiss_during_construct_smallKBfortrain == 'indexflatip': # innerdot * Beforehand-Normalization must be done.
self.indexed_faiss = faiss.IndexFlatIP(self.kbemb_dim)
elif self.args.search_method_for_faiss_during_construct_smallKBfortrain == 'cossim': # innerdot * Beforehand-Normalization must be done.
self.indexed_faiss = faiss.IndexFlatIP(self.kbemb_dim)
else:
print('currently',self.args.search_method_for_faiss_during_construct_smallKBfortrain, 'are not supported')
exit()
return self.indexed_faiss
class ForOnlyFaiss_KBIndexer:
def __init__(self, args, input_cui2idx, input_idx2cui, input_cui2emb, search_method_for_faiss, entity_emb_dim=300):
self.args = args
self.kbemb_dim = entity_emb_dim
self.cui2idx = input_cui2idx
self.idx2cui = input_idx2cui
self.cui2emb = input_cui2emb
self.search_method_for_faiss = search_method_for_faiss
self.indexed_faiss_loader()
self.KBmatrix = self.KBmatrixloader()
self.entity_num = len(input_cui2idx)
self.indexed_faiss_KBemb_adder(KBmatrix=self.KBmatrix)
def KBmatrixloader(self):
KBemb = np.random.randn(len(self.cui2idx.keys()), self.kbemb_dim).astype('float32')
for idx, cui in self.idx2cui.items():
KBemb[idx] = self.cui2emb[cui].astype('float32')
return KBemb
def indexed_faiss_loader(self):
if self.search_method_for_faiss == 'indexflatl2': # L2
self.indexed_faiss = faiss.IndexFlatL2(self.kbemb_dim)
elif self.search_method_for_faiss == 'indexflatip': #
self.indexed_faiss = faiss.IndexFlatIP(self.kbemb_dim)
elif self.search_method_for_faiss == 'cossim': # innerdot * Beforehand-Normalization must be done.
self.indexed_faiss = faiss.IndexFlatIP(self.kbemb_dim)
def indexed_faiss_KBemb_adder(self, KBmatrix):
if self.search_method_for_faiss == 'cossim':
KBemb_normalized_for_cossimonly = np.random.randn(self.entity_num, self.kbemb_dim).astype('float32')
for idx, emb in enumerate(KBmatrix):
if np.linalg.norm(emb, ord=2, axis=0) != 0:
KBemb_normalized_for_cossimonly[idx] = emb / np.linalg.norm(emb, ord=2, axis=0)
self.indexed_faiss.add(KBemb_normalized_for_cossimonly)
else:
self.indexed_faiss.add(KBmatrix)
def indexed_faiss_returner(self):
return self.indexed_faiss
def KBembeddings_loader(self):
KBembeddings = nn.Embedding(self.entity_num, self.kbemb_dim, padding_idx=0)
KBembeddings.weight.data.copy_(torch.from_numpy(self.KBmatrix))
KBembeddings.weight.requires_grad = False
return KBembeddings
class FixedNegativesEntityLoader:
def __init__(self, args):
self.args = args
self.dataset = self.args.dataset
self.dataset_checker()
def train_mention_idx2negatives_loader(self):
negatives_json_path = './duringtrain_sampled_negs/' + self.args.dataset + '.json'
with open(negatives_json_path, 'r') as f:
train_mention_stridx2negativesdata = json.load(f)
train_mention_intidx2negatives = {}
for stridx, itsdata in train_mention_stridx2negativesdata.items():
train_mention_intidx2negatives.update({int(stridx):itsdata})
return train_mention_intidx2negatives
def dataset_checker(self):
try:
assert self.args.dataset in ['xxx', 'yyy', 'zzz']
except:
raise NotImplementedError
|
"""2d Helmholtz"""
from .base_fun import fun_cst, fun_cst_der
from .calc_field import far_field, incident_field, scattered_field, total_field
from .prob_cst import create_problem_cst
|
from stt import Transcriber
import glob
import os
BASE_DIR = '/home/sontc/PycharmProjects/VPS/wave2vec_2/self-supervised-speech-recognition'
transcriber = Transcriber(lm_weight=1.51, word_score=2.57, beam_size=100)
result = []
file_names, data = [], []
for i, file_path in enumerate(glob.glob('converted_wavs/*', recursive=False)):
file_name = os.path.basename(file_path)
data.append(file_path)
file_names.append(file_name)
    # there are 2190 samples in total, so we transcribe them in batches of 10
if len(data) == 10:
hypos = transcriber.transcribe(data)
hypos = list(map(lambda x: x.lower(), hypos))
temp = zip(file_names, hypos)
temp = list(map(lambda x: '\t'.join(x), temp))
temp = '\n'.join(temp)
print(temp)
print(f'============processed {i + 1} samples============')
result.append(temp)
data = data[:0]
        file_names = file_names[:0]
# defensively flush any leftover batch (fewer than 10 files) after the loop
if data:
    hypos = transcriber.transcribe(data)
    hypos = list(map(lambda x: x.lower(), hypos))
    temp = '\n'.join(map(lambda x: '\t'.join(x), zip(file_names, hypos)))
    result.append(temp)
with open('vps_result.txt', 'w') as fp:
fp.write('\n'.join(result))
|
import time
import psycopg2
from Protocol import *
import threading
DATABASE_ADDRESS = "NONE"
DATABASE_NAME = "NONE"
DATABASE_SCHEME = "NONE"
DATABASE_USER = "NONE"
DATABASE_PASSWORD = "NONE"
DATABASE_PORT = "NONE"
SECRET_KEY = b'\xf8[\xd6\t<\xd8\x04a5siif\x93\xdc\xe0'
IV = b'\x8e;\xf21bB\x0c\x95\x93\xce\xe9J3,\x04\xdd'
class DBConnection():
def __init__(self):
self.conn = None
self.cursor = None
self.log = ""
def connect(self):
"""
Establish connection to PostgreSQL server.
:return: None, otherwise raise an error.
"""
try:
debugMessages("DB_CONNECT", True)
self.conn = psycopg2.connect(
host=DATABASE_ADDRESS,
database=DATABASE_NAME,
user=DATABASE_USER,
password=DATABASE_PASSWORD,
port=DATABASE_PORT)
except (Exception, psycopg2.OperationalError):
debugMessages("DB_CONNECTION_ERROR", True)
try:
# connect to the PostgreSQL server
self.log += 'Connecting to the PostgreSQL database...\n'
# create a cursor
self.cursor = self.conn.cursor()
# execute a statement
self.log += 'PostgreSQL database version:\n'
self.cursor.execute('SELECT version()')
# display the PostgreSQL database server version
db_version = self.cursor.fetchone()
self.log += str(db_version) + "\n"
debugMessages("DB_CONNECTED", True)
except (Exception, psycopg2.DatabaseError):
debugMessages("DB_OPERATION_ERROR", True)
def user_authentication(self, username, password, client_address):
"""
Validate login details of the client.
:param username: client (String) username
:param password: client (String) password
:param client_address: IP:PORT of client.
:return: success message (Protocol), otherwise error message.
"""
sql_auth = "SELECT * FROM online_chat_db.users WHERE username = %s AND password = %s"
self.cursor.execute(sql_auth, (username, password))
sql_query_result = self.cursor.fetchall()
# check if given data exists in sql table
if len(sql_query_result) == 0:
return PROTOCOLS["login_failed_msg"], []
# update online status to (True) of Client.
self.update(table="users", column="online", new_value=True, filter_key="id",
filter_value=sql_query_result[0][0])
# attach current socket ip to the logged-in user
ip_address = "{0}:{1}".format(client_address[0], str(client_address[1]))
self.update(table="users", column="ip_address", new_value=ip_address, filter_key="id",
filter_value=sql_query_result[0][0])
return PROTOCOLS["login_ok_msg"], sql_query_result[0]
def update(self, table, column, new_value, filter_key, filter_value):
"""
Update record in the database
:param table: database table name
:param column: database column name
:param filter_key: database column name to filter records. (Default: unique column)
:param filter_value: database row value to filter records
:param new_value: new data to update.
:return: None
"""
sql_query = "UPDATE {0}.{1} SET {2}='{3}' WHERE {4}='{5}'". \
format(DATABASE_SCHEME, table, column, new_value, filter_key, filter_value)
self.cursor.execute(sql_query)
self.conn.commit()
if len(self.query(table, filter_key, filter_value, column)) == 0:
debugMessages("DB_UPDATE_QUERY_FAIL", True)
def query(self, table, filter_key=None, filter_value=None, column=None):
"""
Retrieve record from the database.
:param table: database table name
:param column: database column name
:param filter_key: database column name to filter records. (Default: unique column)
:param filter_value: database row value to filter records
:return: None
"""
if column is None and filter_key is not None and filter_value is not None:
sql_auth = "SELECT * FROM {0}.{1} WHERE {2}='{3}'". \
format(DATABASE_SCHEME, table, filter_key, filter_value)
self.cursor.execute(sql_auth)
sql_query_result = self.cursor.fetchall()
return sql_query_result
if column is not None and filter_key is None and filter_value is None:
sql_auth = "SELECT {0} FROM {1}.{2}".format(column, DATABASE_SCHEME, table)
self.cursor.execute(sql_auth)
sql_query_result = self.cursor.fetchall()
return sql_query_result
if column is not None and filter_key is not None and filter_value is not None:
sql_auth = "SELECT {0} FROM {1}.{2} WHERE {3}='{4}'". \
format(column, DATABASE_SCHEME, table, filter_key, filter_value)
self.cursor.execute(sql_auth)
sql_query_result = self.cursor.fetchall()
return sql_query_result
if column is None and filter_key is None and filter_value is None:
print("{} table called from DB".format(table))
sql_auth = "SELECT * FROM {0}.{1}".format(DATABASE_SCHEME, table)
self.cursor.execute(sql_auth)
sql_query_result = self.cursor.fetchall()
return sql_query_result
def insert(self, cmd, data):
if cmd == "NEW_USER":
# organize data to variables.
username, password, online, ip_address, avatar = data[0], data[1], data[2], data[3], data[4]
status, room, color = data[5], data[6], data[7]
# check if username exist
search_username = self.query(table='users', filter_key='username', filter_value=username, column='username')
if len(search_username) == 0:
# create user id in chronological order.
self.cursor.execute("SELECT MAX(id)+1 FROM online_chat_db.users")
new_id = self.cursor.fetchall()[0][0]
# organize data for sql execution.
columns = "id, username, password, online, ip_address,avatar, status, room, color"
values = "{0},'{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}'". \
format(new_id, username, password, online, ip_address, avatar, status, room, color)
self.cursor.execute("INSERT INTO online_chat_db.users({0}) VALUES ({1})".format(columns, values))
self.conn.commit()
# check if record was successfully inserted
return self.query(table='users', filter_key='username', filter_value=username, column='username')
else:
return ["#USERNAME_EXIST#"]
def close_connection(self):
"""
Close connection to the PostgreSQL database.
:return: None.
"""
self.cursor.close()
self.conn.close()
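# Hedged usage sketch (the DATABASE_* constants above must be filled in with
# real connection details first; table and column names follow the queries
# used in the methods above):
#
#   db = DBConnection()
#   db.connect()
#   status, user_row = db.user_authentication('alice', 'secret', ('1.2.3.4', 5555))
#   online_users = db.query(table='users', filter_key='online', filter_value=True)
#   db.close_connection()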
|
from django.shortcuts import render
from forms import contactForm
from django.core.mail import send_mail
from django.conf import settings
# Create your views here.
def contact(request):
title = 'Contact'
form = contactForm(request.POST or None)
confirm_message = None
if form.is_valid():
name = form.cleaned_data['name']
comment = form.cleaned_data['comment']
subject = 'Message from MYSITE.com'
emailFrom = form.cleaned_data['email']
emailTo = [settings.EMAIL_HOST_USER]
message = '%s - %s - %s' %(comment, name, emailFrom)
send_mail(subject, message, emailFrom, emailTo, fail_silently=False)
title = 'Thanks!'
confirm_message = 'Thanks for the message. We will get right back to you.'
form = None
context = {'title':title, 'form':form, 'confirm_message':confirm_message}
template = 'contact.html'
return render(request, template, context)
|
#!/usr/bin/env python
from multiprocessing import Event
import json
import time
# from pygecko.multiprocessing import geckopy
# from pygecko.transport.zmq_sub_pub import Pub, Sub
from pygecko.transport.zmq_req_rep import Reply, Request
from pygecko.multiprocessing.process import GeckoSimpleProcess
from pygecko import zmqTCP, zmqUDS
from pygecko import get_ip
import shutil # move and delete files/folders
import os
def rmdir(path):
try:
shutil.rmtree(path)
except FileNotFoundError:
# folder was already deleted or doesn't exist ... it's ok
pass
ipAddress = get_ip()
endpt = zmqUDS("/tmp/req_rpy_0")
if os.path.exists(endpt):
os.remove(endpt)
def proc():
def cb(msg):
print("cb send:", msg)
msg = json.dumps({"recv send": "ans"}).encode("utf-8")
print("cb recv:", msg)
return msg
rep = Reply()
rep.bind(endpt)
good = False
while not good:
good = rep.listen(cb)
print(">> reply good?", good)
p = GeckoSimpleProcess()
p.start(func=proc, name='reply')
req = Request()
req.connect(endpt)
ans = None
while ans is None:
time.sleep(5)
msg = json.dumps({"request send": "question"}).encode("utf-8")
ans = req.get(msg)
print(">> request good?", ans)
time.sleep(0.01)
|
from reports.models import Station
def station_list(request): # pylint: disable=unused-argument
stations = list()
for station in Station.objects.all():
station.incidents_count = station.incidents.count()
stations.append(station)
return {'station_list': stations}
|
from __future__ import print_function, unicode_literals
"""
This module makes it possible for all the unit tests to use a particular,
configurable port range (e.g. 10500 - 11000), so as to avoid conflicts with
other applications. At the moment the tests use random ports, which conflict
with production systems when tests are run (e.g. in the continuous
integration server).
"""
STARTING_PORT = 10000
RESERVED_PORTS = 500 # for static files, etc.
CURRENT_PORT = STARTING_PORT + RESERVED_PORTS
def new():
global CURRENT_PORT
port = CURRENT_PORT
CURRENT_PORT += 1
return port
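
if __name__ == '__main__':
    # Quick, hedged demo: consecutive calls hand out increasing port numbers
    # above the reserved block, so fixtures in one process never collide.
    print(new())  # 10500
    print(new())  # 10501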
|
# Copyright (c) 2022 Dai HBG
"""
This script deletes the parts of the 1-minute data for securities that were not listed (tradable) on the given date.
"""
import os
import pandas as pd
def main():
data_path = 'D:/Documents/AutoFactoryData'
intra_path = 'E:/Backups/AutoFactoryData/StockIntraDayData/1m'
dates = os.listdir(intra_path)
for date in dates:
all_securities = pd.read_csv('{}/StockDailyData/{}/all_securities.csv'.format(data_path, date))
all_stocks = set(all_securities['code'])
files = os.listdir('{}/{}'.format(intra_path, date))
for f in files:
if f[:-4] not in all_stocks:
os.remove('{}/{}/{}'.format(intra_path, date, f))
print('{} done.'.format(date))
if __name__ == '__main__':
main()
|
# Copyright (c) 2021, zerodha and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
class RobinChapterMapping(Document):
"""Mapping."""
def before_insert(self):
if not self.user:
self.user = frappe.session.user
@frappe.whitelist()
def get_mapped_city():
"""Return mapped cities."""
data = frappe.db.sql("""
SELECT c.city as value
FROM `tabChapter` c
INNER JOIN `tabRobin Chapter Mapping` as r
ON c.name = r.chapter
WHERE r.user = %(user)s
""", {"user": frappe.session.user}, as_dict=True)
return data
|
#Oskar Svedlund
#TEINF-20
#24-08-21
#Welcome screen
print("Welcome to the game.")
name = input("What is your name? ")
print("Glad you're back, " + name)
cats = input("How many cats did you see on your way here? ")
paws = int(cats) * 4
print("So you saw", cats, "cats on your way here? That's like", paws, "paws. Cool.") |
# Generated by Django 2.1.7 on 2019-03-20 03:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forum_permission', '0003_remove_forumpermission_name'),
]
operations = [
migrations.RemoveField(
model_name='forumpermission',
name='is_global',
),
migrations.RemoveField(
model_name='forumpermission',
name='is_local',
),
]
|
from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass, iteritems
class OpticAxisRule(with_metaclass(ABCMeta, object)):
# return values of neighbor_that_send_photor_to and
# neighbor_that_provide_photor should be the
# following:
# [x] is the neighbor at position x
# [x, y] is the neighbor at position y
# of neighbor at position x
# this is commutative so order does not matter
# positions are relative to o.
# The actual directions are given by `_get_unit_axis`
# of `HexagonArray` class
# 1
# 6 2
# o
# 5 3
# 4
    # 0 is the current column; the arrangement is based
    # on the right eye as seen from inside, or the left eye
    # as seen from outside.
    # 5 and 6 are the anterior side and 2, 3 the posterior;
    # dorsal/ventral depends on whether it is the left or
    # the right eye.
    # See also RFC #3 for the coordinate system for the left eye.
    # The right eye is constructed by a rotation in the opposite
    # direction of the rotation of the left eye.
# __metaclass__ = ABCMeta
inds = {'R{}'.format(i + 1): i + 1 for i in range(8)}
@classmethod
def name_to_ind(cls, name):
try:
return cls.inds[name]
except KeyError:
print('"{}" is not a valid neuron name'.format(name))
raise
@classmethod
def is_photor(cls, name):
return name in cls.inds.keys()
@abstractmethod
def neighbor_that_send_photor_to(self, photor_ind):
return
@abstractmethod
def neighbor_that_provide_photor(self, photor_ind):
return
class OpticAxisNeuralSuperpositionTop(OpticAxisRule):
def __init__(self):
self.name = 'neural superposition'
def neighbor_that_send_photor_to(self, photor_ind):
if photor_ind == 1:
neighbor = [2]
elif photor_ind == 2:
neighbor = [3]
elif photor_ind == 3:
neighbor = [12]
elif photor_ind == 4:
neighbor = [4]
elif photor_ind == 5:
neighbor = [5]
elif photor_ind == 6:
neighbor = [6]
elif photor_ind == 7:
neighbor = [0]
elif photor_ind == 8:
neighbor = [0]
else:
raise ValueError('Unexpected neighbor index {}. Expected 1-8.'
.format(photor_ind))
return neighbor
def neighbor_that_provide_photor(self, photor_ind):
if photor_ind == 1:
neighbor = [5]
elif photor_ind == 2:
neighbor = [6]
elif photor_ind == 3:
neighbor = [18]
elif photor_ind == 4:
neighbor = [1]
elif photor_ind == 5:
neighbor = [2]
elif photor_ind == 6:
neighbor = [3]
elif photor_ind == 7:
neighbor = [0]
elif photor_ind == 8:
neighbor = [0]
else:
raise ValueError('Unexpected neighbor index {}. Expected 1-8.'
.format(photor_ind))
return neighbor
class OpticAxisNeuralSuperpositionBottom(OpticAxisRule):
def __init__(self):
self.name = 'neural superposition'
def neighbor_that_send_photor_to(self, photor_ind):
if photor_ind == 1:
neighbor = [3]
elif photor_ind == 2:
neighbor = [2]
elif photor_ind == 3:
neighbor = [8]
elif photor_ind == 4:
neighbor = [1]
elif photor_ind == 5:
neighbor = [6]
elif photor_ind == 6:
neighbor = [5]
elif photor_ind == 7:
neighbor = [0]
elif photor_ind == 8:
neighbor = [0]
else:
            raise ValueError('Unexpected neighbor index {}. Expected 1-8.'
.format(photor_ind))
return neighbor
def neighbor_that_provide_photor(self, photor_ind):
if photor_ind == 1:
neighbor = [6]
elif photor_ind == 2:
neighbor = [5]
elif photor_ind == 3:
neighbor = [14]
elif photor_ind == 4:
neighbor = [4]
elif photor_ind == 5:
neighbor = [3]
elif photor_ind == 6:
neighbor = [2]
elif photor_ind == 7:
neighbor = [0]
elif photor_ind == 8:
neighbor = [0]
else:
raise ValueError('Unexpected neighbor index {}. Expected 1-8.'
.format(photor_ind))
return neighbor
class OpticAxisPlain(OpticAxisRule):
def __init__(self):
self.name = 'plain'
def neighbor_that_send_photor_to(self, photor_ind):
return [0]
def neighbor_that_provide_photor(self, photor_ind):
return [0]
class RuleHexArrayMap(object):
'''
A class that assigns columns based on composition rule
in a consistent way.
'''
def __init__(self, rule, hexarray):
# keys are tuples (column_id, photoreceptorname)
neighbors_for_photor = {}
neighbors_that_provide_photor = {}
for el in hexarray.elements:
for neuron, ind in iteritems(OpticAxisRule.inds):
neighbordr = rule.neighbor_that_send_photor_to(ind)
neighborid = hexarray.get_neighborid(el.gid, neighbordr)
                # While the given selector is already connected to a port,
                # search for a selector of a cartridge in the opposite
                # direction (relative to the original rule).
                # Stop if the selector is the same as the previous one.
                # Here we assume the 2 methods of opticaxis return
                # cartridge ids in opposite directions, and that if there
                # is no cartridge in one of those directions the method
                # returns the current cartridge id instead of the neighbor's.
                # In case the rule connects ommatidia and cartridges with
                # the same ids, there should be no conflict in the first
                # place (the following check is always False).
                # Another option is to ignore those connections,
                # but unconnected ports cause problems elsewhere.
while (neighborid, neuron) in neighbors_that_provide_photor:
neighbordr = rule.neighbor_that_provide_photor(ind)
neighborid_new = hexarray.get_neighborid(
neighborid, neighbordr)
if neighborid_new == neighborid:
break
else:
neighborid = neighborid_new
neighbors_for_photor[(el.gid, neuron)] = neighborid
neighbors_that_provide_photor[(neighborid, neuron)] = el.gid
self.neighbors_for_photor = neighbors_for_photor
self.neighbors_that_provide_photor = neighbors_that_provide_photor
def neighbor_that_send_photor_to(self, column_id, photor):
return self.neighbors_for_photor[(column_id, photor)]
def neighbor_that_provide_photor(self, column_id, photor):
return self.neighbors_that_provide_photor[(column_id, photor)]
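# Hedged usage sketch of RuleHexArrayMap (the hexarray argument is assumed to
# be a HexagonArray-like object exposing .elements and
# .get_neighborid(gid, direction), as used in __init__ above):
#
#   rule = opticaxisFactory('SuperpositionTop')()
#   mapping = RuleHexArrayMap(rule, hexarray)
#   target_column = mapping.neighbor_that_send_photor_to(0, 'R1')
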
# don't use this directly
# implementation might change
_class_dict = {
'Plain': OpticAxisPlain,
'SuperpositionTop': OpticAxisNeuralSuperpositionTop,
'SuperpositionBottom': OpticAxisNeuralSuperpositionBottom
}
def opticaxisFactory(rule):
try:
return _class_dict[rule]
except KeyError:
raise ValueError('Value {} not in axis rules {}'
' dictionary'.format(rule, list(_class_dict.keys())))
def main():
    axis = opticaxisFactory('SuperpositionTop')()
neuronname = 'R1'
ind = axis.name_to_ind(neuronname)
print(axis.neighbor_that_send_photor_to(ind))
if __name__ == '__main__':
main()
|
"""Add vehicle table
Revision ID: 1e99b5efb76f
Revises: 2bbd670f53b4
Create Date: 2020-05-24 15:29:19.928946
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "1e99b5efb76f"
down_revision = "2bbd670f53b4"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"vehicle",
sa.Column("pk", sa.Integer(), nullable=False),
sa.Column("id", sa.String(), nullable=True),
sa.Column("source_pk", sa.Integer(), nullable=False),
sa.Column("system_pk", sa.Integer(), nullable=False),
sa.Column("trip_pk", sa.Integer(), nullable=True),
sa.Column("label", sa.String(), nullable=True),
sa.Column("license_plate", sa.String(), nullable=True),
sa.Column(
"current_status",
sa.Enum(
"INCOMING_AT",
"STOPPED_AT",
"IN_TRANSIT_TO",
name="status",
native_enum=False,
),
nullable=False,
),
sa.Column("latitude", sa.Float(), nullable=True),
sa.Column("longitude", sa.Float(), nullable=True),
sa.Column("bearing", sa.Float(), nullable=True),
sa.Column("odometer", sa.Float(), nullable=True),
sa.Column("speed", sa.Float(), nullable=True),
sa.Column(
"congestion_level",
sa.Enum(
"UNKNOWN_CONGESTION_LEVEL",
"RUNNING_SMOOTHLY",
"STOP_AND_GO",
"CONGESTION",
"SEVERE_CONGESTION",
name="congestionlevel",
native_enum=False,
),
nullable=False,
),
sa.Column("updated_at", sa.TIMESTAMP(timezone=True), nullable=True),
sa.ForeignKeyConstraint(["source_pk"], ["feed_update.pk"],),
sa.ForeignKeyConstraint(["system_pk"], ["system.pk"],),
sa.PrimaryKeyConstraint("pk"),
sa.UniqueConstraint("system_pk", "id"),
)
op.create_index(
op.f("ix_vehicle_source_pk"), "vehicle", ["source_pk"], unique=False
)
op.create_index(
op.f("ix_vehicle_system_pk"), "vehicle", ["system_pk"], unique=False
)
op.create_index(op.f("ix_vehicle_trip_pk"), "vehicle", ["trip_pk"], unique=True)
op.create_foreign_key(None, "vehicle", "trip", ["trip_pk"], ["pk"])
op.alter_column("alert", "cause", existing_type=sa.VARCHAR(), nullable=False)
op.alter_column("alert", "effect", existing_type=sa.VARCHAR(), nullable=False)
op.add_column("trip", sa.Column("delay", sa.Integer(), nullable=True))
op.add_column(
"trip", sa.Column("started_at", sa.TIMESTAMP(timezone=True), nullable=True)
)
op.add_column(
"trip", sa.Column("updated_at", sa.TIMESTAMP(timezone=True), nullable=True)
)
op.drop_column("trip", "current_stop_sequence")
op.drop_column("trip", "vehicle_id")
op.drop_column("trip", "last_update_time")
op.drop_column("trip", "start_time")
op.drop_column("trip", "current_status")
op.add_column("vehicle", sa.Column("current_stop_pk", sa.Integer(), nullable=True))
op.add_column(
"vehicle", sa.Column("current_stop_sequence", sa.Integer(), nullable=True)
)
op.add_column(
"vehicle",
sa.Column(
"occupancy_status",
sa.Enum(
"EMPTY",
"MANY_SEATS_AVAILABLE",
"FEW_SEATS_AVAILABLE",
"STANDING_ROOM_ONLY",
"CRUSHED_STANDING_ROOM_ONLY",
"FULL",
"NOT_ACCEPTING_PASSENGERS",
"UNKNOWN",
name="occupancystatus",
native_enum=False,
),
nullable=False,
),
)
op.create_foreign_key(None, "vehicle", "stop", ["current_stop_pk"], ["pk"])
def downgrade():
pass
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='datatransfer',
version='1.0',
    description='Data transfer module that uses scp.',
long_description=None,
author='Takeki Shikano',
author_email='',
url=None,
license='MIT',
packages=find_packages(exclude=('docs',))
)
|
#!/usr/bin/env python
#coding: utf-8
from riak_common import *
import riak
import redis
import numbers
def _set_expecting_numeric_response(nutcracker, key, value):
response = nutcracker.set(key, value)
if not isinstance(response, numbers.Number):
raise Exception('Expected nutcracker.set to return a number, but got: {0}'.format(response))
return response
def _create_delete_run(key_count, riak_client, riak_bucket, nutcracker, redis):
keys = [ distinct_key() for i in range(0, key_count)]
nc_keys = []
for i, key in enumerate(keys):
if i % 2 == 0:
nc_keys += [ key ]
else:
nc_keys += [ nutcracker_key(key) ]
cleanup_func = lambda : nutcracker.delete(*nc_keys)
retry_write(cleanup_func)
keys_created = 0
for i, key in enumerate(nc_keys):
write_func = lambda: _set_expecting_numeric_response(nutcracker, key, key)
keys_created += retry_write(write_func)
assert_equal(len(nc_keys), keys_created)
for i, key in enumerate(nc_keys):
read_func = lambda: nutcracker.get(key)
read_value = retry_read(read_func)
assert_equal(key, read_value)
delete_func = lambda : nutcracker.delete(*nc_keys)
del_response = retry_write(delete_func)
assert_equal(len(nc_keys), del_response)
failed_to_delete = 0
for i, key in enumerate(nc_keys):
riak_read_func = lambda : riak_bucket.get(key)
riak_object = retry_read_notfound_ok(riak_read_func)
        if riak_object.data is not None:
failed_to_delete += 1
continue
assert_equal(0, failed_to_delete)
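# Run the create/delete scenario with the requested number of redis and riak nodes shut
# down, always restoring the nodes afterwards.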
def _create_delete(key_count, redis_to_shutdown, riak_to_shutdown):
(riak_client, riak_bucket, nutcracker, redis) = getconn(testing_type='partition')
try:
shutdown_redis_nodes(redis_to_shutdown)
shutdown_riak_nodes(riak_to_shutdown)
_create_delete_run(key_count, riak_client, riak_bucket, nutcracker, redis)
except:
raise
finally:
restore_redis_nodes()
restore_riak_nodes()
def test_happy_path():
_create_delete(riak_many_n, 0, 0)
def test_one_redis_node_down():
_create_delete(riak_many_n, 1, 0)
def test_two_redis_nodes_down():
_create_delete(riak_many_n, 2, 0)
def test_one_riak_node_down():
_create_delete(riak_many_n, 0, 1)
def test_two_riak_nodes_down():
_create_delete(riak_many_n, 0, 2)
|
#!/usr/bin/env python3
import sys
import yaml
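# Rewrite the secret-sync configuration file given as the first argument so that every CI
# cluster listed below gets a "registry-pull-credentials-all" entry assembled from the
# per-registry pull secrets defined in config_for_cluster().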
config = {}
with open(sys.argv[1]) as raw:
    config = yaml.safe_load(raw)
def alias_for_cluster(cluster):
if cluster == "api.ci":
return "ci" # why do we still do this??
return cluster
def internal_hostnames_for_cluster(cluster):
if cluster == "api.ci":
return ["docker-registry.default.svc.cluster.local:5000", "docker-registry.default.svc:5000"]
return ["image-registry.openshift-image-registry.svc.cluster.local:5000", "image-registry.openshift-image-registry.svc:5000"]
def internal_auths_for_cluster(cluster):
auths = []
for hostname in internal_hostnames_for_cluster(cluster):
auths.append({
"bw_item": "build_farm",
"registry_url": hostname,
"auth_bw_attachment": "token_image-puller_{}_reg_auth_value.txt".format(alias_for_cluster(cluster)),
})
return auths
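# Assemble the full secret-sync entry for a cluster: the source dockerconfigjson combines
# the cluster's internal registry auths with the shared external registry pull secrets,
# and it is synced to the "ci" and "test-credentials" namespaces on that cluster.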
def config_for_cluster(cluster):
return {
"from": {
".dockerconfigjson": {
"dockerconfigJSON": internal_auths_for_cluster(cluster) + [
{
"bw_item": "cloud.openshift.com-pull-secret",
"registry_url": "cloud.openshift.com",
"auth_bw_attachment": "auth",
"email_bw_field": "email",
},
{
"bw_item": "quay.io-pull-secret",
"registry_url": "quay.io",
"auth_bw_attachment": "auth",
"email_bw_field": "email",
},
{
"bw_item": "registry.connect.redhat.com-pull-secret",
"registry_url": "registry.connect.redhat.com",
"auth_bw_attachment": "auth",
"email_bw_field": "email",
},
{
"bw_item": "registry.redhat.io-pull-secret",
"registry_url": "registry.redhat.io",
"auth_bw_attachment": "auth",
"email_bw_field": "email",
},
{
"bw_item": "build_farm",
"registry_url": "registry.svc.ci.openshift.org",
"auth_bw_attachment": "token_image-puller_ci_reg_auth_value.txt",
},
{
"bw_item": "build_farm",
"registry_url": "registry.ci.openshift.org",
"auth_bw_attachment": "token_image-puller_app.ci_reg_auth_value.txt",
},
{
"bw_item": "build_farm",
"registry_url": "registry.arm-build01.arm-build.devcluster.openshift.com",
"auth_bw_attachment": "token_image-puller_arm01_reg_auth_value.txt",
},
{
"bw_item": "build_farm",
"registry_url": "registry.build01.ci.openshift.org",
"auth_bw_attachment": "token_image-puller_build01_reg_auth_value.txt",
},
{
"bw_item": "build_farm",
"registry_url": "registry.build02.ci.openshift.org",
"auth_bw_attachment": "token_image-puller_build02_reg_auth_value.txt",
},
{
"bw_item": "build_farm",
"registry_url": "registry.apps.build01-us-west-2.vmc.ci.openshift.org",
"auth_bw_attachment": "token_image-puller_vsphere_reg_auth_value.txt",
}],
},
},
"to": [{
"cluster": cluster,
"namespace": "ci",
"name": "registry-pull-credentials-all",
"type": "kubernetes.io/dockerconfigjson",
},
{
"cluster": cluster,
"namespace": "test-credentials",
"name": "registry-pull-credentials-all",
"type": "kubernetes.io/dockerconfigjson",
}],
}
clusters = ["api.ci", "app.ci", "build01", "build02", "vsphere"]
configs = dict(zip(clusters, [config_for_cluster(cluster) for cluster in clusters]))
found = dict(zip(clusters, [False for cluster in clusters]))
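# Replace any existing secret_configs entries whose destinations match one of the
# generated configs, then append configs for clusters that were not found.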
for i, secret in enumerate(config["secret_configs"]):
for c in configs:
if secret["to"] == configs[c]["to"]:
found[configs[c]["to"][0]["cluster"]] = True
config["secret_configs"][i] = configs[c]
for c in found:
if not found[c]:
config["secret_configs"].append(configs[c])
with open(sys.argv[1], "w") as raw:
yaml.dump(config, raw)
|