| content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
# Print out all the codons for the sequence below in reading frame 1
# Use a 'for' loop
dna = 'ATAGCGAATATCTCTCATGAGAGGGAA'
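# Step through the sequence three bases at a time; dna[nt:nt+3] is one codon in frame 1.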
for nt in range(0, len(dna) - 2, 3):
print(dna[nt:nt+3])
"""
ATA
GCG
AAT
ATC
TCT
CAT
GAG
AGG
GAA
"""
| 11.636364 | 68 | 0.671875 | ["MIT"] | tmbolt/learning_python | codons.py | 256 | Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\design.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(632, 318)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.csv_output_button = QtWidgets.QPushButton(self.centralwidget)
self.csv_output_button.setObjectName("csv_output_button")
self.gridLayout.addWidget(self.csv_output_button, 1, 4, 1, 1)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.csv_output_line_edit = QtWidgets.QLineEdit(self.centralwidget)
        self.csv_output_line_edit.setStyleSheet("QLineEdit { border-radius: 5px; }")
self.csv_output_line_edit.setObjectName("csv_output_line_edit")
self.gridLayout.addWidget(self.csv_output_line_edit, 1, 1, 1, 3)
self.input_button = QtWidgets.QPushButton(self.centralwidget)
self.input_button.setObjectName("input_button")
self.gridLayout.addWidget(self.input_button, 0, 4, 1, 1)
self.video_line_edit = QtWidgets.QLineEdit(self.centralwidget)
        self.video_line_edit.setStyleSheet("QLineEdit { border: 2px solid gray; border-radius: 5px; }")
self.video_line_edit.setObjectName("video_line_edit")
self.gridLayout.addWidget(self.video_line_edit, 0, 1, 1, 3)
self.gridLayout.setColumnStretch(1, 7)
self.gridLayout.setColumnStretch(2, 1)
self.gridLayout.setColumnStretch(3, 1)
self.gridLayout.setColumnStretch(4, 1)
self.verticalLayout.addLayout(self.gridLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setContentsMargins(-1, 0, -1, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.model_combo_box = QtWidgets.QComboBox(self.centralwidget)
self.model_combo_box.setEditable(False)
self.model_combo_box.setObjectName("model_combo_box")
self.model_combo_box.addItem("")
self.model_combo_box.addItem("")
self.model_combo_box.addItem("")
self.model_combo_box.addItem("")
self.horizontalLayout_2.addWidget(self.model_combo_box)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(-1, 24, -1, -1)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.save_video_check_box = QtWidgets.QCheckBox(self.centralwidget)
self.save_video_check_box.setChecked(True)
self.save_video_check_box.setObjectName("save_video_check_box")
self.horizontalLayout_3.addWidget(self.save_video_check_box)
self.dark_bg_check_box = QtWidgets.QCheckBox(self.centralwidget)
self.dark_bg_check_box.setObjectName("dark_bg_check_box")
self.horizontalLayout_3.addWidget(self.dark_bg_check_box)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, -1, -1, 36)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setObjectName("label_4")
self.horizontalLayout_4.addWidget(self.label_4)
self.video_output_line_edit = QtWidgets.QLineEdit(self.centralwidget)
self.video_output_line_edit.setObjectName("video_output_line_edit")
self.horizontalLayout_4.addWidget(self.video_output_line_edit)
self.video_output_button = QtWidgets.QPushButton(self.centralwidget)
self.video_output_button.setObjectName("video_output_button")
self.horizontalLayout_4.addWidget(self.video_output_button)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.progress_bar = QtWidgets.QProgressBar(self.centralwidget)
self.progress_bar.setProperty("value", 0)
self.progress_bar.setTextVisible(False)
self.progress_bar.setObjectName("progress_bar")
self.verticalLayout.addWidget(self.progress_bar)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setContentsMargins(-1, 24, -1, -1)
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.cancel_button = QtWidgets.QPushButton(self.centralwidget)
self.cancel_button.setEnabled(False)
self.cancel_button.setCheckable(True)
self.cancel_button.setChecked(False)
self.cancel_button.setObjectName("cancel_button")
self.horizontalLayout.addWidget(self.cancel_button)
self.ok_button = QtWidgets.QPushButton(self.centralwidget)
self.ok_button.setObjectName("ok_button")
self.horizontalLayout.addWidget(self.ok_button)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout_2.addLayout(self.verticalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 632, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Путь к видео:"))
self.csv_output_button.setText(_translate("MainWindow", "Выбрать"))
self.label_2.setText(_translate("MainWindow", "CSV для точек:"))
self.input_button.setText(_translate("MainWindow", "Выбрать"))
self.label_3.setText(_translate("MainWindow", "Модель:"))
self.model_combo_box.setCurrentText(_translate("MainWindow", "cmu"))
self.model_combo_box.setItemText(0, _translate("MainWindow", "cmu"))
self.model_combo_box.setItemText(1, _translate("MainWindow", "mobilenet_thin"))
self.model_combo_box.setItemText(2, _translate("MainWindow", "mobilenet_v2_large"))
self.model_combo_box.setItemText(3, _translate("MainWindow", "mobilenet_v2_small"))
self.save_video_check_box.setText(_translate("MainWindow", "Показать ключевые точки в видео"))
self.dark_bg_check_box.setText(_translate("MainWindow", "Темный фон"))
self.label_4.setText(_translate("MainWindow", "Сохранить видео как:"))
self.video_output_button.setText(_translate("MainWindow", "Выбрать"))
self.progress_bar.setFormat(_translate("MainWindow", "%p%"))
self.cancel_button.setText(_translate("MainWindow", "Прервать"))
self.ok_button.setText(_translate("MainWindow", "ОК"))
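# Illustrative usage sketch (not emitted by pyuic5): how a generated Ui class such as
# Ui_MainWindow is typically attached to a QMainWindow.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())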
| 58.832168 | 138 | 0.730893 | ["Apache-2.0"] | AlgieParvin/openpose-gui | design.py | 8,521 | Python |
from Cocoa import NSDocument
class CurrencyConvBindingDocument(NSDocument):
def windowNibName(self):
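        # Cocoa loads this document's window from the nib/xib resource with this name.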
return "CurrencyConvBindingDocument"
| 21.714286 | 46 | 0.789474 | ["MIT"] | linuxfood/pyobjc-framework-Cocoa-test | Examples/AppKit/CocoaBindings/CurrencyConvBinding/CurrencyConvBindingDocument.py | 152 | Python |
import json
import pathlib
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from kubernetes.client import models as k8s
from airflow.contrib.operators import kubernetes_pod_operator
import datetime
from airflow import models
dag = DAG(
dag_id="kube-pod-operator-cluster",
start_date=airflow.utils.dates.days_ago(2),
schedule_interval="@daily",
)
start_kube_process = BashOperator(
task_id="start_kube_process",
bash_command="echo upload to s3",
dag=dag,
)
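# in_cluster=True: the pod operators authenticate with the service account of the
# cluster Airflow itself runs in, rather than an external kubeconfig file.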
in_cluster=True
kubernetes_min_pod = kubernetes_pod_operator.KubernetesPodOperator(
task_id='pod-ex-minimum',
name='pod-ex-minimum',
cmds=['echo'],
namespace='default',
image='ubuntu:latest',
in_cluster=in_cluster,
executor_config={"LocalExecutor": {}}
)
run_another_pod = kubernetes_pod_operator.KubernetesPodOperator(
task_id='run-another-pod',
name='run-another-pod',
cmds=['echo'],
namespace='default',
image='ubuntu:latest',
in_cluster=in_cluster,
executor_config={"LocalExecutor": {}}
)
start_kube_process >> kubernetes_min_pod >> run_another_pod
| 21.678571 | 67 | 0.747117 | ["Apache-2.0"] | skhatri/airflow-by-example | airflow-example-dags/dags/k8s-pod-operator-cluster.py | 1,214 | Python |
from django.contrib import admin
from django.contrib.auth.models import User
from django.test.testcases import TestCase
from django.urls import reverse
from pagetools.menus.admin import MenuAdmin, make_entrieable_admin
from pagetools.menus.apps import MenusConfig
from pagetools.menus.models import Link, Menu, MenuEntry
from pagetools.tests.test_models import ConcretePublishableLangModel
from pagetools.utils import get_adminedit_url
from pagetools.widgets.settings import TEMPLATETAG_WIDGETS
class CPMAdmin(admin.ModelAdmin):
model = ConcretePublishableLangModel
admin.site.register(ConcretePublishableLangModel, CPMAdmin)
class MenuAdminTests(TestCase):
def setUp(self):
self.admin = User.objects.create_superuser("admin", "[email protected]", "password")
self.client.login(username="admin", password="password")
self.site = admin.site
def _data_from_menu(self, menu):
return {
key: menu.__dict__[key]
for key in (
"id",
"lang",
"title",
"slug",
"content_type_id",
"object_id",
"enabled",
"lft",
"rght",
"tree_id",
"level",
)
}
def test_admin_index(self):
""" test index because customdashboard with MenuModule is may used"""
adminindex = reverse("admin:index")
response = self.client.get(adminindex, follow=True, extra={"app_label": "admin"})
self.assertIn(response.status_code, (200, 302))
def test_add(self):
adminurl = reverse("admin:menus_menu_add", args=[])
self.client.post(adminurl, {"title": "Menu1"})
menu = Menu.objects.get(title="Menu1")
self.assertEqual(len(menu.children.all()), 0)
return menu
def test_update(self):
menu = Menu.objects.add_root(title="Menu1")
entries = []
for i in range(1, 3):
entries.append(
MenuEntry.objects.add_child(
parent=menu,
title="e%s" % i,
content_object=Link.objects.create(
url="#%s" % i,
),
enabled=True,
)
)
adminurl = reverse("admin:menus_menu_change", args=[menu.pk])
self.client.get(adminurl, {"pk": menu.pk})
data = self._data_from_menu(menu)
data["entry-order-id-0"] = entries[0].pk
data["entry-text-0"] = "changed"
data["entry-published-0"] = 1
self.client.post(adminurl, data)
children = menu.children_list()
self.assertEqual(children[0]["entry_title"], "changed")
def test_reorder(self):
menu = Menu.objects.add_root(title="Menu1")
entries = []
for i in range(1, 3):
entries.append(
MenuEntry.objects.add_child(
parent=menu,
title="e%s" % i,
content_object=Link.objects.create(
url="#%s" % i,
),
enabled=True,
)
)
adminurl = reverse("admin:menus_menu_change", args=[menu.pk])
data = self._data_from_menu(menu)
self.client.post(adminurl, data)
self.assertEqual([entry["entry_title"] for entry in menu.children_list()], ["e1", "e2"])
data.update(
{
"entry-order": "[%s]=null&[%s]=null" % (entries[1].pk, entries[0].pk),
}
)
self.client.post(adminurl, data)
self.assertEqual([e["entry_title"] for e in menu.children_list()], ["e2", "e1"])
def test_addentry(self):
menu = Menu.objects.add_root(title="Menu1", enabled=True)
entries = []
for i in range(1, 3):
entries.append(
MenuEntry.objects.add_child(
parent=menu,
title="e%s" % i,
content_object=Link.objects.create(
url="#%s" % i,
),
enabled=True,
)
)
adminurl = reverse("admin:menus_menu_change", args=[menu.pk])
data = self._data_from_menu(menu)
data["addentry"] = "menus#link"
result = self.client.post(adminurl, data)
self.assertEqual(result.status_code, 302)
def test_addableentries(self):
admininstance = MenuAdmin(model=Menu, admin_site=self.site)
menu = Menu.objects.add_root(title="Menu1")
entries = admininstance.addable_entries(obj=menu)
len_e = len(MenusConfig.entrieable_models)
if not TEMPLATETAG_WIDGETS:
len_e -= 1
self.assertEqual(entries.count("<li>"), len_e)
def test_mk_entriableadmin(self):
admincls = CPMAdmin
make_entrieable_admin(admincls)
self.assertTrue(admincls.is_menu_entrieable)
instance = ConcretePublishableLangModel.objects.create(foo="x")
data = instance.__dict__
menu = Menu.objects.add_root(title="Menu1")
admininstance = admincls(model=ConcretePublishableLangModel, admin_site=self.site)
self.assertTrue(admininstance.get_fields({}, instance), [])
self.assertTrue(admininstance.get_fieldsets({}, instance), [])
formcls = admincls.form
formcls._meta.model = ConcretePublishableLangModel
form = formcls(instance.__dict__)
self.assertTrue("menus" in form.fields.keys())
valid = form.is_valid()
self.assertTrue(valid)
data["menus"] = [menu.pk]
form = formcls(data, instance=instance)
self.assertTrue("menus" in form.fields.keys())
valid = form.is_valid()
self.assertTrue(valid)
data["status_changed_0"] = "2016-01-01"
data["status_changed_1"] = "23:00"
adminurl = get_adminedit_url(instance)
response = self.client.post(adminurl, data)
self.assertIn(response.status_code, (200, 302))
self.assertEqual(MenuEntry.objects.count(), 2)
response = self.client.get(adminurl)
content = str(response.content)
start = content.find('<input type="checkbox" name="menus"')
end = content[start:].find(">")
tag = content[start : start + end + 1]
self.assertTrue(" checked" in tag)
| 36.185393 | 96 | 0.575221 | ["MIT"] | theithec/django-pagetools | pagetools/menus/tests/test_admin.py | 6,441 | Python |
# coding: utf8
from __future__ import absolute_import
import datetime
from celery import shared_task
from celery.utils.log import get_task_logger
from django.utils.translation import ugettext as _
from django.core.mail import send_mail
from django.contrib.auth import get_user_model
from django.conf import settings
from django.template import loader, Context
from common.helpers import send_email
from .models import Challenge
log = get_task_logger(__name__)
def send_challenge_reminder(user_id):
user = get_user_model().objects.get(id=user_id)
today = datetime.date.today()
filters = {
'status': Challenge.ACTIVE,
'end_date': today
}
ending_challenges = user.challenges_recieved.filter(**filters)
email_subject = _('Challenge ends today!')
email_context = {
'ending_challenges': ending_challenges
}
send_email([user.email],
email_subject,
'challenges/emails/challenges_reminder.html',
email_context)
@shared_task(ignore_result=True)
def send_challenge_reminders():
# Fetch runners that has challenges ending today.
today = datetime.date.today()
filters = {
'is_active': True,
'challenges_recieved__end_date': today
}
relevant_runners = get_user_model().objects.filter(**filters)
for runner in relevant_runners:
send_challenge_reminder(runner.id)
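# Illustrative only (an assumption, not part of this module): a celery beat entry that
# would run send_challenge_reminders once a day could look like this in the Django settings:
#
#   from celery.schedules import crontab
#   CELERYBEAT_SCHEDULE = {
#       'send-challenge-reminders': {
#           'task': 'challenges.tasks.send_challenge_reminders',
#           'schedule': crontab(hour=7, minute=0),
#       },
#   }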
| 26.185185 | 66 | 0.720651 | ["MIT"] | Socialsquare/RunningCause | challenges/tasks.py | 1,414 | Python |
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.codeanalysis.tasks.bundle_entries import BundleEntries
from pants.contrib.codeanalysis.tasks.extract_java import ExtractJava
from pants.contrib.codeanalysis.tasks.index_java import IndexJava
def register_goals():
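    # Install the Kythe Java extraction/indexing tasks and the entry bundler into the 'index' goal.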
task(name="kythe-java-extract", action=ExtractJava).install("index")
task(name="kythe-java-index", action=IndexJava).install("index")
task(name="bundle-entries", action=BundleEntries).install("index")
| 42.666667 | 73 | 0.798438 | ["Apache-2.0"] | odisseus/pants | contrib/codeanalysis/src/python/pants/contrib/codeanalysis/register.py | 640 | Python |
import os
import glob
import json
import argparse
from tokenizers import ByteLevelBPETokenizer
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--train_path', type=str)
parser.add_argument('--n_files', type=int)
parser.add_argument('--save_path', type=str)
parser.add_argument('--vocab_size', type=int)
parser.add_argument('--control_codes', nargs='+',
default=['<|endoftext|>'])
args = parser.parse_args()
if os.path.isdir(args.train_path):
paths = glob.glob(os.path.join(args.train_path, '*'))
else:
paths = [args.train_path]
paths = paths[:args.n_files]
tok = ByteLevelBPETokenizer()
tok.train(files=paths, vocab_size=args.vocab_size,
special_tokens=args.control_codes)
tok.save(args.save_path)
tokenizer_config = {
"max_len": 1024
}
with open(os.path.join(args.save_path, "tokenizer_config.json"), 'w') as fp:
json.dump(tokenizer_config, fp)
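    # Illustrative follow-up (an assumption, not part of this script): the saved
    # vocab/merges plus tokenizer_config.json can typically be reloaded with
    # Hugging Face transformers, e.g.:
    #   from transformers import GPT2TokenizerFast
    #   tok = GPT2TokenizerFast.from_pretrained(args.save_path)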
| 25.625 | 80 | 0.654634 | ["MIT"] | bilal2vec/lm-finetuning | train_tokenizer.py | 1,025 | Python |
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class OslusiadasextractItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
| 21 | 53 | 0.725275 | ["MIT"] | Guilherme-Valle/os-lusiadas-api | root_package/oslusiadasextract/items.py | 273 | Python |
import torch
from torch.autograd import Variable
def train(epoch, dataloader, net, criterion, optimizer, opt):
net.train()
for i, (adj_matrix, annotation, target) in enumerate(dataloader, 0):
net.zero_grad()
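        # Pad each node's annotation with zeros up to state_dim; the concatenation below forms the initial GGNN node state.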
padding = torch.zeros(len(annotation),
opt.state_dim - opt.annotation_dim)
init_input = torch.cat((annotation, padding), 1)
if opt.cuda:
init_input = init_input.cuda()
adj_matrix = adj_matrix.cuda()
annotation = annotation.cuda()
target = target.cuda()
init_input = Variable(init_input)
adj_matrix = Variable(adj_matrix)
annotation = Variable(annotation)
target = Variable(target)
output = net(init_input, annotation, adj_matrix)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if i % int(len(dataloader) / 10 + 1) == 0 and opt.verbal:
print('[%d/%d][%d/%d] Loss: %.4f' %
(epoch, opt.niter, i, len(dataloader), loss.data[0]))
| 31.764706 | 72 | 0.585185 | ["MIT"] | RanganThaya/ggnn.pytorch.sparse | utils/train.py | 1,080 | Python |
import numpy as np
import math
# the number of vertices will decrease slightly when deletions happen
number_of_vertices = 5_000
# probability of an arc between any two instances
probability_of_an_arc = 0.001
# number of reads in the read-heavy test
read_test_operations = 20_000
# probability of removing a random vertex after processing each vertex
removal_probability = 0.04
# probability of adding a lookup command after each add arc command in write-heavy test
random_lookup_probability = 0.1
# probability of adding an add command after each lookup command in read-heavy test
random_add_probability = 0.1
# used in the write-heavy test. Probability of removing a vertex; removing an arc has probability 1-x
probability_of_removing_a_vertex = 0.5
# used in the read-heavy test. Probability of looking up a vertex; looking up an arc has probability 1-x
probability_of_looking_up_a_vertex = 0.5
avg_degree = number_of_vertices * probability_of_an_arc
std_deviation = math.sqrt((number_of_vertices-1)*probability_of_an_arc*(1-probability_of_an_arc))
write_heavy_test_name = "operations1.txt"
read_heavy_test_name = "operations2.txt"
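# Instruction format written to the operations files (one command per line):
#   av <v>      add vertex       lv <v>      look up vertex      rv <v>      remove vertex
#   aa <s> <t>  add arc          la <s> <t>  look up arc         ra <s> <t>  remove arc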
with open(write_heavy_test_name, "w") as file:
    # write the vertices first so you don't get errors in neo4j
for i in range(0, number_of_vertices):
file.write(f"av {i}\n")
print("Written vertices")
# start adding the arcs
for current_vertex in range(0, number_of_vertices):
# get the degree of the vertex using the normal distribution
degree = np.random.normal(avg_degree, std_deviation)
for j in range(0, int(degree)):
# select a target and write the operation to the instruction set
target = np.random.randint(0, number_of_vertices)
while target == current_vertex:
target = np.random.randint(0, number_of_vertices)
file.write(f"aa {current_vertex} {target}\n")
            # add rare random lookups during the write-heavy test
if(np.random.ranf()<random_lookup_probability):
if(np.random.ranf()<probability_of_looking_up_a_vertex):
vertex_to_look = np.random.randint(0, number_of_vertices)
file.write(f"lv {vertex_to_look}\n")
else:
source_arc_to_look = np.random.randint(0, number_of_vertices)
target_arc_to_look = np.random.randint(0, number_of_vertices)
file.write(f"la {source_arc_to_look} {target_arc_to_look}\n")
if(current_vertex % 1000 == 0):
print(f"Written arcs for {current_vertex} vertices")
        # after processing the arcs of a vertex, add a rare random removal command
if(np.random.ranf()<removal_probability):
if(np.random.ranf()<probability_of_removing_a_vertex):
vertex_to_remove = np.random.randint(0, number_of_vertices)
file.write(f"rv {vertex_to_remove}\n")
else:
source_arc_to_rmv = np.random.randint(0, number_of_vertices)
target_arc_to_rmv = np.random.randint(0, number_of_vertices)
file.write(f"ra {source_arc_to_rmv} {target_arc_to_rmv}\n")
print("Written arcs")
with open(read_heavy_test_name, "w") as file:
# write the read_test_operations read operations
for i in range(0, read_test_operations):
# before each read operation add a rare random write command
if(np.random.ranf()<random_add_probability):
file.write(f"av x{i}\n")
if(np.random.ranf()<probability_of_looking_up_a_vertex):
vertex_to_look = np.random.randint(0, number_of_vertices)
file.write(f"lv {vertex_to_look}\n")
else:
source_arc_to_look = np.random.randint(0, number_of_vertices)
target_arc_to_look = np.random.randint(0, number_of_vertices)
file.write(f"la {source_arc_to_look} {target_arc_to_look}\n")
if(i % 10_000 == 0):
print(f"Written {i} lookups")
print("Written lookups")
| 45.053191 | 106 | 0.661393 | ["Apache-2.0"] | ArendJan/TUD-DistributedSystems | Conflict-free_Replicated_Data_Types/experiments/benchmarking/OperationTestsGenerator.py | 4,235 | Python |
flag = b'HSCTF{1d9cb42f-3302-46f3-a3a7-0ca30d631cc9}'
| 27 | 53 | 0.777778 | ["MIT"] | scnu-sloth/hsctf-2021-freshmen | Crypto-babyFibo/ans/secret.py | 54 | Python |
"""Tests related to inheritance from interface."""
from datetime import datetime
import pytest
from generics import defended
from generics import delegated
from generics import private
pytestmark = pytest.mark.parametrize("f", [private, delegated, defended])
def test_allow_inheritance_from_interface(f, s):
"""Allow inheritance from interface."""
user_class = f(s.User)
user = user_class(last_login=datetime(1999, 12, 31))
assert not user.is_active()
| 24.947368 | 73 | 0.757384 | ["BSD-2-Clause"] | proofit404/generics | tests/test_subtyping.py | 474 | Python |
import math
import collections
import numpy as np
def __CheckEvaluationInput(y, yPredicted):
# Check sizes
if(len(y) != len(yPredicted)):
raise UserWarning("Attempting to evaluate between the true labels and predictions.\n Arrays contained different numbers of samples. Check your work and try again.")
# Check values
valueError = False
for value in y:
if value not in [0, 1]:
valueError = True
for value in yPredicted:
if value not in [0, 1]:
valueError = True
if valueError:
raise UserWarning("Attempting to evaluate between the true labels and predictions.\n Arrays contained unexpected value. Must be 0 or 1.")
def __CheckEvaluationCount(y, yPredicted):
# Check sizes
if(len(y) != len(yPredicted)):
raise UserWarning("Attempting to evaluate between the true labels and predictions.\n Arrays contained different numbers of samples. Check your work and try again.")
def Accuracy(y, yPredicted):
__CheckEvaluationInput(y, yPredicted)
correct = []
for i in range(len(y)):
if(y[i] == yPredicted[i]):
correct.append(1)
else:
correct.append(0)
return sum(correct)/len(correct)
def CountCorrect(y, yPredicted):
__CheckEvaluationInput(y, yPredicted)
correct = []
for i in range(len(y)):
if(y[i] == yPredicted[i]):
correct.append(1)
else:
correct.append(0)
return sum(correct)
def PredictionDiff(xTestRaw, y, yPredicted):
__CheckEvaluationCount(y, yPredicted)
__CheckEvaluationCount(xTestRaw, y)
predictionRange = {}
for i in range(len(y)):
predictionRange[xTestRaw[i]] = y[i] - yPredicted[i]
return predictionRange
def Precision(y, yPredicted):
numerator = TPCount(y, yPredicted)
denominator = (numerator + FPCount(y, yPredicted))
return 0.0 if denominator == 0 else numerator / denominator
def Recall(y, yPredicted):
numerator = TPCount(y, yPredicted)
denominator = (numerator + FNCount(y, yPredicted))
return 0.0 if denominator == 0 else numerator / denominator
def FalseNegativeRate(y, yPredicted):
numerator = FNCount(y, yPredicted)
denominator = numerator + TPCount(y, yPredicted)
return 0.0 if denominator == 0 else numerator / denominator
def FalsePositiveRate(y, yPredicted):
numerator = FPCount(y, yPredicted)
denominator = numerator + TNCount(y, yPredicted)
return 0.0 if denominator == 0 else numerator / denominator
def FNCount(y, yPredicted):
counter = 0
for i in range(len(y)):
if(y[i] == 1 and yPredicted[i] == 0):
counter += 1
return counter
def FPCount(y, yPredicted):
counter = 0
for i in range(len(y)):
if(y[i] == 0 and yPredicted[i] == 1):
counter += 1
return counter
def TNCount(y, yPredicted):
counter = 0
for i in range(len(y)):
if(y[i] == 0 and yPredicted[i] == 0):
counter += 1
return counter
def TPCount(y, yPredicted):
counter = 0
for i in range(len(y)):
if(y[i] == 1 and yPredicted[i] == 1):
counter += 1
return counter
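# 95% confidence bound on accuracy via the normal approximation:
#   acc +/- 1.96 * sqrt(acc * (1 - acc) / n), where n is the number of evaluated samples.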
def UpperAccRange(Accuracy, n):
return Accuracy + 1.96 * math.sqrt((Accuracy * (1 - Accuracy) / n))
def LowerAccRange(Accuracy, n):
return Accuracy - 1.96 * math.sqrt((Accuracy * (1 - Accuracy) / n))
def ConfusionMatrix(y, yPredicted):
print(" Predicted Negative | Predicted Positive")
print("Actual Negative | TN: " + str(TNCount(y, yPredicted)) + " | FP: " + str(FPCount(y, yPredicted)))
print("Actual Positive | FN: " + str(FNCount(y, yPredicted)) + " | TP: " + str(TPCount(y, yPredicted)))
def ExecuteAll(y, yPredicted):
accuracyVal = Accuracy(y, yPredicted)
print(ConfusionMatrix(y, yPredicted))
print("Accuracy:", accuracyVal)
print("Precision:", Precision(y, yPredicted))
print("Recall:", Recall(y, yPredicted))
print("FPR:", FalsePositiveRate(y, yPredicted))
print("FNR:", FalseNegativeRate(y, yPredicted))
print("95% confidence range:", LowerAccRange(accuracyVal, len(y)), "to", UpperAccRange(accuracyVal, len(y)) )
| 30.781022 | 174 | 0.641688 | ["MIT"] | isibord/LogisticRegression | Code/EvaluationsStub.py | 4,217 | Python |
import os
import json
import logging
logger = logging.getLogger(__name__)
from aiohttp import web, ClientSession
async def index(request):
logger.debug('Accessing index')
client = request.app['arango']
sys_db = client.db('_system', username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
dbs = sys_db.databases()
logger.info('Response: %s' % dbs)
return web.Response(text=json.dumps(dbs, indent=4))
async def addDB(request):
logger.debug('Adding DB')
client = request.app['arango']
sys_db = client.db('_system', username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
name = request.match_info['name']
if not sys_db.has_database(name):
sys_db.create_database(name)
else:
logger.info('Request to add db {} is a no-op because database is already present'.format(name))
return web.Response(text=name)
async def getDB(request):
logger.debug('Getting DB')
client = request.app['arango']
db = client.db(request.match_info['name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graphs = [coll for coll in db.graphs() if not coll['name'].startswith('_')]
return web.Response(text=json.dumps(graphs, indent=4))
async def getGraph(request):
logger.debug('Getting Graph')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['name'])
vertex_collections = graph.vertex_collections()
edge_definitions = graph.edge_definitions()
return web.Response(text=json.dumps(
{
"vertex_collections": vertex_collections,
"edge_definitions": edge_definitions
},
indent=4
))
async def addGraph(request):
logger.debug('Adding Graph')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
name = request.match_info['name']
graph = db.graph(name) if db.has_graph(name) else db.create_graph(name)
return web.Response(text=graph.name)
async def addVertices(request):
logger.debug('Adding Vertices')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
name = request.match_info['name']
collection = graph.vertex_collection(name) if graph.has_vertex_collection(name) else graph.create_vertex_collection(name)
reader = await request.multipart()
import_file = await reader.next()
logger.info(import_file.filename)
filedata = await import_file.text()
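    # Naive CSV handling: the first line supplies the field names, every other line is split on ','; quoted commas are not supported.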
fileschema = [key.strip('"') for key in filedata.splitlines()[0].split(',')]
logger.info(fileschema)
filelines = filedata.splitlines()[1:]
for line in filelines:
values = [value.strip('"') for value in line.split(',')]
doc = {key:value for key, value in zip(fileschema, values)}
try:
collection.insert(doc)
except Exception as e:
logger.info(e)
return web.Response(text=collection.name)
async def getVertices(request):
logger.debug('Getting Vertices')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
collection = db.collection(request.match_info['name'])
cursor = collection.all()
documents = [doc for doc in cursor]
return web.Response(text=json.dumps(documents[0:5], indent=4))
async def addEdges(request):
logger.debug('Adding Edges')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
name = request.match_info['name']
reader = await request.multipart()
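    # The multipart payload is read in a fixed order: comma-separated "from" collections, comma-separated "to" collections, then the CSV file of edges.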
field = await reader.next()
text = await field.text()
from_collections = text.split(',')
field = await reader.next()
text = await field.text()
to_collections = text.split(',')
if graph.has_edge_definition(name):
collection = graph.edge_collection(name)
else:
collection = graph.create_edge_definition(
edge_collection=name,
from_vertex_collections=from_collections,
to_vertex_collections=to_collections)
import_file = await reader.next()
filedata = await import_file.text()
fileschema = [key.strip('"') for key in filedata.splitlines()[0].split(',')]
filelines = filedata.splitlines()[1:]
for line in filelines:
values = [value.strip('"') for value in line.split(',')]
doc = {key:value for key, value in zip(fileschema, values)}
try:
collection.insert(doc)
except Exception as e:
logger.info(e)
return web.Response(text=collection.name)
async def getEdges(request):
logger.debug('Getting Edges')
client = request.app['arango']
db = client.db(request.match_info['db_name'], username='root', password=os.environ['MULTINET_ROOT_PASSWORD'])
graph = db.graph(request.match_info['graph_name'])
collection = graph.edge_collection(request.match_info['name'])
cursor = collection.all()
documents = [doc for doc in cursor]
return web.Response(text=json.dumps(documents[0:5], indent=4))
| 37.684932 | 125 | 0.683206 | ["MIT"] | multinet-app/multinet-server-poc | multinet-server/views.py | 5,502 | Python |
"""
module housing core library functionality
"""
import numpy as np
from typing import Optional, Tuple
import humba.jits as jits
def histogram(
x: np.ndarray,
bins: int = 10,
range: Tuple[float, float] = (0, 10),
weights: Optional[np.ndarray] = None,
flow: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray], np.ndarray]:
"""Calculate the histogram for the data ``x``.
Parameters
----------
x : :obj:`numpy.ndarray`
data to histogram
bins : int
number of bins
range : (float, float)
axis range
weights : :obj:`numpy.ndarray`, optional
array of weights for ``x``
flow : bool
include over and underflow content in first and last bins
Returns
-------
count : :obj:`numpy.ndarray`
The values of the histogram
error : :obj:`numpy.ndarray`, optional
        The Poisson uncertainty on the bin heights
edges : :obj:`numpy.ndarray`
The bin edges
Notes
-----
If the dtype of the ``weights`` is not the same as ``x``, then it
is converted to the dtype of ``x``.
Examples
--------
>>> import numpy as np
>>> from humba import histogram
>>> x = np.random.randn(100000)
>>> w = np.random.uniform(0.4, 0.5, x.shape[0])
    >>> hist1, _, edges = histogram(x, bins=50, range=(-5, 5))
    >>> hist2, _, edges = histogram(x, bins=50, range=(-5, 5), flow=True)
>>> hist3, error, edges = histogram(x, bins=50, range=(-5, 5), weights=w)
>>> hist4, error, edges = histogram(x, bins=50, range=(-3, 3), weights=w, flow=True)
"""
edges = np.linspace(range[0], range[1], bins + 1)
if weights is not None:
assert x.shape == weights.shape, "x and weights must have identical shape"
if x.dtype == np.float64:
hfunc = jits._hfloat64_weighted
elif x.dtype == np.float32:
hfunc = jits._hfloat32_weighted
else:
raise TypeError("dtype of input must be float32 or float64")
res, err = hfunc(x, weights.astype(x.dtype), bins, range[0], range[1], flow)
return (res, err, edges)
else:
if x.dtype == np.float64:
hfunc = jits._hfloat64
elif x.dtype == np.float32:
hfunc = jits._hfloat32
else:
raise TypeError("dtype of input must be float32 or float64")
res = hfunc(x, bins, range[0], range[1], flow)
return (res, None, edges)
def mwv_histogram(
x: np.ndarray,
weights: np.ndarray,
bins: int = 10,
range: Tuple[float, float] = (0, 10),
flow: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Histogram the same data but with multiple weight variations.
Parameters
----------
x : :obj:`numpy.ndarray`
data to histogram
weights : :obj:`numpy.ndarray`, optional
multidimensional array of weights for ``x`` the first element
of the ``shape`` attribute must be equal to the length of ``x``.
bins : int
number of bins
range : (float, float)
axis range
flow : bool
include over and underflow content in first and last bins
Returns
-------
count : :obj:`numpy.ndarray`
The values of the histograms calculated from the weights
        Shape will be (bins, ``weights.shape[1]``), i.e. one column per weight variation
error : :obj:`numpy.ndarray`
        The Poisson uncertainty on the bin heights (same shape as ``count``).
edges : :obj:`numpy.ndarray`
The bin edges
Notes
-----
If ``x`` is not the same dtype as ``weights``, then it is converted
to the dtype of ``weights`` (for multi weight histograms we expect
the weights array to be larger than the data array so we prefer to
cast the smaller chunk of data).
"""
edges = np.linspace(range[0], range[1], bins + 1)
assert x.shape[0] == weights.shape[0], "weights shape is not compatible with x"
if weights.dtype == np.float64:
hfunc = jits._hfloat64_multiweights
elif weights.dtype == np.float32:
hfunc = jits._hfloat32_multiweights
else:
raise TypeError("dtype of input must be float32 or float64")
res, err = hfunc(x.astype(weights.dtype), weights, bins, range[0], range[1], flow)
return (res, err, edges)
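# Illustrative use of mwv_histogram (a sketch; the array names here are made up):
#
#   x = np.random.randn(10_000)
#   w = np.random.uniform(0.5, 1.5, size=(x.shape[0], 3))  # three weight variations
#   counts, errors, edges = mwv_histogram(x, w, bins=20, range=(-4, 4))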
| 32.421053 | 88 | 0.603432 | ["BSD-3-Clause"] | douglasdavis/humba | humba/core.py | 4,312 | Python |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Template 2a_1:
.. parsed-literal::
┌───┐┌───┐
q_0: ┤ X ├┤ X ├
└───┘└───┘
"""
from qiskit.circuit.quantumcircuit import QuantumCircuit
def template_2a_1():
"""
Returns:
QuantumCircuit: template as a quantum circuit.
"""
qc = QuantumCircuit(1)
qc.x(0)
qc.x(0)
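    # Two consecutive X gates compose to the identity, which is what makes this circuit a
    # valid template (templates are identity-equivalent circuits used for pattern-based rewriting).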
return qc
| 24.228571 | 77 | 0.646226 | ["Apache-2.0"] | AustinGilliam/qiskit-terra | qiskit/circuit/library/template_circuits/toffoli/template_2a_1.py | 896 | Python |
import os
import csv
import logging
import itertools
import pandas as pd
import psutil as ps
from _pytest.monkeypatch import MonkeyPatch
from dataactcore.interfaces.db import GlobalDB
from dataactcore.config import CONFIG_SERVICES
from dataactcore.models.domainModels import concat_tas_dict
from dataactcore.models.lookups import (FILE_TYPE_DICT, JOB_TYPE_DICT, JOB_STATUS_DICT, RULE_SEVERITY_DICT)
from dataactcore.models.jobModels import Submission, Job, FileType
from dataactcore.models.userModel import User
from dataactcore.models.errorModels import ErrorMetadata
from dataactcore.models.stagingModels import (
Appropriation, ObjectClassProgramActivity, AwardFinancial, FlexField, TotalObligations)
from dataactvalidator.health_check import create_app
import dataactvalidator.validation_handlers.validationManager
from dataactvalidator.validation_handlers.validationManager import (
ValidationManager, FileColumn, CsvReader, parse_fields
)
import dataactvalidator.validation_handlers.validator
from dataactbroker.handlers.fileHandler import report_file_name
from tests.unit.dataactcore.factories.domain import SF133Factory, TASFactory
from tests.integration.baseTestValidator import BaseTestValidator
from tests.integration.integration_test_helper import insert_submission, insert_job
FILES_DIR = os.path.join('tests', 'integration', 'data')
# Valid Files
APPROP_FILE = os.path.join(FILES_DIR, 'appropValid.csv')
AFINANCIAL_FILE = os.path.join(FILES_DIR, 'awardFinancialValid.csv')
CROSS_FILE_A = os.path.join(FILES_DIR, 'cross_file_A.csv')
CROSS_FILE_B = os.path.join(FILES_DIR, 'cross_file_B.csv')
# Invalid Files
HEADER_ERROR = os.path.join(FILES_DIR, 'appropHeaderError.csv')
READ_ERROR = os.path.join(FILES_DIR, 'appropReadError.csv')
LENGTH_ERROR = os.path.join(FILES_DIR, 'appropLengthError.csv')
TYPE_ERROR = os.path.join(FILES_DIR, 'appropTypeError.csv')
REQUIRED_ERROR = os.path.join(FILES_DIR, 'appropRequiredError.csv')
RULE_FAILED_WARNING = os.path.join(FILES_DIR, 'appropInvalidWarning.csv')
RULE_FAILED_ERROR = os.path.join(FILES_DIR, 'appropInvalidError.csv')
INVALID_CROSS_A = os.path.join(FILES_DIR, 'invalid_cross_file_A.csv')
INVALID_CROSS_B = os.path.join(FILES_DIR, 'invalid_cross_file_B.csv')
BLANK_C = os.path.join(FILES_DIR, 'awardFinancialBlank.csv')
class ErrorWarningTests(BaseTestValidator):
""" Overall integration tests for error/warning reports.
    For each report type (single-file, cross-file, errors, warnings), test that each has
- the correct structure
- each column's content is correct after testing each possible type of error:
- formatting
- length
- types
- required/optional
- SQL validation
Attributes:
session: the database session connection
validator: validator instance to be used for the tests
submission_id: the id of the submission foundation
submission: the submission foundation to be used for all the tests
val_job: the validation job to be used for all the tests
"""
CHUNK_SIZES = [4]
PARALLEL_OPTIONS = [True, False]
BATCH_SQL_OPTIONS = [True, False]
CONFIGS = list(itertools.product(CHUNK_SIZES, PARALLEL_OPTIONS, BATCH_SQL_OPTIONS))
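    # Every test below is repeated for each (chunk_size, parallel, batch_sql) combination in CONFIGS.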
@classmethod
def setUpClass(cls):
""" Set up class-wide resources (test data) """
super(ErrorWarningTests, cls).setUpClass()
logging.getLogger('dataactcore').setLevel(logging.ERROR)
logging.getLogger('dataactvalidator').setLevel(logging.ERROR)
with create_app().app_context():
cls.monkeypatch = MonkeyPatch()
# get the submission test users
sess = GlobalDB.db().session
cls.session = sess
# set up default e-mails for tests
admin_user = sess.query(User).filter(User.email == cls.test_users['admin_user']).one()
cls.validator = ValidationManager(directory=CONFIG_SERVICES['error_report_path'])
# Just have one valid submission and then keep on reloading files
cls.submission_id = insert_submission(sess, admin_user.user_id, cgac_code='SYS', start_date='01/2001',
end_date='03/2001', is_quarter=True)
cls.submission = sess.query(Submission).filter_by(submission_id=cls.submission_id).one()
cls.val_job = insert_job(cls.session, FILE_TYPE_DICT['appropriations'], JOB_STATUS_DICT['ready'],
JOB_TYPE_DICT['csv_record_validation'], cls.submission_id,
filename=JOB_TYPE_DICT['csv_record_validation'])
cls.original_reports = set(os.listdir(CONFIG_SERVICES['error_report_path']))
# adding TAS to ensure valid file is valid
tas1 = TASFactory(account_num=1, allocation_transfer_agency='019', agency_identifier='072',
beginning_period_of_availa=None, ending_period_of_availabil=None,
availability_type_code='X', main_account_code='0306', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas2 = TASFactory(account_num=2, allocation_transfer_agency=None, agency_identifier='019',
beginning_period_of_availa='2016', ending_period_of_availabil='2016',
availability_type_code=None, main_account_code='0113', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas3 = TASFactory(account_num=3, allocation_transfer_agency=None, agency_identifier='028',
beginning_period_of_availa=None, ending_period_of_availabil=None,
availability_type_code='X', main_account_code='0406', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas4 = TASFactory(account_num=4, allocation_transfer_agency=None, agency_identifier='028',
beginning_period_of_availa='2010', ending_period_of_availabil='2011',
availability_type_code=None, main_account_code='0406', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas5 = TASFactory(account_num=5, allocation_transfer_agency='069', agency_identifier='013',
beginning_period_of_availa=None, ending_period_of_availabil=None,
availability_type_code='X', main_account_code='2050', sub_account_code='005',
internal_start_date='01-01-2000', financial_indicator2='F')
tas6 = TASFactory(account_num=6, allocation_transfer_agency='028', agency_identifier='028',
beginning_period_of_availa=None, ending_period_of_availabil=None,
availability_type_code='X', main_account_code='8007', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas7 = TASFactory(account_num=7, allocation_transfer_agency=None, agency_identifier='049',
beginning_period_of_availa=None, ending_period_of_availabil=None,
availability_type_code='X', main_account_code='0100', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas8 = TASFactory(account_num=8, allocation_transfer_agency=None, agency_identifier='049',
beginning_period_of_availa='2010', ending_period_of_availabil='2011',
availability_type_code=None, main_account_code='0100', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas9 = TASFactory(account_num=9, allocation_transfer_agency=None, agency_identifier='049',
beginning_period_of_availa='2014', ending_period_of_availabil='2015',
availability_type_code=None, main_account_code='0100', sub_account_code='000',
internal_start_date='01-01-2000', financial_indicator2='F')
tas10 = TASFactory(account_num=10, allocation_transfer_agency=None, agency_identifier='049',
beginning_period_of_availa='2015', ending_period_of_availabil='2016',
availability_type_code=None, main_account_code='0100', sub_account_code='000',
internal_start_date='01-01-2000')
sess.add_all([tas1, tas2, tas3, tas4, tas5, tas6, tas7, tas8, tas9, tas10])
# adding GTAS to ensure valid file is valid
gtas1 = SF133Factory(tas=concat_tas_dict(tas1.component_dict()), allocation_transfer_agency='019',
agency_identifier='072', beginning_period_of_availa=None, line=1009,
ending_period_of_availabil=None, availability_type_code='X',
main_account_code='0306', sub_account_code='000', period=6, fiscal_year=2001)
gtas2 = SF133Factory(tas=concat_tas_dict(tas2.component_dict()), allocation_transfer_agency=None,
agency_identifier='019', beginning_period_of_availa='2016', line=1009,
ending_period_of_availabil='2016', availability_type_code=None,
main_account_code='0113', sub_account_code='000', period=6, fiscal_year=2001)
gtas3 = SF133Factory(tas=concat_tas_dict(tas3.component_dict()), allocation_transfer_agency=None,
agency_identifier='028', beginning_period_of_availa=None, line=1009,
ending_period_of_availabil=None, availability_type_code='X',
main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
gtas4 = SF133Factory(tas=concat_tas_dict(tas4.component_dict()), allocation_transfer_agency=None,
agency_identifier='028', beginning_period_of_availa='2010', line=1009,
ending_period_of_availabil='2011', availability_type_code=None,
main_account_code='0406', sub_account_code='000', period=6, fiscal_year=2001)
gtas5 = SF133Factory(tas=concat_tas_dict(tas5.component_dict()), allocation_transfer_agency='069',
agency_identifier='013', beginning_period_of_availa=None, line=1009,
ending_period_of_availabil=None, availability_type_code='X',
main_account_code='2050', sub_account_code='005', period=6, fiscal_year=2001)
gtas6 = SF133Factory(tas=concat_tas_dict(tas6.component_dict()), allocation_transfer_agency='028',
agency_identifier='028', beginning_period_of_availa=None, line=1009,
ending_period_of_availabil=None, availability_type_code='X',
main_account_code='8007', sub_account_code='000', period=6, fiscal_year=2001)
gtas7 = SF133Factory(tas=concat_tas_dict(tas7.component_dict()), allocation_transfer_agency=None,
agency_identifier='049', beginning_period_of_availa=None, line=1009,
ending_period_of_availabil=None, availability_type_code='X',
main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas8 = SF133Factory(tas=concat_tas_dict(tas8.component_dict()), allocation_transfer_agency=None,
agency_identifier='049', beginning_period_of_availa='2010', line=1009,
ending_period_of_availabil='2011', availability_type_code=None,
main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas9 = SF133Factory(tas=concat_tas_dict(tas9.component_dict()), allocation_transfer_agency=None,
agency_identifier='049', beginning_period_of_availa='2014', line=1009,
ending_period_of_availabil='2015', availability_type_code=None,
main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
gtas10 = SF133Factory(tas=concat_tas_dict(tas10.component_dict()), allocation_transfer_agency=None,
agency_identifier='049', beginning_period_of_availa='2015', line=1009,
ending_period_of_availabil='2016', availability_type_code=None,
main_account_code='0100', sub_account_code='000', period=6, fiscal_year=2001)
sess.add_all([gtas1, gtas2, gtas3, gtas4, gtas5, gtas6, gtas7, gtas8, gtas9, gtas10])
sess.commit()
def setUp(self):
"""Test set-up."""
super(ErrorWarningTests, self).setUp()
def get_report_path(self, file_type, warning=False, cross_type=None):
filename = report_file_name(self.submission_id, warning, file_type, cross_type)
return os.path.join(CONFIG_SERVICES['error_report_path'], filename)
def setup_csv_record_validation(self, file, file_type):
self.session.query(Job).delete(synchronize_session='fetch')
self.val_job = insert_job(self.session, FILE_TYPE_DICT[file_type], JOB_STATUS_DICT['ready'],
JOB_TYPE_DICT['csv_record_validation'], self.submission_id,
filename=file)
def setup_validation(self):
self.session.query(Job).delete(synchronize_session='fetch')
self.val_job = insert_job(self.session, None, JOB_STATUS_DICT['ready'],
JOB_TYPE_DICT['validation'], self.submission_id,
filename=None)
def get_report_content(self, report_path, cross_file=False):
report_content = []
report_headers = None
with open(report_path, 'r') as report_csv:
reader = csv.DictReader(report_csv)
for row in reader:
report_content.append(row)
report_headers = reader.fieldnames
row_number_col = 'Row Number' if not cross_file else 'Source Row Number'
if row_number_col in report_headers:
report_content = list(sorted(report_content, key=lambda x: int(x[row_number_col] or 0)))
return report_headers, report_content
def generate_file_report(self, file, file_type, warning=False, ignore_error=False):
self.setup_csv_record_validation(file, file_type)
if ignore_error:
try:
self.validator.validate_job(self.val_job.job_id)
except Exception:
pass
else:
self.validator.validate_job(self.val_job.job_id)
report_path = self.get_report_path(file_type, warning=warning)
report_content = self.get_report_content(report_path, cross_file=False)
return report_content
def generate_cross_file_report(self, cross_files, warning=False, ignore_error=False):
cross_types = []
for cross_file in cross_files:
cross_types.append(cross_file[1])
self.generate_file_report(cross_file[0], cross_file[1], warning=warning, ignore_error=ignore_error)
self.setup_validation()
if ignore_error:
try:
self.validator.validate_job(self.val_job.job_id)
except Exception:
pass
else:
self.validator.validate_job(self.val_job.job_id)
report_path = self.get_report_path(cross_types[0], cross_type=cross_types[1], warning=warning)
report_content = self.get_report_content(report_path, cross_file=True)
return report_content
def cleanup(self):
new_reports = set(os.listdir(CONFIG_SERVICES['error_report_path'])) - self.original_reports
for new_report in new_reports:
os.remove(os.path.join(CONFIG_SERVICES['error_report_path'], new_report))
self.session.query(Appropriation).delete(synchronize_session='fetch')
self.session.query(ObjectClassProgramActivity).delete(synchronize_session='fetch')
self.session.query(AwardFinancial).delete(synchronize_session='fetch')
self.session.query(ErrorMetadata).delete(synchronize_session='fetch')
self.session.query(FlexField).delete(synchronize_session='fetch')
self.session.commit()
def test_single_file_warnings(self):
for chunk_size, parallel, batch_sql in self.CONFIGS:
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',
batch_sql)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',
chunk_size)
self.single_file_warnings()
def single_file_warnings(self):
self.cleanup()
# Valid
report_headers, report_content = self.generate_file_report(APPROP_FILE, 'appropriations', warning=True)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 10
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 20
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['warning']).count()
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 10
assert error_count == 0
assert report_headers == self.validator.report_headers
assert len(report_content) == 0
self.cleanup()
# Blank File
report_headers, report_content = self.generate_file_report(BLANK_C, 'award_financial', warning=True)
awfin_count = self.session.query(AwardFinancial).filter_by(submission_id=self.submission_id).count()
assert awfin_count == 0
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 0
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['warning']).count()
assert self.validator.job.number_of_rows == 1
assert self.validator.job.number_of_rows_valid == 2
assert error_count == 1
assert report_headers == self.validator.report_headers
expected_values = [
{
'Unique ID': '',
'Field Name': 'Blank File',
'Rule Message': 'File does not contain data. For files A and B, this must be addressed prior to'
' publication/certification. Blank file C does not prevent publication/certification.',
'Value Provided': '',
'Expected Value': '',
'Difference': '',
'Flex Field': '',
'Row Number': '',
'Rule Label': 'DABSBLANK'
}
]
assert report_content == expected_values
self.cleanup()
# SQL Validation
report_headers, report_content = self.generate_file_report(RULE_FAILED_WARNING, 'appropriations', warning=True)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 10
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 20
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['warning']).count()
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 10
assert error_count == 1
assert report_headers == self.validator.report_headers
expected_values = [
{
'Unique ID': 'TAS: 028-2010/2011-0406-000',
'Field Name': 'budgetauthorityunobligatedbalancebroughtforward_fyb',
'Rule Message': 'All the elements that have FYB in file A are expected in the first submission'
' for a fiscal year',
'Value Provided': 'budgetauthorityunobligatedbalancebroughtforward_fyb: ',
'Expected Value': 'If the reporting period is Quarter 1, a non-null amount should be submitted for the'
' following elements: BudgetAuthorityUnobligatedBalanceBroughtForward_FYB',
'Difference': '',
'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Row Number': '5',
'Rule Label': 'A16.1'
}
]
assert report_content == expected_values
self.cleanup()
def test_single_file_errors(self):
for chunk_size, parallel, batch_sql in self.CONFIGS:
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',
batch_sql)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',
chunk_size)
self.single_file_errors()
def single_file_errors(self):
self.cleanup()
# Valid
report_headers, report_content = self.generate_file_report(APPROP_FILE, 'appropriations', warning=False)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 10
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 20
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).count()
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 10
assert error_count == 0
assert report_headers == self.validator.report_headers
assert len(report_content) == 0
self.cleanup()
# Header Error
report_headers, report_content = self.generate_file_report(HEADER_ERROR, 'appropriations', warning=False,
ignore_error=True)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 0
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 0
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).count()
assert self.validator.job.number_of_rows is None
assert self.validator.job.number_of_rows_valid == 0
# Header errors do not get saved to the database
assert error_count == 0
assert report_headers == ['Error type', 'Header name']
expected_values = [
{
'Error type': 'Duplicated header',
'Header name': 'AllocationTransferAgencyIdentifier'
},
{
'Error type': 'Missing header',
'Header name': 'AdjustmentsToUnobligatedBalanceBroughtForward_CPE'
},
{
'Error type': 'Missing header',
'Header name': 'AgencyIdentifier'
},
{
'Error type': 'Missing header',
'Header name': 'BudgetAuthorityUnobligatedBalanceBroughtForward_FYB'
},
{
'Error type': 'Missing header',
'Header name': 'DeobligationsRecoveriesRefundsByTAS_CPE'
},
{
'Error type': 'Missing header',
'Header name': 'GrossOutlayAmountByTAS_CPE'
},
{
'Error type': 'Missing header',
'Header name': 'ObligationsIncurredTotalByTAS_CPE'
},
{
'Error type': 'Missing header',
'Header name': 'StatusOfBudgetaryResourcesTotal_CPE'
}
]
assert report_content == expected_values
self.cleanup()
# Read Error
report_headers, report_content = self.generate_file_report(READ_ERROR, 'appropriations', warning=False)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 6
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 12
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 6
format_errors = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).one()
format_error_count = format_errors.occurrences
assert format_error_count == 4
assert report_headers == self.validator.report_headers
expected_values = [
{
'Unique ID': '',
'Field Name': 'Formatting Error',
'Rule Message': 'Could not parse this record correctly.',
'Value Provided': '',
'Expected Value': '',
'Difference': '',
'Flex Field': '',
'Row Number': '2',
'Rule Label': ''
},
{
'Unique ID': '',
'Field Name': 'Formatting Error',
'Rule Message': 'Could not parse this record correctly.',
'Value Provided': '',
'Expected Value': '',
'Difference': '',
'Flex Field': '',
'Row Number': '3',
'Rule Label': ''
},
{
'Unique ID': '',
'Field Name': 'Formatting Error',
'Rule Message': 'Could not parse this record correctly.',
'Value Provided': '',
'Expected Value': '',
'Difference': '',
'Flex Field': '',
'Row Number': '5',
'Rule Label': ''
},
{
'Unique ID': '',
'Field Name': 'Formatting Error',
'Rule Message': 'Could not parse this record correctly.',
'Value Provided': '',
'Expected Value': '',
'Difference': '',
'Flex Field': '',
'Row Number': '7',
'Rule Label': ''
}
]
assert report_content == expected_values
self.cleanup()
# Type Error
report_headers, report_content = self.generate_file_report(TYPE_ERROR, 'appropriations', warning=False)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 9
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 18
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 9
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).count()
assert error_count == 1
assert report_headers == self.validator.report_headers
expected_values = [
{
'Unique ID': 'TAS: 069-013-X-2050-005',
'Field Name': 'statusofbudgetaryresourcestotal_cpe',
'Rule Message': 'The value provided was of the wrong type. Note that all type errors in a line must be'
' fixed before the rest of the validation logic is applied to that line.',
'Value Provided': 'statusofbudgetaryresourcestotal_cpe: A',
'Expected Value': 'This field must be a decimal',
'Difference': '',
'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Row Number': '6',
'Rule Label': ''
}
]
assert report_content == expected_values
self.cleanup()
# Length Error
report_headers, report_content = self.generate_file_report(LENGTH_ERROR, 'appropriations', warning=False)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 10
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 20
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 9
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).count()
assert error_count == 1
assert report_headers == self.validator.report_headers
expected_values = [
{
'Unique ID': 'TAS: 069-013-X-2050-005',
'Field Name': 'grossoutlayamountbytas_cpe',
'Rule Message': 'Value was longer than maximum length for this field.',
'Value Provided': 'grossoutlayamountbytas_cpe: 35000000000000000000000000',
'Expected Value': 'Max length: 21',
'Difference': '',
'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Row Number': '6',
'Rule Label': ''
}
]
assert report_content == expected_values
self.cleanup()
# Required Error + SQL Validation
report_headers, report_content = self.generate_file_report(REQUIRED_ERROR, 'appropriations', warning=False)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 10
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 20
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 9
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).count()
assert error_count == 3
assert report_headers == self.validator.report_headers
expected_values = [
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Field Name': 'statusofbudgetaryresourcestotal_cpe',
'Rule Message': 'This field is required for all submissions but was not provided in this row.',
'Value Provided': '',
'Expected Value': '(not blank)',
'Difference': '',
'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Row Number': '3',
'Rule Label': ''
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Field Name': 'statusofbudgetaryresourcestotal_cpe, obligationsincurredtotalbytas_cpe,'
' unobligatedbalance_cpe',
'Rule Message': 'StatusOfBudgetaryResourcesTotal_CPE= ObligationsIncurredTotalByTAS_CPE'
' + UnobligatedBalance_CPE',
'Value Provided': 'statusofbudgetaryresourcestotal_cpe: , obligationsincurredtotalbytas_cpe: 8.08,'
' unobligatedbalance_cpe: 2.02',
'Expected Value': 'StatusOfBudgetaryResourcesTotal_CPE must equal the sum of these elements:'
' ObligationsIncurredTotalByTAS_CPE + UnobligatedBalance_CPE. The Broker cannot'
' distinguish which item is incorrect for this rule. Refer to related rule errors'
' and warnings in this report (rules A15, A22, A23) to distinguish which elements'
' may be incorrect.',
'Difference': '-10.10',
'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Row Number': '3',
'Rule Label': 'A4'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Field Name': 'statusofbudgetaryresourcestotal_cpe, totalbudgetaryresources_cpe',
'Rule Message': 'StatusOfBudgetaryResourcesTotal_CPE = TotalBudgetaryResources_CPE',
'Value Provided': 'statusofbudgetaryresourcestotal_cpe: , totalbudgetaryresources_cpe: 10.1',
'Expected Value': 'StatusOfBudgetaryResourcesTotal_CPE must equal TotalBudgetaryResources_CPE. The'
' Broker cannot distinguish which side of the equation is correct for this rule.'
' Refer to related rule errors and warnings in this report (rules A6, A23) to'
' distinguish which elements may be incorrect.',
'Difference': '-10.1',
'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Row Number': '3',
'Rule Label': 'A24'
}
]
assert report_content == expected_values
self.cleanup()
# SQL Validation (with difference)
report_headers, report_content = self.generate_file_report(RULE_FAILED_ERROR, 'appropriations', warning=False)
appro_count = self.session.query(Appropriation).filter_by(submission_id=self.submission_id).count()
assert appro_count == 10
flex_count = self.session.query(FlexField).filter_by(submission_id=self.submission_id).count()
assert flex_count == 20
assert self.validator.job.number_of_rows == 11
assert self.validator.job.number_of_rows_valid == 10
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).count()
assert error_count == 0
assert report_headers == self.validator.report_headers
# TODO put this back when we put A2 back
# expected_values = [
# {
# 'Unique ID': 'TAS: 049-2014/2015-0100-000',
# 'Field Name': 'totalbudgetaryresources_cpe, budgetauthorityappropriatedamount_cpe,'
# ' budgetauthorityunobligatedbalancebroughtforward_fyb,'
# ' adjustmentstounobligatedbalancebroughtforward_cpe, otherbudgetaryresourcesamount_cpe',
# 'Rule Message': 'TotalBudgetaryResources_CPE = BudgetAuthorityAppropriatedAmount_CPE +'
# ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +'
# ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +'
# ' OtherBudgetaryResourcesAmount_CPE',
# 'Value Provided': 'totalbudgetaryresources_cpe: 10.1, budgetauthorityappropriatedamount_cpe: 0.01,'
# ' budgetauthorityunobligatedbalancebroughtforward_fyb: 3.03,'
# ' adjustmentstounobligatedbalancebroughtforward_cpe: 2.02,'
# ' otherbudgetaryresourcesamount_cpe: 4.04',
# 'Expected Value': 'TotalBudgetaryResources_CPE must equal the sum of these elements:'
# ' BudgetAuthorityAppropriatedAmount_CPE +'
# ' BudgetAuthorityUnobligatedBalanceBroughtForward_FYB +'
# ' AdjustmentsToUnobligatedBalanceBroughtForward_CPE +'
# ' OtherBudgetaryResourcesAmount_CPE. The Broker cannot distinguish which item is'
# ' incorrect for this rule. Refer to related rule errors and warnings in this report'
# ' (rules A3, A6, A7, A8, A12) to distinguish which elements may be incorrect.',
# 'Difference': '1.00',
# 'Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
# 'Row Number': '10',
# 'Rule Label': 'A2'
# }
# ]
# assert report_content == expected_values
self.cleanup()
# Ensure total_obligations are being calculated correctly
self.generate_file_report(AFINANCIAL_FILE, 'award_financial', warning=False)
totals = self.session.query(TotalObligations).filter_by(submission_id=self.submission_id).one()
assert totals.total_obligations == 12000.00
assert totals.total_proc_obligations == 8000.00
assert totals.total_asst_obligations == 4000.00
self.cleanup()
def test_cross_file_warnings(self):
for chunk_size, parallel, batch_sql in self.CONFIGS:
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',
batch_sql)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',
chunk_size)
self.cross_file_warnings()
def cross_file_warnings(self):
self.cleanup()
# Valid
report_headers, report_content = self.generate_cross_file_report([(CROSS_FILE_A, 'appropriations'),
(CROSS_FILE_B, 'program_activity')],
warning=True)
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['warning']).count()
assert error_count == 0
assert report_headers == self.validator.cross_file_report_headers
assert len(report_content) == 0
self.cleanup()
# SQL Validation
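        # Each invalid row in file A should trip rules A18, A19, and A35, giving three warnings with three occurrences apiece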
report_headers, report_content = self.generate_cross_file_report([(INVALID_CROSS_A, 'appropriations'),
(INVALID_CROSS_B, 'program_activity')],
warning=True)
warnings = list(self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['warning']).all())
assert len(warnings) == 3
assert warnings[0].occurrences == 3
assert warnings[1].occurrences == 3
assert warnings[2].occurrences == 3
assert report_headers == self.validator.cross_file_report_headers
expected_values = [
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'grossoutlayamountbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',
'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'
' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'
' award financial file (B). {This value is the sum of all Gross Outlay Amounts reported'
' in file B, to indicate year-to-date activity by TAS/Subaccount.}',
'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',
'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',
'Difference': '4000',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '5',
'Rule Label': 'A18'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'obligationsincurredtotalbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',
'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'
' equal the negative sum of the corresponding'
' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',
'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',
'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',
'Difference': '18000',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '5',
'Rule Label': 'A19'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'
' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',
'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'
' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',
'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',
'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'
' ussgl497100_downward_adjus_cpe_sum: 2000,'
' ussgl487200_downward_adjus_cpe_sum: 400,'
' ussgl497200_downward_adjus_cpe_sum: 2000',
'Difference': '9600',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '5',
'Rule Label': 'A35'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'grossoutlayamountbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',
'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'
' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'
' award financial file (B). {This value is the sum of all Gross Outlay Amounts reported'
' in file B, to indicate year-to-date activity by TAS/Subaccount.}',
'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',
'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',
'Difference': '4000',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '10',
'Rule Label': 'A18'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'obligationsincurredtotalbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',
'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'
' equal the negative sum of the corresponding'
' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',
'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',
'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',
'Difference': '18000',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '10',
'Rule Label': 'A19'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'
' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',
'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'
' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',
'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',
'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'
' ussgl497100_downward_adjus_cpe_sum: 2000,'
' ussgl487200_downward_adjus_cpe_sum: 400,'
' ussgl497200_downward_adjus_cpe_sum: 2000',
'Difference': '9600',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '10',
'Rule Label': 'A35'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'grossoutlayamountbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'gross_outlay_amount_by_pro_cpe_sum',
'Rule Message': 'The GrossOutlayAmountByTAS_CPE amount in the appropriation file (A) does not equal the'
' sum of the corresponding GrossOutlayAmountByProgramObjectClass_CPE values in the'
' award financial file (B). {This value is the sum of all Gross Outlay Amounts reported'
' in file B, to indicate year-to-date activity by TAS/Subaccount.}',
'Source Value Provided': 'grossoutlayamountbytas_cpe: 10000',
'Target Value Provided': 'gross_outlay_amount_by_pro_cpe_sum: 6000',
'Difference': '4000',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '15',
'Rule Label': 'A18'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'obligationsincurredtotalbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'obligations_incurred_by_pr_cpe_sum',
'Rule Message': 'The ObligationsIncurredTotalByTAS_CPE amount in the appropriation file (A) does not'
' equal the negative sum of the corresponding'
' ObligationsIncurredByProgramObjectClass_CPE values in the award financial file (B).',
'Source Value Provided': 'obligationsincurredtotalbytas_cpe: 12000',
'Target Value Provided': 'obligations_incurred_by_pr_cpe_sum: 6000',
'Difference': '18000',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '15',
'Rule Label': 'A19'
},
{
'Unique ID': 'TAS: 019-2016/2016-0113-000',
'Source File': 'appropriations',
'Source Field Name': 'deobligationsrecoveriesrefundsbytas_cpe',
'Target File': 'program_activity',
'Target Field Name': 'ussgl487100_downward_adjus_cpe_sum, ussgl497100_downward_adjus_cpe_sum,'
' ussgl487200_downward_adjus_cpe_sum, ussgl497200_downward_adjus_cpe_sum',
'Rule Message': 'DeobligationsRecoveriesRefundsByTAS_CPE in File A should equal USSGL'
' (4871_CPE+ 4971_CPE+ 4872_CPE+ 4972_CPE) for the TAS in File B.',
'Source Value Provided': 'deobligationsrecoveriesrefundsbytas_cpe: 16000',
'Target Value Provided': 'ussgl487100_downward_adjus_cpe_sum: 2000,'
' ussgl497100_downward_adjus_cpe_sum: 2000,'
' ussgl487200_downward_adjus_cpe_sum: 400,'
' ussgl497200_downward_adjus_cpe_sum: 2000',
'Difference': '9600',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '15',
'Rule Label': 'A35'
}
]
assert report_content == expected_values
self.cleanup()
def test_cross_file_errors(self):
for chunk_size, parallel, batch_sql in self.CONFIGS:
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'CHUNK_SIZE', chunk_size)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'PARALLEL', parallel)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'BATCH_SQL_VAL_RESULTS',
batch_sql)
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validator, 'SQL_VALIDATION_BATCH_SIZE',
chunk_size)
self.cross_file_errors()
def cross_file_errors(self):
self.cleanup()
# Valid
report_headers, report_content = self.generate_cross_file_report([(CROSS_FILE_A, 'appropriations'),
(CROSS_FILE_B, 'program_activity')],
warning=False)
error_count = self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).count()
assert error_count == 0
assert report_headers == self.validator.cross_file_report_headers
assert len(report_content) == 0
self.cleanup()
# SQL Validation
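        # Only rule A30.1 should fire here: the invalid TAS rows in file A have no matching TAS in file B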
report_headers, report_content = self.generate_cross_file_report([(INVALID_CROSS_A, 'appropriations'),
(INVALID_CROSS_B, 'program_activity')],
warning=False)
warnings = list(self.session.query(ErrorMetadata).filter_by(job_id=self.val_job.job_id,
severity_id=RULE_SEVERITY_DICT['fatal']).all())
assert len(warnings) == 1
assert warnings[0].occurrences == 3
assert report_headers == self.validator.cross_file_report_headers
expected_values = [
{
'Unique ID': 'TAS: 019-072-X-0306-000',
'Source File': 'appropriations',
'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'
' beginningperiodofavailability, endingperiodofavailability,'
' availabilitytypecode, mainaccountcode, subaccountcode',
'Target File': 'program_activity',
'Target Field Name': '',
'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'
' (object class program activity)',
'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'
' beginningperiodofavailability: , endingperiodofavailability: ,'
' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',
'Target Value Provided': '',
'Difference': '',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '2',
'Rule Label': 'A30.1'
},
{
'Unique ID': 'TAS: 019-072-X-0306-000',
'Source File': 'appropriations',
'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'
' beginningperiodofavailability, endingperiodofavailability,'
' availabilitytypecode, mainaccountcode, subaccountcode',
'Target File': 'program_activity',
'Target Field Name': '',
'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'
' (object class program activity)',
'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'
' beginningperiodofavailability: , endingperiodofavailability: ,'
' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',
'Target Value Provided': '',
'Difference': '',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '7',
'Rule Label': 'A30.1'
},
{
'Unique ID': 'TAS: 019-072-X-0306-000',
'Source File': 'appropriations',
'Source Field Name': 'allocationtransferagencyidentifier, agencyidentifier,'
' beginningperiodofavailability, endingperiodofavailability,'
' availabilitytypecode, mainaccountcode, subaccountcode',
'Target File': 'program_activity',
'Target Field Name': '',
'Rule Message': 'All TAS values in File A (appropriations) should exist in File B'
' (object class program activity)',
'Source Value Provided': 'allocationtransferagencyidentifier: 019, agencyidentifier: 072,'
' beginningperiodofavailability: , endingperiodofavailability: ,'
' availabilitytypecode: X, mainaccountcode: 0306, subaccountcode: 000',
'Target Value Provided': '',
'Difference': '',
'Source Flex Field': 'flex_field_a: FLEX_A, flex_field_b: FLEX_B',
'Source Row Number': '12',
'Rule Label': 'A30.1'
}
]
assert report_content == expected_values
self.cleanup()
def test_validation_parallelize_error(self):
# Test the parallelize function with a broken call to see if the process is properly cleaned up
self.monkeypatch.setattr(dataactvalidator.validation_handlers.validationManager, 'MULTIPROCESSING_POOLS', 2)
# Setting up all the other elements of the validator to simulate the integration test
self.validator.submission_id = 1
self.validator.file_type = self.session.query(FileType).filter_by(
file_type_id=FILE_TYPE_DICT['appropriations']).one()
self.validator.file_name = APPROP_FILE
self.setup_csv_record_validation(APPROP_FILE, 'appropriations')
self.validator.is_fabs = False
self.validator.reader = CsvReader()
self.validator.error_list = {}
self.validator.error_rows = []
self.validator.total_rows = 1
self.validator.total_data_rows = 0
self.validator.short_rows = []
self.validator.long_rows = []
self.validator.has_data = False
self.validator.model = Appropriation
self.validator.error_file_name = report_file_name(self.validator.submission_id, False,
self.validator.file_type.name)
self.validator.error_file_path = ''.join([CONFIG_SERVICES['error_report_path'],
self.validator.error_file_name])
self.validator.warning_file_name = report_file_name(self.validator.submission_id, True,
self.validator.file_type.name)
self.validator.warning_file_path = ''.join([CONFIG_SERVICES['error_report_path'],
self.validator.warning_file_name])
self.validator.fields = self.session.query(FileColumn) \
.filter(FileColumn.file_id == FILE_TYPE_DICT[self.validator.file_type.name]) \
.order_by(FileColumn.daims_name.asc()).all()
self.validator.expected_headers, self.validator.parsed_fields = parse_fields(self.session,
self.validator.fields)
self.validator.csv_schema = {row.name_short: row for row in self.validator.fields}
with open(self.validator.error_file_path, 'w', newline='') as error_file, \
open(self.validator.warning_file_path, 'w', newline='') as warning_file:
error_csv = csv.writer(error_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
warning_csv = csv.writer(warning_file, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
error_csv.writerow(self.validator.report_headers)
warning_csv.writerow(self.validator.report_headers)
# Finally open the file for loading into the database with baseline validations
self.validator.filename = self.validator.reader.get_filename(None, None, self.validator.file_name)
self.validator.reader.open_file(None, None, self.validator.file_name, self.validator.fields, None,
self.validator.get_file_name(self.validator.error_file_name),
self.validator.daims_to_short_dict[self.validator.file_type.file_type_id],
self.validator.short_to_daims_dict[self.validator.file_type.file_type_id],
is_local=self.validator.is_local)
# Going back to reprocess the header row
self.validator.reader.file.seek(0)
reader_obj = pd.read_csv(self.validator.reader.file, dtype=str, delimiter=',', error_bad_lines=False,
na_filter=False, chunksize=2, warn_bad_lines=False)
# Setting this outside of reader/file type objects which may not be used during processing
self.validator.flex_fields = ['flex_field_a', 'flex_field_b']
self.validator.header_dict = self.validator.reader.header_dict
self.validator.file_type_name = self.validator.file_type.name
self.validator.file_type_id = self.validator.file_type.file_type_id
self.validator.job_id = 2
# Making a broken list of chunks (one that should process fine, another with an error, another fine)
        # This way we can tell that the chunks processed after the error are ignored
normal_chunks = list(reader_obj)
broken_chunks = [normal_chunks[0], 'BREAK', normal_chunks[1], normal_chunks[2]]
with self.assertRaises(Exception) as val_except:
            # the 'BREAK' string in place of a DataFrame chunk makes the inner function fail
self.validator.parallel_data_loading(self.session, broken_chunks)
self.assertTrue(type(val_except.exception) == AttributeError)
self.assertTrue(str(val_except.exception) == "'str' object has no attribute 'empty'")
# Check to see the processes are killed
job = ps.Process(os.getpid())
assert len(job.children(recursive=True)) == 0
| 61.134111 | 120 | 0.599774 | [
"CC0-1.0"
] | RonSherfey/data-act-broker-backend | tests/integration/error_warning_file_tests.py | 62,907 | Python |
import os
import PyQt5.QtCore as qc
DATA_DIR = 'MiptCisDocs'
WRITABLE_LOCATION = qc.QStandardPaths.writableLocation(
qc.QStandardPaths.StandardLocation.AppDataLocation
)
def get_data_dir() -> str:
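    # Build the application's data directory under the platform's writable AppData location, creating it on first use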
data_dir = os.path.abspath(os.path.join(WRITABLE_LOCATION, DATA_DIR))
if not os.path.exists(data_dir):
os.mkdir(data_dir)
return data_dir
| 22.6875 | 73 | 0.749311 | [
"MIT"
] | ivigns/mipt-cis-docs | client/data_manage/data_dir.py | 363 | Python |
# -*- coding: utf-8 -*-
import os
import sys
import csv
# -----------------------------------------------------------------------------
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
# -----------------------------------------------------------------------------
def retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
    num_retries = 0
    while True:
        try:
            num_retries += 1
            ohlcv = exchange.fetch_ohlcv(symbol, timeframe, since, limit)
            # print('Fetched', len(ohlcv), symbol, 'candles from', exchange.iso8601(ohlcv[0][0]), 'to', exchange.iso8601(ohlcv[-1][0]))
            return ohlcv
        except Exception:
            # retry until the attempt budget is exhausted
            if num_retries > max_retries:
                raise  # Exception('Failed to fetch', timeframe, symbol, 'OHLCV in', max_retries, 'attempts')
def scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit):
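    # Page backwards from the present, `limit` candles per request, until we pass the `since` checkpoint or run out of history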
earliest_timestamp = exchange.milliseconds()
timeframe_duration_in_seconds = exchange.parse_timeframe(timeframe)
timeframe_duration_in_ms = timeframe_duration_in_seconds * 1000
timedelta = limit * timeframe_duration_in_ms
all_ohlcv = []
while True:
fetch_since = earliest_timestamp - timedelta
ohlcv = retry_fetch_ohlcv(exchange, max_retries, symbol, timeframe, fetch_since, limit)
# if we have reached the beginning of history
if ohlcv[0][0] >= earliest_timestamp:
break
earliest_timestamp = ohlcv[0][0]
all_ohlcv = ohlcv + all_ohlcv
print(len(all_ohlcv), 'candles in total from', exchange.iso8601(all_ohlcv[0][0]), 'to', exchange.iso8601(all_ohlcv[-1][0]))
# if we have reached the checkpoint
if fetch_since < since:
break
return all_ohlcv
def write_to_csv(filename, data):
    with open(filename, mode='w', newline='') as output_file:
csv_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerows(data)
def scrape_candles_to_csv(filename, exchange_id, max_retries, symbol, timeframe, since, limit):
# instantiate the exchange by id
exchange = getattr(ccxt, exchange_id)({
'enableRateLimit': True, # required by the Manual
})
# convert since from string to milliseconds integer if needed
if isinstance(since, str):
since = exchange.parse8601(since)
# preload all markets from the exchange
exchange.load_markets()
# fetch all candles
ohlcv = scrape_ohlcv(exchange, max_retries, symbol, timeframe, since, limit)
# save them to csv file
write_to_csv(filename, ohlcv)
print('Saved', len(ohlcv), 'candles from', exchange.iso8601(ohlcv[0][0]), 'to', exchange.iso8601(ohlcv[-1][0]), 'to', filename)
# -----------------------------------------------------------------------------
scrape_candles_to_csv('binance_3.csv', 'binance', 3, 'BTC/USDT', '1h', '2019-05-01T00:00:00Z', 100)
| 39.578947 | 133 | 0.628657 | [
"MIT"
] | yinfeng2016/Bitcoin-Trader-RL | binance-fetch-ohlcv-to-csv.py | 3,008 | Python |
##########################################
# File: refine_multiple_shards_joint.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import visualise_progress as vis
from functools import partial
from operator import itemgetter
from solve import fit_and_colour_shards
from time import time
# Requires `rscommon`.
from rscommon.pickle_ import dump
# main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_all_iterations_Xy_path')
parser.add_argument('output_dir')
parser.add_argument('--visualise-progress',
action='store_true',
default=False)
parser.add_argument('--ftol', type=float, default=1e-8)
parser.add_argument('--xtol', type=float, default=1e-8)
parser.add_argument('--maxfev', type=int, default=0)
parser.add_argument('--epsilon', type=float, default=1e-6)
args = parser.parse_args()
ensure_output_path = partial(vis.ensure_path, args.output_dir)
all_iterations_Xy, orig_args = np.load(args.input_all_iterations_Xy_path)
print '<-', orig_args['input_path']
I = plt.imread(orig_args['input_path']).astype(np.float64)[..., :3]
if orig_args['base'] == 'white':
J0 = np.ones_like(I)
elif orig_args['base'] == 'black':
J0 = np.zeros_like(I)
else:
head, tail = os.path.split(orig_args['base'])
root, ext = os.path.splitext(tail)
if ext == '.dat':
J0 = np.load(orig_args['base'])
else:
J0 = plt.imread(orig_args['base']).astype(np.float64)[..., :3]
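    # Initialise the joint refinement from the final (X, y) of each independently refined shard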
Xs0, ys0 = zip(*map(itemgetter(-1), all_iterations_Xy))
print 'Solving with `fit_and_colour_shards` ...'
np.seterr(over='ignore')
t0 = time()
(Xs, ys, all_Xs_ys), (exit_code, E0, E1, J, J1) = fit_and_colour_shards(
I, J0, orig_args['alpha'],
Xs0, ys0,
k=orig_args['k'],
epsilon=args.epsilon,
ftol=args.ftol,
xtol=args.xtol,
maxfev=args.maxfev,
return_info=True,
verbose=True)
t1 = time()
np.seterr(over='warn')
print 'E0:', E0
print 'E1:', E1
print 'Exit code: %d' % exit_code
print 'Time taken: %.3fs' % (t1 - t0)
output_path = ensure_output_path('all_Xs_ys.dat')
print '->', output_path
dump(output_path, (all_Xs_ys, args.__dict__), raise_on_failure=False)
output_path = ensure_output_path('J.dat')
print '->', output_path
dump(output_path, (J, args.__dict__), raise_on_failure=False)
output_path = ensure_output_path('J1.dat')
print '->', output_path
dump(output_path, (J1, args.__dict__), raise_on_failure=False)
if args.visualise_progress:
output_path = ensure_output_path('J.png')
print '->', output_path
f, ax = vis.make_image_figure(J)
vis.save_image_figure(output_path, f, J.shape)
output_path = ensure_output_path('J1.png')
print '->', output_path
f, ax = vis.make_image_figure(J1)
vis.save_image_figure(output_path, f, J1.shape)
if __name__ == '__main__':
main()
| 33.831683 | 78 | 0.609599 | [
"MIT"
] | rstebbing/shards | refine_multiple_shards_joint.py | 3,417 | Python |
import numpy as np
import math
from arena import Arena
from agent import HAgent, AAgent
import random
# np.random.seed(1234)
# place the humans on the arena
def place_soldiers(n, arena, agents):
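    # line the n soldiers up along one edge of the grid, two tiles apart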
x = 0
y = 0
for i in range(n):
agents[i + 1] = HAgent([x, y])
arena.arena[x, y] = 1
y += 2
return arena, agents
# place the alien agents on the arena
def place_targets(n, arena, targets, pos=None):
if pos is not None:
orig_pos = pos
for i in range(n):
targets[i + 1] = AAgent(pos[i])
arena.arena[pos[i][0], pos[i][1]] = 2
else:
orig_pos = []
for i in range(n):
while True:
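                # resample until the target spawns outside the soldiers' starting area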
x = np.rint(np.array([(arena.size - 1) * np.random.rand(1),
(arena.size - 1) * np.random.rand(1)]))
if x[0] > 7 or x[1] > 7:
break
x = [int(i) for i in x]
# x = [19, 19]
targets[i + 1] = AAgent(x)
arena.arena[x[0], x[1]] = 2
orig_pos.append([x[0], x[1]])
return arena, targets, orig_pos
# adds half-cover tiles in random locations in the arena
# At most n cover tiles added, though potentially fewer
def place_half_cover(n, arena):
for i in range(n):
x = np.random.randint(0, (arena.size - 1))
y = np.random.randint(0, (arena.size - 1))
if arena.arena[x, y] == 0:
arena.arena[x, y] = 3
return arena
# movement for agents
def move(agent, arena, loc):
# Check that agent has movement, if not, do nothing
if agent.moves <= 0:
# print('unsuccessful move')
return agent, arena
    # Check if destination is within movement range (Manhattan distance)
    elif abs(loc[0] - agent.pos[0]) + abs(loc[1] - agent.pos[1]) <= agent.move_range:
# print('successful move')
# update the arena matrix
arena.arena[agent.pos[0], agent.pos[1]] = 0
arena.arena[loc[0], loc[1]] = 1
# update agent location, number of moves
agent.moves -= 1
agent.pos = loc
arena.time += 1
return agent, arena
# if not in movement range, do nothing
else:
# print('unsuccessful move')
return agent, arena
# reload action
def reload(agent):
if agent.moves > 0:
agent.moves -= 1
agent.ammo = 5
return agent
def fire(agent, arena, target):
# for the moment, assume anything can be fired on
# set firing agent's moves to zero
agent.moves = 0
agent.ammo -= 1
cover = 0
# check if target is in (half) cover
if agent.pos[0] + 1 > target.pos[0]:
if arena.arena[target.pos[0] - 1, target.pos[1]] == 3:
cover = 20
if agent.pos[0] - 1 < target.pos[0]:
if arena.arena[target.pos[0] + 1, target.pos[1]] == 3:
cover = 20
if agent.pos[1] + 1 > target.pos[1]:
if arena.arena[target.pos[0], target.pos[1] - 1] == 3:
cover = 20
if agent.pos[1] - 1 < target.pos[1]:
if arena.arena[target.pos[0], target.pos[1] + 1] == 3:
cover = 20
# for distance equation, see
# https://www.ufopaedia.org/index.php/Chance_to_Hit_(EU2012)
diff = [agent.pos[0] - target.pos[0], agent.pos[1] - target.pos[1]]
distance_chance = 42 - 4.5 * (np.linalg.norm(diff))
# Hit chance is base aim, less cover, plus distance modifier
to_hit = agent.aim - cover + distance_chance
if np.random.randint(100) >= to_hit:
# miss, so no change
arena.time += 1
return agent, arena, target
else:
flanking = 0
crit_modifier = 1
# check if critical
if cover == 0:
flanking = 50
crit_chance = agent.base_crit + flanking
# crit modifier in xcom is 1.5x damage
if np.random.randint(100) < crit_chance:
crit_modifier = 1.5
        # slight random variance from base damage, -1 to +1
damage = math.floor(crit_modifier * (np.random.randint(-1, 2) + agent.damage))
# apply damage and return
target.health -= damage
# check if damage causes death
arena, target = check_death_enemy(arena, target)
arena.time += 1
return agent, arena, target
# check to see if character is dead, update arena information if so
def check_death_enemy(arena, target):
if target.health <= 0:
target.moves = 0
        arena.arena[target.pos[0], target.pos[1]] = 0
arena.targets -= 1
if arena.targets <= 0:
arena.targets = 0
return arena, target
# refresh movement for non-dead characters
def new_turn(arena, agents, targets):
for i in agents:
if i.health > 0:
i.moves = 2
for j in targets:
if j.health > 0:
j.moves = 2
return arena, agents, targets
# get a valid move
def get_valid_move(agent):
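    # pick a random destination within +/-3 tiles of the agent and keep it inside the 20x20 arena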
x_old = agent.pos[0]
y_old = agent.pos[1]
# print(x_old, y_old)
x = int(random.randint(x_old - 3, x_old + 3))
y = int(random.randint(y_old - 3, y_old + 3))
if x < 0:
x = x * -1
if y < 0:
y = y * -1
if x > 19:
x = 19
if y > 19:
y = 19
# print(x, y)
return x, y
| 29.344633 | 86 | 0.555641 | [
"MIT"
] | ryflect/CS683-xcom | helper.py | 5,194 | Python |
# -*- coding: utf-8 -*-
# This file was generated
import ctypes
import nidigital.errors as errors
import threading
from nidigital._visatype import * # noqa: F403,H303
import nidigital.history_ram_cycle_information as history_ram_cycle_information # noqa: F401
class Library(object):
'''Library
Wrapper around driver library.
Class will setup the correct ctypes information for every function on first call.
'''
def __init__(self, ctypes_library):
self._func_lock = threading.Lock()
self._library = ctypes_library
# We cache the cfunc object from the ctypes.CDLL object
self.niDigital_Abort_cfunc = None
self.niDigital_AbortKeepAlive_cfunc = None
self.niDigital_ApplyLevelsAndTiming_cfunc = None
self.niDigital_ApplyTDROffsets_cfunc = None
self.niDigital_BurstPattern_cfunc = None
self.niDigital_ClockGenerator_Abort_cfunc = None
self.niDigital_ClockGenerator_GenerateClock_cfunc = None
self.niDigital_Commit_cfunc = None
self.niDigital_ConfigureActiveLoadLevels_cfunc = None
self.niDigital_ConfigurePatternBurstSites_cfunc = None
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc = None
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc = None
self.niDigital_ConfigureTimeSetDriveEdges_cfunc = None
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc = None
self.niDigital_ConfigureTimeSetDriveFormat_cfunc = None
self.niDigital_ConfigureTimeSetEdge_cfunc = None
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc = None
self.niDigital_ConfigureTimeSetPeriod_cfunc = None
self.niDigital_ConfigureVoltageLevels_cfunc = None
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc = None
self.niDigital_CreateCaptureWaveformParallel_cfunc = None
self.niDigital_CreateCaptureWaveformSerial_cfunc = None
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc = None
self.niDigital_CreateSourceWaveformParallel_cfunc = None
self.niDigital_CreateSourceWaveformSerial_cfunc = None
self.niDigital_CreateTimeSet_cfunc = None
self.niDigital_DeleteAllTimeSets_cfunc = None
self.niDigital_DisableSites_cfunc = None
self.niDigital_EnableSites_cfunc = None
self.niDigital_FetchCaptureWaveformU32_cfunc = None
self.niDigital_FetchHistoryRAMCycleInformation_cfunc = None
self.niDigital_FetchHistoryRAMCyclePinData_cfunc = None
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc = None
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc = None
self.niDigital_GetAttributeViBoolean_cfunc = None
self.niDigital_GetAttributeViInt32_cfunc = None
self.niDigital_GetAttributeViInt64_cfunc = None
self.niDigital_GetAttributeViReal64_cfunc = None
self.niDigital_GetAttributeViString_cfunc = None
self.niDigital_GetChannelNameFromString_cfunc = None
self.niDigital_GetError_cfunc = None
self.niDigital_GetFailCount_cfunc = None
self.niDigital_GetHistoryRAMSampleCount_cfunc = None
self.niDigital_GetPatternName_cfunc = None
self.niDigital_GetPatternPinList_cfunc = None
self.niDigital_GetPinName_cfunc = None
self.niDigital_GetPinResultsPinInformation_cfunc = None
self.niDigital_GetSitePassFail_cfunc = None
self.niDigital_GetSiteResultsSiteNumbers_cfunc = None
self.niDigital_GetTimeSetDriveFormat_cfunc = None
self.niDigital_GetTimeSetEdge_cfunc = None
self.niDigital_GetTimeSetEdgeMultiplier_cfunc = None
self.niDigital_GetTimeSetName_cfunc = None
self.niDigital_GetTimeSetPeriod_cfunc = None
self.niDigital_InitWithOptions_cfunc = None
self.niDigital_Initiate_cfunc = None
self.niDigital_IsDone_cfunc = None
self.niDigital_IsSiteEnabled_cfunc = None
self.niDigital_LoadLevels_cfunc = None
self.niDigital_LoadPattern_cfunc = None
self.niDigital_LoadPinMap_cfunc = None
self.niDigital_LoadSpecifications_cfunc = None
self.niDigital_LoadTiming_cfunc = None
self.niDigital_LockSession_cfunc = None
self.niDigital_PPMU_Measure_cfunc = None
self.niDigital_PPMU_Source_cfunc = None
self.niDigital_ReadSequencerFlag_cfunc = None
self.niDigital_ReadSequencerRegister_cfunc = None
self.niDigital_ReadStatic_cfunc = None
self.niDigital_ResetDevice_cfunc = None
self.niDigital_SelfCalibrate_cfunc = None
self.niDigital_SendSoftwareEdgeTrigger_cfunc = None
self.niDigital_SetAttributeViBoolean_cfunc = None
self.niDigital_SetAttributeViInt32_cfunc = None
self.niDigital_SetAttributeViInt64_cfunc = None
self.niDigital_SetAttributeViReal64_cfunc = None
self.niDigital_SetAttributeViString_cfunc = None
self.niDigital_TDR_cfunc = None
self.niDigital_UnloadAllPatterns_cfunc = None
self.niDigital_UnloadSpecifications_cfunc = None
self.niDigital_UnlockSession_cfunc = None
self.niDigital_WaitUntilDone_cfunc = None
self.niDigital_WriteSequencerFlag_cfunc = None
self.niDigital_WriteSequencerRegister_cfunc = None
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc = None
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc = None
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc = None
self.niDigital_WriteStatic_cfunc = None
self.niDigital_close_cfunc = None
self.niDigital_error_message_cfunc = None
self.niDigital_reset_cfunc = None
self.niDigital_self_test_cfunc = None
def _get_library_function(self, name):
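        # Resolve the symbol from the driver library lazily; a missing symbol means the installed driver is too old for this binding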
try:
function = getattr(self._library, name)
except AttributeError as e:
raise errors.DriverTooOldError() from e
return function
def niDigital_Abort(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_Abort_cfunc is None:
self.niDigital_Abort_cfunc = self._get_library_function('niDigital_Abort')
self.niDigital_Abort_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_Abort_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_Abort_cfunc(vi)
def niDigital_AbortKeepAlive(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_AbortKeepAlive_cfunc is None:
self.niDigital_AbortKeepAlive_cfunc = self._get_library_function('niDigital_AbortKeepAlive')
self.niDigital_AbortKeepAlive_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_AbortKeepAlive_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_AbortKeepAlive_cfunc(vi)
def niDigital_ApplyLevelsAndTiming(self, vi, site_list, levels_sheet, timing_sheet, initial_state_high_pins, initial_state_low_pins, initial_state_tristate_pins): # noqa: N802
with self._func_lock:
if self.niDigital_ApplyLevelsAndTiming_cfunc is None:
self.niDigital_ApplyLevelsAndTiming_cfunc = self._get_library_function('niDigital_ApplyLevelsAndTiming')
self.niDigital_ApplyLevelsAndTiming_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_ApplyLevelsAndTiming_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ApplyLevelsAndTiming_cfunc(vi, site_list, levels_sheet, timing_sheet, initial_state_high_pins, initial_state_low_pins, initial_state_tristate_pins)
def niDigital_ApplyTDROffsets(self, vi, channel_list, num_offsets, offsets): # noqa: N802
with self._func_lock:
if self.niDigital_ApplyTDROffsets_cfunc is None:
self.niDigital_ApplyTDROffsets_cfunc = self._get_library_function('niDigital_ApplyTDROffsets')
self.niDigital_ApplyTDROffsets_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_ApplyTDROffsets_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ApplyTDROffsets_cfunc(vi, channel_list, num_offsets, offsets)
def niDigital_BurstPattern(self, vi, site_list, start_label, select_digital_function, wait_until_done, timeout): # noqa: N802
with self._func_lock:
if self.niDigital_BurstPattern_cfunc is None:
self.niDigital_BurstPattern_cfunc = self._get_library_function('niDigital_BurstPattern')
self.niDigital_BurstPattern_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViBoolean, ViBoolean, ViReal64] # noqa: F405
self.niDigital_BurstPattern_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_BurstPattern_cfunc(vi, site_list, start_label, select_digital_function, wait_until_done, timeout)
def niDigital_ClockGenerator_Abort(self, vi, channel_list): # noqa: N802
with self._func_lock:
if self.niDigital_ClockGenerator_Abort_cfunc is None:
self.niDigital_ClockGenerator_Abort_cfunc = self._get_library_function('niDigital_ClockGenerator_Abort')
self.niDigital_ClockGenerator_Abort_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_ClockGenerator_Abort_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ClockGenerator_Abort_cfunc(vi, channel_list)
def niDigital_ClockGenerator_GenerateClock(self, vi, channel_list, frequency, select_digital_function): # noqa: N802
with self._func_lock:
if self.niDigital_ClockGenerator_GenerateClock_cfunc is None:
self.niDigital_ClockGenerator_GenerateClock_cfunc = self._get_library_function('niDigital_ClockGenerator_GenerateClock')
self.niDigital_ClockGenerator_GenerateClock_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64, ViBoolean] # noqa: F405
self.niDigital_ClockGenerator_GenerateClock_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ClockGenerator_GenerateClock_cfunc(vi, channel_list, frequency, select_digital_function)
def niDigital_Commit(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_Commit_cfunc is None:
self.niDigital_Commit_cfunc = self._get_library_function('niDigital_Commit')
self.niDigital_Commit_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_Commit_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_Commit_cfunc(vi)
def niDigital_ConfigureActiveLoadLevels(self, vi, channel_list, iol, ioh, vcom): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureActiveLoadLevels_cfunc is None:
self.niDigital_ConfigureActiveLoadLevels_cfunc = self._get_library_function('niDigital_ConfigureActiveLoadLevels')
self.niDigital_ConfigureActiveLoadLevels_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureActiveLoadLevels_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureActiveLoadLevels_cfunc(vi, channel_list, iol, ioh, vcom)
def niDigital_ConfigurePatternBurstSites(self, vi, site_list): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigurePatternBurstSites_cfunc is None:
self.niDigital_ConfigurePatternBurstSites_cfunc = self._get_library_function('niDigital_ConfigurePatternBurstSites')
self.niDigital_ConfigurePatternBurstSites_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_ConfigurePatternBurstSites_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigurePatternBurstSites_cfunc(vi, site_list)
def niDigital_ConfigureTimeSetCompareEdgesStrobe(self, vi, pin_list, time_set_name, strobe_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc is None:
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc = self._get_library_function('niDigital_ConfigureTimeSetCompareEdgesStrobe')
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetCompareEdgesStrobe_cfunc(vi, pin_list, time_set_name, strobe_edge)
def niDigital_ConfigureTimeSetCompareEdgesStrobe2x(self, vi, pin_list, time_set_name, strobe_edge, strobe2_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc is None:
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc = self._get_library_function('niDigital_ConfigureTimeSetCompareEdgesStrobe2x')
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetCompareEdgesStrobe2x_cfunc(vi, pin_list, time_set_name, strobe_edge, strobe2_edge)
def niDigital_ConfigureTimeSetDriveEdges(self, vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetDriveEdges_cfunc is None:
self.niDigital_ConfigureTimeSetDriveEdges_cfunc = self._get_library_function('niDigital_ConfigureTimeSetDriveEdges')
self.niDigital_ConfigureTimeSetDriveEdges_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64, ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetDriveEdges_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetDriveEdges_cfunc(vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge)
def niDigital_ConfigureTimeSetDriveEdges2x(self, vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge, drive_data2_edge, drive_return2_edge): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc is None:
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc = self._get_library_function('niDigital_ConfigureTimeSetDriveEdges2x')
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64, ViReal64, ViReal64, ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetDriveEdges2x_cfunc(vi, pin_list, time_set_name, format, drive_on_edge, drive_data_edge, drive_return_edge, drive_off_edge, drive_data2_edge, drive_return2_edge)
def niDigital_ConfigureTimeSetDriveFormat(self, vi, pin_list, time_set_name, drive_format): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetDriveFormat_cfunc is None:
self.niDigital_ConfigureTimeSetDriveFormat_cfunc = self._get_library_function('niDigital_ConfigureTimeSetDriveFormat')
self.niDigital_ConfigureTimeSetDriveFormat_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_ConfigureTimeSetDriveFormat_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetDriveFormat_cfunc(vi, pin_list, time_set_name, drive_format)
def niDigital_ConfigureTimeSetEdge(self, vi, pin_list, time_set_name, edge, time): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetEdge_cfunc is None:
self.niDigital_ConfigureTimeSetEdge_cfunc = self._get_library_function('niDigital_ConfigureTimeSetEdge')
self.niDigital_ConfigureTimeSetEdge_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetEdge_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetEdge_cfunc(vi, pin_list, time_set_name, edge, time)
def niDigital_ConfigureTimeSetEdgeMultiplier(self, vi, pin_list, time_set_name, edge_multiplier): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc is None:
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc = self._get_library_function('niDigital_ConfigureTimeSetEdgeMultiplier')
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetEdgeMultiplier_cfunc(vi, pin_list, time_set_name, edge_multiplier)
def niDigital_ConfigureTimeSetPeriod(self, vi, time_set_name, period): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureTimeSetPeriod_cfunc is None:
self.niDigital_ConfigureTimeSetPeriod_cfunc = self._get_library_function('niDigital_ConfigureTimeSetPeriod')
self.niDigital_ConfigureTimeSetPeriod_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64] # noqa: F405
self.niDigital_ConfigureTimeSetPeriod_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureTimeSetPeriod_cfunc(vi, time_set_name, period)
def niDigital_ConfigureVoltageLevels(self, vi, channel_list, vil, vih, vol, voh, vterm): # noqa: N802
with self._func_lock:
if self.niDigital_ConfigureVoltageLevels_cfunc is None:
self.niDigital_ConfigureVoltageLevels_cfunc = self._get_library_function('niDigital_ConfigureVoltageLevels')
self.niDigital_ConfigureVoltageLevels_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViReal64, ViReal64, ViReal64, ViReal64, ViReal64] # noqa: F405
self.niDigital_ConfigureVoltageLevels_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ConfigureVoltageLevels_cfunc(vi, channel_list, vil, vih, vol, voh, vterm)
def niDigital_CreateCaptureWaveformFromFileDigicapture(self, vi, waveform_name, waveform_file_path): # noqa: N802
with self._func_lock:
if self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc is None:
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc = self._get_library_function('niDigital_CreateCaptureWaveformFromFileDigicapture')
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateCaptureWaveformFromFileDigicapture_cfunc(vi, waveform_name, waveform_file_path)
def niDigital_CreateCaptureWaveformParallel(self, vi, pin_list, waveform_name): # noqa: N802
with self._func_lock:
if self.niDigital_CreateCaptureWaveformParallel_cfunc is None:
self.niDigital_CreateCaptureWaveformParallel_cfunc = self._get_library_function('niDigital_CreateCaptureWaveformParallel')
self.niDigital_CreateCaptureWaveformParallel_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_CreateCaptureWaveformParallel_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateCaptureWaveformParallel_cfunc(vi, pin_list, waveform_name)
def niDigital_CreateCaptureWaveformSerial(self, vi, pin_list, waveform_name, sample_width, bit_order): # noqa: N802
with self._func_lock:
if self.niDigital_CreateCaptureWaveformSerial_cfunc is None:
self.niDigital_CreateCaptureWaveformSerial_cfunc = self._get_library_function('niDigital_CreateCaptureWaveformSerial')
self.niDigital_CreateCaptureWaveformSerial_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViUInt32, ViInt32] # noqa: F405
self.niDigital_CreateCaptureWaveformSerial_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateCaptureWaveformSerial_cfunc(vi, pin_list, waveform_name, sample_width, bit_order)
def niDigital_CreateSourceWaveformFromFileTDMS(self, vi, waveform_name, waveform_file_path, write_waveform_data): # noqa: N802
with self._func_lock:
if self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc is None:
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc = self._get_library_function('niDigital_CreateSourceWaveformFromFileTDMS')
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViBoolean] # noqa: F405
self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateSourceWaveformFromFileTDMS_cfunc(vi, waveform_name, waveform_file_path, write_waveform_data)
def niDigital_CreateSourceWaveformParallel(self, vi, pin_list, waveform_name, data_mapping): # noqa: N802
with self._func_lock:
if self.niDigital_CreateSourceWaveformParallel_cfunc is None:
self.niDigital_CreateSourceWaveformParallel_cfunc = self._get_library_function('niDigital_CreateSourceWaveformParallel')
self.niDigital_CreateSourceWaveformParallel_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_CreateSourceWaveformParallel_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateSourceWaveformParallel_cfunc(vi, pin_list, waveform_name, data_mapping)
def niDigital_CreateSourceWaveformSerial(self, vi, pin_list, waveform_name, data_mapping, sample_width, bit_order): # noqa: N802
with self._func_lock:
if self.niDigital_CreateSourceWaveformSerial_cfunc is None:
self.niDigital_CreateSourceWaveformSerial_cfunc = self._get_library_function('niDigital_CreateSourceWaveformSerial')
self.niDigital_CreateSourceWaveformSerial_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViUInt32, ViInt32] # noqa: F405
self.niDigital_CreateSourceWaveformSerial_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateSourceWaveformSerial_cfunc(vi, pin_list, waveform_name, data_mapping, sample_width, bit_order)
def niDigital_CreateTimeSet(self, vi, name): # noqa: N802
with self._func_lock:
if self.niDigital_CreateTimeSet_cfunc is None:
self.niDigital_CreateTimeSet_cfunc = self._get_library_function('niDigital_CreateTimeSet')
self.niDigital_CreateTimeSet_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_CreateTimeSet_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_CreateTimeSet_cfunc(vi, name)
def niDigital_DeleteAllTimeSets(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_DeleteAllTimeSets_cfunc is None:
self.niDigital_DeleteAllTimeSets_cfunc = self._get_library_function('niDigital_DeleteAllTimeSets')
self.niDigital_DeleteAllTimeSets_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_DeleteAllTimeSets_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_DeleteAllTimeSets_cfunc(vi)
def niDigital_DisableSites(self, vi, site_list): # noqa: N802
with self._func_lock:
if self.niDigital_DisableSites_cfunc is None:
self.niDigital_DisableSites_cfunc = self._get_library_function('niDigital_DisableSites')
self.niDigital_DisableSites_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_DisableSites_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_DisableSites_cfunc(vi, site_list)
def niDigital_EnableSites(self, vi, site_list): # noqa: N802
with self._func_lock:
if self.niDigital_EnableSites_cfunc is None:
self.niDigital_EnableSites_cfunc = self._get_library_function('niDigital_EnableSites')
self.niDigital_EnableSites_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_EnableSites_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_EnableSites_cfunc(vi, site_list)
def niDigital_FetchCaptureWaveformU32(self, vi, site_list, waveform_name, samples_to_read, timeout, data_buffer_size, data, actual_num_waveforms, actual_samples_per_waveform): # noqa: N802
with self._func_lock:
if self.niDigital_FetchCaptureWaveformU32_cfunc is None:
self.niDigital_FetchCaptureWaveformU32_cfunc = self._get_library_function('niDigital_FetchCaptureWaveformU32')
self.niDigital_FetchCaptureWaveformU32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViReal64, ViInt32, ctypes.POINTER(ViUInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FetchCaptureWaveformU32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchCaptureWaveformU32_cfunc(vi, site_list, waveform_name, samples_to_read, timeout, data_buffer_size, data, actual_num_waveforms, actual_samples_per_waveform)
def niDigital_FetchHistoryRAMCycleInformation(self, vi, site, sample_index, pattern_index, time_set_index, vector_number, cycle_number, num_dut_cycles): # noqa: N802
with self._func_lock:
if self.niDigital_FetchHistoryRAMCycleInformation_cfunc is None:
self.niDigital_FetchHistoryRAMCycleInformation_cfunc = self._get_library_function('niDigital_FetchHistoryRAMCycleInformation')
self.niDigital_FetchHistoryRAMCycleInformation_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt64, ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt64), ctypes.POINTER(ViInt64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FetchHistoryRAMCycleInformation_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchHistoryRAMCycleInformation_cfunc(vi, site, sample_index, pattern_index, time_set_index, vector_number, cycle_number, num_dut_cycles)
def niDigital_FetchHistoryRAMCyclePinData(self, vi, site, pin_list, sample_index, dut_cycle_index, pin_data_buffer_size, expected_pin_states, actual_pin_states, per_pin_pass_fail, actual_num_pin_data): # noqa: N802
with self._func_lock:
if self.niDigital_FetchHistoryRAMCyclePinData_cfunc is None:
self.niDigital_FetchHistoryRAMCyclePinData_cfunc = self._get_library_function('niDigital_FetchHistoryRAMCyclePinData')
self.niDigital_FetchHistoryRAMCyclePinData_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt64, ViInt32, ViInt32, ctypes.POINTER(ViUInt8), ctypes.POINTER(ViUInt8), ctypes.POINTER(ViBoolean), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FetchHistoryRAMCyclePinData_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchHistoryRAMCyclePinData_cfunc(vi, site, pin_list, sample_index, dut_cycle_index, pin_data_buffer_size, expected_pin_states, actual_pin_states, per_pin_pass_fail, actual_num_pin_data)
def niDigital_FetchHistoryRAMScanCycleNumber(self, vi, site, sample_index, scan_cycle_number): # noqa: N802
with self._func_lock:
if self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc is None:
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc = self._get_library_function('niDigital_FetchHistoryRAMScanCycleNumber')
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt64, ctypes.POINTER(ViInt64)] # noqa: F405
self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FetchHistoryRAMScanCycleNumber_cfunc(vi, site, sample_index, scan_cycle_number)
def niDigital_FrequencyCounter_MeasureFrequency(self, vi, channel_list, frequencies_buffer_size, frequencies, actual_num_frequencies): # noqa: N802
with self._func_lock:
if self.niDigital_FrequencyCounter_MeasureFrequency_cfunc is None:
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc = self._get_library_function('niDigital_FrequencyCounter_MeasureFrequency')
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_FrequencyCounter_MeasureFrequency_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_FrequencyCounter_MeasureFrequency_cfunc(vi, channel_list, frequencies_buffer_size, frequencies, actual_num_frequencies)
def niDigital_GetAttributeViBoolean(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViBoolean_cfunc is None:
self.niDigital_GetAttributeViBoolean_cfunc = self._get_library_function('niDigital_GetAttributeViBoolean')
self.niDigital_GetAttributeViBoolean_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_GetAttributeViBoolean_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViBoolean_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViInt32(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViInt32_cfunc is None:
self.niDigital_GetAttributeViInt32_cfunc = self._get_library_function('niDigital_GetAttributeViInt32')
self.niDigital_GetAttributeViInt32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetAttributeViInt32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViInt32_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViInt64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViInt64_cfunc is None:
self.niDigital_GetAttributeViInt64_cfunc = self._get_library_function('niDigital_GetAttributeViInt64')
self.niDigital_GetAttributeViInt64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViInt64)] # noqa: F405
self.niDigital_GetAttributeViInt64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViInt64_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViReal64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViReal64_cfunc is None:
self.niDigital_GetAttributeViReal64_cfunc = self._get_library_function('niDigital_GetAttributeViReal64')
self.niDigital_GetAttributeViReal64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_GetAttributeViReal64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViReal64_cfunc(vi, channel_name, attribute, value)
def niDigital_GetAttributeViString(self, vi, channel_name, attribute, buffer_size, value): # noqa: N802
with self._func_lock:
if self.niDigital_GetAttributeViString_cfunc is None:
self.niDigital_GetAttributeViString_cfunc = self._get_library_function('niDigital_GetAttributeViString')
self.niDigital_GetAttributeViString_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetAttributeViString_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetAttributeViString_cfunc(vi, channel_name, attribute, buffer_size, value)
def niDigital_GetChannelNameFromString(self, vi, indices, name_buffer_size, names): # noqa: N802
with self._func_lock:
if self.niDigital_GetChannelNameFromString_cfunc is None:
self.niDigital_GetChannelNameFromString_cfunc = self._get_library_function('niDigital_GetChannelNameFromString')
self.niDigital_GetChannelNameFromString_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetChannelNameFromString_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetChannelNameFromString_cfunc(vi, indices, name_buffer_size, names)
def niDigital_GetError(self, vi, error_code, error_description_buffer_size, error_description): # noqa: N802
with self._func_lock:
if self.niDigital_GetError_cfunc is None:
self.niDigital_GetError_cfunc = self._get_library_function('niDigital_GetError')
self.niDigital_GetError_cfunc.argtypes = [ViSession, ctypes.POINTER(ViStatus), ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetError_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetError_cfunc(vi, error_code, error_description_buffer_size, error_description)
def niDigital_GetFailCount(self, vi, channel_list, buffer_size, failure_count, actual_num_read): # noqa: N802
with self._func_lock:
if self.niDigital_GetFailCount_cfunc is None:
self.niDigital_GetFailCount_cfunc = self._get_library_function('niDigital_GetFailCount')
self.niDigital_GetFailCount_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViInt64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetFailCount_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetFailCount_cfunc(vi, channel_list, buffer_size, failure_count, actual_num_read)
def niDigital_GetHistoryRAMSampleCount(self, vi, site, sample_count): # noqa: N802
with self._func_lock:
if self.niDigital_GetHistoryRAMSampleCount_cfunc is None:
self.niDigital_GetHistoryRAMSampleCount_cfunc = self._get_library_function('niDigital_GetHistoryRAMSampleCount')
self.niDigital_GetHistoryRAMSampleCount_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViInt64)] # noqa: F405
self.niDigital_GetHistoryRAMSampleCount_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetHistoryRAMSampleCount_cfunc(vi, site, sample_count)
def niDigital_GetPatternName(self, vi, pattern_index, name_buffer_size, name): # noqa: N802
with self._func_lock:
if self.niDigital_GetPatternName_cfunc is None:
self.niDigital_GetPatternName_cfunc = self._get_library_function('niDigital_GetPatternName')
self.niDigital_GetPatternName_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetPatternName_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPatternName_cfunc(vi, pattern_index, name_buffer_size, name)
def niDigital_GetPatternPinList(self, vi, start_label, pin_list_buffer_size, pin_list): # noqa: N802
with self._func_lock:
if self.niDigital_GetPatternPinList_cfunc is None:
self.niDigital_GetPatternPinList_cfunc = self._get_library_function('niDigital_GetPatternPinList')
self.niDigital_GetPatternPinList_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetPatternPinList_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPatternPinList_cfunc(vi, start_label, pin_list_buffer_size, pin_list)
def niDigital_GetPinName(self, vi, pin_index, name_buffer_size, name): # noqa: N802
with self._func_lock:
if self.niDigital_GetPinName_cfunc is None:
self.niDigital_GetPinName_cfunc = self._get_library_function('niDigital_GetPinName')
self.niDigital_GetPinName_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetPinName_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPinName_cfunc(vi, pin_index, name_buffer_size, name)
def niDigital_GetPinResultsPinInformation(self, vi, channel_list, buffer_size, pin_indexes, site_numbers, channel_indexes, actual_num_values): # noqa: N802
with self._func_lock:
if self.niDigital_GetPinResultsPinInformation_cfunc is None:
self.niDigital_GetPinResultsPinInformation_cfunc = self._get_library_function('niDigital_GetPinResultsPinInformation')
self.niDigital_GetPinResultsPinInformation_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetPinResultsPinInformation_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetPinResultsPinInformation_cfunc(vi, channel_list, buffer_size, pin_indexes, site_numbers, channel_indexes, actual_num_values)
def niDigital_GetSitePassFail(self, vi, site_list, pass_fail_buffer_size, pass_fail, actual_num_sites): # noqa: N802
with self._func_lock:
if self.niDigital_GetSitePassFail_cfunc is None:
self.niDigital_GetSitePassFail_cfunc = self._get_library_function('niDigital_GetSitePassFail')
self.niDigital_GetSitePassFail_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViBoolean), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetSitePassFail_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetSitePassFail_cfunc(vi, site_list, pass_fail_buffer_size, pass_fail, actual_num_sites)
def niDigital_GetSiteResultsSiteNumbers(self, vi, site_list, site_result_type, site_numbers_buffer_size, site_numbers, actual_num_site_numbers): # noqa: N802
with self._func_lock:
if self.niDigital_GetSiteResultsSiteNumbers_cfunc is None:
self.niDigital_GetSiteResultsSiteNumbers_cfunc = self._get_library_function('niDigital_GetSiteResultsSiteNumbers')
self.niDigital_GetSiteResultsSiteNumbers_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ViInt32, ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetSiteResultsSiteNumbers_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetSiteResultsSiteNumbers_cfunc(vi, site_list, site_result_type, site_numbers_buffer_size, site_numbers, actual_num_site_numbers)
def niDigital_GetTimeSetDriveFormat(self, vi, pin, time_set_name, format): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetDriveFormat_cfunc is None:
self.niDigital_GetTimeSetDriveFormat_cfunc = self._get_library_function('niDigital_GetTimeSetDriveFormat')
self.niDigital_GetTimeSetDriveFormat_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetTimeSetDriveFormat_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetDriveFormat_cfunc(vi, pin, time_set_name, format)
def niDigital_GetTimeSetEdge(self, vi, pin, time_set_name, edge, time): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetEdge_cfunc is None:
self.niDigital_GetTimeSetEdge_cfunc = self._get_library_function('niDigital_GetTimeSetEdge')
self.niDigital_GetTimeSetEdge_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_GetTimeSetEdge_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetEdge_cfunc(vi, pin, time_set_name, edge, time)
def niDigital_GetTimeSetEdgeMultiplier(self, vi, pin, time_set_name, edge_multiplier): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetEdgeMultiplier_cfunc is None:
self.niDigital_GetTimeSetEdgeMultiplier_cfunc = self._get_library_function('niDigital_GetTimeSetEdgeMultiplier')
self.niDigital_GetTimeSetEdgeMultiplier_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_GetTimeSetEdgeMultiplier_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetEdgeMultiplier_cfunc(vi, pin, time_set_name, edge_multiplier)
def niDigital_GetTimeSetName(self, vi, time_set_index, name_buffer_size, name): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetName_cfunc is None:
self.niDigital_GetTimeSetName_cfunc = self._get_library_function('niDigital_GetTimeSetName')
self.niDigital_GetTimeSetName_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_GetTimeSetName_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetName_cfunc(vi, time_set_index, name_buffer_size, name)
def niDigital_GetTimeSetPeriod(self, vi, time_set_name, period): # noqa: N802
with self._func_lock:
if self.niDigital_GetTimeSetPeriod_cfunc is None:
self.niDigital_GetTimeSetPeriod_cfunc = self._get_library_function('niDigital_GetTimeSetPeriod')
self.niDigital_GetTimeSetPeriod_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViReal64)] # noqa: F405
self.niDigital_GetTimeSetPeriod_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_GetTimeSetPeriod_cfunc(vi, time_set_name, period)
def niDigital_InitWithOptions(self, resource_name, id_query, reset_device, option_string, new_vi): # noqa: N802
with self._func_lock:
if self.niDigital_InitWithOptions_cfunc is None:
self.niDigital_InitWithOptions_cfunc = self._get_library_function('niDigital_InitWithOptions')
self.niDigital_InitWithOptions_cfunc.argtypes = [ctypes.POINTER(ViChar), ViBoolean, ViBoolean, ctypes.POINTER(ViChar), ctypes.POINTER(ViSession)] # noqa: F405
self.niDigital_InitWithOptions_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_InitWithOptions_cfunc(resource_name, id_query, reset_device, option_string, new_vi)
def niDigital_Initiate(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_Initiate_cfunc is None:
self.niDigital_Initiate_cfunc = self._get_library_function('niDigital_Initiate')
self.niDigital_Initiate_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_Initiate_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_Initiate_cfunc(vi)
def niDigital_IsDone(self, vi, done): # noqa: N802
with self._func_lock:
if self.niDigital_IsDone_cfunc is None:
self.niDigital_IsDone_cfunc = self._get_library_function('niDigital_IsDone')
self.niDigital_IsDone_cfunc.argtypes = [ViSession, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_IsDone_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_IsDone_cfunc(vi, done)
def niDigital_IsSiteEnabled(self, vi, site, enable): # noqa: N802
with self._func_lock:
if self.niDigital_IsSiteEnabled_cfunc is None:
self.niDigital_IsSiteEnabled_cfunc = self._get_library_function('niDigital_IsSiteEnabled')
self.niDigital_IsSiteEnabled_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_IsSiteEnabled_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_IsSiteEnabled_cfunc(vi, site, enable)
def niDigital_LoadLevels(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadLevels_cfunc is None:
self.niDigital_LoadLevels_cfunc = self._get_library_function('niDigital_LoadLevels')
self.niDigital_LoadLevels_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadLevels_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadLevels_cfunc(vi, file_path)
def niDigital_LoadPattern(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadPattern_cfunc is None:
self.niDigital_LoadPattern_cfunc = self._get_library_function('niDigital_LoadPattern')
self.niDigital_LoadPattern_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadPattern_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadPattern_cfunc(vi, file_path)
def niDigital_LoadPinMap(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadPinMap_cfunc is None:
self.niDigital_LoadPinMap_cfunc = self._get_library_function('niDigital_LoadPinMap')
self.niDigital_LoadPinMap_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadPinMap_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadPinMap_cfunc(vi, file_path)
def niDigital_LoadSpecifications(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadSpecifications_cfunc is None:
self.niDigital_LoadSpecifications_cfunc = self._get_library_function('niDigital_LoadSpecifications')
self.niDigital_LoadSpecifications_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadSpecifications_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadSpecifications_cfunc(vi, file_path)
def niDigital_LoadTiming(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_LoadTiming_cfunc is None:
self.niDigital_LoadTiming_cfunc = self._get_library_function('niDigital_LoadTiming')
self.niDigital_LoadTiming_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_LoadTiming_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LoadTiming_cfunc(vi, file_path)
def niDigital_LockSession(self, vi, caller_has_lock): # noqa: N802
with self._func_lock:
if self.niDigital_LockSession_cfunc is None:
self.niDigital_LockSession_cfunc = self._get_library_function('niDigital_LockSession')
self.niDigital_LockSession_cfunc.argtypes = [ViSession, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_LockSession_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_LockSession_cfunc(vi, caller_has_lock)
def niDigital_PPMU_Measure(self, vi, channel_list, measurement_type, buffer_size, measurements, actual_num_read): # noqa: N802
with self._func_lock:
if self.niDigital_PPMU_Measure_cfunc is None:
self.niDigital_PPMU_Measure_cfunc = self._get_library_function('niDigital_PPMU_Measure')
self.niDigital_PPMU_Measure_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_PPMU_Measure_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_PPMU_Measure_cfunc(vi, channel_list, measurement_type, buffer_size, measurements, actual_num_read)
def niDigital_PPMU_Source(self, vi, channel_list): # noqa: N802
with self._func_lock:
if self.niDigital_PPMU_Source_cfunc is None:
self.niDigital_PPMU_Source_cfunc = self._get_library_function('niDigital_PPMU_Source')
self.niDigital_PPMU_Source_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_PPMU_Source_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_PPMU_Source_cfunc(vi, channel_list)
def niDigital_ReadSequencerFlag(self, vi, flag, value): # noqa: N802
with self._func_lock:
if self.niDigital_ReadSequencerFlag_cfunc is None:
self.niDigital_ReadSequencerFlag_cfunc = self._get_library_function('niDigital_ReadSequencerFlag')
self.niDigital_ReadSequencerFlag_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_ReadSequencerFlag_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ReadSequencerFlag_cfunc(vi, flag, value)
def niDigital_ReadSequencerRegister(self, vi, reg, value): # noqa: N802
with self._func_lock:
if self.niDigital_ReadSequencerRegister_cfunc is None:
self.niDigital_ReadSequencerRegister_cfunc = self._get_library_function('niDigital_ReadSequencerRegister')
self.niDigital_ReadSequencerRegister_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_ReadSequencerRegister_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ReadSequencerRegister_cfunc(vi, reg, value)
def niDigital_ReadStatic(self, vi, channel_list, buffer_size, data, actual_num_read): # noqa: N802
with self._func_lock:
if self.niDigital_ReadStatic_cfunc is None:
self.niDigital_ReadStatic_cfunc = self._get_library_function('niDigital_ReadStatic')
self.niDigital_ReadStatic_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViUInt8), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_ReadStatic_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ReadStatic_cfunc(vi, channel_list, buffer_size, data, actual_num_read)
def niDigital_ResetDevice(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_ResetDevice_cfunc is None:
self.niDigital_ResetDevice_cfunc = self._get_library_function('niDigital_ResetDevice')
self.niDigital_ResetDevice_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_ResetDevice_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_ResetDevice_cfunc(vi)
def niDigital_SelfCalibrate(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_SelfCalibrate_cfunc is None:
self.niDigital_SelfCalibrate_cfunc = self._get_library_function('niDigital_SelfCalibrate')
self.niDigital_SelfCalibrate_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_SelfCalibrate_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SelfCalibrate_cfunc(vi)
def niDigital_SendSoftwareEdgeTrigger(self, vi, trigger, trigger_identifier): # noqa: N802
with self._func_lock:
if self.niDigital_SendSoftwareEdgeTrigger_cfunc is None:
self.niDigital_SendSoftwareEdgeTrigger_cfunc = self._get_library_function('niDigital_SendSoftwareEdgeTrigger')
self.niDigital_SendSoftwareEdgeTrigger_cfunc.argtypes = [ViSession, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_SendSoftwareEdgeTrigger_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SendSoftwareEdgeTrigger_cfunc(vi, trigger, trigger_identifier)
def niDigital_SetAttributeViBoolean(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViBoolean_cfunc is None:
self.niDigital_SetAttributeViBoolean_cfunc = self._get_library_function('niDigital_SetAttributeViBoolean')
self.niDigital_SetAttributeViBoolean_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViBoolean] # noqa: F405
self.niDigital_SetAttributeViBoolean_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViBoolean_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViInt32(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViInt32_cfunc is None:
self.niDigital_SetAttributeViInt32_cfunc = self._get_library_function('niDigital_SetAttributeViInt32')
self.niDigital_SetAttributeViInt32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViInt32] # noqa: F405
self.niDigital_SetAttributeViInt32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViInt32_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViInt64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViInt64_cfunc is None:
self.niDigital_SetAttributeViInt64_cfunc = self._get_library_function('niDigital_SetAttributeViInt64')
self.niDigital_SetAttributeViInt64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViInt64] # noqa: F405
self.niDigital_SetAttributeViInt64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViInt64_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViReal64(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViReal64_cfunc is None:
self.niDigital_SetAttributeViReal64_cfunc = self._get_library_function('niDigital_SetAttributeViReal64')
self.niDigital_SetAttributeViReal64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViReal64] # noqa: F405
self.niDigital_SetAttributeViReal64_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViReal64_cfunc(vi, channel_name, attribute, value)
def niDigital_SetAttributeViString(self, vi, channel_name, attribute, value): # noqa: N802
with self._func_lock:
if self.niDigital_SetAttributeViString_cfunc is None:
self.niDigital_SetAttributeViString_cfunc = self._get_library_function('niDigital_SetAttributeViString')
self.niDigital_SetAttributeViString_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_SetAttributeViString_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_SetAttributeViString_cfunc(vi, channel_name, attribute, value)
def niDigital_TDR(self, vi, channel_list, apply_offsets, offsets_buffer_size, offsets, actual_num_offsets): # noqa: N802
with self._func_lock:
if self.niDigital_TDR_cfunc is None:
self.niDigital_TDR_cfunc = self._get_library_function('niDigital_TDR')
self.niDigital_TDR_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViBoolean, ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405
self.niDigital_TDR_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_TDR_cfunc(vi, channel_list, apply_offsets, offsets_buffer_size, offsets, actual_num_offsets)
def niDigital_UnloadAllPatterns(self, vi, unload_keep_alive_pattern): # noqa: N802
with self._func_lock:
if self.niDigital_UnloadAllPatterns_cfunc is None:
self.niDigital_UnloadAllPatterns_cfunc = self._get_library_function('niDigital_UnloadAllPatterns')
self.niDigital_UnloadAllPatterns_cfunc.argtypes = [ViSession, ViBoolean] # noqa: F405
self.niDigital_UnloadAllPatterns_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_UnloadAllPatterns_cfunc(vi, unload_keep_alive_pattern)
def niDigital_UnloadSpecifications(self, vi, file_path): # noqa: N802
with self._func_lock:
if self.niDigital_UnloadSpecifications_cfunc is None:
self.niDigital_UnloadSpecifications_cfunc = self._get_library_function('niDigital_UnloadSpecifications')
self.niDigital_UnloadSpecifications_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_UnloadSpecifications_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_UnloadSpecifications_cfunc(vi, file_path)
def niDigital_UnlockSession(self, vi, caller_has_lock): # noqa: N802
with self._func_lock:
if self.niDigital_UnlockSession_cfunc is None:
self.niDigital_UnlockSession_cfunc = self._get_library_function('niDigital_UnlockSession')
self.niDigital_UnlockSession_cfunc.argtypes = [ViSession, ctypes.POINTER(ViBoolean)] # noqa: F405
self.niDigital_UnlockSession_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_UnlockSession_cfunc(vi, caller_has_lock)
def niDigital_WaitUntilDone(self, vi, timeout): # noqa: N802
with self._func_lock:
if self.niDigital_WaitUntilDone_cfunc is None:
self.niDigital_WaitUntilDone_cfunc = self._get_library_function('niDigital_WaitUntilDone')
self.niDigital_WaitUntilDone_cfunc.argtypes = [ViSession, ViReal64] # noqa: F405
self.niDigital_WaitUntilDone_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WaitUntilDone_cfunc(vi, timeout)
def niDigital_WriteSequencerFlag(self, vi, flag, value): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSequencerFlag_cfunc is None:
self.niDigital_WriteSequencerFlag_cfunc = self._get_library_function('niDigital_WriteSequencerFlag')
self.niDigital_WriteSequencerFlag_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViBoolean] # noqa: F405
self.niDigital_WriteSequencerFlag_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSequencerFlag_cfunc(vi, flag, value)
def niDigital_WriteSequencerRegister(self, vi, reg, value): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSequencerRegister_cfunc is None:
self.niDigital_WriteSequencerRegister_cfunc = self._get_library_function('niDigital_WriteSequencerRegister')
self.niDigital_WriteSequencerRegister_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32] # noqa: F405
self.niDigital_WriteSequencerRegister_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSequencerRegister_cfunc(vi, reg, value)
def niDigital_WriteSourceWaveformBroadcastU32(self, vi, waveform_name, waveform_size, waveform_data): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSourceWaveformBroadcastU32_cfunc is None:
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc = self._get_library_function('niDigital_WriteSourceWaveformBroadcastU32')
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViInt32, ctypes.POINTER(ViUInt32)] # noqa: F405
self.niDigital_WriteSourceWaveformBroadcastU32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSourceWaveformBroadcastU32_cfunc(vi, waveform_name, waveform_size, waveform_data)
def niDigital_WriteSourceWaveformDataFromFileTDMS(self, vi, waveform_name, waveform_file_path): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc is None:
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc = self._get_library_function('niDigital_WriteSourceWaveformDataFromFileTDMS')
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSourceWaveformDataFromFileTDMS_cfunc(vi, waveform_name, waveform_file_path)
def niDigital_WriteSourceWaveformSiteUniqueU32(self, vi, site_list, waveform_name, num_waveforms, samples_per_waveform, waveform_data): # noqa: N802
with self._func_lock:
if self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc is None:
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc = self._get_library_function('niDigital_WriteSourceWaveformSiteUniqueU32')
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViChar), ViInt32, ViInt32, ctypes.POINTER(ViUInt32)] # noqa: F405
self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteSourceWaveformSiteUniqueU32_cfunc(vi, site_list, waveform_name, num_waveforms, samples_per_waveform, waveform_data)
def niDigital_WriteStatic(self, vi, channel_list, state): # noqa: N802
with self._func_lock:
if self.niDigital_WriteStatic_cfunc is None:
self.niDigital_WriteStatic_cfunc = self._get_library_function('niDigital_WriteStatic')
self.niDigital_WriteStatic_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViUInt8] # noqa: F405
self.niDigital_WriteStatic_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_WriteStatic_cfunc(vi, channel_list, state)
def niDigital_close(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_close_cfunc is None:
self.niDigital_close_cfunc = self._get_library_function('niDigital_close')
self.niDigital_close_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_close_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_close_cfunc(vi)
def niDigital_error_message(self, vi, error_code, error_message): # noqa: N802
with self._func_lock:
if self.niDigital_error_message_cfunc is None:
self.niDigital_error_message_cfunc = self._get_library_function('niDigital_error_message')
self.niDigital_error_message_cfunc.argtypes = [ViSession, ViStatus, ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_error_message_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_error_message_cfunc(vi, error_code, error_message)
def niDigital_reset(self, vi): # noqa: N802
with self._func_lock:
if self.niDigital_reset_cfunc is None:
self.niDigital_reset_cfunc = self._get_library_function('niDigital_reset')
self.niDigital_reset_cfunc.argtypes = [ViSession] # noqa: F405
self.niDigital_reset_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_reset_cfunc(vi)
def niDigital_self_test(self, vi, test_result, test_message): # noqa: N802
with self._func_lock:
if self.niDigital_self_test_cfunc is None:
self.niDigital_self_test_cfunc = self._get_library_function('niDigital_self_test')
self.niDigital_self_test_cfunc.argtypes = [ViSession, ctypes.POINTER(ViInt16), ctypes.POINTER(ViChar)] # noqa: F405
self.niDigital_self_test_cfunc.restype = ViStatus # noqa: F405
return self.niDigital_self_test_cfunc(vi, test_result, test_message)
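    # Hedged usage sketch (not part of the generated wrapper): each method above
    # is a thin ctypes pass-through, so the caller prepares ctypes buffers itself.
    # The 'lib' (an instance of this class) and 'vi' names are assumptions.
    #
    #   result = ViInt16(0)                                    # noqa: F405
    #   message = (ViChar * 256)()                             # noqa: F405
    #   status = lib.niDigital_self_test(vi, ctypes.pointer(result), message)
    #   if status < 0:
    #       raise RuntimeError(message.value.decode('ascii'))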
| 76.338766 | 278 | 0.743591 | ["MIT"] | SukruthGrandhiNI/nimi-python | generated/nidigital/nidigital/_library.py | 65,575 | Python |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from chirpstack_api.as_pb.external.api import organization_pb2 as chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class OrganizationServiceStub(object):
"""OrganizationService is the service managing the organization access.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.List = channel.unary_unary(
'/api.OrganizationService/List',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationResponse.FromString,
)
self.Get = channel.unary_unary(
'/api.OrganizationService/Get',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationResponse.FromString,
)
self.Create = channel.unary_unary(
'/api.OrganizationService/Create',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationResponse.FromString,
)
self.Update = channel.unary_unary(
'/api.OrganizationService/Update',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.Delete = channel.unary_unary(
'/api.OrganizationService/Delete',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListUsers = channel.unary_unary(
'/api.OrganizationService/ListUsers',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersResponse.FromString,
)
self.GetUser = channel.unary_unary(
'/api.OrganizationService/GetUser',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserRequest.SerializeToString,
response_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserResponse.FromString,
)
self.AddUser = channel.unary_unary(
'/api.OrganizationService/AddUser',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.AddOrganizationUserRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.UpdateUser = channel.unary_unary(
'/api.OrganizationService/UpdateUser',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationUserRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeleteUser = channel.unary_unary(
'/api.OrganizationService/DeleteUser',
request_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationUserRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
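    # Hedged usage sketch (illustrative only; the server address and the
    # 'authorization' metadata key are assumptions, not defined by this module):
    #
    #   channel = grpc.insecure_channel('localhost:8080')
    #   stub = OrganizationServiceStub(channel)
    #   req = chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest(limit=10)
    #   resp = stub.List(req, metadata=[('authorization', 'Bearer <api-token>')])
    #   print(resp.total_count)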
class OrganizationServiceServicer(object):
"""OrganizationService is the service managing the organization access.
"""
def List(self, request, context):
"""Get organization list.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Get data for a particular organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Create a new organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update an existing organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete an organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListUsers(self, request, context):
"""Get organization's user list.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetUser(self, request, context):
"""Get data for a particular organization user.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddUser(self, request, context):
"""Add a new user to an organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateUser(self, request, context):
"""Update a user in an organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteUser(self, request, context):
"""Delete a user from an organization.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OrganizationServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ListUsers': grpc.unary_unary_rpc_method_handler(
servicer.ListUsers,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersResponse.SerializeToString,
),
'GetUser': grpc.unary_unary_rpc_method_handler(
servicer.GetUser,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserRequest.FromString,
response_serializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserResponse.SerializeToString,
),
'AddUser': grpc.unary_unary_rpc_method_handler(
servicer.AddUser,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.AddOrganizationUserRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'UpdateUser': grpc.unary_unary_rpc_method_handler(
servicer.UpdateUser,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationUserRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'DeleteUser': grpc.unary_unary_rpc_method_handler(
servicer.DeleteUser,
request_deserializer=chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationUserRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'api.OrganizationService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OrganizationService(object):
"""OrganizationService is the service managing the organization access.
"""
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/List',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Get',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Create',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.CreateOrganizationResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Update',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/Delete',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListUsers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/ListUsers',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.ListOrganizationUsersResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/GetUser',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.GetOrganizationUserResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/AddUser',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.AddOrganizationUserRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/UpdateUser',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.UpdateOrganizationUserRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteUser(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.OrganizationService/DeleteUser',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_organization__pb2.DeleteOrganizationUserRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
| 51.046196 | 158 | 0.704764 | ["MIT"] | GaiaFL/chirpstack-api | python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py | 18,785 | Python |
import sys
import base64
import platform
import functools
from keyring.util import properties
from keyring.backend import KeyringBackend
from keyring.errors import PasswordDeleteError, ExceptionRaisedContext
from . import file_base
try:
# prefer pywin32-ctypes
from win32ctypes import pywintypes
from win32ctypes import win32cred
# force demand import to raise ImportError
win32cred.__name__
except ImportError:
# fallback to pywin32
try:
import pywintypes
import win32cred
except ImportError:
pass
try:
import winreg
except ImportError:
try:
# Python 2 compatibility
import _winreg as winreg
except ImportError:
pass
try:
from . import _win_crypto
except ImportError:
pass
def has_pywin32():
"""
Does this environment have pywin32?
Should return False even when Mercurial's Demand Import allowed import of
win32cred.
"""
with ExceptionRaisedContext() as exc:
win32cred.__name__
return not bool(exc)
def has_wincrypto():
"""
Does this environment have wincrypto?
    Should return False even when Mercurial's Demand Import allowed import of
    _win_crypto, so an attribute of the module is accessed to force the real import.
"""
with ExceptionRaisedContext() as exc:
_win_crypto.__name__
return not bool(exc)
class EncryptedKeyring(file_base.Keyring):
"""
A File-based keyring secured by Windows Crypto API.
"""
@properties.ClassProperty
@classmethod
def priority(self):
"""
Preferred over file.EncryptedKeyring but not other, more sophisticated
Windows backends.
"""
        if platform.system() != 'Windows':
            raise RuntimeError("Requires Windows")
        return 0.8
filename = 'wincrypto_pass.cfg'
def encrypt(self, password):
"""Encrypt the password using the CryptAPI.
"""
return _win_crypto.encrypt(password)
def decrypt(self, password_encrypted):
"""Decrypt the password using the CryptAPI.
"""
return _win_crypto.decrypt(password_encrypted)
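# Hedged usage sketch (illustrative only; the service and user names are made
# up, and file_base.Keyring is assumed to provide get/set_password on top of
# the encrypt()/decrypt() hooks above):
#
#   kr = EncryptedKeyring()          # backed by the 'wincrypto_pass.cfg' file
#   kr.set_password('my-service', 'alice', 's3cret')
#   print(kr.get_password('my-service', 'alice'))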
class RegistryKeyring(KeyringBackend):
"""
    RegistryKeyring is a keyring backend which uses the Windows CryptAPI to
    encrypt the user's passwords and stores them under registry keys.
"""
@properties.ClassProperty
@classmethod
def priority(self):
"""
Preferred on Windows when pywin32 isn't installed
"""
if platform.system() != 'Windows':
raise RuntimeError("Requires Windows")
if not has_wincrypto():
raise RuntimeError("Requires ctypes")
return 2
def get_password(self, service, username):
"""Get password of the username for the service
"""
try:
# fetch the password
key = r'Software\%s\Keyring' % service
hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key)
password_saved = winreg.QueryValueEx(hkey, username)[0]
password_base64 = password_saved.encode('ascii')
# decode with base64
            password_encrypted = base64.decodebytes(password_base64)
            # decrypt the password
password = _win_crypto.decrypt(password_encrypted).decode('utf-8')
except EnvironmentError:
password = None
return password
def set_password(self, service, username, password):
"""Write the password to the registry
"""
# encrypt the password
password_encrypted = _win_crypto.encrypt(password.encode('utf-8'))
# encode with base64
        password_base64 = base64.encodebytes(password_encrypted)
# encode again to unicode
password_saved = password_base64.decode('ascii')
# store the password
key_name = r'Software\%s\Keyring' % service
hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, key_name)
winreg.SetValueEx(hkey, username, 0, winreg.REG_SZ, password_saved)
def delete_password(self, service, username):
"""Delete the password for the username of the service.
"""
try:
key_name = r'Software\%s\Keyring' % service
hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_name, 0,
winreg.KEY_ALL_ACCESS)
winreg.DeleteValue(hkey, username)
winreg.CloseKey(hkey)
except WindowsError:
e = sys.exc_info()[1]
raise PasswordDeleteError(e)
self._delete_key_if_empty(service)
def _delete_key_if_empty(self, service):
key_name = r'Software\%s\Keyring' % service
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_name, 0,
winreg.KEY_ALL_ACCESS)
try:
winreg.EnumValue(key, 0)
return
except WindowsError:
pass
winreg.CloseKey(key)
# it's empty; delete everything
while key_name != 'Software':
parent, sep, base = key_name.rpartition('\\')
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, parent, 0,
winreg.KEY_ALL_ACCESS)
winreg.DeleteKey(key, base)
winreg.CloseKey(key)
key_name = parent
class OldPywinError(object):
"""
A compatibility wrapper for old PyWin32 errors, such as reported in
https://bitbucket.org/kang/python-keyring-lib/issue/140/
"""
def __init__(self, orig):
self.orig = orig
@property
def funcname(self):
return self.orig[1]
@property
def winerror(self):
return self.orig[0]
@classmethod
def wrap(cls, orig_err):
attr_check = functools.partial(hasattr, orig_err)
is_old = not all(map(attr_check, ['funcname', 'winerror']))
return cls(orig_err) if is_old else orig_err
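# Minimal round-trip sketch of the registry-backed keyring described above.
# Assumptions: Windows, this package importable (run e.g. via
# ``python -m keyrings.alt.Windows``), and a Python version where
# ``base64.encodestring``/``decodestring`` still exist. Service and user
# names below are placeholders.
if __name__ == '__main__':
    kr = RegistryKeyring()
    kr.set_password('example-service', 'alice', 's3cret')
    print(kr.get_password('example-service', 'alice'))
    kr.delete_password('example-service', 'alice')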
| 30.06701 | 78 | 0.642208 | [
"MIT"
] | nficano/alexa-find-my-iphone | src/site-packages/keyrings/alt/Windows.py | 5,833 | Python |
def multiple(first,second):
return first * second
def add(x,y):
return x+y
| 16.6 | 27 | 0.662651 | [
"MIT"
] | tavleensasan/Tav | maths2.py | 83 | Python |
"""
This module is used to manage rules.
## Base format
A context and rule is written in JSON.
A context contains an identifier, AND one or three rules.
One of rules must specify the SCHC Compression/Decompression (CD).
Two specify SCHC Fragmentation/Reassembly (FR) if needed.
Therefore, a context has to be formed to either below structures.
{
"devL2Addr": ...,
"dstIID": ...,
"comp": { ... },
"fragSender": { ... },
"fragReceiver": { ... }
}
"comp": compression rule.
"fragSender": fragmentation rule for inbound.
"fragReceiver": fragmentation rule for outbound.
Or,
{
"devL2Addr": ...,
"dstIID": ...,
"profile": { ... },
"comp": { ... }
}
XXX Q. "profile" should be in the context ?
## Context
A context is uniquely identified by devL2Addr
specifying the L2 address of a SCHC device.
dstIID matches the IP address assigned
to the interface of the communication peer.
In the context of the SCHC device, dstIID indicates the IP address of
the interface at the SCHC Translator,
which is dedicated between the device and
the application.
In the context of the other side, dstIID indicates the IP address of
the SCHC device.
+--------+ +------------+ +-----+
| SCHC | | SCHC |---------| App |
| Device | | Translator | | |
+--------+ +------------+ +-----+
| D (IP addr) | T (IP addr)
| L (L2 addr) |
| |
+--// LPWAN //--| GW |------------+
In the above example, the context of each side is like below:
at the device:
{
"devL2Addr": "L",
"dstIID": "M"
}
at the translator:
{
"devL2Addr": "L",
"dstIID": "D"
}
"*" and "/" can be used for a wild-card match. (XXX should be implemented.)
## Rule
XXX is it true that both ruleID and ruleLength form a unique key ?
XXX is the device L2 address the real key ?
A rule is uniquely identified by the rule ID of variable length.
Each rule must contain the following information:
{
"ruleID" : 2,
"ruleLength" : 3
}
where ruleID contains the rule ID value aligned on the right and ruleLength gives
the size in bits of the ruleID. In the previous example, this corresponds to
the binary value 0b010.
If ruleLength is not specified, the value is set to 8 bits (one byte).
The rule is either a compression/decompression rule
or a fragmentation/reassembly rule.
For C/D rules, the keyword "compression" must be defined.
For F/R rules, the keyword "fragmentation" and "fragmentation"
must be defined.
## Compression Rule
A compression rule is bidirectional.
## Fragmentation Rule
A fragmentation rule is unidirectional.
The "fragmentation" keyword is used to give the fragmentation mode and profile:
- one fragmentation mode keyword "noAck", "ackAlways" or "ackOnError".
- FRModeProfile parameters. Default values are automatically added.
- dtagSize, WSize and FCNSize are used to define the SCHC fragmentation header
- windowSize can be added if not 2^FCNSize - 1
For "ackOnError" the following parameter is defined:
- "ackBehavior" defined the ack behavior, i.e. when the Ack must be spontaneously sent
by the receiver and therefore when the sender must listen for Ack.
- "afterAll0" means that the sender waits for ack after sending an All-0
- "afterAll1" means that the sender waits only after sending the last fragment
- other behaviors may be defined in the future.
## data model of DB
db = [
{
"devL2Addr": ..,
"dstIID": ..,
"comp": {
"ruleID": ..,
"ruleLength": ..,
"compression": { ...}
},
"fragSender": {
"ruleID": ..,
"ruleLength": ..,
"fragmentation": { ...}
}
"fragReceiver": {
"ruleID": ..,
"ruleLength": ..,
"fragmentation": { ...}
}
}, ...
]
## method
- add_context(context, comp=None, fragSender=None, fragReceiver=None)
It adds the context. If it exists, raise ValueError.
- add_rules(context, comp=None, fragSender=None, fragReceiver=None)
It adds the list of rules into the context specified.
If it exists, raise ValueError.
If context is not specified, the rule will be added into the default
context.
## Rule to add a new key
Each key must be unique through a rule.
For example, below the keys of "profile" are not allowed.
{
"profile": { ... },
"compression": { "profile": ... }
}
## Examples
Example 1:
{
"ruleID" : 14,
"ruleLength" : 4 # rule 0b1110
"compression": { ... }
}
Example 2:
{
"ruleID" : 15,
"ruleLength" : 4 # rule 0b1110
"fragmentationOut": {
"FRMode" : "noAck" # or "ackAlways", "ackOnError"
"FRModeProfile" : {
"dtagSize" : 1,
"WSize": 3,
"FCNSize" : 3,
"windowSize", 7,
"ackBehavior": "afterAll1"
}
}
}
"""
try:
import struct
except ImportError:
import ustruct as struct
from copy import deepcopy
# XXX to be checked whether they are needed.
DEFAULT_FRAGMENT_RID = 1
DEFAULT_L2_SIZE = 8
DEFAULT_RECV_BUFSIZE = 512
DEFAULT_TIMER_T1 = 5
DEFAULT_TIMER_T2 = 10
DEFAULT_TIMER_T3 = 10
DEFAULT_TIMER_T4 = 12
DEFAULT_TIMER_T5 = 14
class DictToAttrDeep:
def __init__(self, **entries):
self.__update(**entries)
def __update(self, **entries):
for k,v in entries.items():
if isinstance(v, dict):
self.__dict__[k] = DictToAttrDeep(**v)
else:
self.__dict__.update(entries)
def __contains__(self, t):
""" t in this """
for k,v in self.__dict__.items():
if k == t:
return True
if isinstance(v, DictToAttrDeep):
if t in v:
return True
def __getitem__(self, t):
""" this[k] """
for k,v in self.__dict__.items():
if k == t:
return v
if isinstance(v, DictToAttrDeep):
if t in v:
return v[t]
def get(self, k, d=None):
""" this.get(k) """
if k not in self:
return d
return self.__getitem__(k)
def __repr__(self):
return "{{{}}}".format(str(", ".join(
['"{}": {}'.format(k,self.__reprx(v))
for k,v in self.__dict__.items()])))
def __reprx(self, t):
if isinstance(t, str):
return '"{}"'.format(t)
elif isinstance(t, dict):
return "{{{}}}".format(str(", ".join(
['"{}": {}'.format(k,self.__reprx(v))
for k,v in t.items()])))
elif isinstance(t, list):
return "[{}]".format(str(", ".join(
["{}".format(self.__reprx(i)) for i in t])))
else:
return repr(t)
class RuleManager:
"""RuleManager class is used to manage Compression/Decompression and Fragmentation/
Reassembly rules."""
def __init__(self):
#RM database
self._db = []
def _checkRuleValue(self, rule_id, rule_id_length):
"""this function looks if bits specified in ruleID are not outside of
rule_id_length"""
if rule_id_length > 32:
raise ValueError("Rule length should be less than 32")
r1 = rule_id
for k in range (32, rule_id_length, -1):
if (0x01 << k) & r1 !=0:
raise ValueError("rule ID too long")
def _ruleIncluded(self, r1ID, r1l, r2ID, r2l):
"""check if a conflict exists between to ruleID (i.e. same first bits equals) """
r1 = r1ID << (32-r1l)
r2 = r2ID << (32-r2l)
l = min(r1l, r2l)
for k in range (32-l, 32):
if ((r1 & (0x01 << k)) != (r2 & (0x01 << k))):
return False
return True
def _nameRule (self, r):
return "Rule {}/{}:".format(r["ruleID"], r["ruleLength"])
def find_rule_bypacket(self, context, packet_bbuf):
""" returns a compression rule or an fragmentation rule
in the context matching with the field value of rule id in the packet.
"""
for k in ["fragSender", "fragReceiver","fragSender2", "fragReceiver2", "comp"]:
r = context.get(k)
if r is not None:
rule_id = packet_bbuf.get_bits(r["ruleLength"],position=0)
if r["ruleID"] == rule_id:
print("--------------------RuleManage------------------")
print("ruleID ",rule_id)
print()
print("--------------------------------------------------")
return k, r
return None, None
def find_context_bydevL2addr(self, dev_L2addr):
""" find a context with dev_L2addr. """
# XXX needs to implement wildcard search or something like that.
for c in self._db:
if c["devL2Addr"] == dev_L2addr:
return c
if c["devL2Addr"] == "*":
return c
return None
def find_context_bydstiid(self, dst_iid):
""" find a context with dst_iid, which can be a wild card. """
# XXX needs to implement wildcard search or something like that.
for c in self._db:
if c["dstIID"] == dst_iid:
return c
if c["dstIID"] == "*":
return c
return None
def find_context_exact(self, dev_L2addr, dst_iid):
""" find a context by both devL2Addr and dstIID.
This is mainly for internal use. """
for c in self._db:
if c["devL2Addr"] == dev_L2addr and c["dstIID"] == dst_iid:
return c
return None
def add_context(self, context, comp=None, fragSender=None, fragReceiver=None, fragSender2=None, fragReceiver2=None):
""" add context into the db. """
if self.find_context_exact(context["devL2Addr"],context["dstIID"]) is not None:
raise ValueError("the context {}/{} exist.".format(
context["devL2Addr"], context["dstIID"]))
# add context
c = deepcopy(context)
self._db.append(c)
self.add_rules(c, comp, fragSender, fragReceiver, fragSender2, fragReceiver2)
def add_rules(self, context, comp=None, fragSender=None, fragReceiver=None, fragSender2=None, fragReceiver2=None):
""" add rules into the context specified. """
if comp is not None:
self.add_rule(context, "comp", comp)
if fragSender is not None:
self.add_rule(context, "fragSender", fragSender)
if fragReceiver is not None:
self.add_rule(context, "fragReceiver", fragReceiver)
if fragSender2 is not None:
self.add_rule(context, "fragSender2", fragSender2)
if fragReceiver2 is not None:
self.add_rule(context, "fragReceiver2", fragReceiver2)
def add_rule(self, context, key, rule):
""" Check rule integrity and uniqueless and add it to the db """
if not "ruleID" in rule:
raise ValueError ("Rule ID not defined.")
if not "ruleLength" in rule:
if rule["ruleID"] < 255:
rule["ruleLength"] = 8
else:
raise ValueError ("RuleID too large for default size on a byte")
# proceed to compression check (TBD)
if key == "comp":
self.check_rule_compression(rule)
elif key in ["fragSender", "fragReceiver","fragSender2", "fragReceiver2", "comp"]:
self.check_rule_fragmentation(rule)
else:
raise ValueError ("key must be either comp, fragSender, fragReceiver, fragSender2, fragReceiver2")
rule_id = rule["ruleID"]
rule_id_length = rule["ruleLength"]
self._checkRuleValue(rule_id, rule_id_length)
for k in ["fragSender", "fragReceiver","fragSender2", "fragReceiver2", "comp"]:
r = context.get(k)
if r is not None:
if rule_id_length == r.ruleLength and rule_id == r.ruleID:
raise ValueError ("Rule {}/{} exists".format(
rule_id, rule_id_length))
context[key] = DictToAttrDeep(**rule)
def check_rule_compression(self, rule):
""" compression rule check """
# XXX need more work.
if (not "compression" in rule or "fragmentation" in rule):
raise ValueError ("{} Invalid rule".format(self._nameRule(rule)))
canon_rule_set = []
for r in rule["compression"]["rule_set"]:
canon_r = {}
for k,v in r.items():
if isinstance(v, str):
canon_r[k.upper()] = v.upper()
else:
canon_r[k.upper()] = v
canon_rule_set.append(canon_r)
rule["compression"]["rule_set"] = canon_rule_set
def check_rule_fragmentation(self, rule):
""" fragmentation rule check """
if (not "fragmentation" in rule or "compression" in rule):
raise ValueError ("{} Invalid rule".format(self._nameRule(rule)))
if "fragmentation" in rule:
fragRule = rule["fragmentation"]
if not "FRMode" in fragRule:
raise ValueError ("{} Fragmentation mode must be specified".format(self._nameRule(rule)))
mode = fragRule["FRMode"]
if not mode in ("noAck", "ackAlways", "ackOnError"):
raise ValueError ("{} Unknown fragmentation mode".format(self._nameRule(rule)))
if not "FRModeProfile" in fragRule:
fragRule["FRModeProfile"] = {}
profile = fragRule["FRModeProfile"]
if not "dtagSize" in profile:
profile["dtagSize"] = 0
if not "WSize" in profile:
if mode == "noAck":
profile["WSize"] = 0
elif mode == "ackAlways":
profile["WSize"] = 1
elif mode == "ackOnError":
profile["WSize"] = 5
if not "FCNSize" in profile:
if mode == "noAck":
profile["FCNSize"] = 1
elif mode == "ackAlways":
profile["FCNSize"] = 3
elif mode == "ackOnError":
profile["FCNSize"] = 3
if "windowSize" in profile:
if profile["windowSize"] > (0x01 << profile["FCNSize"]) - 1 or\
profile["windowSize"] < 0:
raise ValueError ("{} illegal windowSize".format(self._nameRule(rule)))
else:
profile["windowSize"] = (0x01 << profile["FCNSize"]) - 1
if mode == "ackOnError":
if not "ackBehavior" in profile:
raise ValueError ("Ack on error behavior must be specified (afterAll1 or afterAll0)")
if not "tileSize" in profile:
profile["tileSize"] = 64
| 32.285417 | 120 | 0.545589 | [
"MIT"
] | n44hernandezp/openschc | rulemanager.py | 15,497 | Python |
"""
This module is to support *bbox_inches* option in savefig command.
"""
import warnings
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
def adjust_bbox(fig, format, bbox_inches):
"""
Temporarily adjust the figure so that only the specified area
(bbox_inches) is saved.
It modifies fig.bbox, fig.bbox_inches,
fig.transFigure._boxout, and fig.patch. While the figure size
changes, the scale of the original figure is conserved. A
function which restores the original values are returned.
"""
origBbox = fig.bbox
origBboxInches = fig.bbox_inches
_boxout = fig.transFigure._boxout
asp_list = []
locator_list = []
for ax in fig.axes:
pos = ax.get_position(original=False).frozen()
locator_list.append(ax.get_axes_locator())
asp_list.append(ax.get_aspect())
def _l(a, r, pos=pos):
return pos
ax.set_axes_locator(_l)
ax.set_aspect("auto")
def restore_bbox():
for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
ax.set_aspect(asp)
ax.set_axes_locator(loc)
fig.bbox = origBbox
fig.bbox_inches = origBboxInches
fig.transFigure._boxout = _boxout
fig.transFigure.invalidate()
fig.patch.set_bounds(0, 0, 1, 1)
adjust_bbox_handler = _adjust_bbox_handler_d.get(format)
if adjust_bbox_handler is not None:
adjust_bbox_handler(fig, bbox_inches)
return restore_bbox
else:
warnings.warn("bbox_inches option for %s backend is not "
"implemented yet." % (format))
return None
def adjust_bbox_png(fig, bbox_inches):
"""
adjust_bbox for png (Agg) format
"""
tr = fig.dpi_scale_trans
_bbox = TransformedBbox(bbox_inches,
tr)
x0, y0 = _bbox.x0, _bbox.y0
fig.bbox_inches = Bbox.from_bounds(0, 0,
bbox_inches.width,
bbox_inches.height)
x0, y0 = _bbox.x0, _bbox.y0
w1, h1 = fig.bbox.width, fig.bbox.height
fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0,
w1, h1)
fig.transFigure.invalidate()
fig.bbox = TransformedBbox(fig.bbox_inches, tr)
fig.patch.set_bounds(x0 / w1, y0 / h1,
fig.bbox.width / w1, fig.bbox.height / h1)
def adjust_bbox_pdf(fig, bbox_inches):
"""
adjust_bbox for pdf & eps format
"""
if fig._cachedRenderer.__class__.__name__ == "RendererPgf":
tr = Affine2D().scale(fig.dpi)
f = 1.
else:
tr = Affine2D().scale(72)
f = 72. / fig.dpi
_bbox = TransformedBbox(bbox_inches, tr)
fig.bbox_inches = Bbox.from_bounds(0, 0,
bbox_inches.width,
bbox_inches.height)
x0, y0 = _bbox.x0, _bbox.y0
w1, h1 = fig.bbox.width * f, fig.bbox.height * f
fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0,
w1, h1)
fig.transFigure.invalidate()
fig.bbox = TransformedBbox(fig.bbox_inches, tr)
fig.patch.set_bounds(x0 / w1, y0 / h1,
fig.bbox.width / w1, fig.bbox.height / h1)
def process_figure_for_rasterizing(figure,
bbox_inches_restore, mode):
"""
This need to be called when figure dpi changes during the drawing
(e.g., rasterizing). It recovers the bbox and re-adjust it with
the new dpi.
"""
bbox_inches, restore_bbox = bbox_inches_restore
restore_bbox()
r = adjust_bbox(figure, mode,
bbox_inches)
return bbox_inches, r
_adjust_bbox_handler_d = {}
for format in ["png", "raw", "rgba", "jpg", "jpeg", "tiff"]:
_adjust_bbox_handler_d[format] = adjust_bbox_png
for format in ["pdf", "eps", "svg", "svgz"]:
_adjust_bbox_handler_d[format] = adjust_bbox_pdf
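# Illustrative sketch of the intended call pattern: adjust_bbox() reshapes the
# figure so that only the requested area is saved and returns a function that
# undoes the change. Assumes the matplotlib version this module ships with;
# the backend, bounds and file name below are placeholders.
if __name__ == "__main__":
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    restore_bbox = adjust_bbox(fig, "png", Bbox.from_bounds(0, 0, 3, 2))
    fig.savefig("cropped.png")
    if restore_bbox is not None:
        restore_bbox()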
| 29.602941 | 69 | 0.594883 | [
"Unlicense"
] | mattl1598/Project-Mochachino | editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/tight_bbox.py | 4,026 | Python |
import logging
from authlib.common.urls import add_params_to_uri
from .base import BaseGrant, AuthorizationEndpointMixin
from ..errors import (
OAuth2Error,
UnauthorizedClientError,
AccessDeniedError,
)
log = logging.getLogger(__name__)
class ImplicitGrant(BaseGrant, AuthorizationEndpointMixin):
"""The implicit grant type is used to obtain access tokens (it does not
support the issuance of refresh tokens) and is optimized for public
clients known to operate a particular redirection URI. These clients
are typically implemented in a browser using a scripting language
such as JavaScript.
Since this is a redirection-based flow, the client must be capable of
interacting with the resource owner's user-agent (typically a web
browser) and capable of receiving incoming requests (via redirection)
from the authorization server.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and for an access token, the
client receives the access token as the result of the authorization
request.
The implicit grant type does not include client authentication, and
relies on the presence of the resource owner and the registration of
the redirection URI. Because the access token is encoded into the
redirection URI, it may be exposed to the resource owner and other
applications residing on the same device::
+----------+
| Resource |
| Owner |
| |
+----------+
^
|
(B)
+----|-----+ Client Identifier +---------------+
| -+----(A)-- & Redirection URI --->| |
| User- | | Authorization |
| Agent -|----(B)-- User authenticates -->| Server |
| | | |
| |<---(C)--- Redirection URI ----<| |
| | with Access Token +---------------+
| | in Fragment
| | +---------------+
| |----(D)--- Redirection URI ---->| Web-Hosted |
| | without Fragment | Client |
| | | Resource |
| (F) |<---(E)------- Script ---------<| |
| | +---------------+
+-|--------+
| |
(A) (G) Access Token
| |
^ v
+---------+
| |
| Client |
| |
+---------+
"""
#: authorization_code grant type has authorization endpoint
AUTHORIZATION_ENDPOINT = True
#: Allowed client auth methods for token endpoint
TOKEN_ENDPOINT_AUTH_METHODS = ['none']
RESPONSE_TYPES = {'token'}
GRANT_TYPE = 'implicit'
ERROR_RESPONSE_FRAGMENT = True
def validate_authorization_request(self):
"""The client constructs the request URI by adding the following
parameters to the query component of the authorization endpoint URI
using the "application/x-www-form-urlencoded" format.
Per `Section 4.2.1`_.
response_type
REQUIRED. Value MUST be set to "token".
client_id
REQUIRED. The client identifier as described in Section 2.2.
redirect_uri
OPTIONAL. As described in Section 3.1.2.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in Section 10.12.
The client directs the resource owner to the constructed URI using an
HTTP redirection response, or by other means available to it via the
user-agent.
For example, the client directs the user-agent to make the following
HTTP request using TLS:
.. code-block:: http
GET /authorize?response_type=token&client_id=s6BhdRkqt3&state=xyz
&redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
Host: server.example.com
.. _`Section 4.2.1`: https://tools.ietf.org/html/rfc6749#section-4.2.1
"""
# ignore validate for response_type, since it is validated by
# check_authorization_endpoint
# The implicit grant type is optimized for public clients
client = self.authenticate_token_endpoint_client()
log.debug('Validate authorization request of %r', client)
redirect_uri = self.validate_authorization_redirect_uri(
self.request, client)
response_type = self.request.response_type
if not client.check_response_type(response_type):
raise UnauthorizedClientError(
'The client is not authorized to use '
'"response_type={}"'.format(response_type),
state=self.request.state,
redirect_uri=redirect_uri,
redirect_fragment=True,
)
try:
self.request.client = client
self.validate_requested_scope()
self.execute_hook('after_validate_authorization_request')
except OAuth2Error as error:
error.redirect_uri = redirect_uri
error.redirect_fragment = True
raise error
return redirect_uri
def create_authorization_response(self, redirect_uri, grant_user):
"""If the resource owner grants the access request, the authorization
server issues an access token and delivers it to the client by adding
the following parameters to the fragment component of the redirection
URI using the "application/x-www-form-urlencoded" format.
Per `Section 4.2.2`_.
access_token
REQUIRED. The access token issued by the authorization server.
token_type
REQUIRED. The type of the token issued as described in
Section 7.1. Value is case insensitive.
expires_in
RECOMMENDED. The lifetime in seconds of the access token. For
example, the value "3600" denotes that the access token will
expire in one hour from the time the response was generated.
If omitted, the authorization server SHOULD provide the
expiration time via other means or document the default value.
scope
OPTIONAL, if identical to the scope requested by the client;
otherwise, REQUIRED. The scope of the access token as
described by Section 3.3.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
The authorization server MUST NOT issue a refresh token.
For example, the authorization server redirects the user-agent by
sending the following HTTP response:
.. code-block:: http
HTTP/1.1 302 Found
Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
&state=xyz&token_type=example&expires_in=3600
Developers should note that some user-agents do not support the
inclusion of a fragment component in the HTTP "Location" response
header field. Such clients will require using other methods for
redirecting the client than a 3xx redirection response -- for
example, returning an HTML page that includes a 'continue' button
with an action linked to the redirection URI.
.. _`Section 4.2.2`: https://tools.ietf.org/html/rfc6749#section-4.2.2
:param redirect_uri: Redirect to the given URI for the authorization
:param grant_user: if resource owner granted the request, pass this
resource owner, otherwise pass None.
:returns: (status_code, body, headers)
"""
state = self.request.state
if grant_user:
self.request.user = grant_user
client = self.request.client
token = self.generate_token(
client, self.GRANT_TYPE,
user=grant_user,
scope=client.get_allowed_scope(self.request.scope),
include_refresh_token=False
)
log.debug('Grant token %r to %r', token, client)
self.save_token(token)
self.execute_hook('process_token', token=token)
params = [(k, token[k]) for k in token]
if state:
params.append(('state', state))
uri = add_params_to_uri(redirect_uri, params, fragment=True)
headers = [('Location', uri)]
return 302, '', headers
else:
raise AccessDeniedError(
state=state,
redirect_uri=redirect_uri,
redirect_fragment=True
)
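# Sketch of where this grant plugs in: it is registered on an authorization
# server rather than instantiated directly. The Flask integration import and
# constructor arguments are assumptions about the installed Authlib version,
# and the query_client/save_token stubs are placeholders.
if __name__ == "__main__":  # pragma: no cover
    from flask import Flask
    from authlib.integrations.flask_oauth2 import AuthorizationServer

    app = Flask(__name__)
    server = AuthorizationServer(app,
                                 query_client=lambda client_id: None,
                                 save_token=lambda token, request: None)
    server.register_grant(ImplicitGrant)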
| 40.478448 | 79 | 0.589607 | [
"BSD-3-Clause"
] | 2tunnels/authlib | authlib/oauth2/rfc6749/grants/implicit.py | 9,391 | Python |
import pandas as pd
import numpy as np
def load_from_tsfile_to_dataframe(full_file_path_and_name, replace_missing_vals_with='NaN'):
data_started = False
instance_list = []
class_val_list = []
has_time_stamps = False
has_class_labels = False
uses_tuples = False
is_first_case = True
with open(full_file_path_and_name, 'r') as f:
for line in f:
if line.strip():
if "@timestamps" in line.lower():
if "true" in line.lower():
has_time_stamps = True
raise Exception("Not suppoorted yet") # we don't have any data formatted to test with yet
elif "false" in line.lower():
has_time_stamps = False
else:
raise Exception("invalid timestamp argument")
if "@classlabel" in line.lower():
if "true" in line:
has_class_labels = True
elif "false" in line:
has_class_labels = False
else:
raise Exception("invalid classLabel argument")
if "@data" in line.lower():
data_started = True
continue
                # if the '@data' tag has been found, the header information has been cleared and now data can be loaded
if data_started:
line = line.replace("?", replace_missing_vals_with)
dimensions = line.split(":")
# perhaps not the best way to do this, but on the first row, initialise stored depending on the
# number of dimensions that are present and determine whether data is stored in a list or tuples
if is_first_case:
num_dimensions = len(dimensions)
if has_class_labels:
num_dimensions -= 1
is_first_case = False
for dim in range(0, num_dimensions):
instance_list.append([])
if dimensions[0].startswith("("):
uses_tuples = True
this_num_dimensions = len(dimensions)
if has_class_labels:
this_num_dimensions -= 1
# assuming all dimensions are included for all series, even if they are empty. If this is not true
# it could lead to confusing dimension indices (e.g. if a case only has dimensions 0 and 2 in the
# file, dimension 1 should be represented, even if empty, to make sure 2 doesn't get labelled as 1)
if this_num_dimensions != num_dimensions:
raise Exception("inconsistent number of dimensions")
# go through each dimension that is represented in the file
for dim in range(0, num_dimensions):
# handle whether tuples or list here
if uses_tuples:
without_brackets = dimensions[dim].replace("(", "").replace(")", "").split(",")
without_brackets = [float(i) for i in without_brackets]
indices = []
data = []
i = 0
while i < len(without_brackets):
indices.append(int(without_brackets[i]))
data.append(without_brackets[i + 1])
i += 2
instance_list[dim].append(pd.Series(data, indices))
else:
# if the data is expressed in list form, just read into a pandas.Series
data_series = dimensions[dim].split(",")
data_series = [float(i) for i in data_series]
instance_list[dim].append(pd.Series(data_series))
if has_class_labels:
class_val_list.append(dimensions[num_dimensions].strip())
# note: creating a pandas.DataFrame here, NOT an xpandas.xdataframe
x_data = pd.DataFrame(dtype=np.float32)
for dim in range(0, num_dimensions):
x_data['dim_' + str(dim)] = instance_list[dim]
if has_class_labels:
return x_data, np.asarray(class_val_list)
#
# # otherwise just return an XDataFrame
return x_data | 44.825243 | 119 | 0.509638 | [
"BSD-3-Clause"
] | TonyBagnall/boss_fork | sktime/utils/load_data.py | 4,617 | Python |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:14:19 2020
@author: Mitchell
model_training.py
~~~~~~~~~~~~~~~~~
This file serves as a script for building and training our VAE model. To do
so we used the VAE and DataSequence classes defined in the file `VAE.py`, as
well as helper functions from the file `dataset_utils` for loading and parsing
our datasets.
The user has the ability to specify several parameters that control the
loading of our data, the structure of our model, as well as the training plan
for our model. After training is complete the script also plots metrics tracked
during training and saves the final model.
"""
# Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from dataset_utils import load_training, load_validation
from VAE import VAE, DataSequence
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os, time, json
### Load Data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Parameters for shape of dataset (note these are also used for model def. and
# training.)
measures = 8
measure_len = 96
# training
training_foldername = '../../nesmdb24_seprsco/train/'
train_save_filename = 'transformed_dataset.json'
dataset , labels2int_map , int2labels_map = \
load_training(training_foldername, train_save_filename,
measures = measures, measure_len = measure_len)
# validation
validation_foldername = '../../nesmdb24_seprsco/valid/'
val_save_filename = 'transformed_val_dataset.json'
val_dataset = load_validation(validation_foldername,\
labels2int_map, val_save_filename,
measures = measures, measure_len = measure_len)
### Build Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Model Parameters
latent_dim = 124
input_dims = [mapping.shape[0]-1 for mapping in int2labels_map]
dropout = .1
maxnorm = None
vae_b1 , vae_b2 = .02 , .1
# Build Model
model = VAE(latent_dim, input_dims, measures, measure_len, dropout,
maxnorm, vae_b1 , vae_b2)
model.build([tf.TensorShape([None, measures, measure_len, input_dims[i]])
for i in range(4)])
model.summary()
### Train Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Training Parameters
batch_size = 100
epochs = 10
# Cost Function
cost_function = model.vae_loss
# Learning_rate schedule
lr_0 = .001
decay_rate = .998
lr_decay = lambda t: lr_0 * decay_rate**t
lr_schedule = tf.keras.callbacks.LearningRateScheduler(lr_decay)
# Optimizer
optimizer = tf.keras.optimizers.Adam()
# Define callbacks
callbacks = [lr_schedule]
# Keras Sequences for Datasets (need to use since one-hot datasets too
# large for storing in memory)
training_seq = DataSequence(dataset, int2labels_map, batch_size)
validation_seq = DataSequence(val_dataset, int2labels_map, batch_size)
# Compile Model
model.compile(optimizer = optimizer,
loss = cost_function)
# Train model
tic = time.perf_counter()
history = model.fit_generator(generator = training_seq,
epochs = epochs)
toc = time.perf_counter()
print(f"Trained Model in {(toc - tic)/60:0.1f} minutes")
### Plot Training Metrics
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
training_loss = history.history['loss']
# Total Loss
plt.figure(1)
plt.plot(training_loss, 'b', label='Training')
plt.title('Loss vs Time')
plt.xlabel('Training Epoch')
plt.ylabel('Avg. Total Loss')
plt.legend()
plt.show()
### Save Model and History
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Save Model Weights
save_model = False
if save_model:
checkpoint_dir = '.\\training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "model_ckpt")
model.save_weights(checkpoint_prefix)
print('Model weights saved to files: '+checkpoint_prefix+'.*')
# Save Training History
save_history = False
if save_history:
checkpoint_dir = '.\\training_checkpoints'
history_filename = os.path.join(checkpoint_dir, "training_history.json")
with open(history_filename, 'w') as f:
json.dump({
key:[float(value) for value in history.history[key]]
for key in history.history
}, f)
print('Training history saved to file: '+ history_filename)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------END FILE------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 31.748299 | 79 | 0.611099 | [
"MIT"
] | youngmg1995/NES-Music-Maker | VAE/full_model/model_training.py | 4,667 | Python |
from rlpyt.utils.launching.affinity import encode_affinity
from rlpyt.utils.launching.exp_launcher import run_experiments
from rlpyt.utils.launching.variant import make_variants, VariantLevel
script = "rlpyt/experiments/scripts/atari/pg/train/atari_ff_a2c_gpu_multi.py"
affinity_code = encode_affinity(
n_cpu_cores=16,
n_gpu=8,
hyperthread_offset=24,
n_socket=2,
contexts_per_run=2,
# cpu_per_run=2,
)
runs_per_setting = 1
experiment_title = "atari_ff_a2c_multi"
variant_levels = list()
games = ["pong", "seaquest", "qbert", "chopper_command"]
values = list(zip(games))
dir_names = ["{}".format(*v) for v in values]
keys = [("env", "game")]
variant_levels.append(VariantLevel(keys, values, dir_names))
variants, log_dirs = make_variants(*variant_levels)
default_config_key = "0"
run_experiments(
script=script,
affinity_code=affinity_code,
experiment_title=experiment_title,
runs_per_setting=runs_per_setting,
variants=variants,
log_dirs=log_dirs,
common_args=(default_config_key,),
)
| 27.447368 | 77 | 0.760307 | [
"MIT"
] | DilipA/rlpyt | rlpyt/experiments/scripts/atari/pg/launch/pabti/launch_atari_ff_a2c_gpu_multi.py | 1,043 | Python |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log
from nova import exception
from nova import utils
from novadocker.i18n import _
LOG = log.getLogger(__name__)
def teardown_network(container_id):
if os.name == 'nt':
return
try:
output, err = utils.execute('ip', '-o', 'netns', 'list')
for line in output.split('\n'):
if container_id == line.strip():
utils.execute('ip', 'netns', 'delete', container_id,
run_as_root=True)
break
except processutils.ProcessExecutionError:
LOG.warning(_('Cannot remove network namespace, netns id: %s'),
container_id)
def find_fixed_ip(instance, network_info):
for subnet in network_info['subnets']:
netmask = subnet['cidr'].split('/')[1]
for ip in subnet['ips']:
if ip['type'] == 'fixed' and ip['address']:
return ip['address'] + "/" + netmask
raise exception.InstanceDeployFailure(_('Cannot find fixed ip'),
instance_id=instance['uuid'])
def find_gateway(instance, network_info):
for subnet in network_info['subnets']:
return subnet['gateway']['address']
raise exception.InstanceDeployFailure(_('Cannot find gateway'),
instance_id=instance['uuid'])
# NOTE(arosen) - this method should be removed after it's moved into the
# linux_net code in nova.
def get_ovs_interfaceid(vif):
return vif.get('ovs_interfaceid') or vif['id']
| 33.742424 | 78 | 0.643018 | [
"Apache-2.0"
] | Juniper/nova-docker | novadocker/virt/docker/network.py | 2,227 | Python |
import unittest
from tests.test_utils import get_sample_pdf_with_labels, get_sample_pdf, get_sample_sdf, get_sample_pdf_with_extra_cols, get_sample_pdf_with_no_text_col ,get_sample_spark_dataframe
from nlu import *
class TestSarcasm(unittest.TestCase):
def test_sarcasm_model(self):
pipe = nlu.load('sarcasm',verbose=True)
        df = pipe.predict(['I love pancakes. I hate Mondays', 'I love Fridays'], output_level='sentence')
print(df.columns)
print(df['sentence'], df[['sarcasm','sarcasm_confidence']])
        df = pipe.predict(['I love pancakes. I hate Mondays', 'I love Fridays'], output_level='document')
self.assertIsInstance(df.iloc[0]['sarcasm'],str )
print(df.columns)
print(df['document'], df[['sarcasm','sarcasm_confidence']])
self.assertIsInstance(df.iloc[0]['sarcasm'], str)
#
# def test_sarcasm_model_bench(self):
# # Get dataset "
# # todo test light pipe for 50k+
# # ! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sarcasm/train-balanced-sarcasm.csv
# # path = '/home/loan/Documents/freelancework/jsl/nlu/nlu_git/tests/datasets/train-balanced-sarcasm.csv'
# path = '/home/loan/Documents/freelancework/jsl/nlu/4realnlugit/tests/datasets/Musical_instruments_reviews.csv'
# sarcasm_df = pd.read_csv(path)
# # sarcasm_df['text'] = sarcasm_df['comment']
# # print(len(sarcasm_df))
# # max 50k , 60K dead
# # count = int(len(sarcasm_df)/15)
# # count = 50100
# # print('using ', count,' Rows')
# print(sarcasm_df.columns)
# #setting meta to true will output scores for keywords. Lower scores are better
# # Sentiment confidence is 2 because it sums the confidences of multiple sentences
# # df = nlu.load('en.classify.sarcasm',verbose=True).predict(sarcasm_df['reviewText'].iloc[0:100])
# df = nlu.load('bert',verbose=True).predict(sarcasm_df['reviewText'].iloc[0:100])
#
# # df = nlu.load('en.classify.sarcasm',verbose=True).predict('How are you today')
#
# # df = nlu.load('en.classify.sarcasm',verbose=True).predict(sarcasm_df['text'])
#
# print(df.columns)
# print(df['bert_embeddings'])
if __name__ == '__main__':
unittest.main()
| 45.980392 | 180 | 0.658422 | [
"Apache-2.0"
] | UPbook-innovations/nlu | tests/nlu_core_tests/component_tests/classifier_tests/sarcasm_tests.py | 2,345 | Python |
# Generated by Django 2.2.6 on 2019-10-22 15:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('project_core', '0050_added_organisationuid_modl'),
]
operations = [
migrations.RemoveField(
model_name='country',
name='date_created',
),
migrations.AddField(
model_name='country',
name='created_by',
field=models.ForeignKey(blank=True, help_text='User by which the entry was created', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_country_created_by_related', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='country',
name='created_on',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Date and time at which the entry was created'),
preserve_default=False,
),
migrations.AddField(
model_name='country',
name='modified_by',
field=models.ForeignKey(blank=True, help_text='User by which the entry was modified', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='project_core_country_modified_by_related', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='country',
name='modified_on',
field=models.DateTimeField(auto_now=True, help_text='Date and time at which the entry was modified', null=True),
),
]
| 40.302326 | 240 | 0.668782 | [
"MIT"
] | Swiss-Polar-Institute/project-application | ProjectApplication/project_core/migrations/0051_createmodify_country.py | 1,733 | Python |
def combination(n, r):
"""
:param n: the count of different items
    :param r: the number of items to select
    :return: the number of combinations,
n! / (r! * (n - r)!)
"""
r = min(n - r, r)
result = 1
for i in range(n, n - r, -1):
result *= i
for i in range(1, r + 1):
result //= i
return result
def comb2():
# from scipy.misc import comb
pass
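# Quick sanity check: C(5, 2) should equal 10, i.e. 5! / (2! * 3!).
if __name__ == '__main__':
    print(combination(5, 2))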
| 19.25 | 42 | 0.503896 | [
"MIT"
] | ta7uw/atcoder | lib/python-lib/combination.py | 385 | Python |
#!/usr/bin/env python
import sys
import numpy as np
from pandas import *
from sklearn.metrics import mean_absolute_error
from djeval import *
n_estimators = 200
n_jobs = -1
msg("Hi, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
msg("Getting subset ready.")
train = yy_df[yy_df.elo.notnull()]
for blend in np.arange(0, 1.01, 0.1):
blended_prediction = (blend * train['ols_prediction']) + ((1.0 - blend) * train['rfr_prediction'])
blended_score = mean_absolute_error(blended_prediction, train['elo'])
print blend, blended_score
| 23.826087 | 102 | 0.722628 | [
"MIT"
] | Keesiu/meta-kaggle | data/external/repositories/137656/blundercheck-master/combine/20150301a/modeling/blend_models.py | 548 | Python |
import os
import json
import time
import datetime
import manageMonitoredUsersDB
pathToJSON = os.getcwd() + '/generatedJSON'
def get_local_json_timestamp_epoch(username, filename):
monitoredJSON = None
try:
monitoredJSON = json.load(open(pathToJSON + os.sep + filename, "r+"))
except:
with open(os.getcwd() + '/logs/fail_to_get_local_epoch', "a") as fileText:
fileText.write("The JSON fail to read is " + pathToJSON + os.sep + filename + " at " + str(datetime.datetime.now()) + "\n")
fileText.close()
if monitoredJSON == None:
return None
user_info = monitoredJSON["user_info"]
json_timestamp_epoch = user_info["json_timestamp_epoch"]
json_timestamp_epoch = float(json_timestamp_epoch) #Epoch LOCAL
return json_timestamp_epoch
def get_remote_json_timestamp_epoch(username):
user_infoRemote = None
monitoredUserSelected = manageMonitoredUsersDB.get_monitoredUserByName(username)
temp = monitoredUserSelected[2]
temp = temp.replace("'", "\"")
temp = temp.replace("True", "true")
temp = temp.replace("False", "false")
temp = json.loads(temp)
for key in temp.keys():
if key == "user_info":
user_infoRemote = temp[key]
if user_infoRemote != None:
json_timestamp_epochRemote = user_infoRemote["json_timestamp_epoch"]
        return float(json_timestamp_epochRemote) # Remote epoch, the one stored in monitoredUser.db
else:
print("\n" + "\033[91m" + "ERROR: No se ha podido obtener user_info en remoto, monitoredUser.db" + "\033[0m" + "\n")
with open(os.getcwd() + '/logs/fail_to_get_remote_epoch', "a") as fileText:
fileText.write("The username fail to read is " + username + " at " + str(datetime.datetime.now()) + "\n")
fileText.close()
def checkArrivedJSON():
for filename in sorted(os.listdir(pathToJSON)):
if filename.endswith(".json"):
username = filename.strip(".json")
            # Get the epoch of the local JSON
json_timestamp_epoch = get_local_json_timestamp_epoch(username, filename)
if json_timestamp_epoch == None:
continue
            # Get the epoch of the remote JSON, stored in monitoredUser.db
json_timestamp_epochRemote = get_remote_json_timestamp_epoch(username)
            # Compare the elapsed time between local and remote
#print("\033[92m" + "json_timestamp_epoch: " + str(json_timestamp_epoch) + "\033[0m" + "\n")
#print("\033[92m" + "json_timestamp_epochRemote: " + str(json_timestamp_epochRemote) + "\033[0m" + "\n")
if json_timestamp_epoch > json_timestamp_epochRemote:
monitoredJSON = json.load(open(pathToJSON + os.sep + filename, "r+"))
monitoredJSON = str(monitoredJSON)
manageMonitoredUsersDB.update_monitoredUserByName(username, monitoredJSON)
#MAIN
veces = 0
while True:
checkArrivedJSON()
time.sleep(1)
if veces >= 10:
print("Checking new user activities...\n")
veces = 0
veces += 1 | 43.676056 | 135 | 0.659787 | [
"Apache-2.0"
] | lmagellanic-cloud/phishers-monitor | TFG/checkEpochAndUpdateJSON.py | 3,104 | Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
from starter_code.modules.networks import MLP, MinigridCNN
from mnist.embedded_mnist import MNIST_CNN
class SimpleValueFn(nn.Module):
def __init__(self, state_dim, hdim):
super(SimpleValueFn, self).__init__()
self.value_net = MLP(dims=[state_dim, *hdim, 1])
def forward(self, state):
state_values = self.value_net(state)
return state_values
class CNNValueFn(nn.Module):
def __init__(self, state_dim):
super(CNNValueFn, self).__init__()
self.state_dim = state_dim
if self.state_dim == (1, 64, 64):
self.encoder = MNIST_CNN(1)
self.decoder = lambda x: x
elif self.state_dim == (7, 7, 3):
self.encoder = MinigridCNN(*state_dim[:-1])
self.decoder = nn.Linear(self.encoder.image_embedding_size, 1)
else:
assert False
def forward(self, state):
state_values = self.decoder(self.encoder(state))
return state_values | 32.5625 | 74 | 0.650672 | [
"MIT"
] | mbchang/decentralized-rl | starter_code/modules/value_function.py | 1,042 | Python |
"""CD SEM structures."""
from functools import partial
from typing import Optional, Tuple
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.text_rectangular import text_rectangular
from gdsfactory.cross_section import strip
from gdsfactory.grid import grid
from gdsfactory.types import ComponentFactory, CrossSectionFactory
text_rectangular_mini = partial(text_rectangular, size=1)
LINE_LENGTH = 420.0
@cell
def cdsem_straight(
widths: Tuple[float, ...] = (0.4, 0.45, 0.5, 0.6, 0.8, 1.0),
length: float = LINE_LENGTH,
cross_section: CrossSectionFactory = strip,
text: Optional[ComponentFactory] = text_rectangular_mini,
spacing: float = 3,
) -> Component:
"""Returns straight waveguide lines width sweep.
Args:
widths: for the sweep
length: for the line
cross_section: for the lines
text: optional text for labels
spacing: edge to edge spacing
"""
lines = []
for width in widths:
cross_section = partial(cross_section, width=width)
line = straight_function(length=length, cross_section=cross_section)
if text:
line = line.copy()
t = line << text(str(int(width * 1e3)))
t.xmin = line.xmax + 5
t.y = 0
lines.append(line)
return grid(lines, spacing=(0, spacing))
if __name__ == "__main__":
c = cdsem_straight()
c.show()
| 28.773585 | 76 | 0.685902 | [
"MIT"
] | gdsfactory/gdsfactory | gdsfactory/components/cdsem_straight.py | 1,525 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from ....unittest import TestCase
from oauthlib.oauth2.rfc6749 import BaseEndpoint, catch_errors_and_unavailability
from oauthlib.oauth2 import Server, RequestValidator, FatalClientError, OAuth2Error
class BaseEndpointTest(TestCase):
def test_default_config(self):
endpoint = BaseEndpoint()
self.assertFalse(endpoint.catch_errors)
self.assertTrue(endpoint.available)
endpoint.catch_errors = True
self.assertTrue(endpoint.catch_errors)
endpoint.available = False
self.assertFalse(endpoint.available)
def test_error_catching(self):
validator = RequestValidator()
server = Server(validator)
server.catch_errors = True
h, b, s = server.create_authorization_response('https://example.com')
self.assertIn("server_error", b)
self.assertEqual(s, 500)
def test_unavailability(self):
validator = RequestValidator()
server = Server(validator)
server.available = False
h, b, s = server.create_authorization_response('https://example.com')
self.assertIn("temporarily_unavailable", b)
self.assertEqual(s, 503)
def test_wrapper(self):
class TestServer(Server):
@catch_errors_and_unavailability
def throw_error(self, uri):
raise ValueError()
@catch_errors_and_unavailability
def throw_oauth_error(self, uri):
raise OAuth2Error()
@catch_errors_and_unavailability
def throw_fatal_oauth_error(self, uri):
raise FatalClientError()
validator = RequestValidator()
server = TestServer(validator)
server.catch_errors = True
h, b, s = server.throw_error('a')
self.assertIn("server_error", b)
self.assertEqual(s, 500)
server.available = False
h, b, s = server.throw_error('a')
self.assertIn("temporarily_unavailable", b)
self.assertEqual(s, 503)
server.available = True
self.assertRaises(OAuth2Error, server.throw_oauth_error, 'a')
self.assertRaises(FatalClientError, server.throw_fatal_oauth_error, 'a')
server.catch_errors = False
self.assertRaises(OAuth2Error, server.throw_oauth_error, 'a')
self.assertRaises(FatalClientError, server.throw_fatal_oauth_error, 'a')
| 34.676056 | 83 | 0.668156 | [
"BSD-3-Clause"
] | Blitzen/oauthlib | tests/oauth2/rfc6749/endpoints/test_base_endpoint.py | 2,462 | Python |
from .protocol import *
import serial
import time
from threading import Lock
from PIL import Image, ImageDraw, ImageOps, ImageFont
class Device:
ser = None
inbuffer = ""
awaitingResponseLock = Lock()
testmode = False
nLeds = 0
dispW = 0
dispH = 0
rotFactor = 0
rotCircleSteps = 0
bannerHeight = 12 #Defines the height of top and bottom banner
imageBuffer = []
callbacks = {} #This object stores callback functions that react directly to a keypress reported via serial
ledState = None #Current LED status, so we can animate them over time
ledTime = None #Last time LEDs were set
debug = True
def connect(self, dev):
print("Connecting to ", dev, ".")
self.ser = serial.Serial(dev, 115200, timeout=1)
if not self.requestInfo(3):
self.disconnect()
return False
if self.testmode:
print("Connection to ", self.ser.name, " was successfull, but the device is running the hardware test firmware, which cannot be used for anything but testing. Please flash the proper inkkeys firmware to use it.")
return False
print("Connected to ", self.ser.name, ".")
return True
def disconnect(self):
if self.ser != None:
self.ser.close()
self.ser = None
def sendToDevice(self, command):
if self.debug:
print("Sending: " + command)
self.ser.write((command + "\n").encode())
def sendBinaryToDevice(self, data):
if self.debug:
print("Sending " + str(len(data)) + " bytes of binary data.")
self.ser.write(data)
def readFromDevice(self):
if self.ser.in_waiting > 0:
self.inbuffer += self.ser.read(self.ser.in_waiting).decode().replace("\r", "")
chunks = self.inbuffer.split("\n", 1)
if len(chunks) > 1:
cmd = chunks[0]
self.inbuffer = chunks[1]
if self.debug:
print("Received: " + cmd)
return cmd
return None
def poll(self):
with self.awaitingResponseLock:
input = self.readFromDevice()
if input != None:
if input[0] == KeyCode.JOG.value and (input[1:].isdecimal() or (input[1] == '-' and input[2:].isdecimal())):
if KeyCode.JOG.value in self.callbacks:
self.callbacks[KeyCode.JOG.value](int(input[1:]))
elif input in self.callbacks:
self.callbacks[input]()
def registerCallback(self, cb, key):
self.callbacks[key.value] = cb
def clearCallback(self, key):
if key.value in self.callbacks:
del self.callbacks[key.value]
def clearCallbacks(self):
self.callbacks = {}
def assignKey(self, key, sequence):
self.sendToDevice(CommandCode.ASSIGN.value + " " + key.value + (" " + " ".join(sequence) if len(sequence) > 0 else ""))
def sendLed(self, colors):
self.sendToDevice(CommandCode.LED.value + " " + " ".join(colors))
def requestInfo(self, timeout):
with self.awaitingResponseLock:
print("Requesting device info...")
start = time.time()
self.sendToDevice(CommandCode.INFO.value)
line = self.readFromDevice()
while line != "Inkkeys":
if time.time() - start > timeout:
return False
if line == None:
time.sleep(0.1)
line = self.readFromDevice()
continue
print("Skipping: ", line)
line = self.readFromDevice()
print("Header found. Waiting for infos...")
line = self.readFromDevice()
while line != "Done":
if time.time() - start > timeout:
return False
if line == None:
time.sleep(0.1)
line = self.readFromDevice()
continue
if line.startswith("TEST "):
self.testmode = line[5] != "0"
elif line.startswith("N_LED "):
self.nLeds = int(line[6:])
elif line.startswith("DISP_W "):
self.dispW = int(line[7:])
elif line.startswith("DISP_H "):
self.dispH = int(line[7:])
elif line.startswith("ROT_CIRCLE_STEPS "):
self.rotCircleSteps = int(line[17:])
else:
print("Skipping: ", line)
line = self.readFromDevice()
print("End of info received.")
print("Testmode: ", self.testmode)
print("Number of LEDs: ", self.nLeds)
print("Display width: ", self.dispW)
print("Display height: ", self.dispH)
print("Rotation circle steps: ", self.rotCircleSteps)
return True
def sendImage(self, x, y, image):
self.imageBuffer.append({"x": x, "y": y, "image": image.copy()})
w, h = image.size
data = image.convert("1").rotate(180).tobytes()
self.sendToDevice(CommandCode.DISPLAY.value + " " + str(x) + " " + str(y) + " " + str(w) + " " + str(h))
self.sendBinaryToDevice(data)
return True
def resendImageData(self):
for part in self.imageBuffer:
image = part["image"]
x = part["x"]
y = part["y"]
w, h = image.size
data = image.convert("1").rotate(180).tobytes()
self.sendToDevice(CommandCode.DISPLAY.value + " " + str(x) + " " + str(y) + " " + str(w) + " " + str(h))
self.sendBinaryToDevice(data)
self.imageBuffer = []
def updateDisplay(self, fullRefresh=False, timeout=5):
with self.awaitingResponseLock:
start = time.time()
self.sendToDevice(CommandCode.REFRESH.value + " " + (RefreshTypeCode.FULL.value if fullRefresh else RefreshTypeCode.PARTIAL.value))
line = self.readFromDevice()
while line != "ok":
if time.time() - start > timeout:
return False
if line == None:
time.sleep(0.1)
line = self.readFromDevice()
continue
line = self.readFromDevice()
self.resendImageData()
self.sendToDevice(CommandCode.REFRESH.value + " " + RefreshTypeCode.OFF.value)
line = self.readFromDevice()
while line != "ok":
if time.time() - start > timeout:
return False
if line == None:
time.sleep(0.1)
line = self.readFromDevice()
continue
line = self.readFromDevice()
def getAreaFor(self, function):
if function == "title":
return (0, self.dispH-self.bannerHeight, self.dispW, self.bannerHeight)
elif function == 1:
return (0, 0, self.dispW, self.bannerHeight)
elif function <= 5:
return (self.dispW//2, (5-function)*self.dispH//4+self.bannerHeight, self.dispW//2, self.dispH//4-2*self.bannerHeight)
else:
return (0, (9-function)*self.dispH//4+self.bannerHeight, self.dispW//2, self.dispH//4-2*self.bannerHeight)
def sendImageFor(self, function, image):
x, y, w, h = self.getAreaFor(function)
if (w, h) != image.size:
if self.debug:
print("Rescaling image from " + str(image.size) + " to " + str((w, h)) + ".")
image = image.resize((w, h))
self.sendImage(x, y, image)
def sendTextFor(self, function, text=""):
self.sendToDevice("T "+str(function)+" "+str(text))
def sendIconFor(self, function, icon, inverted=False, centered=True, marked=False, crossed=False):
x, y, w, h = self.getAreaFor(function)
img = Image.new("1", (w, h), color=(0 if inverted else 1))
imgIcon = Image.open(icon).convert("RGB")
if inverted:
imgIcon = ImageOps.invert(imgIcon)
wi, hi = imgIcon.size
if function < 6:
pos = ((w-wi)//2 if centered else 0, (h - hi)//2)
else:
pos = ((w-wi)//2 if centered else (w - wi), (h - hi)//2)
img.paste(imgIcon, pos)
if marked:
imgMarker = Image.open("icons/chevron-compact-right.png" if function < 6 else "icons/chevron-compact-left.png")
wm, hm = imgMarker.size
img.paste(imgMarker, (-16,(h - hm)//2) if function < 6 else (w-wm+16,(h - hm)//2), mask=ImageOps.invert(imgMarker.convert("RGB")).convert("1"))
if crossed:
d = ImageDraw.Draw(img)
d.line([pos[0]+5, pos[1]+5, pos[0]+wi-5, pos[1]+hi-5], width=3)
d.line([pos[0]+5, pos[1]+hi-5, pos[0]+wi-5, pos[1]+5], width=3)
self.sendImage(x, y, img)
def setLeds(self, leds):
ledStr = ['{:06x}'.format(i) for i in leds]
self.ledTime = time.time()
self.ledState = leds
self.sendLed(ledStr)
def setKeyLedFor(self, led, color):
self.sendToDevice(CommandCode.KEYLED.value + " " + str(led)+ " " + str(color))
def fadeLeds(self):
if self.ledState == None:
return
p = (3.5 - (time.time() - self.ledTime))/0.5 #Stay on for 3 seconds and then fade out over 0.5 seconds
if p >= 1:
return
if p <= 0:
self.ledState = None
self.sendLed(["000000" for i in range(self.nLeds)])
return
dimmedLeds = [(int((i & 0xff0000) * p) & 0xff0000) | (int((i & 0xff00) * p) & 0xff00) | (int((i & 0xff) * p) & 0xff) for i in self.ledState]
ledStr = ['{:06x}'.format(i) for i in dimmedLeds]
self.sendLed(ledStr)
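# Hardware-in-the-loop sketch: assumes an inkkeys device is attached and that
# the serial port name below is a placeholder for the real one. Run as a
# module (e.g. ``python -m inkkeys.device``) so the relative import of
# .protocol resolves.
if __name__ == "__main__":
    dev = Device()
    if dev.connect("/dev/ttyACM0"):
        dev.sendTextFor(1, "Hello from the example")
        dev.updateDisplay()
        dev.disconnect()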
| 38.753906 | 224 | 0.535934 | [
"MIT"
] | lubeda/Inkkey-SW-for-Macro-Keypad-Pro | inkkeys/device.py | 9,921 | Python |
# -*- coding: utf-8 -*-
# Copyright 2018 Joshua Bronson. All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Provides bidict duplication policies and the :class:`_OnDup` class."""
from collections import namedtuple
from ._marker import _Marker
_OnDup = namedtuple('_OnDup', 'key val kv')
class DuplicationPolicy(_Marker):
"""Base class for bidict's duplication policies.
*See also* :ref:`basic-usage:Values Must Be Unique`
"""
__slots__ = ()
#: Raise an exception when a duplication is encountered.
RAISE = DuplicationPolicy('DUP_POLICY.RAISE')
#: Overwrite an existing item when a duplication is encountered.
OVERWRITE = DuplicationPolicy('DUP_POLICY.OVERWRITE')
#: Keep the existing item and ignore the new item when a duplication is encountered.
IGNORE = DuplicationPolicy('DUP_POLICY.IGNORE')
| 27.054054 | 84 | 0.736264 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | gaborbernat/bidict | bidict/_dup.py | 1,001 | Python |
#!/usr/bin/env python3
# Day 13: Transparent Origami
# https://adventofcode.com/2021/day/13
import sys
data = open("input.txt" if len(sys.argv) == 1 else sys.argv[1]).read().splitlines()
n = 2000
grid = [["." for _ in range(n)] for _ in range(n)]
for line in data:
if not line:
break
x, y = map(int, line.split(","))
grid[y][x] = "#"
# for y, row in enumerate(grid):
# print("%3d" % y, "".join(str(x) for x in row))
part1 = False
for line in data:
if not line.startswith("fold"):
continue
if line.startswith("fold along x="):
fold = int(line.split("=")[1])
for y in range(len(grid)):
for x in range(fold):
if grid[y][fold + 1 + x] == "#":
grid[y][fold - 1 - x] = "#"
del grid[y][fold:]
elif line.startswith("fold along y="):
fold = int(line.split("=")[1])
for y in range(fold):
for x in range(len(grid[0])):
if grid[fold + 1 + y][x] == "#":
grid[fold - 1 - y][x] = "#"
del grid[fold:]
if not part1:
print(sum(1 for row in grid for cell in row if cell == "#"))
part1 = True
print()
for row in grid:
print("".join(str(x) for x in row))
| 23.849057 | 83 | 0.505538 | [
"Unlicense"
] | rene-d/advent-of-rust | 2021/day13/day13.py | 1,264 | Python |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, Dict, List, Sequence
from reggen.field import Field
from reggen.register import Register
from reggen.reg_block import RegBlock
from shared.otbn_reggen import load_registers
from .trace import Trace
class ExtRegChange(Trace):
def __init__(self, op: str, written: int, from_hw: bool, new_value: int):
self.op = op
self.written = written
self.from_hw = from_hw
self.new_value = new_value
class TraceExtRegChange(Trace):
def __init__(self, name: str, erc: ExtRegChange):
self.name = name
self.erc = erc
def trace(self) -> str:
suff = (''
if self.erc.new_value == self.erc.written
else ' (now {:#010x})'.format(self.erc.new_value))
return ("otbn.{} {} {:#010x}{}{}"
.format(self.name,
self.erc.op,
self.erc.written,
' (from SW)' if not self.erc.from_hw else '',
suff))
def rtl_trace(self) -> str:
return '! otbn.{}: {:#010x}'.format(self.name, self.erc.new_value)
class RGField:
'''A wrapper around a field in a register as parsed by reggen'''
def __init__(self,
name: str,
width: int,
lsb: int,
reset_value: int,
swaccess: str):
# We only support some values of swaccess (the ones we need)
assert swaccess in ['rw1c', 'rw', 'wo', 'r0w1c', 'ro']
assert width > 0
assert lsb >= 0
self.name = name
self.width = width
self.lsb = lsb
self.value = reset_value
# swaccess
self.w1c = swaccess in ['rw1c', 'r0w1c']
self.read_only = swaccess == 'ro'
self.read_zero = swaccess in ['wo', 'r0w1c']
self.next_value = reset_value
@staticmethod
def from_field(field: Field) -> 'RGField':
name = field.name
assert isinstance(name, str)
width = field.bits.width()
assert isinstance(width, int)
lsb = field.bits.lsb
assert isinstance(lsb, int)
reset_value = field.resval or 0
assert isinstance(reset_value, int)
swaccess = field.swaccess.key
assert isinstance(swaccess, str)
return RGField(name, width, lsb, reset_value, swaccess)
def _next_sw_read(self) -> int:
return 0 if self.read_zero else self.next_value
def write(self, value: int, from_hw: bool) -> int:
'''Stage the effects of writing a value (see RGReg.write)'''
assert value >= 0
masked = value & ((1 << self.width) - 1)
if self.read_only and not from_hw:
pass
elif self.w1c and not from_hw:
self.next_value &= ~masked
else:
self.next_value = masked
return self._next_sw_read()
def set_bits(self, value: int) -> int:
'''Like write, but |=.'''
masked = value & ((1 << self.width) - 1)
self.next_value |= masked
return self._next_sw_read()
def clear_bits(self, value: int) -> int:
'''Like write, but &= ~.'''
self.next_value &= ~value
return self._next_sw_read()
def read(self, from_hw: bool) -> int:
return 0 if (self.read_zero and not from_hw) else self.value
def commit(self) -> None:
self.value = self.next_value
def abort(self) -> None:
self.next_value = self.value
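# Illustrative example (not part of the simulator): a write-1-to-clear (w1c)
# field clears exactly the bits software writes as 1 and keeps the rest.
def _w1c_field_example() -> int:
    fld = RGField('demo', width=8, lsb=0, reset_value=0xff, swaccess='rw1c')
    fld.write(0x0f, from_hw=False)  # SW writes ones to the low nibble
    fld.commit()
    return fld.value                # 0xf0: low nibble cleared, rest kept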
class RGReg:
'''A wrapper around a register as parsed by reggen'''
def __init__(self, fields: List[RGField], double_flopped: bool):
self.fields = fields
self.double_flopped = double_flopped
self._trace = [] # type: List[ExtRegChange]
self._next_trace = [] # type: List[ExtRegChange]
@staticmethod
def from_register(reg: Register, double_flopped: bool) -> 'RGReg':
return RGReg([RGField.from_field(fd) for fd in reg.fields],
double_flopped)
def _apply_fields(self,
func: Callable[[RGField, int], int],
value: int) -> int:
new_val = 0
for field in self.fields:
field_new_val = func(field, value >> field.lsb)
new_val |= field_new_val << field.lsb
return new_val
def write(self, value: int, from_hw: bool) -> None:
'''Stage the effects of writing a value.
If from_hw is true, this write is from OTBN hardware (rather than the
bus).
'''
assert value >= 0
now = self._apply_fields(lambda fld, fv: fld.write(fv, from_hw), value)
trace = self._next_trace if self.double_flopped else self._trace
trace.append(ExtRegChange('=', value, from_hw, now))
def set_bits(self, value: int) -> None:
assert value >= 0
now = self._apply_fields(lambda fld, fv: fld.set_bits(fv), value)
trace = self._next_trace if self.double_flopped else self._trace
trace.append(ExtRegChange('=', value, False, now))
def read(self, from_hw: bool) -> int:
value = 0
for field in self.fields:
value |= field.read(from_hw) << field.lsb
return value
def commit(self) -> None:
for field in self.fields:
field.commit()
self._trace = self._next_trace
self._next_trace = []
def abort(self) -> None:
for field in self.fields:
field.abort()
self._trace = []
self._next_trace = []
def changes(self) -> List[ExtRegChange]:
return self._trace
def make_flag_reg(name: str, double_flopped: bool) -> RGReg:
return RGReg([RGField(name, 32, 0, 0, 'ro')], double_flopped)
class OTBNExtRegs:
'''A class representing OTBN's externally visible CSRs
This models an extra flop between the core and some of the externally
visible registers by ensuring that a write only becomes visible after an
intervening commit.
'''
double_flopped_regs = ['STATUS']
def __init__(self) -> None:
_, reg_block = load_registers()
self.regs = {} # type: Dict[str, RGReg]
self._dirty = 0
assert isinstance(reg_block, RegBlock)
for entry in reg_block.flat_regs:
assert isinstance(entry.name, str)
# reggen's validation should have checked that we have no
# duplicates.
assert entry.name not in self.regs
double_flopped = entry.name in self.double_flopped_regs
self.regs[entry.name] = RGReg.from_register(entry, double_flopped)
# Add a fake "STOP_PC" register.
#
# TODO: We might well add something like this to the actual design in
# the future (see issue #4327) but, for now, it's just used in
# simulation to help track whether RIG-generated binaries finished
# where they expected to finish.
self.regs['STOP_PC'] = make_flag_reg('STOP_PC', True)
# Add a fake "RND_REQ" register to allow us to tell otbn_core_model to
# generate an EDN request.
self.regs['RND_REQ'] = make_flag_reg('RND_REQ', True)
# Add a fake "WIPE_START" register. We set this for a single cycle when
# starting secure wipe and the C++ model can use this to trigger a dump
# of internal state before it gets zeroed out.
self.regs['WIPE_START'] = make_flag_reg('WIPE_START', False)
def _get_reg(self, reg_name: str) -> RGReg:
reg = self.regs.get(reg_name)
if reg is None:
raise ValueError('Unknown register name: {!r}.'.format(reg_name))
return reg
def write(self, reg_name: str, value: int, from_hw: bool) -> None:
'''Stage the effects of writing a value to a register'''
assert value >= 0
self._get_reg(reg_name).write(value, from_hw)
self._dirty = 2
def set_bits(self, reg_name: str, value: int) -> None:
'''Set some bits of a register (HW access only)'''
assert value >= 0
self._get_reg(reg_name).set_bits(value)
self._dirty = 2
def increment_insn_cnt(self) -> None:
'''Increment the INSN_CNT register'''
reg = self._get_reg('INSN_CNT')
assert len(reg.fields) == 1
fld = reg.fields[0]
reg.write(min(fld.value + 1, (1 << 32) - 1), True)
self._dirty = 2
def read(self, reg_name: str, from_hw: bool) -> int:
reg = self.regs.get(reg_name)
if reg is None:
raise ValueError('Unknown register name: {!r}.'.format(reg_name))
return reg.read(from_hw)
def changes(self) -> Sequence[Trace]:
if self._dirty == 0:
return []
trace = []
for name, reg in self.regs.items():
trace += [TraceExtRegChange(name, erc) for erc in reg.changes()]
return trace
def commit(self) -> None:
# We know that we'll only have any pending changes if self._dirty is
# positive, so needn't bother calling commit on each register if not.
if self._dirty > 0:
for reg in self.regs.values():
reg.commit()
self._dirty = max(0, self._dirty - 1)
def abort(self) -> None:
for reg in self.regs.values():
reg.abort()
self._dirty = 0
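# Illustrative sketch (not part of the simulator): STATUS is double-flopped,
# so a write to it only shows up in changes() after an intervening commit,
# while a write to a single-flopped register is reported straight away.
def _double_flop_example() -> None:
    regs = OTBNExtRegs()
    regs.write('STATUS', 1, from_hw=True)
    before = [t for t in regs.changes() if t.name == 'STATUS']
    regs.commit()
    after = [t for t in regs.changes() if t.name == 'STATUS']
    assert not before and after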
| 32.958333 | 79 | 0.591761 | [
"Apache-2.0"
] | Daasin/FOSS-fTPM | hw/ip/otbn/dv/otbnsim/sim/ext_regs.py | 9,492 | Python |
import json
import os
import sys
import numpy as np
import random
import math
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from env import R2RBatch
from utils import padding_idx, add_idx, Tokenizer
import utils
import model
import param
from param import args
from collections import defaultdict
class BaseAgent(object):
''' Base class for an R2R agent to generate and save trajectories. '''
def __init__(self, env, results_path):
self.env = env
self.results_path = results_path
random.seed(1)
self.results = {}
self.losses = [] # For learning agents
def write_results(self):
output = [{'instr_id':k, 'trajectory': v} for k,v in self.results.items()]
with open(self.results_path, 'w') as f:
json.dump(output, f)
def get_results(self):
output = [{'instr_id': k, 'trajectory': v} for k, v in self.results.items()]
return output
def rollout(self, **args):
''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
raise NotImplementedError
@staticmethod
def get_agent(name):
return globals()[name+"Agent"]
def test(self, iters=None, **kwargs):
self.env.reset_epoch(shuffle=(iters is not None)) # If iters is not none, shuffle the env batch
self.losses = []
self.results = {}
# We rely on env showing the entire batch before repeating anything
looped = False
self.loss = 0
if iters is not None:
# For each time, it will run the first 'iters' iterations. (It was shuffled before)
for i in range(iters):
for traj in self.rollout(**kwargs):
self.loss = 0
self.results[traj['instr_id']] = traj['path']
else: # Do a full round
while True:
for traj in self.rollout(**kwargs):
if traj['instr_id'] in self.results:
looped = True
else:
self.loss = 0
self.results[traj['instr_id']] = traj['path']
if looped:
break
class Seq2SeqAgent(BaseAgent):
''' An agent based on an LSTM seq2seq model with attention. '''
# For now, the agent can't pick which forward move to make - just the one in the middle
env_actions = {
'left': (0,-1, 0), # left
'right': (0, 1, 0), # right
'up': (0, 0, 1), # up
'down': (0, 0,-1), # down
'forward': (1, 0, 0), # forward
'<end>': (0, 0, 0), # <end>
'<start>': (0, 0, 0), # <start>
'<ignore>': (0, 0, 0) # <ignore>
}
def __init__(self, env, results_path, tok, episode_len=20):
super(Seq2SeqAgent, self).__init__(env, results_path)
self.tok = tok
self.episode_len = episode_len
self.feature_size = self.env.feature_size
# Models
enc_hidden_size = args.rnn_dim//2 if args.bidir else args.rnn_dim
self.encoder = model.EncoderLSTM(tok.vocab_size(), args.wemb, enc_hidden_size, padding_idx,
args.dropout, bidirectional=args.bidir).cuda()
self.decoder = model.AttnDecoderLSTM(args.aemb, args.rnn_dim, args.dropout, feature_size=self.feature_size + args.angle_feat_size).cuda()
self.critic = model.Critic().cuda()
self.models = (self.encoder, self.decoder, self.critic)
# Optimizers
self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)
self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)
self.critic_optimizer = args.optimizer(self.critic.parameters(), lr=args.lr)
self.optimizers = (self.encoder_optimizer, self.decoder_optimizer, self.critic_optimizer)
# Evaluations
self.losses = []
self.criterion = nn.CrossEntropyLoss(ignore_index=args.ignoreid, size_average=False)
# Logs
sys.stdout.flush()
self.logs = defaultdict(list)
def _sort_batch(self, obs):
''' Extract instructions from a list of observations and sort by descending
sequence length (to enable PyTorch packing). '''
seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
seq_lengths = np.argmax(seq_tensor == padding_idx, axis=1)
seq_lengths[seq_lengths == 0] = seq_tensor.shape[1] # Full length
seq_tensor = torch.from_numpy(seq_tensor)
seq_lengths = torch.from_numpy(seq_lengths)
# Sort sequences by lengths
seq_lengths, perm_idx = seq_lengths.sort(0, True) # True -> descending
sorted_tensor = seq_tensor[perm_idx]
mask = (sorted_tensor == padding_idx)[:,:seq_lengths[0]] # seq_lengths[0] is the Maximum length
return Variable(sorted_tensor, requires_grad=False).long().cuda(), \
mask.byte().cuda(), \
list(seq_lengths), list(perm_idx)
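    @staticmethod
    def _recover_order_example(lengths):
        """Illustrative only (not used by the agent): sorting a batch by
        descending length and keeping perm_idx lets the original order be
        restored later with an inverse permutation, as _dijkstra does with
        recover_idx."""
        perm_idx = sorted(range(len(lengths)), key=lambda i: -lengths[i])
        recover_idx = [0] * len(lengths)
        for i, idx in enumerate(perm_idx):
            recover_idx[idx] = i
        return perm_idx, recover_idx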
def _feature_variable(self, obs):
''' Extract precomputed features into variable. '''
features = np.empty((len(obs), args.views, self.feature_size + args.angle_feat_size), dtype=np.float32)
for i, ob in enumerate(obs):
features[i, :, :] = ob['feature'] # Image feat
return Variable(torch.from_numpy(features), requires_grad=False).cuda()
def _candidate_variable(self, obs):
candidate_leng = [len(ob['candidate']) + 1 for ob in obs] # +1 is for the end
candidate_feat = np.zeros((len(obs), max(candidate_leng), self.feature_size + args.angle_feat_size), dtype=np.float32)
# Note: The candidate_feat at len(ob['candidate']) is the feature for the END
# which is zero in my implementation
for i, ob in enumerate(obs):
for j, c in enumerate(ob['candidate']):
candidate_feat[i, j, :] = c['feature'] # Image feat
return torch.from_numpy(candidate_feat).cuda(), candidate_leng
def get_input_feat(self, obs):
input_a_t = np.zeros((len(obs), args.angle_feat_size), np.float32)
for i, ob in enumerate(obs):
input_a_t[i] = utils.angle_feature(ob['heading'], ob['elevation'])
input_a_t = torch.from_numpy(input_a_t).cuda()
f_t = self._feature_variable(obs) # Image features from obs
candidate_feat, candidate_leng = self._candidate_variable(obs)
return input_a_t, f_t, candidate_feat, candidate_leng
def _teacher_action(self, obs, ended):
"""
Extract teacher actions into variable.
:param obs: The observation.
:param ended: Whether the action seq is ended
:return:
"""
a = np.zeros(len(obs), dtype=np.int64)
for i, ob in enumerate(obs):
if ended[i]: # Just ignore this index
a[i] = args.ignoreid
else:
for k, candidate in enumerate(ob['candidate']):
if candidate['viewpointId'] == ob['teacher']: # Next view point
a[i] = k
break
else: # Stop here
assert ob['teacher'] == ob['viewpoint'] # The teacher action should be "STAY HERE"
a[i] = len(ob['candidate'])
return torch.from_numpy(a).cuda()
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
"""
Interface between Panoramic view and Egocentric view
It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator
"""
def take_action(i, idx, name):
if type(name) is int: # Go to the next view
self.env.env.sims[idx].makeAction(name, 0, 0)
else: # Adjust
self.env.env.sims[idx].makeAction(*self.env_actions[name])
state = self.env.env.sims[idx].getState()
if traj is not None:
traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation))
if perm_idx is None:
perm_idx = range(len(perm_obs))
for i, idx in enumerate(perm_idx):
action = a_t[i]
if action != -1: # -1 is the <stop> action
select_candidate = perm_obs[i]['candidate'][action]
src_point = perm_obs[i]['viewIndex']
trg_point = select_candidate['pointId']
src_level = (src_point ) // 12 # The point idx started from 0
trg_level = (trg_point ) // 12
while src_level < trg_level: # Tune up
take_action(i, idx, 'up')
src_level += 1
while src_level > trg_level: # Tune down
take_action(i, idx, 'down')
src_level -= 1
while self.env.env.sims[idx].getState().viewIndex != trg_point: # Turn right until the target
take_action(i, idx, 'right')
assert select_candidate['viewpointId'] == \
self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
take_action(i, idx, select_candidate['idx'])
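    @staticmethod
    def _view_index_example(point_id):
        """Illustrative only (not used by the agent): the 36 panoramic views
        are laid out as 3 elevation rows of 12 headings, so the arithmetic
        above maps e.g. pointId 14 to the middle row at a 60 degree heading."""
        level = point_id // 12                  # 0 = down, 1 = middle, 2 = up
        heading_deg = (point_id % 12) * 30      # 12 headings, 30 degrees apart
        return level, heading_deg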
def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None):
"""
:param train_ml: The weight to train with maximum likelihood
:param train_rl: whether use RL in training
:param reset: Reset the environment
:param speaker: Speaker used in back translation.
If the speaker is not None, use back translation.
O.w., normal training
:return:
"""
if self.feedback == 'teacher' or self.feedback == 'argmax':
train_rl = False
if reset:
# Reset env
obs = np.array(self.env.reset())
else:
obs = np.array(self.env._get_obs())
batch_size = len(obs)
if speaker is not None: # Trigger the self_train mode!
noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda())
batch = self.env.batch.copy()
speaker.env = self.env
insts = speaker.infer_batch(featdropmask=noise) # Use the same drop mask in speaker
# Create fake environments with the generated instruction
boss = np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>'] # First word is <BOS>
insts = np.concatenate((boss, insts), 1)
for i, (datum, inst) in enumerate(zip(batch, insts)):
if inst[-1] != self.tok.word_to_index['<PAD>']: # The inst is not ended!
inst[-1] = self.tok.word_to_index['<EOS>']
datum.pop('instructions')
datum.pop('instr_encoding')
datum['instructions'] = self.tok.decode_sentence(inst)
datum['instr_encoding'] = inst
obs = np.array(self.env.reset(batch))
# Reorder the language input for the encoder (do not ruin the original code)
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
perm_obs = obs[perm_idx]
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx_mask = seq_mask
# Init the reward shaping
last_dist = np.zeros(batch_size, np.float32)
for i, ob in enumerate(perm_obs): # The init distance from the view point to the target
last_dist[i] = ob['distance']
# Record starting point
traj = [{
'instr_id': ob['instr_id'],
'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
} for ob in perm_obs]
# For test result submission
visited = [set() for _ in perm_obs]
# Initialization the tracking state
ended = np.array([False] * batch_size) # Indices match permuation of the model, not env
# Init the logs
rewards = []
hidden_states = []
policy_log_probs = []
masks = []
entropys = []
ml_loss = 0.
h1 = h_t
for t in range(self.episode_len):
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
if speaker is not None: # Apply the env drop mask to the feat
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
h_t, c_t, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
already_dropfeat=(speaker is not None))
hidden_states.append(h_t)
# Mask outputs where agent can't move forward
# Here the logit is [b, max_candidate]
candidate_mask = utils.length2mask(candidate_leng)
if args.submit: # Avoding cyclic path
for ob_id, ob in enumerate(perm_obs):
visited[ob_id].add(ob['viewpoint'])
for c_id, c in enumerate(ob['candidate']):
if c['viewpointId'] in visited[ob_id]:
candidate_mask[ob_id][c_id] = 1
logit.masked_fill_(candidate_mask, -float('inf'))
# Supervised training
target = self._teacher_action(perm_obs, ended)
ml_loss += self.criterion(logit, target)
# Determine next model inputs
if self.feedback == 'teacher':
a_t = target # teacher forcing
elif self.feedback == 'argmax':
_, a_t = logit.max(1) # student forcing - argmax
a_t = a_t.detach()
log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here
policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1))) # Gather the log_prob for each batch
elif self.feedback == 'sample':
probs = F.softmax(logit, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
self.logs['entropy'].append(c.entropy().sum().item()) # For log
entropys.append(c.entropy()) # For optimization
a_t = c.sample().detach()
policy_log_probs.append(c.log_prob(a_t))
else:
print(self.feedback)
sys.exit('Invalid feedback option')
# Prepare environment action
# NOTE: Env action is in the perm_obs space
cpu_a_t = a_t.cpu().numpy()
for i, next_id in enumerate(cpu_a_t):
if next_id == (candidate_leng[i]-1) or next_id == args.ignoreid or ended[i]: # The last action is <end>
cpu_a_t[i] = -1 # Change the <end> and ignore action to -1
# Make action and get the new state
self.make_equiv_action(cpu_a_t, perm_obs, perm_idx, traj)
obs = np.array(self.env._get_obs())
            perm_obs = obs[perm_idx]                    # Perm the obs for the result
# Calculate the mask and reward
dist = np.zeros(batch_size, np.float32)
reward = np.zeros(batch_size, np.float32)
mask = np.ones(batch_size, np.float32)
for i, ob in enumerate(perm_obs):
dist[i] = ob['distance']
if ended[i]: # If the action is already finished BEFORE THIS ACTION.
reward[i] = 0.
mask[i] = 0.
else: # Calculate the reward
action_idx = cpu_a_t[i]
if action_idx == -1: # If the action now is end
if dist[i] < 3: # Correct
reward[i] = 2.
else: # Incorrect
reward[i] = -2.
else: # The action is not end
reward[i] = - (dist[i] - last_dist[i]) # Change of distance
if reward[i] > 0: # Quantification
reward[i] = 1
elif reward[i] < 0:
reward[i] = -1
else:
raise NameError("The action doesn't change the move")
rewards.append(reward)
masks.append(mask)
last_dist[:] = dist
# Update the finished actions
# -1 means ended or ignored (already ended)
ended[:] = np.logical_or(ended, (cpu_a_t == -1))
# Early exit if all ended
if ended.all():
break
if train_rl:
# Last action in A2C
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(perm_obs)
if speaker is not None:
candidate_feat[..., :-args.angle_feat_size] *= noise
f_t[..., :-args.angle_feat_size] *= noise
last_h_, _, _, _ = self.decoder(input_a_t, f_t, candidate_feat,
h_t, h1, c_t,
ctx, ctx_mask,
speaker is not None)
rl_loss = 0.
# NOW, A2C!!!
# Calculate the final discounted reward
last_value__ = self.critic(last_h_).detach() # The value esti of the last state, remove the grad for safety
discount_reward = np.zeros(batch_size, np.float32) # The inital reward is zero
for i in range(batch_size):
if not ended[i]: # If the action is not ended, use the value function as the last reward
discount_reward[i] = last_value__[i]
length = len(rewards)
total = 0
for t in range(length-1, -1, -1):
discount_reward = discount_reward * args.gamma + rewards[t] # If it ended, the reward will be 0
mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda()
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda()
v_ = self.critic(hidden_states[t])
a_ = (r_ - v_).detach()
# r_: The higher, the better. -ln(p(action)) * (discount_reward - value)
rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()
rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss
if self.feedback == 'sample':
rl_loss += (- 0.01 * entropys[t] * mask_).sum()
self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item())
total = total + np.sum(masks[t])
self.logs['total'].append(total)
# Normalize the loss function
if args.normalize_loss == 'total':
rl_loss /= total
elif args.normalize_loss == 'batch':
rl_loss /= batch_size
else:
assert args.normalize_loss == 'none'
self.loss += rl_loss
if train_ml is not None:
self.loss += ml_loss * train_ml / batch_size
if type(self.loss) is int: # For safety, it will be activated if no losses are added
self.losses.append(0.)
else:
self.losses.append(self.loss.item() / self.episode_len) # This argument is useless.
return traj
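    @staticmethod
    def _discounted_return_example(rewards, gamma, bootstrap=0.):
        """Illustrative only (not used by the agent): the backwards loop in
        the A2C block above computes R_t = r_t + gamma * R_{t+1}, seeded with
        the critic's value estimate when an episode has not ended."""
        ret, out = bootstrap, []
        for r in reversed(rewards):
            ret = r + gamma * ret
            out.append(ret)
        return out[::-1]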
def _dijkstra(self):
"""
The dijkstra algorithm.
Was called beam search to be consistent with existing work.
But it actually finds the Exact K paths with smallest listener log_prob.
:return:
[{
"scan": XXX
"instr_id":XXX,
'instr_encoding": XXX
'dijk_path': [v1, v2, ..., vn] (The path used for find all the candidates)
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
}]
"""
def make_state_id(viewpoint, action): # Make state id
return "%s_%s" % (viewpoint, str(action))
        def decompose_state_id(state_id):     # Split a state id back into viewpoint and action
viewpoint, action = state_id.split("_")
action = int(action)
return viewpoint, action
# Get first obs
obs = self.env._get_obs()
# Prepare the state id
batch_size = len(obs)
results = [{"scan": ob['scan'],
"instr_id": ob['instr_id'],
"instr_encoding": ob["instr_encoding"],
"dijk_path": [ob['viewpoint']],
"paths": []} for ob in obs]
# Encoder
seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
recover_idx = np.zeros_like(perm_idx)
for i, idx in enumerate(perm_idx):
recover_idx[idx] = i
ctx, h_t, c_t = self.encoder(seq, seq_lengths)
ctx, h_t, c_t, ctx_mask = ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx] # Recover the original order
# Dijk Graph States:
id2state = [
{make_state_id(ob['viewpoint'], -95):
{"next_viewpoint": ob['viewpoint'],
"running_state": (h_t[i], h_t[i], c_t[i]),
"location": (ob['viewpoint'], ob['heading'], ob['elevation']),
"feature": None,
"from_state_id": None,
"score": 0,
"scores": [],
"actions": [],
}
}
for i, ob in enumerate(obs)
] # -95 is the start point
visited = [set() for _ in range(batch_size)]
finished = [set() for _ in range(batch_size)]
graphs = [utils.FloydGraph() for _ in range(batch_size)] # For the navigation path
ended = np.array([False] * batch_size)
# Dijk Algorithm
for _ in range(300):
# Get the state with smallest score for each batch
# If the batch is not ended, find the smallest item.
# Else use a random item from the dict (It always exists)
smallest_idXstate = [
max(((state_id, state) for state_id, state in id2state[i].items() if state_id not in visited[i]),
key=lambda item: item[1]['score'])
if not ended[i]
else
next(iter(id2state[i].items()))
for i in range(batch_size)
]
# Set the visited and the end seqs
for i, (state_id, state) in enumerate(smallest_idXstate):
assert (ended[i]) or (state_id not in visited[i])
if not ended[i]:
viewpoint, action = decompose_state_id(state_id)
visited[i].add(state_id)
if action == -1:
finished[i].add(state_id)
if len(finished[i]) >= args.candidates: # Get enough candidates
ended[i] = True
# Gather the running state in the batch
h_ts, h1s, c_ts = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate))
h_t, h1, c_t = torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)
# Recover the env and gather the feature
for i, (state_id, state) in enumerate(smallest_idXstate):
next_viewpoint = state['next_viewpoint']
scan = results[i]['scan']
from_viewpoint, heading, elevation = state['location']
self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) # Heading, elevation is not used in panoramic
obs = self.env._get_obs()
# Update the floyd graph
# Only used to shorten the navigation length
# Will not effect the result
for i, ob in enumerate(obs):
viewpoint = ob['viewpoint']
if not graphs[i].visited(viewpoint): # Update the Graph
for c in ob['candidate']:
next_viewpoint = c['viewpointId']
dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint]
graphs[i].add_edge(viewpoint, next_viewpoint, dis)
graphs[i].update(viewpoint)
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], viewpoint))
input_a_t, f_t, candidate_feat, candidate_leng = self.get_input_feat(obs)
# Run one decoding step
            h_t, c_t, logit, h1 = self.decoder(input_a_t, f_t, candidate_feat,
                                               h_t, h1, c_t,
                                               ctx, ctx_mask,
                                               False)
# Update the dijk graph's states with the newly visited viewpoint
candidate_mask = utils.length2mask(candidate_leng)
logit.masked_fill_(candidate_mask, -float('inf'))
log_probs = F.log_softmax(logit, 1) # Calculate the log_prob here
_, max_act = log_probs.max(1)
for i, ob in enumerate(obs):
current_viewpoint = ob['viewpoint']
candidate = ob['candidate']
current_state_id, current_state = smallest_idXstate[i]
old_viewpoint, from_action = decompose_state_id(current_state_id)
assert ob['viewpoint'] == current_state['next_viewpoint']
if from_action == -1 or ended[i]: # If the action is <end> or the batch is ended, skip it
continue
for j in range(len(ob['candidate']) + 1): # +1 to include the <end> action
# score + log_prob[action]
modified_log_prob = log_probs[i][j].detach().cpu().item()
new_score = current_state['score'] + modified_log_prob
if j < len(candidate): # A normal action
next_id = make_state_id(current_viewpoint, j)
next_viewpoint = candidate[j]['viewpointId']
trg_point = candidate[j]['pointId']
heading = (trg_point % 12) * math.pi / 6
elevation = (trg_point // 12 - 1) * math.pi / 6
location = (next_viewpoint, heading, elevation)
else: # The end action
next_id = make_state_id(current_viewpoint, -1) # action is -1
next_viewpoint = current_viewpoint # next viewpoint is still here
location = (current_viewpoint, ob['heading'], ob['elevation'])
if next_id not in id2state[i] or new_score > id2state[i][next_id]['score']:
id2state[i][next_id] = {
"next_viewpoint": next_viewpoint,
"location": location,
"running_state": (h_t[i], h1[i], c_t[i]),
"from_state_id": current_state_id,
"feature": (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()),
"score": new_score,
"scores": current_state['scores'] + [modified_log_prob],
"actions": current_state['actions'] + [len(candidate)+1],
}
            # If no state is left active after the update, mark the batch as ended
for i in range(batch_size):
if len(visited[i]) == len(id2state[i]): # It's the last active state
ended[i] = True
# End?
if ended.all():
break
# Move back to the start point
for i in range(batch_size):
results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][-1], results[i]['dijk_path'][0]))
"""
"paths": {
"trajectory": [viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}
"""
# Gather the Path
for i, result in enumerate(results):
assert len(finished[i]) <= args.candidates
for state_id in finished[i]:
path_info = {
"trajectory": [],
"action": [],
"listener_scores": id2state[i][state_id]['scores'],
"listener_actions": id2state[i][state_id]['actions'],
"visual_feature": []
}
viewpoint, action = decompose_state_id(state_id)
while action != -95:
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
path_info['action'].append(action)
path_info['visual_feature'].append(state['feature'])
state_id = id2state[i][state_id]['from_state_id']
viewpoint, action = decompose_state_id(state_id)
state = id2state[i][state_id]
path_info['trajectory'].append(state['location'])
for need_reverse_key in ["trajectory", "action", "visual_feature"]:
path_info[need_reverse_key] = path_info[need_reverse_key][::-1]
result['paths'].append(path_info)
return results
def beam_search(self, speaker):
"""
:param speaker: The speaker to be used in searching.
:return:
{
"scan": XXX
"instr_id":XXX,
"instr_encoding": XXX
"dijk_path": [v1, v2, ...., vn]
"paths": [{
"trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"speaker_scores": [log_prob_word1, log_prob_word2, ..., ],
}]
}
"""
self.env.reset()
results = self._dijkstra()
"""
return from self._dijkstra()
[{
"scan": XXX
"instr_id":XXX,
"instr_encoding": XXX
"dijk_path": [v1, v2, ...., vn]
"paths": [{
"trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],
"action": [act_1, act_2, ..., ],
"listener_scores": [log_prob_act1, log_prob_act2, ..., ],
"visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)
}]
}]
"""
# Compute the speaker scores:
for result in results:
lengths = []
num_paths = len(result['paths'])
for path in result['paths']:
assert len(path['trajectory']) == (len(path['visual_feature']) + 1)
lengths.append(len(path['visual_feature']))
max_len = max(lengths)
img_feats = torch.zeros(num_paths, max_len, 36, self.feature_size + args.angle_feat_size)
can_feats = torch.zeros(num_paths, max_len, self.feature_size + args.angle_feat_size)
for j, path in enumerate(result['paths']):
for k, feat in enumerate(path['visual_feature']):
img_feat, can_feat = feat
img_feats[j][k] = img_feat
can_feats[j][k] = can_feat
img_feats, can_feats = img_feats.cuda(), can_feats.cuda()
features = ((img_feats, can_feats), lengths)
insts = np.array([result['instr_encoding'] for _ in range(num_paths)])
seq_lengths = np.argmax(insts == self.tok.word_to_index['<EOS>'], axis=1) # len(seq + 'BOS') == len(seq + 'EOS')
insts = torch.from_numpy(insts).cuda()
speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True)
for j, path in enumerate(result['paths']):
path.pop("visual_feature")
path['speaker_scores'] = -speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]
return results
def beam_search_test(self, speaker):
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
looped = False
self.results = {}
while True:
for traj in self.beam_search(speaker):
if traj['instr_id'] in self.results:
looped = True
else:
self.results[traj['instr_id']] = traj
if looped:
break
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None):
''' Evaluate once on each instruction in the current environment '''
self.feedback = feedback
if use_dropout:
self.encoder.train()
self.decoder.train()
self.critic.train()
else:
self.encoder.eval()
self.decoder.eval()
self.critic.eval()
super(Seq2SeqAgent, self).test(iters)
def zero_grad(self):
self.loss = 0.
self.losses = []
for model, optimizer in zip(self.models, self.optimizers):
model.train()
optimizer.zero_grad()
def accumulate_gradient(self, feedback='teacher', **kwargs):
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
def optim_step(self):
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
def train(self, n_iters, feedback='teacher', **kwargs):
''' Train for a given number of iterations '''
self.feedback = feedback
self.encoder.train()
self.decoder.train()
self.critic.train()
self.losses = []
for iter in tqdm(range(1, n_iters + 1)):
self.encoder_optimizer.zero_grad()
self.decoder_optimizer.zero_grad()
self.critic_optimizer.zero_grad()
self.loss = 0
if feedback == 'teacher':
self.feedback = 'teacher'
self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs)
elif feedback == 'sample':
if args.ml_weight != 0:
self.feedback = 'teacher'
self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs)
self.feedback = 'sample'
self.rollout(train_ml=None, train_rl=True, **kwargs)
else:
assert False
self.loss.backward()
torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
self.encoder_optimizer.step()
self.decoder_optimizer.step()
self.critic_optimizer.step()
def save(self, epoch, path):
''' Snapshot models '''
the_dir, _ = os.path.split(path)
os.makedirs(the_dir, exist_ok=True)
states = {}
def create_state(name, model, optimizer):
states[name] = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
create_state(*param)
torch.save(states, path)
def load(self, path):
''' Loads parameters (but not training state) '''
states = torch.load(path)
def recover_state(name, model, optimizer):
state = model.state_dict()
model_keys = set(state.keys())
load_keys = set(states[name]['state_dict'].keys())
if model_keys != load_keys:
print("NOTICE: DIFFERENT KEYS IN THE LISTEREN")
state.update(states[name]['state_dict'])
model.load_state_dict(state)
if args.loadOptim:
optimizer.load_state_dict(states[name]['optimizer'])
all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
("decoder", self.decoder, self.decoder_optimizer),
("critic", self.critic, self.critic_optimizer)]
for param in all_tuple:
recover_state(*param)
return states['encoder']['epoch'] - 1
| 44.722934 | 145 | 0.532655 | [
"MIT-0",
"MIT"
] | rcorona/R2R-EnvDrop | r2r_src/agent.py | 38,417 | Python |
from dataclasses import dataclass
from beebole.interfaces.entities import IdEntity, CountEntity
@dataclass
class Group(IdEntity):
name: str
groups: CountEntity
@dataclass
class ParentedGroup(IdEntity):
name: str
parent: IdEntity
| 15.625 | 61 | 0.764 | [
"MIT"
] | Dogeek/beebole | beebole/interfaces/entities/group.py | 250 | Python |
import math
n=int(input())
c=list(map(int, input().split()))
print(sum([abs(i) for i in c]))
print(math.sqrt(sum([i*i for i in c])))
print(max([abs(i) for i in c])) | 27.333333 | 39 | 0.628049 | [
"MIT"
] | consommee/AtCoder | ABC180/ABC180_B.py | 164 | Python |
import xarray as xr
import pandas as pd
import numpy as np
import xgboost as xgb
import time
import pickle
import sys
from xgboost import XGBRegressor
# load dataframe with maximal temp
def load_df_max_TREFHT(member, start_date, end_date):
path = "/glade/scratch/zhonghua/CESM-LE-members-csv/"
print("***************Start loading member",member,"***************")
t0 = time.time()
df = pd.read_csv(path+member+"_"+start_date+"_"+end_date+".csv")
elapsed_time = time.time() - t0
print("It takes elapsed_time", elapsed_time, "to read csv")
print("***************Start convert lat/lon to string***************")
t1=time.time()
df[["lat","lon"]]=df[["lat","lon"]].round(4).astype(str)
elapsed_time = time.time() - t1
print("It takes elapsed_time", elapsed_time, "to convert lat/lon to string")
print("***************Start One Hot Encoding***************")
# https://stackoverflow.com/questions/44124436/python-datetime-to-season
t2=time.time()
df["time"]=pd.to_datetime(df["time"],errors="coerce")
#df = df.dropna(subset=['time'])
months = ["Jan","Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"]
month_to_months = dict(zip(range(1,13), months))
df = pd.concat([df,pd.get_dummies(df["time"].dt.month.map(month_to_months).astype('category'))],axis=1)
elapsed_time = time.time() - t2
print("It takes elapsed_time", elapsed_time, "to finish the one hot encoding")
return df
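# Illustrative sketch (not part of the pipeline): the one-hot step above adds
# twelve 0/1 month columns. A frame whose timestamps fall in January and July
# gains a "Jan" column of [1, 0] and a "July" column of [0, 1].
def _one_hot_months_example():
    demo = pd.Series(pd.to_datetime(["2005-01-15", "2005-07-15"]))
    months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
              "July", "Aug", "Sept", "Oct", "Nov", "Dec"]
    month_to_months = dict(zip(range(1, 13), months))
    return pd.get_dummies(demo.dt.month.map(month_to_months).astype("category"))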
def XGB_test(df,year,lat,lon,member):
t_0=time.time()
#df_temp = df[(df["lat"]==lat) & (df["lon"]==lon)].reset_index()
df_lat = df[df["lat"]==lat]
df_temp = df_lat[df_lat["lon"]==lon]
vari_ls = ["QBOT","UBOT","VBOT",
"TREFHT",
"FLNS","FSNS",
"PRECT","PRSN",
"Jan","Feb", "Mar",
"Apr", "May", "June",
"July", "Aug", "Sept",
"Oct", "Nov", "Dec"]
XGBreg = pickle.load(open("/glade/scratch/zhonghua/ensem_model/"+year+"/"+"MX_"+lat+"_"+lon+".dat","rb"))
df_temp[member]=XGBreg.predict(df_temp[vari_ls])
#print("rmse:",np.sqrt(mean_squared_error(df_temp[member],df_temp[pred])))
#print("mae:",mean_absolute_error(df_temp[member],df_temp[pred]))
df_return=df_temp[["lat","lon","time",member,"TREFMXAV_U"]]
df_return[["lat","lon"]]=df_return[["lat","lon"]].astype(np.float32)
elapsed_time = time.time() - t_0
print("It takes elapsed_time", elapsed_time, "to apply the model")
return df_return.set_index(["lat","lon","time"])
#########################################################
lat_lon_dict=pickle.load(open("/glade/scratch/zhonghua/lat_lon_dict.dat","rb"))
member=sys.argv[1]
start_date=sys.argv[2]
end_date=sys.argv[3]
df = load_df_max_TREFHT(member, start_date, end_date)
i=1
df_final_ls=[]
for lat in lat_lon_dict:
print(lat)
for lon in lat_lon_dict[lat]:
df_final_ls.append(XGB_test(df,start_date,lat,lon,member))
i+=1
if (i%10==0):
print(i)
pd.concat(df_final_ls).to_csv("/glade/scratch/zhonghua/CESM_validation/"+start_date+"/"+member+"_ens.csv")
| 37.388235 | 109 | 0.602895 | [
"MIT"
] | zzheng93/code_uhws | 3_model_valid/pred/apply_model_members.py | 3,178 | Python |
'''
This is to fetch the tip table data for a telegram_id
Error Handling
==============
- /withdrawmemo tipuser11111 0.0001 TLOS pay_bill
- /withdrawmemo tipuser11111 0.00001 EOS pay_bill
{"code": 3050003, "name": "eosio_assert_message_exception", "what": "eosio_assert_message assertion failure"
, "details": [{"message": "assertion failure with message: there is no balances available corresponding to t
he parsed quantity symbol for the given from_id.", "file": "cf_system.cpp", "line_number": 14, "method": "eo
sio_assert"}, {"message": "pending console output: ", "file": "apply_context.cpp", "line_number": 143, "meth
od": "exec_one"}]}
- /withdrawmemo tipuser11117 0.0001 EOS pay_bill
{"code": 3010001, "name": "name_type_exception", "what": "Invalid name", "details": [{"message": "Name conta
ins invalid character: (7) ", "file": "name.hpp", "line_number": 26, "method": "char_to_symbol"}, {"message"
: "", "file": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "fi
le": "abi_serializer.cpp", "line_number": 570, "method": "_variant_to_binary"}, {"message": "", "file": "abi
_serializer.cpp", "line_number": 584, "method": "_variant_to_binary"}, {"message": "\"{"from_id":410894301,"
from_username":"abhi3700","to_ac":"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}\" is invalid arg
s for action \"withdraw\" code \"tippertipper\". expected \"[{"name":"from_id","type":"uint64"},{"name":"fro
m_username","type":"string"},{"name":"to_ac","type":"name"},{"name":"quantity","type":"asset"},{"name":"memo
","type":"string"}]\"", "file": "chain_plugin.cpp", "line_number": 3396, "method": "abi_json_to_bin"}, {"mes
sage": "code: tippertipper, action: withdraw, args: {"from_id":410894301,"from_username":"abhi3700","to_ac":
"tipuser11117","quantity":"0.0001 EOS","memo":"pay_bill"}", "file": "chain_plugin.cpp", "line_number": 3402,
"method": "abi_json_to_bin"}]}
'''
import json
import asyncio
from aioeos import EosAccount, EosJsonRpc, EosTransaction
from aioeos import types
from aioeos.exceptions import EosAccountDoesntExistException
from aioeos.exceptions import EosAssertMessageException
from aioeos.exceptions import EosDeadlineException
from aioeos.exceptions import EosRamUsageExceededException
from aioeos.exceptions import EosTxCpuUsageExceededException
from aioeos.exceptions import EosTxNetUsageExceededException
from input import *
# def validate(j):
# try:
# return json.load(j) # put JSON-data to a variable
# except json.decoder.JSONDecodeError:
# print("Invalid JSON") # in case json is invalid
# else:
# print("Valid JSON") # in case json is valid
async def balance(
from_id,
# chat
):
rpc = EosJsonRpc(url=Chain_URL)
table_response = await rpc.get_table_rows(
code=tip_eosio_ac,
scope= tip_eosio_ac,
table=tip_table,
lower_bound= from_id,
upper_bound= from_id
)
table_response = str(table_response).replace("\'", "\"")
table_response = table_response.replace("False", "false") # As False is invalid in JSON, so replace with false
# print(table_response)
for r in json.loads(table_response)['rows'][0]["balances"]:
prec, sym_name = r["key"]["sym"].split(",")
# print(f'token precision: {prec}') # precision
# print(f'token sym_name: {sym_name}') # symbol name
# print(f'val: {r["value"]/10**int(prec)}\n\n') # exact value
print(f'{r["value"]/10**int(prec)} {sym_name}') # result e.g. 2.0 EOS
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(balance(410894301)) | 51.223684 | 121 | 0.642949 | [
"MIT"
] | abhi3700/tipuser_bot | app/chain_table.py | 3,893 | Python |
#!/usr/bin/env python
#Copyright (c) 2008 Erik Tollerud ([email protected])
from __future__ import division,with_statement
from glob import glob
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup,find_packages
from distutils.command.build_py import build_py as du_build_py
from distutils.core import Command
from astropysics.version import version as versionstr
from astropysics.config import _recpkgs,_guipkgs
descrip = """
`astropysics` contains a variety of utilities and algorithms for reducing, analyzing, and visualizing astronomical data.
See http://packages.python.org/Astropysics/ for detailed documentation.
"""
apyspkgs = find_packages(exclude=['tests'])
scripts = glob('scripts/*')
#recommended/gui packages are stored in config module - used in extras
recpkgs = [pkg.name for pkg in _recpkgs]
guipkgs = [pkg.name for pkg in _guipkgs]
#custom build_py overwrites version.py with a version overwriting the revno-generating version.py
class apy_build_py(du_build_py):
def run(self):
from os import path
res = du_build_py.run(self)
versfile = path.join(self.build_lib,'astropysics','version.py')
print 'freezing version number to',versfile
with open(versfile,'w') as f: #this overwrites the actual version.py
f.write(self.get_version_py())
return res
def get_version_py(self):
import datetime
from astropysics.version import _frozen_version_py_template
from astropysics.version import version,major,minor,bugfix,dev
timestamp = str(datetime.datetime.now())
t = (timestamp,version,major,minor,bugfix,dev)
return _frozen_version_py_template%t
#custom sphinx builder just makes the directory to build if it hasn't already been made
try:
from sphinx.setup_command import BuildDoc
class apy_build_sphinx(BuildDoc):
def finalize_options(self):
from os.path import isfile
from distutils.cmd import DistutilsOptionError
if self.build_dir is not None:
if isfile(self.build_dir):
raise DistutilsOptionError('Attempted to build_sphinx into a file '+self.build_dir)
self.mkpath(self.build_dir)
return BuildDoc.finalize_options(self)
except ImportError: #sphinx not present
apy_build_sphinx = None
#command to count the number of lines of code (mostly for curiosity's sake) in the main dirs
class CountLines(Command):
# Brief (40-50 characters) description of the command
description = "Print the number of lines in the major directories to the terminal."
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [('includeempty', 'e',
"Include empty lines in the count"),
]
def initialize_options (self):
self.includeempty = False
def finalize_options (self):
pass
def visit_files(self,lists,dirname,fnames):
lcountlist,fcountlist = lists
from os import path
#prefilter for valid extentions
if dirname != 'scripts':
fnames = [fn for fn in fnames if (fn.endswith('.py') or fn.endswith('.pyx')) ]
cnt = 0
for fn in fnames:
fn = path.join(dirname,fn)
with open(fn) as f:
if self.includeempty:
for l in f:
cnt += 1
else:
for l in f:
if l.strip()!='':
cnt += 1
lcountlist.append(cnt)
fcountlist.append(len(fnames))
def run(self):
from os import path
dir,name = path.split(__file__)
apydir = path.join(dir,'astropysics')
apyllst,apyflst = [],[]
path.walk(apydir,self.visit_files,(apyllst,apyflst))
self.apylinecount = sum(apyllst)
self.apyfilecount = sum(apyflst)
scrdir = path.join(dir,'scripts')
scrllst,scrflst = [],[]
path.walk(scrdir,self.visit_files,(scrllst,scrflst))
self.scrlinecount = sum(scrllst)
self.scrfilecount = sum(scrflst)
tstdir = path.join(dir,'tests')
tstllst,tstflst = [],[]
path.walk(tstdir,self.visit_files,(tstllst,tstflst))
self.tstlinecount = sum(tstllst)
self.tstfilecount = sum(tstflst)
self.linecount = self.apylinecount + self.scrlinecount + self.tstlinecount
self.filecount = self.apyfilecount + self.scrfilecount + self.tstfilecount
print 'Astropysics source directory has %i lines in %i files'%(self.apylinecount,self.apyfilecount)
print 'Scripts directory has %i lines in %i files'%(self.scrlinecount,self.scrfilecount)
print 'Tests directory has %i lines in %i files'%(self.tstlinecount,self.tstfilecount)
print 'Total %i lines in %i files'%(self.linecount,self.filecount)
cmdclassd = {'build_py' : apy_build_py,'count_lines':CountLines}
if apy_build_sphinx is not None:
cmdclassd['build_sphinx'] = apy_build_sphinx
setup(name='Astropysics',
version=versionstr,
description='Astrophysics libraries for Python',
packages=apyspkgs,
package_data={'astropysics':['data/*']},
scripts=scripts,
requires=['numpy','scipy'],
install_requires=['numpy'],
provides=['astropysics'],
extras_require={'all':recpkgs+guipkgs,
'nogui':recpkgs},
author='Erik Tollerud',
author_email='[email protected]',
license = 'Apache License 2.0',
url='http://packages.python.org/Astropysics/',
long_description=descrip,
cmdclass = cmdclassd
)
| 35.52381 | 120 | 0.635389 | [
"Apache-2.0"
] | bezthomas/astropysics | setup.py | 5,968 | Python |
#!/usr/bin/env python
import argparse, re, os
from StringIO import StringIO
import language
#* Build instruction
#*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*#
def roundup(x, to=8):
return x if x % to == 0 else x + to - x % to
def build_form(ins):
form = ["%i"]
for operand in ins[1:]:
if operand.startswith("r"):
form.append("%r")
elif operand.startswith("q"):
form.append("%q")
elif operand.startswith("#") or operand.startswith("$"):
if "." in operand:
form.append("%f")
else:
form.append("%n")
else:
print "Error: Bad operand for instruction!"
return form
def getnum(strnum):
if strnum.startswith("0x"):
return int(strnum[2:], 16)
elif strnum.startswith("0b"):
return int(strnum[2:], 2)
else:
return int(strnum)
def build_ins(line):
line = line.strip()
line = re.sub(" |,", " ", line)
ins = line.split()
hx = []
# print ins
if ins[0] in ["data", "byte", "d.byte", "d.int", "long"]:
if ins[0] in ["data", "byte", "d.byte"]:
hx.append( format(getnum(ins[1]), "08b") )
elif ins[0] == "d.int":
hx.append( format(getnum(ins[1]), "032b") )
# print hx
return [], [hx]
else:
# print ins
# print build_form(ins)
# print language.ins[ins[0]]
form = build_form(ins)
opcode = language.ins[ins[0]]["opcode"][language.ins[ins[0]]["form"].index(" ".join(form))]
for f,op,i in zip(form, ins, range(len(ins))):
if f == "%i":
hx.append( format(opcode, "07b") )
if f == "%r":
hx.append( format(int(op[1:]), "07b") )
if f == "%q":
hx.append( format(int(op[1:])+(language.registers/2), "07b") )
if f == "%f":
hx.append( format( language.float_to_bits(float(op[1:])), "032b") )
if f == "%n":
if op[0] == "$":
hx.append( op )
elif i == 1:
hx.append( format( (getnum(op[1:]) + (1 << 57)) % (1 << 57), "057b") )
elif i == 2:
hx.append( format( (getnum(op[1:]) + (1 << 50)) % (1 << 50), "050b") )
elif i == 3:
hx.append( format( (getnum(op[1:]) + (1 << 43)) % (1 << 43), "043b") )
return [hx], []
def assemble(code):
# read in the file
if type(code) is file:
lines = [l.rstrip().lower() for l in code.readlines()]
else:
lines = [l.rstrip().lower() for l in code.splitlines()]
# remove comments
lines = [l for l in lines if not l.lstrip().startswith("#")]
# remove blank lines
lines = [l for l in lines if not l.strip() == ""]
# print lines
labels = {}
addr = 0
ins = []
data = []
hexbytes = StringIO()
# build the bit tuple for each instruction as well as label table
for line in lines:
# print line
if line.startswith((" ", "\t")):
i, d = build_ins(line)
ins.extend(i)
data.extend(d)
if line.strip().startswith("d."):
addr += 4
else:
addr = addr + 8
elif line.endswith(":"):
if "@" in line:
key, address = line.split("@")
labels[key] = int(address[:-1])
else:
labels[line[:-1]] = addr
# print labels
# second pass, find all labels and replace them with their program address component
for inst in ins:
# print inst
for p,i in zip(inst, range(len(inst))):
if p[0] == "$":
if i == 1:
inst[1] = format(labels[p[1:]], "057b")
elif i == 2:
inst[2] = format(labels[p[1:]], "050b")
elif i == 3:
inst[3] = format(labels[p[1:]], "043b")
# convert the instructions to hex byte stream and write one instruction per line
for inst in ins:
inst = "".join(inst).ljust(64, "0")
# print inst, len(inst)
inst = format(int(inst, 2), "08x").rjust(16, "0")
# print inst, len(inst)
inst = " ".join(map(''.join, zip(*[iter(inst)]*2)))
# print inst
hexbytes.write(inst+"\n")
# may need to fix this as we could have undefined behaviour if people put data before program
# instructions!
for d in data:
d = "".join(d)
d = d.rjust(roundup(len(d)), "0")
# print d
fstr = "0"+str(roundup(len(d)/4, 2))+"x"
d = format(int(d, 2), fstr)
d = " ".join(map(''.join, zip(*[iter(d)]*2)))
hexbytes.write(d+"\n")
return hexbytes.getvalue().strip()
#* Main
#*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*#
if __name__ == "__main__":
ap = argparse.ArgumentParser(description="SuperScalar assembler")
ap.add_argument("file",
type=str,
nargs=1,
help="Assembler file to assemble.")
ap.add_argument("--out", "-o",
type=str,
nargs=1,
metavar="FILE",
dest="output",
help="Specify an output file for the machine code")
args = ap.parse_args()
if args.output:
hex_path = args.output[0]
else:
hex_path = os.path.splitext(args.file[0])[0]+".hex"
if not os.path.exists(os.path.dirname(hex_path)):
os.makedirs(os.path.dirname(hex_path))
fp = open(args.file[0], "r")
fpx = open(hex_path, "w")
language.assign_opcodes()
fpx.write(assemble(fp))
# print args.file[0],"->",hex_path,"("+str(addr)+" bytes)"
| 25.694737 | 100 | 0.566776 | [
"MIT"
] | brgmnn/uob-cpu-simulator | assembler.py | 4,882 | Python |
import time
import serial
print "Iniciando Comunicao Serial com Arduino"
# Iniciando conexao serial
comport = serial.Serial('/dev/ttyACM0', 115200)
#comport = serial.Serial('/dev/ttyUSB0', 115200)
LED_ON='l'
LED_OFF='d'
# Delay between opening the serial connection and being ready to write (send anything)
time.sleep(1.8) # Between 1.5 s and 2 s
print "-*- LOOP -*-"
try:
while True:
print "Led ON"
comport.write(LED_ON)
time.sleep(1)
print "Led OFF"
comport.write(LED_OFF)
time.sleep(1)
except:
    # Closing the serial connection
comport.close()
pass
| 19.821429 | 67 | 0.691892 | [
"MIT"
] | cleitonbueno/arduinoday | comunicacao_serial.py | 555 | Python |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
To run these tests:
$ pip install webtest nosegae
$ nosetests --with-gae --gae-lib-root ~/google_appengine/
"""
import unittest
import webtest
import cloudstorage as gcs
import main
import gcs_async
import gcs_async_test
write = gcs_async_test.write
app = webtest.TestApp(main.app)
JUNIT_SUITE = """<testsuite tests="8" failures="0" time="1000.24">
<testcase name="First" classname="Example e2e suite" time="0">
<skipped/>
</testcase>
<testcase name="Second" classname="Example e2e suite" time="36.49"/>
<testcase name="Third" classname="Example e2e suite" time="96.49">
<failure>/go/src/k8s.io/kubernetes/test.go:123
Error Goes Here</failure>
</testcase>
</testsuite>"""
def init_build(build_dir, started=True, finished=True,
finished_has_version=False):
"""Create faked files for a build."""
start_json = {'timestamp': 1406535800}
finish_json = {'result': 'SUCCESS', 'timestamp': 1406536800}
(finish_json if finished_has_version else start_json)['version'] = 'v1+56'
if started:
write(build_dir + 'started.json', start_json)
if finished:
write(build_dir + 'finished.json', finish_json)
write(build_dir + 'artifacts/junit_01.xml', JUNIT_SUITE)
class TestBase(unittest.TestCase):
def init_stubs(self):
self.testbed.init_memcache_stub()
self.testbed.init_app_identity_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_app_identity_stub()
# redirect GCS calls to the local proxy
gcs_async.GCS_API_URL = gcs.common.local_api_url()
class AppTest(TestBase):
# pylint: disable=too-many-public-methods
BUILD_DIR = '/kubernetes-jenkins/logs/somejob/1234/'
def setUp(self):
self.init_stubs()
init_build(self.BUILD_DIR)
def test_index(self):
"""Test that the index works."""
response = app.get('/')
self.assertIn('kubernetes-e2e-gce', response)
def test_nodelog_missing_files(self):
"""Test that a missing all files gives a 404."""
build_dir = self.BUILD_DIR + 'nodelog?pod=abc'
response = app.get('/build' + build_dir, status=404)
self.assertIn('Unable to find', response)
def test_nodelog_kubelet(self):
"""Test for a kubelet file with junit file.
- missing the default kube-apiserver"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_apiserver(self):
"""Test for default apiserver file
- no kubelet file to find objrefdict
- no file with junit file"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_nodelog_no_junit(self):
"""Test for when no junit in same folder
- multiple folders"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kube-apiserver.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_nodelog_no_junit_apiserver(self):
"""Test for when no junit in same folder
- multiple folders
- no kube-apiserver.log"""
nodelog_url = self.BUILD_DIR + 'nodelog?pod=abc&junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/docker.log',
'Containers\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-2/kubelet.log',
'apiserver pod abc\n')
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
response = app.get('/build' + nodelog_url)
self.assertIn("tmp-node-2", response)
def test_no_failed_pod(self):
"""Test that filtering page still loads when no failed pod name is given"""
nodelog_url = self.BUILD_DIR + 'nodelog?junit=junit_01.xml'
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log',
'abc\nEvent(api.ObjectReference{Name:"abc", UID:"podabc"} failed)\n')
response = app.get('/build' + nodelog_url)
self.assertIn("Wrap line", response)
def test_parse_by_timestamp(self):
"""Test parse_by_timestamp and get_woven_logs
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
kubeapi_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-apiserver.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, kubeapi_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(kubeapi_filepath,
'0101 01:01:01.000 kubeapi\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 kubeapi\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
print response
self.assertIn(expected, response)
def test_timestamp_no_apiserver(self):
"""Test parse_by_timestamp and get_woven_logs without an apiserver file
- Weave separate logs together by timestamp
- Check that lines without timestamp are combined
- Test different timestamp formats
- no kube-apiserver.log"""
kubelet_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kubelet.log'
proxy_filepath = self.BUILD_DIR + 'artifacts/tmp-node-image/kube-proxy.log'
query_string = 'nodelog?pod=abc&junit=junit_01.xml&weave=on&logfiles=%s&logfiles=%s' % (
kubelet_filepath, proxy_filepath)
nodelog_url = self.BUILD_DIR + query_string
init_build(self.BUILD_DIR)
write(self.BUILD_DIR + 'artifacts/tmp-node-image/junit_01.xml', JUNIT_SUITE)
write(kubelet_filepath,
'abc\n0101 01:01:01.001 Event(api.ObjectReference{Name:"abc", UID:"podabc"})\n')
write(proxy_filepath,
'0101 01:01:01.000 proxy\n0101 01:01:01.002 pod\n01-01T01:01:01.005Z last line')
expected = ('0101 01:01:01.000 proxy\n'
'<span class="highlight">abc0101 01:01:01.001 Event(api.ObjectReference{Name:'
'"<span class="keyword">abc</span>", UID:"podabc"})</span>\n'
'0101 01:01:01.002 pod\n'
'01-01T01:01:01.005Z last line')
response = app.get('/build' + nodelog_url)
self.assertIn(expected, response)
| 43.913462 | 98 | 0.657105 | [
"Apache-2.0"
] | justaugustus/test-infra | gubernator/main_test.py | 9,134 | Python |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (9, 9, 9):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_gdalconst')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_gdalconst')
_gdalconst = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_gdalconst', [dirname(__file__)])
except ImportError:
import _gdalconst
return _gdalconst
try:
_mod = imp.load_module('_gdalconst', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_gdalconst = swig_import_helper()
del swig_import_helper
else:
import _gdalconst
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_method(set):
def set_attr(self, name, value):
if (name == "thisown"):
return self.this.own(value)
if hasattr(self, name) or (name == "this"):
set(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
GDT_Unknown = _gdalconst.GDT_Unknown
GDT_Byte = _gdalconst.GDT_Byte
GDT_UInt16 = _gdalconst.GDT_UInt16
GDT_Int16 = _gdalconst.GDT_Int16
GDT_UInt32 = _gdalconst.GDT_UInt32
GDT_Int32 = _gdalconst.GDT_Int32
GDT_Float32 = _gdalconst.GDT_Float32
GDT_Float64 = _gdalconst.GDT_Float64
GDT_CInt16 = _gdalconst.GDT_CInt16
GDT_CInt32 = _gdalconst.GDT_CInt32
GDT_CFloat32 = _gdalconst.GDT_CFloat32
GDT_CFloat64 = _gdalconst.GDT_CFloat64
GDT_TypeCount = _gdalconst.GDT_TypeCount
GA_ReadOnly = _gdalconst.GA_ReadOnly
GA_Update = _gdalconst.GA_Update
GF_Read = _gdalconst.GF_Read
GF_Write = _gdalconst.GF_Write
GRIORA_NearestNeighbour = _gdalconst.GRIORA_NearestNeighbour
GRIORA_Bilinear = _gdalconst.GRIORA_Bilinear
GRIORA_Cubic = _gdalconst.GRIORA_Cubic
GRIORA_CubicSpline = _gdalconst.GRIORA_CubicSpline
GRIORA_Lanczos = _gdalconst.GRIORA_Lanczos
GRIORA_Average = _gdalconst.GRIORA_Average
GRIORA_Mode = _gdalconst.GRIORA_Mode
GRIORA_Gauss = _gdalconst.GRIORA_Gauss
GCI_Undefined = _gdalconst.GCI_Undefined
GCI_GrayIndex = _gdalconst.GCI_GrayIndex
GCI_PaletteIndex = _gdalconst.GCI_PaletteIndex
GCI_RedBand = _gdalconst.GCI_RedBand
GCI_GreenBand = _gdalconst.GCI_GreenBand
GCI_BlueBand = _gdalconst.GCI_BlueBand
GCI_AlphaBand = _gdalconst.GCI_AlphaBand
GCI_HueBand = _gdalconst.GCI_HueBand
GCI_SaturationBand = _gdalconst.GCI_SaturationBand
GCI_LightnessBand = _gdalconst.GCI_LightnessBand
GCI_CyanBand = _gdalconst.GCI_CyanBand
GCI_MagentaBand = _gdalconst.GCI_MagentaBand
GCI_YellowBand = _gdalconst.GCI_YellowBand
GCI_BlackBand = _gdalconst.GCI_BlackBand
GCI_YCbCr_YBand = _gdalconst.GCI_YCbCr_YBand
GCI_YCbCr_CrBand = _gdalconst.GCI_YCbCr_CrBand
GCI_YCbCr_CbBand = _gdalconst.GCI_YCbCr_CbBand
GRA_NearestNeighbour = _gdalconst.GRA_NearestNeighbour
GRA_Bilinear = _gdalconst.GRA_Bilinear
GRA_Cubic = _gdalconst.GRA_Cubic
GRA_CubicSpline = _gdalconst.GRA_CubicSpline
GRA_Lanczos = _gdalconst.GRA_Lanczos
GRA_Average = _gdalconst.GRA_Average
GRA_Mode = _gdalconst.GRA_Mode
GRA_Max = _gdalconst.GRA_Max
GRA_Min = _gdalconst.GRA_Min
GRA_Med = _gdalconst.GRA_Med
GRA_Q1 = _gdalconst.GRA_Q1
GRA_Q3 = _gdalconst.GRA_Q3
GPI_Gray = _gdalconst.GPI_Gray
GPI_RGB = _gdalconst.GPI_RGB
GPI_CMYK = _gdalconst.GPI_CMYK
GPI_HLS = _gdalconst.GPI_HLS
CXT_Element = _gdalconst.CXT_Element
CXT_Text = _gdalconst.CXT_Text
CXT_Attribute = _gdalconst.CXT_Attribute
CXT_Comment = _gdalconst.CXT_Comment
CXT_Literal = _gdalconst.CXT_Literal
CE_None = _gdalconst.CE_None
CE_Debug = _gdalconst.CE_Debug
CE_Warning = _gdalconst.CE_Warning
CE_Failure = _gdalconst.CE_Failure
CE_Fatal = _gdalconst.CE_Fatal
CPLE_None = _gdalconst.CPLE_None
CPLE_AppDefined = _gdalconst.CPLE_AppDefined
CPLE_OutOfMemory = _gdalconst.CPLE_OutOfMemory
CPLE_FileIO = _gdalconst.CPLE_FileIO
CPLE_OpenFailed = _gdalconst.CPLE_OpenFailed
CPLE_IllegalArg = _gdalconst.CPLE_IllegalArg
CPLE_NotSupported = _gdalconst.CPLE_NotSupported
CPLE_AssertionFailed = _gdalconst.CPLE_AssertionFailed
CPLE_NoWriteAccess = _gdalconst.CPLE_NoWriteAccess
CPLE_UserInterrupt = _gdalconst.CPLE_UserInterrupt
CPLE_ObjectNull = _gdalconst.CPLE_ObjectNull
CPLE_HttpResponse = _gdalconst.CPLE_HttpResponse
CPLE_AWSBucketNotFound = _gdalconst.CPLE_AWSBucketNotFound
CPLE_AWSObjectNotFound = _gdalconst.CPLE_AWSObjectNotFound
CPLE_AWSAccessDenied = _gdalconst.CPLE_AWSAccessDenied
CPLE_AWSInvalidCredentials = _gdalconst.CPLE_AWSInvalidCredentials
CPLE_AWSSignatureDoesNotMatch = _gdalconst.CPLE_AWSSignatureDoesNotMatch
OF_ALL = _gdalconst.OF_ALL
OF_RASTER = _gdalconst.OF_RASTER
OF_VECTOR = _gdalconst.OF_VECTOR
OF_GNM = _gdalconst.OF_GNM
OF_READONLY = _gdalconst.OF_READONLY
OF_UPDATE = _gdalconst.OF_UPDATE
OF_SHARED = _gdalconst.OF_SHARED
OF_VERBOSE_ERROR = _gdalconst.OF_VERBOSE_ERROR
DMD_LONGNAME = _gdalconst.DMD_LONGNAME
DMD_HELPTOPIC = _gdalconst.DMD_HELPTOPIC
DMD_MIMETYPE = _gdalconst.DMD_MIMETYPE
DMD_EXTENSION = _gdalconst.DMD_EXTENSION
DMD_EXTENSIONS = _gdalconst.DMD_EXTENSIONS
DMD_CONNECTION_PREFIX = _gdalconst.DMD_CONNECTION_PREFIX
DMD_CREATIONOPTIONLIST = _gdalconst.DMD_CREATIONOPTIONLIST
DMD_CREATIONDATATYPES = _gdalconst.DMD_CREATIONDATATYPES
DMD_CREATIONFIELDDATATYPES = _gdalconst.DMD_CREATIONFIELDDATATYPES
DMD_SUBDATASETS = _gdalconst.DMD_SUBDATASETS
DCAP_OPEN = _gdalconst.DCAP_OPEN
DCAP_CREATE = _gdalconst.DCAP_CREATE
DCAP_CREATECOPY = _gdalconst.DCAP_CREATECOPY
DCAP_VIRTUALIO = _gdalconst.DCAP_VIRTUALIO
DCAP_RASTER = _gdalconst.DCAP_RASTER
DCAP_VECTOR = _gdalconst.DCAP_VECTOR
DCAP_NOTNULL_FIELDS = _gdalconst.DCAP_NOTNULL_FIELDS
DCAP_DEFAULT_FIELDS = _gdalconst.DCAP_DEFAULT_FIELDS
DCAP_NOTNULL_GEOMFIELDS = _gdalconst.DCAP_NOTNULL_GEOMFIELDS
CPLES_BackslashQuotable = _gdalconst.CPLES_BackslashQuotable
CPLES_XML = _gdalconst.CPLES_XML
CPLES_URL = _gdalconst.CPLES_URL
CPLES_SQL = _gdalconst.CPLES_SQL
CPLES_CSV = _gdalconst.CPLES_CSV
GFT_Integer = _gdalconst.GFT_Integer
GFT_Real = _gdalconst.GFT_Real
GFT_String = _gdalconst.GFT_String
GFU_Generic = _gdalconst.GFU_Generic
GFU_PixelCount = _gdalconst.GFU_PixelCount
GFU_Name = _gdalconst.GFU_Name
GFU_Min = _gdalconst.GFU_Min
GFU_Max = _gdalconst.GFU_Max
GFU_MinMax = _gdalconst.GFU_MinMax
GFU_Red = _gdalconst.GFU_Red
GFU_Green = _gdalconst.GFU_Green
GFU_Blue = _gdalconst.GFU_Blue
GFU_Alpha = _gdalconst.GFU_Alpha
GFU_RedMin = _gdalconst.GFU_RedMin
GFU_GreenMin = _gdalconst.GFU_GreenMin
GFU_BlueMin = _gdalconst.GFU_BlueMin
GFU_AlphaMin = _gdalconst.GFU_AlphaMin
GFU_RedMax = _gdalconst.GFU_RedMax
GFU_GreenMax = _gdalconst.GFU_GreenMax
GFU_BlueMax = _gdalconst.GFU_BlueMax
GFU_AlphaMax = _gdalconst.GFU_AlphaMax
GFU_MaxCount = _gdalconst.GFU_MaxCount
GRTT_THEMATIC = _gdalconst.GRTT_THEMATIC
GRTT_ATHEMATIC = _gdalconst.GRTT_ATHEMATIC
GMF_ALL_VALID = _gdalconst.GMF_ALL_VALID
GMF_PER_DATASET = _gdalconst.GMF_PER_DATASET
GMF_ALPHA = _gdalconst.GMF_ALPHA
GMF_NODATA = _gdalconst.GMF_NODATA
GDAL_DATA_COVERAGE_STATUS_UNIMPLEMENTED = _gdalconst.GDAL_DATA_COVERAGE_STATUS_UNIMPLEMENTED
GDAL_DATA_COVERAGE_STATUS_DATA = _gdalconst.GDAL_DATA_COVERAGE_STATUS_DATA
GDAL_DATA_COVERAGE_STATUS_EMPTY = _gdalconst.GDAL_DATA_COVERAGE_STATUS_EMPTY
GARIO_PENDING = _gdalconst.GARIO_PENDING
GARIO_UPDATE = _gdalconst.GARIO_UPDATE
GARIO_ERROR = _gdalconst.GARIO_ERROR
GARIO_COMPLETE = _gdalconst.GARIO_COMPLETE
GTO_TIP = _gdalconst.GTO_TIP
GTO_BIT = _gdalconst.GTO_BIT
GTO_BSQ = _gdalconst.GTO_BSQ
| 36.4375 | 92 | 0.799528 | [
"MIT"
] | Jackintoshh/Web-Mapping | venv/Lib/site-packages/osgeo/gdalconst.py | 9,328 | Python |
import pandas as pd
from math import exp, log,sqrt
from numpy import cumsum,std,sum, mean
def outData(ts,actionHist,indx,startIndex=0):
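    # Build a results DataFrame: raw percent returns are scaled to decimals,
    # then cumulative log returns of the market ('cum_log_ts') and of the
    # strategy implied by the action history ('cum_log_rets') are added.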
out=pd.DataFrame(ts,index=indx,columns=['ts']).applymap(lambda x: x/100)
out=out[startIndex:]
out['cum_log_ts']=cumsum([log(1+i) for i in out['ts']])
out['Action_Hist']=actionHist[startIndex:]
out['trading rets']=calculateTradingReturn(out['Action_Hist'],out['ts'])
out['cum_log_rets']=cumsum([log(1+x) for x in out['trading rets']])
return out
def calculateTradingReturn(actionHistory,tsReturn,delta=0):
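    # Per-period strategy return: position times market return, discounted by
    # a transaction cost proportional to `delta` whenever the position changes.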
if ((type(tsReturn)==pd.core.frame.DataFrame) or (type(tsReturn)==pd.core.frame.Series)):
rets=pd.Series(index=tsReturn.index)
else:
rets=[0 for i in range(len(tsReturn))]
for t in range(len(tsReturn)-1):
cost=delta*abs(actionHistory[t+1]-actionHistory[t])
rets[t]=(1+(actionHistory[t]*tsReturn[t]))*(1-cost)-1
return rets
def maximumDrawdown(ts):
return min(ts)
def annualisedSharpe(rs,rf=0):
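    # Annualised Sharpe ratio assuming 252 trading periods per year; `rf` may
    # be a scalar or a per-period sequence. The last return is dropped before
    # averaging.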
rs=rs[:-1]
if (type(rf)==int)|(type(rf)==float):
rf=[rf for i in rs]
mean_ann_ret=mean([(rs[i]*252)-rf[i] for i in range(len(rs))])
stand= std(rs)*sqrt(252)
return (mean_ann_ret)/stand
def percentOfOutperformedMonths(tradingRets,tsRets):
    monthlyTrading=tradingRets.resample('M').apply(logCumSum)
    monthlyMkt=tsRets.resample('M').apply(logCumSum)
    numOutperform=0
    for i in range(len(monthlyMkt)):
        if monthlyTrading[i]>monthlyMkt[i]:
numOutperform+=1
return 100*((1.0*numOutperform)/len(monthlyMkt))
def numTradesPerYear(actionHistory):
count=0
for i in range(1,len(actionHistory)):
if actionHistory[i]!=actionHistory[i-1]:
count+=1
return count/252
def totalReturn(log_returns):
return exp(sum(log_returns+1))-1
def logCumSum(ts):
return sum([log(1+t) for t in ts])
pass
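# Minimal usage sketch (toy numbers, not from the original project): compute
# per-period strategy returns for a long/flat position series, then report the
# annualised Sharpe ratio and the number of position changes per year.
if __name__ == '__main__':
    market_rets = [0.01, -0.02, 0.015, 0.005, 0.0]  # simple period returns
    positions = [1, 1, 0, 1, 1]                     # 1 = long, 0 = flat
    strat_rets = calculateTradingReturn(positions, market_rets, delta=0.001)
    print('Annualised Sharpe: %s' % annualisedSharpe(strat_rets))
    print('Trades per year: %s' % numTradesPerYear(positions))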
| 33.333333 | 93 | 0.670526 | [
"BSD-3-Clause"
] | samstern/MSc-Project | pybrain/rl/environments/timeseries/performanceEvaluation.py | 1,900 | Python |
from enum import IntFlag, IntEnum
import numpy as np
from . import Base
from . import ByteIO
from .axis_interp_rule import AxisInterpRule
from .jiggle_bone import JiggleRule
from .quat_interp_bone import QuatInterpRule
from ....utils.math_utilities import quat_to_matrix
class BoneFlags(IntFlag):
# BONE_CALCULATE_MASK = 0x1F
PHYSICALLY_SIMULATED = 0x01 # bone is physically simulated when physics are active
PHYSICS_PROCEDURAL = 0x02 # procedural when physics is active
ALWAYS_PROCEDURAL = 0x04 # bone is always procedurally animated
# bone aligns to the screen, not constrained in motion.
SCREEN_ALIGN_SPHERE = 0x08
# bone aligns to the screen, constrained by it's own axis.
SCREEN_ALIGN_CYLINDER = 0x10
# BONE_USED_MASK = 0x0007FF00
USED_BY_ANYTHING = 0x0007FF00
USED_BY_HITBOX = 0x00000100 # bone (or child) is used by a hit box
# bone (or child) is used by an attachment point
USED_BY_ATTACHMENT = 0x00000200
USED_BY_VERTEX_MASK = 0x0003FC00
# bone (or child) is used by the toplevel model via skinned vertex
USED_BY_VERTEX_LOD0 = 0x00000400
USED_BY_VERTEX_LOD1 = 0x00000800
USED_BY_VERTEX_LOD2 = 0x00001000
USED_BY_VERTEX_LOD3 = 0x00002000
USED_BY_VERTEX_LOD4 = 0x00004000
USED_BY_VERTEX_LOD5 = 0x00008000
USED_BY_VERTEX_LOD6 = 0x00010000
USED_BY_VERTEX_LOD7 = 0x00020000
# bone is available for bone merge to occur against it
USED_BY_BONE_MERGE = 0x00040000
class Contents(IntFlag):
# EMPTY = 0 # No contents
SOLID = 0x1 # an eye is never valid in a solid
WINDOW = 0x2 # translucent, but not watery (glass)
AUX = 0x4
# alpha-tested "grate" textures. Bullets/sight pass through, but solids don't
GRATE = 0x8
SLIME = 0x10
WATER = 0x20
BLOCKLOS = 0x40 # block AI line of sight
# things that cannot be seen through (may be non-solid though)
OPAQUE = 0x80
TESTFOGVOLUME = 0x100
UNUSED = 0x200
# unused
# NOTE: If it's visible, grab from the top + update LAST_VISIBLE_CONTENTS
# if not visible, then grab from the bottom.
# OPAQUE + SURF_NODRAW count as OPAQUE (shadow-casting
# toolsblocklight textures)
BLOCKLIGHT = 0x400
TEAM1 = 0x800 # per team contents used to differentiate collisions
TEAM2 = 0x1000 # between players and objects on different teams
# ignore OPAQUE on surfaces that have SURF_NODRAW
IGNORE_NODRAW_OPAQUE = 0x2000
# hits entities which are MOVETYPE_PUSH (doors, plats, etc.)
MOVEABLE = 0x4000
# remaining contents are non-visible, and don't eat brushes
AREAPORTAL = 0x8000
PLAYERCLIP = 0x10000
MONSTERCLIP = 0x20000
# currents can be added to any other contents, and may be mixed
CURRENT_0 = 0x40000
CURRENT_90 = 0x80000
CURRENT_180 = 0x100000
CURRENT_270 = 0x200000
CURRENT_UP = 0x400000
CURRENT_DOWN = 0x800000
ORIGIN = 0x1000000 # removed before bsping an entity
MONSTER = 0x2000000 # should never be on a brush, only in game
DEBRIS = 0x4000000
DETAIL = 0x8000000 # brushes to be added after vis leafs
TRANSLUCENT = 0x10000000 # auto set if any surface has trans
LADDER = 0x20000000
HITBOX = 0x40000000 # use accurate hitboxes on trace
# NOTE: These are stored in a short in the engine now. Don't use more
# than 16 bits
SURF_LIGHT = 0x0001 # value will hold the light strength
# don't draw, indicates we should skylight + draw 2d sky but not draw the
# 3D skybox
SURF_SKY2D = 0x0002
SURF_SKY = 0x0004 # don't draw, but add to skybox
SURF_WARP = 0x0008 # turbulent water warp
SURF_TRANS = 0x0010
SURF_NOPORTAL = 0x0020 # the surface can not have a portal placed on it
# FIXME: This is an xbox hack to work around elimination of trigger
# surfaces, which breaks occluders
SURF_TRIGGER = 0x0040
SURF_NODRAW = 0x0080 # don't bother referencing the texture
SURF_HINT = 0x0100 # make a primary bsp splitter
SURF_SKIP = 0x0200 # completely ignore, allowing non-closed brushes
SURF_NOLIGHT = 0x0400 # Don't calculate light
SURF_BUMPLIGHT = 0x0800 # calculate three lightmaps for the surface for bumpmapping
SURF_NOSHADOWS = 0x1000 # Don't receive shadows
SURF_NODECALS = 0x2000 # Don't receive decals
SURF_NOPAINT = SURF_NODECALS # the surface can not have paint placed on it
SURF_NOCHOP = 0x4000 # Don't subdivide patches on this surface
SURF_HITBOX = 0x8000 # surface is part of a hitbox
class ProceduralBoneType(IntEnum):
AXISINTERP = 1
QUATINTERP = 2
AIMATBONE = 3
AIMATATTACH = 4
JIGGLE = 5
class BoneV36(Base):
def __init__(self, bone_id: int):
self.bone_id = bone_id
self.name = ""
self.parent_bone_index = 0
self.bone_controller_index = []
self.scale = 0
self.position = []
self.quat = []
self.anim_channels = 0
self.rotation = []
self.position_scale = []
self.rotation_scale = []
self.pose_to_bone = []
self.q_alignment = []
self.flags = BoneFlags(0)
self.procedural_rule_type = 0
self.physics_bone_index = 0
self.contents = Contents(0)
self.surface_prop = ''
self.procedural_rule = None
@property
def children(self):
from ..v36.mdl_file import MdlV36
mdl: MdlV36 = self.get_value("MDL")
childes = []
if mdl.bones:
bone_index = mdl.bones.index(self)
for bone in mdl.bones:
if bone.name == self.name:
continue
if bone.parent_bone_index == bone_index:
childes.append(bone)
return childes
@property
def matrix(self):
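        # Bone-to-parent transform: embed the quaternion's 3x3 rotation in the
        # upper-left block of a 4x4 matrix, then left-multiply by a translation
        # matrix built from self.position (result = T @ R).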
r_matrix = quat_to_matrix(self.quat)
tmp = np.identity(4)
tmp[0, :3] = r_matrix[0]
tmp[1, :3] = r_matrix[1]
tmp[2, :3] = r_matrix[2]
t_matrix = np.array([
[1, 0, 0, self.position[0]],
[0, 1, 0, self.position[1]],
[0, 0, 1, self.position[2]],
[0, 0, 0, 1],
], dtype=np.float32)
return np.identity(4) @ t_matrix @ tmp
@property
def parent(self):
from ..v36.mdl_file import MdlV36
mdl: MdlV36 = self.get_value("MDL")
if mdl.bones and self.parent_bone_index != -1:
return mdl.bones[self.parent_bone_index]
return None
def read(self, reader: ByteIO):
entry = reader.tell()
self.name = reader.read_source1_string(entry)
self.parent_bone_index = reader.read_int32()
self.bone_controller_index = reader.read_fmt('6f')
self.position = reader.read_fmt('3f')
self.rotation = reader.read_fmt('3f')
self.position_scale = reader.read_fmt('3f')
self.rotation_scale = reader.read_fmt('3f')
self.pose_to_bone = np.array(reader.read_fmt('12f')).reshape((3, 4)).transpose()
self.q_alignment = reader.read_fmt('4f')
self.flags = BoneFlags(reader.read_uint32())
self.procedural_rule_type = reader.read_uint32()
procedural_rule_offset = reader.read_uint32()
self.physics_bone_index = reader.read_uint32()
self.surface_prop = reader.read_source1_string(entry)
self.quat = reader.read_fmt('4f')
self.contents = Contents(reader.read_uint32())
reader.skip(3 * 4)
if self.procedural_rule_type != 0 and procedural_rule_offset != 0:
with reader.save_current_pos():
reader.seek(entry + procedural_rule_offset)
if self.procedural_rule_type == ProceduralBoneType.AXISINTERP:
self.procedural_rule = AxisInterpRule()
if self.procedural_rule_type == ProceduralBoneType.QUATINTERP:
self.procedural_rule = QuatInterpRule()
if self.procedural_rule_type == ProceduralBoneType.JIGGLE:
self.procedural_rule = JiggleRule()
if self.procedural_rule:
self.procedural_rule.read(reader)
class BoneV49(BoneV36):
def read(self, reader: ByteIO):
entry = reader.tell()
self.name = reader.read_source1_string(entry)
self.parent_bone_index = reader.read_int32()
self.bone_controller_index = reader.read_fmt('6f')
self.position = reader.read_fmt('3f')
self.quat = reader.read_fmt('4f')
self.rotation = reader.read_fmt('3f')
self.position_scale = reader.read_fmt('3f')
self.rotation_scale = reader.read_fmt('3f')
self.pose_to_bone = np.array(reader.read_fmt('12f')).reshape((3, 4)).transpose()
self.q_alignment = reader.read_fmt('4f')
self.flags = BoneFlags(reader.read_uint32())
self.procedural_rule_type = reader.read_uint32()
procedural_rule_offset = reader.read_uint32()
self.physics_bone_index = reader.read_uint32()
self.surface_prop = reader.read_source1_string(entry)
self.contents = Contents(reader.read_uint32())
if self.get_value('mdl_version') >= 44:
_ = [reader.read_uint32() for _ in range(8)]
if self.get_value('mdl_version') >= 53:
reader.skip(4 * 7)
if self.procedural_rule_type != 0 and procedural_rule_offset != 0:
with reader.save_current_pos():
reader.seek(entry + procedural_rule_offset)
if self.procedural_rule_type == ProceduralBoneType.AXISINTERP:
self.procedural_rule = AxisInterpRule()
if self.procedural_rule_type == ProceduralBoneType.QUATINTERP:
self.procedural_rule = QuatInterpRule()
if self.procedural_rule_type == ProceduralBoneType.JIGGLE:
self.procedural_rule = JiggleRule()
if self.procedural_rule:
self.procedural_rule.read(reader)
| 37.496241 | 88 | 0.6538 | [
"MIT"
] | anderlli0053/SourceIO | library/source1/mdl/structs/bone.py | 9,974 | Python |
"""
Core OpenBCI object for handling connections and samples from the WiFi Shield
Note that the LIB will take care on its own to print incoming ASCII messages if any (FIXME, BTW).
EXAMPLE USE:
def handle_sample(sample):
print(sample.channels_data)
wifi = OpenBCIWiFi()
wifi.start_streaming(handle_sample)
TODO: Cyton/Ganglion JSON
TODO: Ganglion Raw
TODO: Cyton Raw
"""
import asyncore
import atexit
import json
import logging
import re
import socket
import timeit
try:
import urllib2
except ImportError:
import urllib
import requests
import xmltodict
from openbci.utils import k, ParseRaw, OpenBCISample, ssdp
SAMPLE_RATE = 0 # Hz
'''
#Commands for in SDK
command_stop = "s";
command_startBinary = "b";
'''
class OpenBCIWiFi(object):
"""
Handle a connection to an OpenBCI wifi shield.
Args:
ip_address: The IP address of the WiFi Shield, "None" to attempt auto-detect.
shield_name: The unique name of the WiFi Shield, such as `OpenBCI-2AD4`, will use SSDP to get IP address still,
if `shield_name` is "None" and `ip_address` is "None", will connect to the first WiFi Shield found using SSDP
sample_rate: The sample rate to set the attached board to. If the sample rate picked is not a sample rate the attached
board can support, i.e. you send 300 to Cyton, then error will be thrown.
log:
timeout: in seconds, disconnect / reconnect after a period without new data -- should be high if impedance check
max_packets_to_skip: will try to disconnect / reconnect after too many packets are skipped
"""
def __init__(self, ip_address=None, shield_name=None, sample_rate=None, log=True, timeout=3,
max_packets_to_skip=20, latency=10000, high_speed=True, ssdp_attempts=5,
num_channels=8, local_ip_address=None):
# these one are used
self.daisy = False
self.gains = None
self.high_speed = high_speed
self.impedance = False
self.ip_address = ip_address
self.latency = latency
self.log = log # print_incoming_text needs log
self.max_packets_to_skip = max_packets_to_skip
self.num_channels = num_channels
self.sample_rate = sample_rate
self.shield_name = shield_name
self.ssdp_attempts = ssdp_attempts
self.streaming = False
self.timeout = timeout
# might be handy to know API
self.board_type = "none"
# number of EEG channels
self.eeg_channels_per_sample = 0
self.read_state = 0
self.log_packet_count = 0
self.packets_dropped = 0
self.time_last_packet = 0
if self.log:
print("Welcome to OpenBCI Native WiFi Shield Driver - Please contribute code!")
self.local_ip_address = local_ip_address
if not self.local_ip_address:
self.local_ip_address = self._get_local_ip_address()
# Intentionally bind to port 0
self.local_wifi_server = WiFiShieldServer(self.local_ip_address, 0)
self.local_wifi_server_port = self.local_wifi_server.socket.getsockname()[1]
if self.log:
print("Opened socket on %s:%d" % (self.local_ip_address, self.local_wifi_server_port))
if ip_address is None:
for i in range(ssdp_attempts):
try:
self.find_wifi_shield(wifi_shield_cb=self.on_shield_found)
break
except OSError:
# Try again
if self.log:
print("Did not find any WiFi Shields")
else:
self.on_shield_found(ip_address)
def on_shield_found(self, ip_address):
self.ip_address = ip_address
self.connect()
# Disconnects from board when terminated
atexit.register(self.disconnect)
def loop(self):
asyncore.loop()
def _get_local_ip_address(self):
"""
Gets the local ip address of this computer
@returns str Local IP address
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip_address = s.getsockname()[0]
s.close()
return local_ip_address
def getBoardType(self):
""" Returns the version of the board """
return self.board_type
def setImpedance(self, flag):
""" Enable/disable impedance measure """
self.impedance = bool(flag)
def connect(self):
""" Connect to the board and configure it. Note: recreates various objects upon call. """
if self.ip_address is None:
raise ValueError('self.ip_address cannot be None')
if self.log:
print("Init WiFi connection with IP: " + self.ip_address)
"""
Docs on these HTTP requests and more are found:
https://app.swaggerhub.com/apis/pushtheworld/openbci-wifi-server/1.3.0
"""
res_board = requests.get("http://%s/board" % self.ip_address)
if res_board.status_code == 200:
board_info = res_board.json()
if not board_info['board_connected']:
raise RuntimeError("No board connected to WiFi Shield. To learn how to connect to a Cyton or Ganglion visit http://docs.openbci.com/Tutorials/03-Wifi_Getting_Started_Guide")
self.board_type = board_info['board_type']
self.eeg_channels_per_sample = board_info['num_channels']
if self.log:
print("Connected to %s with %s channels" % (self.board_type, self.eeg_channels_per_sample))
self.gains = None
if self.board_type == k.BOARD_CYTON:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = False
elif self.board_type == k.BOARD_DAISY:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = True
elif self.board_type == k.BOARD_GANGLION:
self.gains = [51, 51, 51, 51]
self.daisy = False
self.local_wifi_server.set_daisy(daisy=self.daisy)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
if self.high_speed:
output_style = 'raw'
else:
output_style = 'json'
res_tcp_post = requests.post("http://%s/tcp" % self.ip_address,
json={
'ip': self.local_ip_address,
'port': self.local_wifi_server_port,
'output': output_style,
'delimiter': True,
'latency': self.latency
})
if res_tcp_post.status_code == 200:
tcp_status = res_tcp_post.json()
if tcp_status['connected']:
if self.log:
print("WiFi Shield to Python TCP Socket Established")
else:
raise RuntimeWarning("WiFi Shield is not able to connect to local server. Please open an issue.")
def init_streaming(self):
""" Tell the board to record like crazy. """
res_stream_start = requests.get("http://%s/stream/start" % self.ip_address)
if res_stream_start.status_code == 200:
self.streaming = True
self.packets_dropped = 0
self.time_last_packet = timeit.default_timer()
else:
raise EnvironmentError("Unable to start streaming. Check API for status code %d on /stream/start" % res_stream_start.status_code)
def find_wifi_shield(self, shield_name=None, wifi_shield_cb=None):
"""Detects Ganglion board MAC address -- if more than 1 around, will select first. Needs root privilege."""
if self.log:
print("Try to find WiFi shields on your local wireless network")
print("Scanning for %d seconds nearby devices..." % self.timeout)
list_ip = []
list_id = []
found_shield = False
def wifi_shield_found(response):
res = requests.get(response.location, verify=False).text
device_description = xmltodict.parse(res)
cur_shield_name = str(device_description['root']['device']['serialNumber'])
cur_base_url = str(device_description['root']['URLBase'])
cur_ip_address = re.findall(r'[0-9]+(?:\.[0-9]+){3}', cur_base_url)[0]
list_id.append(cur_shield_name)
list_ip.append(cur_ip_address)
found_shield = True
if shield_name is None:
print("Found WiFi Shield %s with IP Address %s" % (cur_shield_name, cur_ip_address))
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
else:
if shield_name == cur_shield_name:
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
ssdp_hits = ssdp.discover("urn:schemas-upnp-org:device:Basic:1", timeout=self.timeout, wifi_found_cb=wifi_shield_found)
nb_wifi_shields = len(list_id)
if nb_wifi_shields < 1:
print("No WiFi Shields found ;(")
raise OSError('Cannot find OpenBCI WiFi Shield with local name')
if nb_wifi_shields > 1:
print(
"Found " + str(nb_wifi_shields) +
", selecting first named: " + list_id[0] +
" with IPV4: " + list_ip[0])
return list_ip[0]
def wifi_write(self, output):
"""
Pass through commands from the WiFi Shield to the Carrier board
:param output:
:return:
"""
res_command_post = requests.post("http://%s/command" % self.ip_address,
json={'command': output})
if res_command_post.status_code == 200:
ret_val = res_command_post.text
if self.log:
print(ret_val)
return ret_val
else:
if self.log:
print("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
raise RuntimeError("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
def getSampleRate(self):
return self.sample_rate
def getNbEEGChannels(self):
"""Will not get new data on impedance check."""
return self.eeg_channels_per_sample
def start_streaming(self, callback, lapse=-1):
"""
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
"""
start_time = timeit.default_timer()
# Enclose callback function in a list if it comes alone
if not isinstance(callback, list):
self.local_wifi_server.set_callback(callback)
else:
self.local_wifi_server.set_callback(callback[0])
if not self.streaming:
self.init_streaming()
# while self.streaming:
# # should the board get disconnected and we could not wait for notification anymore, a reco should be attempted through timeout mechanism
# try:
# # at most we will get one sample per packet
# self.waitForNotifications(1. / self.getSampleRate())
# except Exception as e:
# print("Something went wrong while waiting for a new sample: " + str(e))
# # retrieve current samples on the stack
# samples = self.delegate.getSamples()
# self.packets_dropped = self.delegate.getMaxPacketsDropped()
# if samples:
# self.time_last_packet = timeit.default_timer()
# for call in callback:
# for sample in samples:
# call(sample)
#
# if (lapse > 0 and timeit.default_timer() - start_time > lapse):
# self.stop();
# if self.log:
# self.log_packet_count = self.log_packet_count + 1;
#
# # Checking connection -- timeout and packets dropped
# self.check_connection()
def test_signal(self, signal):
""" Enable / disable test signal """
if signal == 0:
self.warn("Disabling synthetic square wave")
try:
self.wifi_write(']')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
elif signal == 1:
self.warn("Enabling synthetic square wave")
try:
self.wifi_write('[')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
else:
self.warn("%s is not a known test signal. Valid signal is 0-1" % signal)
    def set_channel(self, channel, toggle_position):
        """ Enable / disable channels """
        try:
            if channel > self.num_channels:
                raise ValueError('Cannot set non-existent channel')
            # Per-channel command characters (Cyton/Ganglion SDK):
            # index 0-15 corresponds to channels 1-16.
            channel_on_commands = list("!@#$%^&*QWERTYUI")
            channel_off_commands = list("12345678qwertyui")
            # Command to set toggle to on position
            if toggle_position == 1:
                self.wifi_write(channel_on_commands[channel - 1])
            # Command to set toggle to off position
            elif toggle_position == 0:
                self.wifi_write(channel_off_commands[channel - 1])
        except Exception as e:
            print("Something went wrong while setting channels: " + str(e))
# See Cyton SDK for options
def set_channel_settings(self, channel, enabled=True, gain=24, input_type=0, include_bias=True, use_srb2=True, use_srb1=True):
try:
if channel > self.num_channels:
                raise ValueError('Cannot set non-existent channel')
if self.board_type == k.BOARD_GANGLION:
raise ValueError('Cannot use with Ganglion')
ch_array = list("12345678QWERTYUI")
#defaults
command = list("x1060110X")
# Set channel
command[1] = ch_array[channel-1]
# Set power down if needed (default channel enabled)
if not enabled:
command[2] = '1'
# Set gain (default 24)
if gain == 1:
command[3] = '0'
if gain == 2:
command[3] = '1'
if gain == 4:
command[3] = '2'
if gain == 6:
command[3] = '3'
if gain == 8:
command[3] = '4'
if gain == 12:
command[3] = '5'
#TODO: Implement input type (default normal)
# Set bias inclusion (default include)
if not include_bias:
command[5] = '0'
# Set srb2 use (default use)
if not use_srb2:
command[6] = '0'
# Set srb1 use (default don't use)
if use_srb1:
                command[7] = '1'
command_send = ''.join(command)
self.wifi_write(command_send)
#Make sure to update gain in wifi
self.gains[channel-1] = gain
self.local_wifi_server.set_gains(gains=self.gains)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
except ValueError as e:
print("Something went wrong while setting channel settings: " + str(e))
def set_sample_rate(self, sample_rate):
""" Change sample rate """
try:
if self.board_type == k.BOARD_CYTON or self.board_type == k.BOARD_DAISY:
if sample_rate == 250:
self.wifi_write('~6')
elif sample_rate == 500:
self.wifi_write('~5')
elif sample_rate == 1000:
self.wifi_write('~4')
elif sample_rate == 2000:
self.wifi_write('~3')
elif sample_rate == 4000:
self.wifi_write('~2')
elif sample_rate == 8000:
self.wifi_write('~1')
elif sample_rate == 16000:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
elif self.board_type == k.BOARD_GANGLION:
if sample_rate == 200:
self.wifi_write('~7')
elif sample_rate == 400:
self.wifi_write('~6')
elif sample_rate == 800:
self.wifi_write('~5')
elif sample_rate == 1600:
self.wifi_write('~4')
elif sample_rate == 3200:
self.wifi_write('~3')
elif sample_rate == 6400:
self.wifi_write('~2')
elif sample_rate == 12800:
self.wifi_write('~1')
elif sample_rate == 25600:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
else:
print("Board type not supported for setting sample rate")
except Exception as e:
print("Something went wrong while setting sample rate: " + str(e))
def set_accelerometer(self, toggle_position):
""" Enable / disable accelerometer """
try:
if self.board_type == k.BOARD_GANGLION:
# Commands to set toggle to on position
if toggle_position == 1:
self.wifi_write('n')
# Commands to set toggle to off position
elif toggle_position == 0:
self.wifi_write('N')
else:
print("Board type not supported for setting accelerometer")
except Exception as e:
print("Something went wrong while setting accelerometer: " + str(e))
"""
Clean Up (atexit)
"""
def stop(self):
print("Stopping streaming...")
self.streaming = False
# connection might be already down here
try:
if self.impedance:
print("Stopping with impedance testing")
self.wifi_write('Z')
else:
self.wifi_write('s')
except Exception as e:
print("Something went wrong while asking the board to stop streaming: " + str(e))
if self.log:
logging.warning('sent <s>: stopped streaming')
def disconnect(self):
if self.streaming:
self.stop()
# should not try to read/write anything after that, will crash
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
# log how many packets where sent succesfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:' + str(self.log_packet_count))
self.log_packet_count = 0
logging.warning(text)
print("Warning: %s" % text)
def check_connection(self):
""" Check connection quality in term of lag and number of packets drop. Reinit connection if necessary. FIXME: parameters given to the board will be lost."""
# stop checking when we're no longer streaming
if not self.streaming:
return
# check number of dropped packets and duration without new packets, deco/reco if too large
if self.packets_dropped > self.max_packets_to_skip:
self.warn("Too many packets dropped, attempt to reconnect")
self.reconnect()
elif self.timeout > 0 and timeit.default_timer() - self.time_last_packet > self.timeout:
self.warn("Too long since got new data, attempt to reconnect")
# if error, attempt to reconect
self.reconnect()
def reconnect(self):
""" In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost."""
self.warn('Reconnecting')
self.stop()
self.disconnect()
self.connect()
self.init_streaming()
class WiFiShieldHandler(asyncore.dispatcher_with_send):
def __init__(self, sock, callback=None, high_speed=True,
parser=None, daisy=False):
asyncore.dispatcher_with_send.__init__(self, sock)
self.callback = callback
self.daisy = daisy
self.high_speed = high_speed
self.last_odd_sample = OpenBCISample()
self.parser = parser if parser is not None else ParseRaw(gains=[24, 24, 24, 24, 24, 24, 24, 24])
def handle_read(self):
data = self.recv(3000) # 3000 is the max data the WiFi shield is allowed to send over TCP
if len(data) > 2:
if self.high_speed:
packets = int(len(data)/33)
raw_data_packets = []
for i in range(packets):
raw_data_packets.append(bytearray(data[i * k.RAW_PACKET_SIZE: i * k.RAW_PACKET_SIZE + k.RAW_PACKET_SIZE]))
samples = self.parser.transform_raw_data_packets_to_sample(raw_data_packets=raw_data_packets)
for sample in samples:
# if a daisy module is attached, wait to concatenate two samples (main board + daisy)
# before passing it to callback
if self.daisy:
# odd sample: daisy sample, save for later
if ~sample.sample_number % 2:
self.last_odd_sample = sample
# even sample: concatenate and send if last sample was the first part, otherwise drop the packet
elif sample.sample_number - 1 == self.last_odd_sample.sample_number:
# the aux data will be the average between the two samples, as the channel
# samples themselves have been averaged by the board
daisy_sample = self.parser.make_daisy_sample_object_wifi(self.last_odd_sample, sample)
if self.callback is not None:
self.callback(daisy_sample)
else:
if self.callback is not None:
self.callback(sample)
else:
try:
possible_chunks = data.split('\r\n')
if len(possible_chunks) > 1:
possible_chunks = possible_chunks[:-1]
for possible_chunk in possible_chunks:
if len(possible_chunk) > 2:
chunk_dict = json.loads(possible_chunk)
if 'chunk' in chunk_dict:
for sample in chunk_dict['chunk']:
if self.callback is not None:
self.callback(sample)
else:
print("not a sample packet")
except ValueError as e:
print("failed to parse: %s" % data)
print(e)
except BaseException as e:
print(e)
class WiFiShieldServer(asyncore.dispatcher):
def __init__(self, host, port, callback=None, gains=None, high_speed=True, daisy=False):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.daisy = daisy
self.listen(5)
self.callback = None
self.handler = None
self.parser = ParseRaw(gains=gains)
self.high_speed = high_speed
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
print('Incoming connection from %s' % repr(addr))
self.handler = WiFiShieldHandler(sock, self.callback, high_speed=self.high_speed,
parser=self.parser, daisy=self.daisy)
def set_callback(self, callback):
self.callback = callback
if self.handler is not None:
self.handler.callback = callback
def set_daisy(self, daisy):
self.daisy = daisy
if self.handler is not None:
self.handler.daisy = daisy
def set_gains(self, gains):
self.parser.set_ads1299_scale_factors(gains)
def set_parser(self, parser):
self.parser = parser
if self.handler is not None:
self.handler.parser = parser
| 39.516691 | 189 | 0.551585 | [
"MIT"
] | daniellasry/OpenBCI_Python | openbci/wifi.py | 27,227 | Python |
"""
Code for understanding type annotations.
This file contains functions that turn various representations of
Python type annotations into :class:`pyanalyze.value.Value` objects.
There are three major functions:
- :func:`type_from_runtime` takes a runtime Python object, for example
  ``type_from_runtime(int)`` -> ``TypedValue(int)``.
- :func:`type_from_value` takes an existing :class:`pyanalyze.value.Value`
object. For example, evaluating the expression ``int`` will produce
``KnownValue(int)``, and calling :func:`type_from_value` on that value
will produce ``TypedValue(int)``.
- :func:`type_from_ast` takes an AST node and evaluates it into a type.
These functions all rely on each other. For example, when a forward
reference is found in a runtime annotation, the code parses it and calls
:func:`type_from_ast` to evaluate it.
These functions all use :class:`Context` objects to resolve names and
show errors.
"""
import contextlib
from dataclasses import dataclass, InitVar, field
import typing
import typing_inspect
import qcore
import ast
import builtins
from collections.abc import Callable, Iterable, Hashable
import sys
from typing import (
Any,
Container,
NamedTuple,
cast,
TypeVar,
ContextManager,
Mapping,
NewType,
Sequence,
Optional,
Tuple,
Union,
TYPE_CHECKING,
)
from typing_extensions import ParamSpec, TypedDict
from .error_code import ErrorCode
from .extensions import (
AsynqCallable,
CustomCheck,
ExternalType,
HasAttrGuard,
NoReturnGuard,
ParameterTypeGuard,
TypeGuard,
)
from .find_unused import used
from .functions import FunctionDefNode
from .node_visitor import ErrorContext
from .signature import ELLIPSIS_PARAM, SigParameter, Signature, ParameterKind
from .safe import is_typing_name, is_instance_of_typing_name
from . import type_evaluation
from .value import (
AnnotatedValue,
AnySource,
AnyValue,
CallableValue,
CustomCheckExtension,
Extension,
HasAttrGuardExtension,
KnownValue,
MultiValuedValue,
NO_RETURN_VALUE,
NoReturnGuardExtension,
ParamSpecArgsValue,
ParamSpecKwargsValue,
ParameterTypeGuardExtension,
SelfTVV,
TypeGuardExtension,
TypedValue,
SequenceIncompleteValue,
annotate_value,
unite_values,
Value,
GenericValue,
SubclassValue,
TypedDictValue,
NewTypeValue,
TypeVarValue,
_HashableValue,
)
if TYPE_CHECKING:
from .name_check_visitor import NameCheckVisitor
try:
from typing import get_origin, get_args # Python 3.9
from types import GenericAlias
except ImportError:
GenericAlias = None
def get_origin(obj: object) -> Any:
return None
def get_args(obj: object) -> Tuple[Any, ...]:
return ()
CONTEXT_MANAGER_TYPES = (typing.ContextManager, contextlib.AbstractContextManager)
if sys.version_info >= (3, 7):
ASYNC_CONTEXT_MANAGER_TYPES = (
typing.AsyncContextManager,
# Doesn't exist on 3.6
# static analysis: ignore[undefined_attribute]
contextlib.AbstractAsyncContextManager,
)
else:
ASYNC_CONTEXT_MANAGER_TYPES = (typing.AsyncContextManager,)
@dataclass
class Context:
"""A context for evaluating annotations.
The base implementation does very little. Subclass this to do something more useful.
"""
should_suppress_undefined_names: bool = field(default=False, init=False)
"""While this is True, no errors are shown for undefined names."""
def suppress_undefined_names(self) -> ContextManager[None]:
"""Temporarily suppress errors about undefined names."""
return qcore.override(self, "should_suppress_undefined_names", True)
def show_error(
self,
message: str,
error_code: ErrorCode = ErrorCode.invalid_annotation,
node: Optional[ast.AST] = None,
) -> None:
"""Show an error found while evaluating an annotation."""
pass
def get_name(self, node: ast.Name) -> Value:
"""Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
return AnyValue(AnySource.inference)
def handle_undefined_name(self, name: str) -> Value:
if self.should_suppress_undefined_names:
return AnyValue(AnySource.inference)
self.show_error(
f"Undefined name {name!r} used in annotation", ErrorCode.undefined_name
)
return AnyValue(AnySource.error)
def get_name_from_globals(self, name: str, globals: Mapping[str, Any]) -> Value:
if name in globals:
return KnownValue(globals[name])
elif hasattr(builtins, name):
return KnownValue(getattr(builtins, name))
return self.handle_undefined_name(name)
@dataclass
class RuntimeEvaluator(type_evaluation.Evaluator, Context):
globals: Mapping[str, object] = field(repr=False)
func: typing.Callable[..., Any]
def evaluate_type(self, node: ast.AST) -> Value:
return type_from_ast(node, ctx=self)
def evaluate_value(self, node: ast.AST) -> Value:
return value_from_ast(node, ctx=self, error_on_unrecognized=False)
def get_name(self, node: ast.Name) -> Value:
"""Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
return self.get_name_from_globals(node.id, self.globals)
@dataclass
class SyntheticEvaluator(type_evaluation.Evaluator):
error_ctx: ErrorContext
annotations_context: Context
def show_error(
self,
message: str,
error_code: ErrorCode = ErrorCode.invalid_annotation,
node: Optional[ast.AST] = None,
) -> None:
self.error_ctx.show_error(node or self.node, message, error_code=error_code)
def evaluate_type(self, node: ast.AST) -> Value:
return type_from_ast(node, ctx=self.annotations_context)
def evaluate_value(self, node: ast.AST) -> Value:
return value_from_ast(
node, ctx=self.annotations_context, error_on_unrecognized=False
)
def get_name(self, node: ast.Name) -> Value:
"""Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name."""
return self.annotations_context.get_name(node)
@classmethod
def from_visitor(
cls,
node: FunctionDefNode,
visitor: "NameCheckVisitor",
return_annotation: Value,
) -> "SyntheticEvaluator":
return cls(
node,
return_annotation,
visitor,
_DefaultContext(visitor, node, use_name_node_for_error=True),
)
@used # part of an API
def type_from_ast(
ast_node: ast.AST,
visitor: Optional["NameCheckVisitor"] = None,
ctx: Optional[Context] = None,
) -> Value:
"""Given an AST node representing an annotation, return a
:class:`Value <pyanalyze.value.Value>`.
:param ast_node: AST node to evaluate.
:param visitor: Visitor class to use. This is used in the default
:class:`Context` to resolve names and show errors.
This is ignored if `ctx` is given.
:param ctx: :class:`Context` to use for evaluation.
"""
if ctx is None:
ctx = _DefaultContext(visitor, ast_node)
return _type_from_ast(ast_node, ctx)
def type_from_annotations(
annotations: Mapping[str, object],
key: str,
*,
globals: Optional[Mapping[str, object]] = None,
ctx: Optional[Context] = None,
) -> Optional[Value]:
try:
annotation = annotations[key]
except Exception:
# Malformed __annotations__
return None
else:
maybe_val = type_from_runtime(annotation, globals=globals, ctx=ctx)
if maybe_val != AnyValue(AnySource.incomplete_annotation):
return maybe_val
return None
def type_from_runtime(
val: object,
visitor: Optional["NameCheckVisitor"] = None,
node: Optional[ast.AST] = None,
globals: Optional[Mapping[str, object]] = None,
ctx: Optional[Context] = None,
) -> Value:
"""Given a runtime annotation object, return a
:class:`Value <pyanalyze.value.Value>`.
:param val: Object to evaluate. This will usually come from an
``__annotations__`` dictionary.
:param visitor: Visitor class to use. This is used in the default
:class:`Context` to resolve names and show errors.
This is ignored if `ctx` is given.
:param node: AST node that the annotation derives from. This is
used for showing errors. Ignored if `ctx` is given.
:param globals: Dictionary of global variables that can be used
to resolve names. Ignored if `ctx` is given.
:param ctx: :class:`Context` to use for evaluation.
"""
if ctx is None:
ctx = _DefaultContext(visitor, node, globals)
return _type_from_runtime(val, ctx)
def type_from_value(
value: Value,
visitor: Optional["NameCheckVisitor"] = None,
node: Optional[ast.AST] = None,
ctx: Optional[Context] = None,
is_typeddict: bool = False,
) -> Value:
"""Given a :class:`Value <pyanalyze.value.Value` representing an annotation,
return a :class:`Value <pyanalyze.value.Value>` representing the type.
The input value represents an expression, the output value represents
a type. For example, the :term:`impl` of ``typing.cast(typ, val)``
calls :func:`type_from_value` on the value it receives for its
`typ` argument and returns the result.
:param value: :class:`Value <pyanalyze.value.Value` to evaluate.
:param visitor: Visitor class to use. This is used in the default
:class:`Context` to resolve names and show errors.
This is ignored if `ctx` is given.
:param node: AST node that the annotation derives from. This is
used for showing errors. Ignored if `ctx` is given.
:param ctx: :class:`Context` to use for evaluation.
:param is_typeddict: Whether we are at the top level of a `TypedDict`
definition.
"""
if ctx is None:
ctx = _DefaultContext(visitor, node)
return _type_from_value(value, ctx, is_typeddict=is_typeddict)
def value_from_ast(
ast_node: ast.AST, ctx: Context, *, error_on_unrecognized: bool = True
) -> Value:
val = _Visitor(ctx).visit(ast_node)
if val is None:
if error_on_unrecognized:
ctx.show_error("Invalid type annotation", node=ast_node)
return AnyValue(AnySource.error)
return val
def _type_from_ast(node: ast.AST, ctx: Context, is_typeddict: bool = False) -> Value:
val = value_from_ast(node, ctx)
return _type_from_value(val, ctx, is_typeddict=is_typeddict)
def _type_from_runtime(val: Any, ctx: Context, is_typeddict: bool = False) -> Value:
if isinstance(val, str):
return _eval_forward_ref(val, ctx, is_typeddict=is_typeddict)
elif isinstance(val, tuple):
# This happens under some Python versions for types
# nested in tuples, e.g. on 3.6:
# > typing_inspect.get_args(Union[Set[int], List[str]])
# ((typing.Set, int), (typing.List, str))
if not val:
# from Tuple[()]
return KnownValue(())
origin = val[0]
if len(val) == 2:
args = (val[1],)
else:
args = val[1:]
return _value_of_origin_args(origin, args, val, ctx)
elif GenericAlias is not None and isinstance(val, GenericAlias):
origin = get_origin(val)
args = get_args(val)
if origin is tuple and not args:
return SequenceIncompleteValue(tuple, [])
return _value_of_origin_args(origin, args, val, ctx)
elif typing_inspect.is_literal_type(val):
args = typing_inspect.get_args(val)
        if len(args) == 1:
return KnownValue(args[0])
else:
return unite_values(*[KnownValue(arg) for arg in args])
elif typing_inspect.is_union_type(val):
args = typing_inspect.get_args(val)
return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
elif typing_inspect.is_tuple_type(val):
args = typing_inspect.get_args(val)
if not args:
return TypedValue(tuple)
elif len(args) == 2 and args[1] is Ellipsis:
return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
elif len(args) == 1 and args[0] == ():
return SequenceIncompleteValue(tuple, []) # empty tuple
else:
args_vals = [_type_from_runtime(arg, ctx) for arg in args]
return SequenceIncompleteValue(tuple, args_vals)
elif is_instance_of_typing_name(val, "_TypedDictMeta"):
required_keys = getattr(val, "__required_keys__", None)
# 3.8's typing.TypedDict doesn't have __required_keys__. With
# inheritance, this makes it apparently impossible to figure out which
# keys are required at runtime.
total = getattr(val, "__total__", True)
return TypedDictValue(
{
key: _get_typeddict_value(value, ctx, key, required_keys, total)
for key, value in val.__annotations__.items()
}
)
elif val is InitVar:
# On 3.6 and 3.7, InitVar[T] just returns InitVar at runtime, so we can't
# get the actual type out.
return AnyValue(AnySource.inference)
elif isinstance(val, InitVar):
# val.type exists only on 3.8+, but on earlier versions
# InitVar instances aren't being created
# static analysis: ignore[undefined_attribute]
return type_from_runtime(val.type)
elif is_instance_of_typing_name(val, "AnnotatedMeta"):
# Annotated in 3.6's typing_extensions
origin, metadata = val.__args__
return _make_annotated(
_type_from_runtime(origin, ctx), [KnownValue(v) for v in metadata], ctx
)
elif is_instance_of_typing_name(val, "_AnnotatedAlias"):
# Annotated in typing and newer typing_extensions
return _make_annotated(
_type_from_runtime(val.__origin__, ctx),
[KnownValue(v) for v in val.__metadata__],
ctx,
)
elif typing_inspect.is_generic_type(val):
origin = typing_inspect.get_origin(val)
args = typing_inspect.get_args(val)
if getattr(val, "_special", False):
args = [] # distinguish List from List[T] on 3.7 and 3.8
return _value_of_origin_args(origin, args, val, ctx, is_typeddict=is_typeddict)
elif typing_inspect.is_callable_type(val):
args = typing_inspect.get_args(val)
return _value_of_origin_args(Callable, args, val, ctx)
elif val is AsynqCallable:
return CallableValue(Signature.make([ELLIPSIS_PARAM], is_asynq=True))
elif isinstance(val, type):
return _maybe_typed_value(val)
elif val is None:
return KnownValue(None)
elif is_typing_name(val, "NoReturn") or is_typing_name(val, "Never"):
return NO_RETURN_VALUE
elif is_typing_name(val, "Self"):
return SelfTVV
elif val is typing.Any:
return AnyValue(AnySource.explicit)
elif hasattr(val, "__supertype__"):
if isinstance(val.__supertype__, type):
# NewType
return NewTypeValue(val)
elif typing_inspect.is_tuple_type(val.__supertype__):
# TODO figure out how to make NewTypes over tuples work
return AnyValue(AnySource.inference)
else:
ctx.show_error(f"Invalid NewType {val}")
return AnyValue(AnySource.error)
elif typing_inspect.is_typevar(val):
tv = cast(TypeVar, val)
return make_type_var_value(tv, ctx)
elif is_instance_of_typing_name(val, "ParamSpec"):
return TypeVarValue(val, is_paramspec=True)
elif is_instance_of_typing_name(val, "ParamSpecArgs"):
return ParamSpecArgsValue(val.__origin__)
elif is_instance_of_typing_name(val, "ParamSpecKwargs"):
return ParamSpecKwargsValue(val.__origin__)
elif is_typing_name(val, "Final") or is_typing_name(val, "ClassVar"):
return AnyValue(AnySource.incomplete_annotation)
elif typing_inspect.is_classvar(val) or typing_inspect.is_final_type(val):
if hasattr(val, "__type__"):
# 3.6
typ = val.__type__
else:
# 3.7+
typ = val.__args__[0]
return _type_from_runtime(typ, ctx)
elif is_instance_of_typing_name(val, "_ForwardRef") or is_instance_of_typing_name(
val, "ForwardRef"
):
# This has issues because the forward ref may be defined in a different file, in
# which case we don't know which names are valid in it.
with ctx.suppress_undefined_names():
try:
code = ast.parse(val.__forward_arg__)
except SyntaxError:
ctx.show_error(
f"Syntax error in forward reference: {val.__forward_arg__}"
)
return AnyValue(AnySource.error)
return _type_from_ast(code.body[0], ctx, is_typeddict=is_typeddict)
elif val is Ellipsis:
# valid in Callable[..., ]
return AnyValue(AnySource.explicit)
elif is_instance_of_typing_name(val, "_TypeAlias"):
# typing.Pattern and Match, which are not normal generic types for some reason
return GenericValue(val.impl_type, [_type_from_runtime(val.type_var, ctx)])
elif isinstance(val, TypeGuard):
return AnnotatedValue(
TypedValue(bool),
[TypeGuardExtension(_type_from_runtime(val.guarded_type, ctx))],
)
elif is_instance_of_typing_name(val, "_TypeGuard"):
# 3.6 only
return AnnotatedValue(
TypedValue(bool),
[TypeGuardExtension(_type_from_runtime(val.__type__, ctx))],
)
elif isinstance(val, AsynqCallable):
params = _callable_args_from_runtime(val.args, "AsynqCallable", ctx)
sig = Signature.make(
params, _type_from_runtime(val.return_type, ctx), is_asynq=True
)
return CallableValue(sig)
elif isinstance(val, ExternalType):
try:
typ = qcore.helpers.object_from_string(val.type_path)
except Exception:
ctx.show_error(f"Cannot resolve type {val.type_path!r}")
return AnyValue(AnySource.error)
return _type_from_runtime(typ, ctx)
# Python 3.6 only (on later versions Required/NotRequired match
# is_generic_type).
elif is_instance_of_typing_name(val, "_MaybeRequired"):
required = is_instance_of_typing_name(val, "_Required")
if is_typeddict:
return Pep655Value(required, _type_from_runtime(val.__type__, ctx))
else:
cls = "Required" if required else "NotRequired"
ctx.show_error(f"{cls}[] used in unsupported context")
return AnyValue(AnySource.error)
elif is_typing_name(val, "TypeAlias"):
return AnyValue(AnySource.incomplete_annotation)
elif is_typing_name(val, "TypedDict"):
return KnownValue(TypedDict)
else:
origin = get_origin(val)
if isinstance(origin, type):
return _maybe_typed_value(origin)
elif val is NamedTuple:
return TypedValue(tuple)
ctx.show_error(f"Invalid type annotation {val}")
return AnyValue(AnySource.error)
def make_type_var_value(tv: TypeVar, ctx: Context) -> TypeVarValue:
if tv.__bound__ is not None:
bound = _type_from_runtime(tv.__bound__, ctx)
else:
bound = None
if tv.__constraints__:
constraints = tuple(
_type_from_runtime(constraint, ctx) for constraint in tv.__constraints__
)
else:
constraints = ()
return TypeVarValue(tv, bound=bound, constraints=constraints)
def _callable_args_from_runtime(
arg_types: Any, label: str, ctx: Context
) -> Sequence[SigParameter]:
if arg_types is Ellipsis or arg_types == [Ellipsis]:
return [ELLIPSIS_PARAM]
elif type(arg_types) in (tuple, list):
if len(arg_types) == 1:
(arg,) = arg_types
if arg is Ellipsis:
return [ELLIPSIS_PARAM]
elif is_typing_name(getattr(arg, "__origin__", None), "Concatenate"):
return _args_from_concatenate(arg, ctx)
elif is_instance_of_typing_name(arg, "ParamSpec"):
param_spec = TypeVarValue(arg, is_paramspec=True)
param = SigParameter(
"__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
)
return [param]
types = [_type_from_runtime(arg, ctx) for arg in arg_types]
params = [
SigParameter(
f"__arg{i}",
kind=ParameterKind.PARAM_SPEC
if isinstance(typ, TypeVarValue) and typ.is_paramspec
else ParameterKind.POSITIONAL_ONLY,
annotation=typ,
)
for i, typ in enumerate(types)
]
return params
elif is_instance_of_typing_name(arg_types, "ParamSpec"):
param_spec = TypeVarValue(arg_types, is_paramspec=True)
param = SigParameter(
"__P", kind=ParameterKind.PARAM_SPEC, annotation=param_spec
)
return [param]
elif is_typing_name(getattr(arg_types, "__origin__", None), "Concatenate"):
return _args_from_concatenate(arg_types, ctx)
else:
ctx.show_error(f"Invalid arguments to {label}: {arg_types!r}")
return [ELLIPSIS_PARAM]
def _args_from_concatenate(concatenate: Any, ctx: Context) -> Sequence[SigParameter]:
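    # Concatenate[X, Y, P] maps to positional-only parameters for the leading
    # types plus a PARAM_SPEC parameter for the final argument.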
types = [_type_from_runtime(arg, ctx) for arg in concatenate.__args__]
params = [
SigParameter(
f"__arg{i}",
kind=ParameterKind.PARAM_SPEC
if i == len(types) - 1
else ParameterKind.POSITIONAL_ONLY,
annotation=annotation,
)
for i, annotation in enumerate(types)
]
return params
def _get_typeddict_value(
value: Value,
ctx: Context,
key: str,
required_keys: Optional[Container[str]],
total: bool,
) -> Tuple[bool, Value]:
val = _type_from_runtime(value, ctx, is_typeddict=True)
if isinstance(val, Pep655Value):
return (val.required, val.value)
if required_keys is None:
required = total
else:
required = key in required_keys
return required, val
def _eval_forward_ref(val: str, ctx: Context, is_typeddict: bool = False) -> Value:
try:
tree = ast.parse(val, mode="eval")
except SyntaxError:
ctx.show_error(f"Syntax error in type annotation: {val}")
return AnyValue(AnySource.error)
else:
return _type_from_ast(tree.body, ctx, is_typeddict=is_typeddict)
def _type_from_value(value: Value, ctx: Context, is_typeddict: bool = False) -> Value:
if isinstance(value, KnownValue):
return _type_from_runtime(value.val, ctx, is_typeddict=is_typeddict)
elif isinstance(value, TypeVarValue):
return value
elif isinstance(value, MultiValuedValue):
return unite_values(*[_type_from_value(val, ctx) for val in value.vals])
elif isinstance(value, AnnotatedValue):
return _type_from_value(value.value, ctx)
elif isinstance(value, _SubscriptedValue):
return _type_from_subscripted_value(
value.root, value.members, ctx, is_typeddict=is_typeddict
)
elif isinstance(value, AnyValue):
return value
elif isinstance(value, SubclassValue) and value.exactly:
return value.typ
elif isinstance(value, TypedValue) and isinstance(value.typ, str):
# Synthetic type
return value
else:
ctx.show_error(f"Unrecognized annotation {value}")
return AnyValue(AnySource.error)
def _type_from_subscripted_value(
root: Optional[Value],
members: Sequence[Value],
ctx: Context,
is_typeddict: bool = False,
) -> Value:
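    # Resolve a subscripted annotation root[members] (e.g. List[int] or
    # Union[str, None]) where the root and the members are already Values.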
if isinstance(root, GenericValue):
if len(root.args) == len(members):
return GenericValue(
root.typ, [_type_from_value(member, ctx) for member in members]
)
if isinstance(root, _SubscriptedValue):
root_type = _type_from_value(root, ctx)
return _type_from_subscripted_value(root_type, members, ctx)
elif isinstance(root, MultiValuedValue):
return unite_values(
*[
_type_from_subscripted_value(subval, members, ctx, is_typeddict)
for subval in root.vals
]
)
if (
isinstance(root, SubclassValue)
and root.exactly
and isinstance(root.typ, TypedValue)
):
return GenericValue(
root.typ.typ, [_type_from_value(elt, ctx) for elt in members]
)
if isinstance(root, TypedValue) and isinstance(root.typ, str):
return GenericValue(root.typ, [_type_from_value(elt, ctx) for elt in members])
if not isinstance(root, KnownValue):
if root != AnyValue(AnySource.error):
ctx.show_error(f"Cannot resolve subscripted annotation: {root}")
return AnyValue(AnySource.error)
root = root.val
if root is typing.Union:
return unite_values(*[_type_from_value(elt, ctx) for elt in members])
elif is_typing_name(root, "Literal"):
# Note that in Python 3.8, the way typing's internal cache works means that
# Literal[1] and Literal[True] are cached to the same value, so if you use
# both, you'll get whichever one was used first in later calls. There's nothing
# we can do about that.
if all(isinstance(elt, KnownValue) for elt in members):
return unite_values(*members)
else:
ctx.show_error(f"Arguments to Literal[] must be literals, not {members}")
return AnyValue(AnySource.error)
elif root is typing.Tuple or root is tuple:
if len(members) == 2 and members[1] == KnownValue(Ellipsis):
return GenericValue(tuple, [_type_from_value(members[0], ctx)])
elif len(members) == 1 and members[0] == KnownValue(()):
return SequenceIncompleteValue(tuple, [])
else:
return SequenceIncompleteValue(
tuple, [_type_from_value(arg, ctx) for arg in members]
)
elif root is typing.Optional:
if len(members) != 1:
ctx.show_error("Optional[] takes only one argument")
return AnyValue(AnySource.error)
return unite_values(KnownValue(None), _type_from_value(members[0], ctx))
elif root is typing.Type or root is type:
if len(members) != 1:
ctx.show_error("Type[] takes only one argument")
return AnyValue(AnySource.error)
argument = _type_from_value(members[0], ctx)
return SubclassValue.make(argument)
elif is_typing_name(root, "Annotated"):
origin, *metadata = members
return _make_annotated(_type_from_value(origin, ctx), metadata, ctx)
elif is_typing_name(root, "TypeGuard"):
if len(members) != 1:
ctx.show_error("TypeGuard requires a single argument")
return AnyValue(AnySource.error)
return AnnotatedValue(
TypedValue(bool), [TypeGuardExtension(_type_from_value(members[0], ctx))]
)
elif is_typing_name(root, "Required"):
if not is_typeddict:
ctx.show_error("Required[] used in unsupported context")
return AnyValue(AnySource.error)
if len(members) != 1:
ctx.show_error("Required[] requires a single argument")
return AnyValue(AnySource.error)
return Pep655Value(True, _type_from_value(members[0], ctx))
elif is_typing_name(root, "NotRequired"):
if not is_typeddict:
ctx.show_error("NotRequired[] used in unsupported context")
return AnyValue(AnySource.error)
if len(members) != 1:
ctx.show_error("NotRequired[] requires a single argument")
return AnyValue(AnySource.error)
return Pep655Value(False, _type_from_value(members[0], ctx))
elif root is Callable or root is typing.Callable:
if len(members) == 2:
args, return_value = members
return _make_callable_from_value(args, return_value, ctx)
ctx.show_error("Callable requires exactly two arguments")
return AnyValue(AnySource.error)
elif root is AsynqCallable:
if len(members) == 2:
args, return_value = members
return _make_callable_from_value(args, return_value, ctx, is_asynq=True)
ctx.show_error("AsynqCallable requires exactly two arguments")
return AnyValue(AnySource.error)
elif typing_inspect.is_generic_type(root):
origin = typing_inspect.get_origin(root)
if origin is None:
# On Python 3.9 at least, get_origin() of a class that inherits
# from Generic[T] is None.
origin = root
origin = _maybe_get_extra(origin)
return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
elif isinstance(root, type):
return GenericValue(root, [_type_from_value(elt, ctx) for elt in members])
else:
# In Python 3.9, generics are implemented differently and typing.get_origin
# can help.
origin = get_origin(root)
if isinstance(origin, type):
return GenericValue(origin, [_type_from_value(elt, ctx) for elt in members])
ctx.show_error(f"Unrecognized subscripted annotation: {root}")
return AnyValue(AnySource.error)
def _maybe_get_extra(origin: type) -> Union[type, str]:
# ContextManager is defined oddly and we lose the Protocol if we don't use
# synthetic types.
if any(origin is cls for cls in CONTEXT_MANAGER_TYPES):
return "typing.ContextManager"
elif any(origin is cls for cls in ASYNC_CONTEXT_MANAGER_TYPES):
return "typing.AsyncContextManager"
else:
# turn typing.List into list in some Python versions
# compare https://github.com/ilevkivskyi/typing_inspect/issues/36
extra_origin = getattr(origin, "__extra__", None)
if extra_origin is not None:
return extra_origin
return origin
class _DefaultContext(Context):
def __init__(
self,
visitor: "NameCheckVisitor",
node: Optional[ast.AST],
globals: Optional[Mapping[str, object]] = None,
use_name_node_for_error: bool = False,
) -> None:
super().__init__()
self.visitor = visitor
self.node = node
self.globals = globals
self.use_name_node_for_error = use_name_node_for_error
def show_error(
self,
message: str,
error_code: ErrorCode = ErrorCode.invalid_annotation,
node: Optional[ast.AST] = None,
) -> None:
if node is None:
node = self.node
if self.visitor is not None and node is not None:
self.visitor.show_error(node, message, error_code)
def get_name(self, node: ast.Name) -> Value:
if self.visitor is not None:
val, _ = self.visitor.resolve_name(
node,
error_node=node if self.use_name_node_for_error else self.node,
suppress_errors=self.should_suppress_undefined_names,
)
return val
elif self.globals is not None:
if node.id in self.globals:
return KnownValue(self.globals[node.id])
elif hasattr(builtins, node.id):
return KnownValue(getattr(builtins, node.id))
if self.should_suppress_undefined_names:
return AnyValue(AnySource.inference)
self.show_error(
f"Undefined name {node.id!r} used in annotation",
ErrorCode.undefined_name,
node=node,
)
return AnyValue(AnySource.error)
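# Intermediate representation of a subscripted annotation (base[args]) produced
# by _Visitor and resolved later by _type_from_value.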
@dataclass(frozen=True)
class _SubscriptedValue(Value):
root: Optional[Value]
members: Tuple[Value, ...]
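# Wrapper for PEP 655 Required[]/NotRequired[] markers on TypedDict keys.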
@dataclass
class Pep655Value(Value):
required: bool
value: Value
class _Visitor(ast.NodeVisitor):
def __init__(self, ctx: Context) -> None:
self.ctx = ctx
def generic_visit(self, node: ast.AST) -> None:
raise NotImplementedError(f"no visitor implemented for {node!r}")
def visit_Name(self, node: ast.Name) -> Value:
return self.ctx.get_name(node)
def visit_Subscript(self, node: ast.Subscript) -> Value:
value = self.visit(node.value)
index = self.visit(node.slice)
if isinstance(index, SequenceIncompleteValue):
members = index.members
else:
members = (index,)
return _SubscriptedValue(value, members)
def visit_Attribute(self, node: ast.Attribute) -> Optional[Value]:
root_value = self.visit(node.value)
if isinstance(root_value, KnownValue):
try:
return KnownValue(getattr(root_value.val, node.attr))
except AttributeError:
self.ctx.show_error(
f"{root_value.val!r} has no attribute {node.attr!r}", node=node
)
return AnyValue(AnySource.error)
elif not isinstance(root_value, AnyValue):
self.ctx.show_error(f"Cannot resolve annotation {root_value}", node=node)
return AnyValue(AnySource.error)
def visit_Tuple(self, node: ast.Tuple) -> Value:
elts = [self.visit(elt) for elt in node.elts]
return SequenceIncompleteValue(tuple, elts)
def visit_List(self, node: ast.List) -> Value:
elts = [self.visit(elt) for elt in node.elts]
return SequenceIncompleteValue(list, elts)
def visit_Index(self, node: ast.Index) -> Value:
# class is unused in 3.9
return self.visit(node.value) # static analysis: ignore[undefined_attribute]
def visit_Ellipsis(self, node: ast.Ellipsis) -> Value:
return KnownValue(Ellipsis)
def visit_Constant(self, node: ast.Constant) -> Value:
return KnownValue(node.value)
def visit_NameConstant(self, node: ast.NameConstant) -> Value:
return KnownValue(node.value)
def visit_Num(self, node: ast.Num) -> Value:
return KnownValue(node.n)
def visit_Str(self, node: ast.Str) -> Value:
return KnownValue(node.s)
def visit_Bytes(self, node: ast.Bytes) -> Value:
return KnownValue(node.s)
def visit_Expr(self, node: ast.Expr) -> Value:
return self.visit(node.value)
def visit_BinOp(self, node: ast.BinOp) -> Optional[Value]:
if isinstance(node.op, ast.BitOr):
return _SubscriptedValue(
KnownValue(Union), (self.visit(node.left), self.visit(node.right))
)
else:
return None
def visit_UnaryOp(self, node: ast.UnaryOp) -> Optional[Value]:
# Only int and float negation on literals are supported.
if isinstance(node.op, ast.USub):
operand = self.visit(node.operand)
if isinstance(operand, KnownValue) and isinstance(
operand.val, (int, float)
):
return KnownValue(-operand.val)
return None
def visit_Call(self, node: ast.Call) -> Optional[Value]:
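        # Annotations may contain calls such as NewType("X", int), TypeVar("T"),
        # or ParamSpec("P"); evaluate the ones whose arguments are literals.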
func = self.visit(node.func)
if not isinstance(func, KnownValue):
return None
if func.val == NewType:
arg_values = [self.visit(arg) for arg in node.args]
kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
args = []
kwargs = {}
for arg_value in arg_values:
if isinstance(arg_value, KnownValue):
args.append(arg_value.val)
else:
return None
for name, kwarg_value in kwarg_values:
if name is None:
if isinstance(kwarg_value, KnownValue) and isinstance(
kwarg_value.val, dict
):
kwargs.update(kwarg_value.val)
else:
return None
else:
if isinstance(kwarg_value, KnownValue):
kwargs[name] = kwarg_value.val
else:
return None
return KnownValue(func.val(*args, **kwargs))
elif func.val == TypeVar:
arg_values = [self.visit(arg) for arg in node.args]
kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
if not arg_values:
self.ctx.show_error(
"TypeVar() requires at least one argument", node=node
)
return None
name_val = arg_values[0]
if not isinstance(name_val, KnownValue):
self.ctx.show_error("TypeVar name must be a literal", node=node.args[0])
return None
constraints = []
for arg_value in arg_values[1:]:
constraints.append(_type_from_value(arg_value, self.ctx))
bound = None
for name, kwarg_value in kwarg_values:
if name in ("covariant", "contravariant"):
continue
elif name == "bound":
bound = _type_from_value(kwarg_value, self.ctx)
else:
self.ctx.show_error(f"Unrecognized TypeVar kwarg {name}", node=node)
return None
tv = TypeVar(name_val.val)
return TypeVarValue(tv, bound, tuple(constraints))
elif is_typing_name(func.val, "ParamSpec"):
arg_values = [self.visit(arg) for arg in node.args]
kwarg_values = [(kw.arg, self.visit(kw.value)) for kw in node.keywords]
if not arg_values:
self.ctx.show_error(
"ParamSpec() requires at least one argument", node=node
)
return None
name_val = arg_values[0]
if not isinstance(name_val, KnownValue):
self.ctx.show_error(
"ParamSpec name must be a literal", node=node.args[0]
)
return None
for name, _ in kwarg_values:
self.ctx.show_error(f"Unrecognized ParamSpec kwarg {name}", node=node)
return None
tv = ParamSpec(name_val.val)
return TypeVarValue(tv, is_paramspec=True)
elif isinstance(func.val, type):
if func.val is object:
return AnyValue(AnySource.inference)
return TypedValue(func.val)
else:
return None
def _value_of_origin_args(
origin: object,
args: Sequence[object],
val: object,
ctx: Context,
is_typeddict: bool = False,
) -> Value:
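    # Build a Value from a generic alias already split into its origin and args,
    # e.g. Tuple[int, str] -> (tuple, (int, str)).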
if origin is typing.Type or origin is type:
if not args:
return TypedValue(type)
return SubclassValue.make(_type_from_runtime(args[0], ctx))
elif origin is typing.Tuple or origin is tuple:
if not args:
return TypedValue(tuple)
elif len(args) == 2 and args[1] is Ellipsis:
return GenericValue(tuple, [_type_from_runtime(args[0], ctx)])
elif len(args) == 1 and args[0] == ():
return SequenceIncompleteValue(tuple, [])
else:
args_vals = [_type_from_runtime(arg, ctx) for arg in args]
return SequenceIncompleteValue(tuple, args_vals)
elif origin is typing.Union:
return unite_values(*[_type_from_runtime(arg, ctx) for arg in args])
elif origin is Callable or origin is typing.Callable:
if len(args) == 0:
return TypedValue(Callable)
*arg_types, return_type = args
if len(arg_types) == 1 and isinstance(arg_types[0], list):
arg_types = arg_types[0]
params = _callable_args_from_runtime(arg_types, "Callable", ctx)
sig = Signature.make(params, _type_from_runtime(return_type, ctx))
return CallableValue(sig)
elif is_typing_name(origin, "Annotated"):
origin, metadata = args
# This should never happen
if not isinstance(metadata, Iterable):
ctx.show_error("Unexpected format in Annotated")
return AnyValue(AnySource.error)
return _make_annotated(
_type_from_runtime(origin, ctx),
[KnownValue(data) for data in metadata],
ctx,
)
elif isinstance(origin, type):
origin = _maybe_get_extra(origin)
if args:
args_vals = [_type_from_runtime(val, ctx) for val in args]
return GenericValue(origin, args_vals)
else:
return _maybe_typed_value(origin)
elif is_typing_name(origin, "TypeGuard"):
if len(args) != 1:
ctx.show_error("TypeGuard requires a single argument")
return AnyValue(AnySource.error)
return AnnotatedValue(
TypedValue(bool), [TypeGuardExtension(_type_from_runtime(args[0], ctx))]
)
elif is_typing_name(origin, "Final"):
if len(args) != 1:
ctx.show_error("Final requires a single argument")
return AnyValue(AnySource.error)
# TODO(#160): properly support Final
return _type_from_runtime(args[0], ctx)
elif is_typing_name(origin, "ClassVar"):
if len(args) != 1:
ctx.show_error("ClassVar requires a single argument")
return AnyValue(AnySource.error)
return _type_from_runtime(args[0], ctx)
elif is_typing_name(origin, "Required"):
if not is_typeddict:
ctx.show_error("Required[] used in unsupported context")
return AnyValue(AnySource.error)
if len(args) != 1:
ctx.show_error("Required[] requires a single argument")
return AnyValue(AnySource.error)
return Pep655Value(True, _type_from_runtime(args[0], ctx))
elif is_typing_name(origin, "NotRequired"):
if not is_typeddict:
ctx.show_error("NotRequired[] used in unsupported context")
return AnyValue(AnySource.error)
if len(args) != 1:
ctx.show_error("NotRequired[] requires a single argument")
return AnyValue(AnySource.error)
return Pep655Value(False, _type_from_runtime(args[0], ctx))
elif origin is None and isinstance(val, type):
# This happens for SupportsInt in 3.7.
return _maybe_typed_value(val)
else:
ctx.show_error(
f"Unrecognized annotation {origin}[{', '.join(map(repr, args))}]"
)
return AnyValue(AnySource.error)
def _maybe_typed_value(val: Union[type, str]) -> Value:
if val is type(None):
return KnownValue(None)
elif val is Hashable:
return _HashableValue(val)
return TypedValue(val)
def _make_callable_from_value(
args: Value, return_value: Value, ctx: Context, is_asynq: bool = False
) -> Value:
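    # `args` is the first argument to Callable[...]: Ellipsis, a list of argument
    # types, a ParamSpec, or a Concatenate[...] form.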
return_annotation = _type_from_value(return_value, ctx)
if args == KnownValue(Ellipsis):
return CallableValue(
Signature.make(
[ELLIPSIS_PARAM], return_annotation=return_annotation, is_asynq=is_asynq
)
)
elif isinstance(args, SequenceIncompleteValue):
params = [
SigParameter(
f"__arg{i}",
kind=ParameterKind.POSITIONAL_ONLY,
annotation=_type_from_value(arg, ctx),
)
for i, arg in enumerate(args.members)
]
sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
return CallableValue(sig)
elif isinstance(args, KnownValue) and is_instance_of_typing_name(
args.val, "ParamSpec"
):
annotation = TypeVarValue(args.val, is_paramspec=True)
params = [
SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=annotation)
]
sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
return CallableValue(sig)
elif isinstance(args, TypeVarValue) and args.is_paramspec:
params = [SigParameter("__P", kind=ParameterKind.PARAM_SPEC, annotation=args)]
sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
return CallableValue(sig)
elif (
isinstance(args, _SubscriptedValue)
and isinstance(args.root, KnownValue)
and is_typing_name(args.root.val, "Concatenate")
):
annotations = [_type_from_value(arg, ctx) for arg in args.members]
params = [
SigParameter(
f"__arg{i}",
kind=ParameterKind.PARAM_SPEC
if i == len(annotations) - 1
else ParameterKind.POSITIONAL_ONLY,
annotation=annotation,
)
for i, annotation in enumerate(annotations)
]
sig = Signature.make(params, return_annotation, is_asynq=is_asynq)
return CallableValue(sig)
else:
ctx.show_error(f"Unrecognized Callable type argument {args}")
return AnyValue(AnySource.error)
def _make_annotated(origin: Value, metadata: Sequence[Value], ctx: Context) -> Value:
metadata = [_value_from_metadata(entry, ctx) for entry in metadata]
return annotate_value(origin, metadata)
def _value_from_metadata(entry: Value, ctx: Context) -> Union[Value, Extension]:
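    # Convert Annotated[...] metadata entries into pyanalyze Extensions where
    # recognized; anything else is passed through unchanged.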
if isinstance(entry, KnownValue):
if isinstance(entry.val, ParameterTypeGuard):
return ParameterTypeGuardExtension(
entry.val.varname, _type_from_runtime(entry.val.guarded_type, ctx)
)
elif isinstance(entry.val, NoReturnGuard):
return NoReturnGuardExtension(
entry.val.varname, _type_from_runtime(entry.val.guarded_type, ctx)
)
elif isinstance(entry.val, HasAttrGuard):
return HasAttrGuardExtension(
entry.val.varname,
_type_from_runtime(entry.val.attribute_name, ctx),
_type_from_runtime(entry.val.attribute_type, ctx),
)
elif isinstance(entry.val, CustomCheck):
return CustomCheckExtension(entry.val)
return entry
| 38.000818 | 88 | 0.636988 | [
"Apache-2.0"
] | nbdaaron/pyanalyze | pyanalyze/annotations.py | 46,475 | Python |
"""
Unittests for gpsdio load
"""
from click.testing import CliRunner
import gpsdio
import gpsdio.cli.main
def test_load(types_json_path, types_msg_gz_path, tmpdir, compare_msg):
pth = str(tmpdir.mkdir('test').join('test_load'))
with open(types_json_path) as f:
stdin_input = f.read()
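    # Pipe the JSON fixture through the `load` command, writing gzipped
    # newline-JSON, then check the output against the reference messages.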
result = CliRunner().invoke(gpsdio.cli.main.main_group, [
'load',
'--o-drv', 'NewlineJSON',
'--o-cmp', 'GZIP',
pth
], input=stdin_input)
    assert result.exit_code == 0
with gpsdio.open(types_msg_gz_path) as expected, \
gpsdio.open(pth, driver='NewlineJSON', compression='GZIP') as actual:
for e, a in zip(expected, actual):
assert compare_msg(e, a)
| 23.0625 | 81 | 0.639566 | [
"Apache-2.0"
] | GlobalFishingWatch/gpsdio | tests/test_cli_load.py | 738 | Python |
# -*- coding: utf-8 -*-
'''
Pillar data from vCenter or an ESXi host
.. versionadded:: 2017.7.0
:depends: - pyVmomi
This external pillar can pull attributes from objects in vCenter or an ESXi host and provide those attributes
as pillar data to minions. This allows for pillar-based targeting of minions on ESXi host, datastore, VM
configuration, etc. This setup requires only that the salt master have access to the vCenter server/ESXi hosts.
The pillar will return an empty dict if neither the 'os' nor the 'virtual' grain is 'VMWare', 'ESXi', or 'VMWare ESXi'.
Defaults
========
- The external pillar will search for Virtual Machines with the VM name matching the minion id.
- Data will be returned into the 'vmware' pillar key.
- The external pillar has a default set of properties to return for both VirtualMachine and HostSystem types.
Configuring the VMWare pillar
=============================
The required minimal configuration in the salt master ext_pillar setup:
.. code-block:: yaml
ext_pillar:
- vmware:
host: <vcenter/esx host>
username: <user to connect with>
password: <password>
Optionally, the following keyword arguments can be passed to the ext_pillar for customized configuration:
pillar_key
Optionally set the pillar key to return the data into. Default is ``vmware``.
protocol
Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
using the default protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
using the default port. Default port is ``443``.
property_name
Property name to match the minion id against. Defaults to ``name``.
property_types
Optionally specify a list of pyVmomi vim types to search for the minion id in 'property_name'.
Default is ``['VirtualMachine']``.
For example, to search both vim.VirtualMachine and vim.HostSystem object types:
.. code-block:: yaml
ext_pillar:
- vmware:
host: myesx
username: root
password: complex_password
property_types:
- VirtualMachine
- HostSystem
Additionally, the items in the property_types list can be dicts, with the value of each dict being a list
specifying the attributes to return for that vim object type.
The pillar will attempt to recurse each attribute and return all of its child attributes.
To explicitly specify deeper attributes without recursing an attribute, convert the list
item to a dict with the value of the dict being the child attributes to return. Follow this pattern
to return attributes as deep within the object as necessary.
.. note::
Be careful when specifying custom attributes! Many attributes have objects as attributes which
have the parent object as an attribute and which will cause the pillar to fail due to the attempt
to convert all sub-objects recursively (i.e. infinite attribute loops). Specifying only the
sub-attributes you would like returned will keep the infinite recursion from occurring.
A maximum recursion exception will occur in this case and the pillar will not return as desired.
.. code-block:: yaml
ext_pillar:
- vmware:
host: myvcenter
username: my_user
password: my_pass
replace_default_attributes: True
property_types:
- VirtualMachine:
- config:
- bootOptions:
- bootDelay
- bootRetryDelay
- HostSystem:
- datastore:
- name
The above ext_pillar example would return a pillar like the following for a VirtualMachine object whose
name matches the minion id:
.. code-block:: yaml
vmware:
config:
bootOptions:
bootDelay: 1000
bootRetryDelay: 1000
If you were to retrieve these virtual machine attributes via pyVmomi directly, this would be the same as
.. code-block:: python
vmObject.config.bootOptions.bootDelay
    vmObject.config.bootOptions.bootRetryDelay
The above ext_pillar example would return a pillar like the following for a HostSystem object whose name
matches the minion id:
.. code-block:: yaml
vmware:
datastore:
- name: Datastore1
- name: Datastore2
The 'datastore' property of a HostSystem object is a list of datastores, thus a list is returned.
replace_default_attributes
If custom attributes are specified by the property_types parameter, replace_default_attributes determines
    whether those are added to the default attributes (False) or replace the default attributes completely (True).
The default setting is 'False'.
.. note::
vCenter "Custom Attributes" (i.e. Annotations) will always be returned if it exists on the object as
part of the pillar regardless of this setting.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
import salt.utils.dictupdate as dictupdate
import salt.utils.vmware
# Import 3rd-party libs
from salt.ext import six
try:
# pylint: disable=no-name-in-module
from pyVmomi import vim
from pyVim.connect import Disconnect
HAS_LIBS = True
# pylint: enable=no-name-in-module
except ImportError:
HAS_LIBS = False
__virtualname__ = 'vmware'
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
'''
    Only return if pyVmomi is installed
'''
return __virtualname__ if HAS_LIBS else False
def ext_pillar(minion_id,
pillar, # pylint: disable=W0613
**kwargs):
'''
Check vmware/vcenter for all data
'''
vmware_pillar = {}
host = None
username = None
password = None
property_types = []
property_name = 'name'
protocol = None
port = None
pillar_key = 'vmware'
replace_default_attributes = False
type_specific_pillar_attributes = {
'VirtualMachine': [
{
'config':
[
'version',
'guestId',
'files',
'tools',
'flags',
'memoryHotAddEnabled',
'cpuHotAddEnabled',
'cpuHotRemoveEnabled',
'datastoreUrl',
'swapPlacement',
'bootOptions',
'scheduledHardwareUpgradeInfo',
'memoryAllocation',
'cpuAllocation',
]
},
{
'summary':
[
{
'runtime':
[
{
'host':
[
'name',
{'parent': 'name'},
]
},
'bootTime',
]
},
{
'guest':
[
'toolsStatus',
'toolsVersionStatus',
'toolsVersionStatus2',
'toolsRunningStatus',
]
},
{
'config':
[
'cpuReservation',
'memoryReservation',
]
},
{
'storage':
[
'committed',
'uncommitted',
'unshared',
]
},
{'dasVmProtection': ['dasProtected']},
]
},
{
'storage':
[
{
'perDatastoreUsage':
[
{
'datastore': 'name'
},
'committed',
'uncommitted',
'unshared',
]
}
]
},
],
'HostSystem': [
{
'datastore':
[
'name',
'overallStatus',
{
'summary':
[
'url',
'freeSpace',
'maxFileSize',
'maxVirtualDiskCapacity',
'maxPhysicalRDMFileSize',
'maxVirtualRDMFileSize',
{
'vmfs':
[
'capacity',
'blockSizeMb',
'maxBlocks',
'majorVersion',
'version',
'uuid',
{
'extent':
[
'diskName',
'partition',
]
},
'vmfsUpgradeable',
'ssd',
'local',
],
},
],
},
{'vm': 'name'}
]
},
{
'vm':
[
'name',
'overallStatus',
{
'summary':
[
{'runtime': 'powerState'},
]
},
]
},
]
}
pillar_attributes = [
{
'summary':
[
'overallStatus'
]
},
{
'network':
[
'name',
{'config': {'distributedVirtualSwitch': 'name'}},
]
},
{
'datastore':
[
'name',
]
},
{
'parent':
[
'name'
]
},
]
if 'pillar_key' in kwargs:
pillar_key = kwargs['pillar_key']
vmware_pillar[pillar_key] = {}
if 'host' not in kwargs:
log.error('VMWare external pillar configured but host is not specified in ext_pillar configuration.')
return vmware_pillar
else:
host = kwargs['host']
log.debug('vmware_pillar -- host = %s', host)
if 'username' not in kwargs:
log.error('VMWare external pillar requested but username is not specified in ext_pillar configuration.')
return vmware_pillar
else:
username = kwargs['username']
log.debug('vmware_pillar -- username = %s', username)
if 'password' not in kwargs:
log.error('VMWare external pillar requested but password is not specified in ext_pillar configuration.')
return vmware_pillar
else:
password = kwargs['password']
log.debug('vmware_pillar -- password = %s', password)
if 'replace_default_attributes' in kwargs:
replace_default_attributes = kwargs['replace_default_attributes']
if replace_default_attributes:
pillar_attributes = []
type_specific_pillar_attributes = {}
if 'property_types' in kwargs:
for prop_type in kwargs['property_types']:
if isinstance(prop_type, dict):
                # dict.keys() is not indexable on Python 3; take the single key.
                type_name = next(iter(prop_type))
                property_types.append(getattr(vim, type_name))
                if isinstance(prop_type[type_name], list):
                    pillar_attributes = pillar_attributes + prop_type[type_name]
else:
log.warning('A property_type dict was specified, but its value is not a list')
else:
property_types.append(getattr(vim, prop_type))
else:
property_types = [vim.VirtualMachine]
log.debug('vmware_pillar -- property_types = %s', property_types)
if 'property_name' in kwargs:
property_name = kwargs['property_name']
else:
property_name = 'name'
log.debug('vmware_pillar -- property_name = %s', property_name)
if 'protocol' in kwargs:
protocol = kwargs['protocol']
log.debug('vmware_pillar -- protocol = %s', protocol)
if 'port' in kwargs:
port = kwargs['port']
log.debug('vmware_pillar -- port = %s', port)
virtualgrain = None
osgrain = None
if 'virtual' in __grains__:
virtualgrain = __grains__['virtual'].lower()
if 'os' in __grains__:
osgrain = __grains__['os'].lower()
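    # Only VMware guests and ESXi hosts get pillar data; anything else returns {}.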
if virtualgrain == 'vmware' or osgrain == 'vmware esxi' or osgrain == 'esxi':
vmware_pillar[pillar_key] = {}
try:
_conn = salt.utils.vmware.get_service_instance(
host, username, password, protocol, port,
verify_ssl=kwargs.get("verify_ssl", True)
)
if _conn:
data = None
for prop_type in property_types:
data = salt.utils.vmware.get_mor_by_property(_conn,
prop_type,
minion_id,
property_name=property_name)
if data:
type_name = type(data).__name__.replace('vim.', '')
if hasattr(data, 'availableField'):
vmware_pillar[pillar_key]['annotations'] = {}
for availableField in data.availableField:
for customValue in data.customValue:
if availableField.key == customValue.key:
vmware_pillar[pillar_key]['annotations'][availableField.name] = customValue.value
type_specific_pillar_attribute = []
if type_name in type_specific_pillar_attributes:
type_specific_pillar_attribute = type_specific_pillar_attributes[type_name]
vmware_pillar[pillar_key] = dictupdate.update(vmware_pillar[pillar_key],
_crawl_attribute(data,
pillar_attributes +
type_specific_pillar_attribute))
break
# explicitly disconnect from vCenter when we are done, connections linger idle otherwise
Disconnect(_conn)
else:
log.error(
'Unable to obtain a connection with %s, please verify '
'your vmware ext_pillar configuration', host
)
except RuntimeError:
log.error(('A runtime error occurred in the vmware_pillar, '
'this is likely caused by an infinite recursion in '
'a requested attribute. Verify your requested attributes '
'and reconfigure the pillar.'))
return vmware_pillar
else:
return {}
def _recurse_config_to_dict(t_data):
'''
helper function to recurse through a vim object and attempt to return all child objects
'''
if not isinstance(t_data, type(None)):
if isinstance(t_data, list):
t_list = []
for i in t_data:
t_list.append(_recurse_config_to_dict(i))
return t_list
elif isinstance(t_data, dict):
t_dict = {}
for k, v in six.iteritems(t_data):
t_dict[k] = _recurse_config_to_dict(v)
return t_dict
else:
if hasattr(t_data, '__dict__'):
return _recurse_config_to_dict(t_data.__dict__)
else:
return _serializer(t_data)
def _crawl_attribute(this_data, this_attr):
'''
helper function to crawl an attribute specified for retrieval
'''
if isinstance(this_data, list):
t_list = []
for d in this_data:
t_list.append(_crawl_attribute(d, this_attr))
return t_list
else:
if isinstance(this_attr, dict):
t_dict = {}
for k in this_attr:
if hasattr(this_data, k):
t_dict[k] = _crawl_attribute(getattr(this_data, k, None), this_attr[k])
return t_dict
elif isinstance(this_attr, list):
this_dict = {}
for l in this_attr:
this_dict = dictupdate.update(this_dict, _crawl_attribute(this_data, l))
return this_dict
else:
return {this_attr: _recurse_config_to_dict(getattr(this_data, this_attr, None))}
def _serializer(obj):
'''
helper function to serialize some objects for prettier return
'''
import datetime
if isinstance(obj, datetime.datetime):
if obj.utcoffset() is not None:
obj = obj - obj.utcoffset()
return obj.__str__()
return obj
| 36.734463 | 121 | 0.463396 | [
"Apache-2.0"
] | aaannz/salt-1 | salt/pillar/vmware_pillar.py | 19,506 | Python |
from .step import Step
from .step_0 import Step_0
from .step_1 import Step_1
from .step_vr_1 import Step_VR_1
from .step_vr_2 import Step_VR_2
from .step_vr_3 import Step_VR_3
from .step_vr_4 import Step_VR_4
from .step_vr_5 import Step_VR_5
from .step_vr_6 import Step_VR_6
from .step_vr_7 import Step_VR_7
from .step_vr_8 import Step_VR_8
from .step_ab_1 import Step_AB_1
from .step_ab_3 import Step_AB_3
from .step_ab_5 import Step_AB_5
from .step_ab_6 import Step_AB_6
from .step_ab_7 import Step_AB_7
from .step_ab_8 import Step_AB_8
| 29.944444 | 32 | 0.842301 | [
"MIT"
] | 8by8-org/usvotes | app/services/steps/__init__.py | 539 | Python |
import datetime
x = datetime.datetime.now()
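# strftime("%j") formats the date as the zero-padded day of the year (001-366)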
print(x.strftime("%j"))
# Author: Bryan G
| 12.714286 | 27 | 0.685393 | [
"MIT"
] | Web-Dev-Collaborative/PYTHON_PRAC | mini-scripts/Python_Datetime_day_number_of_year.txt.py | 89 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes.tests import ESTestCase
class IndexStatsTestCase(ESTestCase):
def setUp(self):
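        # Build an index with a parent/child mapping and five documents
        # (three parents, two children) so the stats assertions have data.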
super(IndexStatsTestCase, self).setUp()
self.conn.indices.create_index(self.index_name)
self.conn.indices.put_mapping(self.document_type, {'properties': self.get_default_mapping()}, self.index_name)
self.conn.indices.put_mapping("test-type2", {"_parent": {"type": self.document_type}}, self.index_name)
self.conn.index({"name": "Joe Tester", "parsedtext": "Joe Testere nice guy", "uuid": "11111", "position": 1},
self.index_name, self.document_type, 1)
self.conn.index({"name": "data1", "value": "value1"}, self.index_name, "test-type2", 1, parent=1)
self.conn.index({"name": "Bill Baloney", "parsedtext": "Bill Testere nice guy", "uuid": "22222", "position": 2},
self.index_name, self.document_type, 2)
self.conn.index({"name": "data2", "value": "value2"}, self.index_name, "test-type2", 2, parent=2)
self.conn.index({"name": "Bill Clinton", "parsedtext": """Bill is not
nice guy""", "uuid": "33333", "position": 3}, self.index_name, self.document_type, 3)
self.conn.default_indices = self.index_name
self.conn.indices.refresh()
def test_all_indices(self):
result = self.conn.indices.stats()
self.assertEqual(5, result._all.total.docs.count)
def test_select_indices(self):
result = self.conn.indices.stats(self.index_name)
self.assertEqual(5, result._all.total.docs.count)
def test_optimize(self):
result = self.conn.indices.optimize(indices=self.index_name, wait_for_merge=True, max_num_segments=1)
self.assertEqual(result._shards["failed"], 0)
if __name__ == "__main__":
unittest.main()
| 46.55 | 120 | 0.664876 | [
"BSD-3-Clause"
] | aparo/pyes | tests/test_index_stats.py | 1,862 | Python |
from allennlp_models.generation.dataset_readers.copynet_seq2seq import CopyNetDatasetReader
from allennlp_models.generation.dataset_readers.seq2seq import Seq2SeqDatasetReader
from allennlp_models.generation.dataset_readers.cnn_dm import CNNDailyMailDatasetReader
| 66 | 91 | 0.920455 | [
"Apache-2.0"
] | DendiHust/allennlp-models | allennlp_models/generation/dataset_readers/__init__.py | 264 | Python |
import logging
logger = logging.getLogger(__name__)
from mykrobe.species_data import DataDir
def describe(parser, args):
args = parser.parse_args()
ddir = DataDir(args.panels_dir)
print(f"Gathering data from {ddir.root_dir}")
ddir.print_panels_summary()
def update_metadata(parser, args):
args = parser.parse_args()
ddir = DataDir(args.panels_dir)
ddir.update_manifest(filename=args.filename)
def update_species(parser, args):
args = parser.parse_args()
ddir = DataDir(args.panels_dir)
logger.info(f"Loaded panels metdata from {ddir.root_dir}")
if args.remove:
if args.species == "all":
raise NotImplementedError("Can only delete individual species")
ddir.remove_species(args.species)
logger.info(f"Removed species {args.species}")
else:
if args.species == "all":
ddir.update_all_species()
else:
ddir.update_species(args.species)
| 29.090909 | 75 | 0.683333 | [
"MIT"
] | Zhicheng-Liu/mykrobe | src/mykrobe/cmds/panels.py | 960 | Python |
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.tools import group_entries
log = logging.getLogger('best_quality')
entry_actions = {
'accept': Entry.accept,
'reject': Entry.reject,
}
class FilterBestQuality(object):
schema = {
'type': 'object',
'properties': {
'identified_by': {'type': 'string', 'default': 'auto'},
'on_best': {'type': 'string', 'enum': ['accept', 'reject', 'do_nothing'], 'default': 'do_nothing'},
'on_lower': {'type': 'string', 'enum': ['accept', 'reject', 'do_nothing'], 'default': 'reject'},
},
'additionalProperties': False
}
def on_task_filter(self, task, config):
if not config:
return
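        # 'auto' groups entries by their id field; otherwise the configured
        # identifier template is used as the grouping key.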
identified_by = '{{ id }}' if config['identified_by'] == 'auto' else config['identified_by']
action_on_best = entry_actions[config['on_best']] if config['on_best'] != 'do_nothing' else None
action_on_lower = entry_actions[config['on_lower']] if config['on_lower'] != 'do_nothing' else None
grouped_entries = group_entries(task.accepted + task.undecided, identified_by)
for identifier, entries in grouped_entries.items():
if not entries:
continue
            # Sort entries in order of quality and proper count, best first
entries.sort(key=lambda e: (e['quality'], e.get('proper_count', 0)), reverse=True)
# First entry will be the best quality
best = entries.pop(0)
if action_on_best:
action_on_best(best, 'has the best quality for identifier %s' % identifier)
if action_on_lower:
for entry in entries:
action_on_lower(entry, 'lower quality for identifier %s' % identifier)
@event('plugin.register')
def register_plugin():
plugin.register(FilterBestQuality, 'best_quality', api_ver=2)
| 33.933333 | 111 | 0.629175 | [
"MIT"
] | Daeymien/Flexget | flexget/plugins/filter/best_quality.py | 2,036 | Python |
import doctest
import pytest
from insights.parsers import lscpu, SkipException
from insights.tests import context_wrap
LSCPU_1 = """
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 2
On-line CPU(s) list: 0,1
Thread(s) per core: 2
Core(s) per socket: 1
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 60
Model name: Intel Core Processor (Haswell, no TSX)
Stepping: 1
CPU MHz: 2793.530
BogoMIPS: 5587.06
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 32K
L1i cache: 32K
L2 cache: 4096K
NUMA node0 CPU(s): 0,1
""".strip()
LSCPU_2 = """
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 2
On-line CPU(s) list: 0
Off-line CPU(s) list: 1
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 60
Model name: Intel Core Processor (Haswell, no TSX)
Stepping: 1
CPU MHz: 2793.530
BogoMIPS: 5587.06
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 32K
L1i cache: 32K
L2 cache: 4096K
NUMA node0 CPU(s): 0
""".strip()
BLANK = """
""".strip()
BAD_LSCPU = """
Architecture: x86_64
CPU op-mode(s) = 32-bit, 64-bit
""".strip()
def test_lscpu_output():
output = lscpu.LsCPU(context_wrap(LSCPU_1))
assert output.info['Architecture'] == 'x86_64'
assert len(output.info) == 22
assert output.info['CPUs'] == '2'
assert output.info['Threads per core'] == '2'
assert output.info['Cores per socket'] == '1'
assert output.info['Sockets'] == '1'
output = lscpu.LsCPU(context_wrap(LSCPU_2))
assert output.info['Architecture'] == 'x86_64'
assert output.info['CPUs'] == '2'
assert output.info['On-line CPUs list'] == '0'
assert output.info['Off-line CPUs list'] == '1'
assert output.info['Cores per socket'] == '1'
assert output.info['Sockets'] == '1'
def test_lscpu_blank_output():
with pytest.raises(SkipException) as e:
lscpu.LsCPU(context_wrap(BLANK))
assert "No data." in str(e)
def test_documentation():
failed_count, tests = doctest.testmod(
lscpu,
globs={'output': lscpu.LsCPU(context_wrap(LSCPU_1))}
)
assert failed_count == 0
| 27.206186 | 61 | 0.57105 | [
"Apache-2.0"
] | CodeHeeler/insights-core | insights/parsers/tests/test_lscpu.py | 2,639 | Python |
from mpp.models import SQLTestCase
from mpp.models import SQLConcurrencyTestCase
class HcatalogPrimitiveTypes(SQLConcurrencyTestCase):
"""
@product_version gpdb: [2.0-]
@db_name pxfautomation
@concurrency 1
@gpdiff True
"""
sql_dir = 'sql'
ans_dir = 'expected'
out_dir = 'output'
| 22.714286 | 53 | 0.694969 | [
"Apache-2.0"
] | ashuka24/pxf | automation/tincrepo/main/pxf/features/hcatalog/primitive_types/runTest.py | 318 | Python |
'''
Given one or more regular expressions on the command line, searches
the PATH for all files that match.
Copyright (C) 2002 GDS Software
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
See http://www.gnu.org/licenses/licenses.html for more details.
'''
import sys, getopt, re, os, string
__version__ = "$Id: where.py,v 1.4 2002/08/22 02:25:57 donp Exp $"
ignore_caseG = 0
matches = {} # They'll get stored in here by filename so that there are
# no duplicates.
def CheckDirectory(dir, regexps):
'''dir is a directory name, regexps is a list of compiled
regular expressions.
'''
global matches
currdir = os.getcwd()
try:
os.chdir(dir)
tmp = os.listdir(dir)
files = []
for f in tmp:
if os.path.isfile(f):
files.append(f)
for file in files:
for regexp in regexps:
if regexp.search(file) != None:
matches[dir + "/" + file] = ""
except:
sys.stderr.write("Warning: directory '%s' in PATH not found\n" % dir)
os.chdir(currdir)
def main():
global ignore_caseG
try:
optlist, regexps = getopt.getopt(sys.argv[1:], "i")
except getopt.error, str:
print str
sys.exit(1)
for opt in optlist:
if opt[0] == "-i":
ignore_caseG = 1
if len(regexps) == 0:
print "Usage: where [-i] regexp1 [regexp2...]"
print " regexps are python re style"
sys.exit(1)
# Get a list of the directories in the path
sep = ":"
key = "PATH"
if sys.platform == "win32":
sep = ";"
if key in os.environ.keys():
PATH = os.environ[key]
path = re.split(sep, os.environ[key])
else:
print "No PATH variable in environment"
sys.exit(1)
# Make a list of compiled regular expressions
regexp_list = []
for regex in regexps:
if ignore_caseG:
regexp_list.append(re.compile(regex, re.I))
else:
regexp_list.append(re.compile(regex))
# Now check each command line regexp in each directory
for dir in path:
CheckDirectory(dir, regexp_list)
list = []
for key in matches.keys():
list.append(key)
list.sort()
for file in list:
print string.replace(file, "\\", "/")
main()
| 30.408163 | 78 | 0.624832 | [
"CC0-1.0"
] | raychorn/chrome_gui | vyperlogix/gds/where.py | 2,980 | Python |
import numpy as np
import pandas as pd
from shapely import prepared
from geopandas import GeoDataFrame
from geopandas import _compat as compat
from geopandas.array import _check_crs, _crs_mismatch_warn
def sjoin(
left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
):
"""Spatial join of two GeoDataFrames.
Parameters
----------
left_df, right_df : GeoDataFrames
how : string, default 'inner'
The type of join:
* 'left': use keys from left_df; retain only left_df geometry column
* 'right': use keys from right_df; retain only right_df geometry column
* 'inner': use intersection of keys from both dfs; retain only
left_df geometry column
op : string, default 'intersects'
Binary predicate, one of {'intersects', 'contains', 'within'}.
See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
lsuffix : string, default 'left'
Suffix to apply to overlapping column names (left GeoDataFrame).
rsuffix : string, default 'right'
Suffix to apply to overlapping column names (right GeoDataFrame).
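    Examples
    --------
    A minimal usage sketch; ``points`` and ``polygons`` stand in for any two
    GeoDataFrames that share a CRS::
        joined = sjoin(points, polygons, how="inner", op="intersects")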
"""
if not isinstance(left_df, GeoDataFrame):
raise ValueError(
"'left_df' should be GeoDataFrame, got {}".format(type(left_df))
)
if not isinstance(right_df, GeoDataFrame):
raise ValueError(
"'right_df' should be GeoDataFrame, got {}".format(type(right_df))
)
allowed_hows = ["left", "right", "inner"]
if how not in allowed_hows:
raise ValueError(
'`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
)
allowed_ops = ["contains", "within", "intersects"]
if op not in allowed_ops:
raise ValueError(
'`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
)
if not _check_crs(left_df, right_df):
_crs_mismatch_warn(left_df, right_df, stacklevel=3)
index_left = "index_%s" % lsuffix
index_right = "index_%s" % rsuffix
# due to GH 352
if any(left_df.columns.isin([index_left, index_right])) or any(
right_df.columns.isin([index_left, index_right])
):
raise ValueError(
"'{0}' and '{1}' cannot be names in the frames being"
" joined".format(index_left, index_right)
)
# Attempt to re-use spatial indexes, otherwise generate the spatial index
# for the longer dataframe. If we are joining to an empty dataframe,
# don't bother generating the index.
if right_df._sindex_generated or (
not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
):
tree_idx = right_df.sindex if len(left_df) > 0 else None
tree_idx_right = True
else:
tree_idx = left_df.sindex if len(right_df) > 0 else None
tree_idx_right = False
# the rtree spatial index only allows limited (numeric) index types, but an
# index in geopandas may be any arbitrary dtype. so reset both indices now
# and store references to the original indices, to be reaffixed later.
# GH 352
left_df = left_df.copy(deep=True)
try:
left_index_name = left_df.index.name
left_df.index = left_df.index.rename(index_left)
except TypeError:
index_left = [
"index_%s" % lsuffix + str(pos)
for pos, ix in enumerate(left_df.index.names)
]
left_index_name = left_df.index.names
left_df.index = left_df.index.rename(index_left)
left_df = left_df.reset_index()
right_df = right_df.copy(deep=True)
try:
right_index_name = right_df.index.name
right_df.index = right_df.index.rename(index_right)
except TypeError:
index_right = [
"index_%s" % rsuffix + str(pos)
for pos, ix in enumerate(right_df.index.names)
]
right_index_name = right_df.index.names
right_df.index = right_df.index.rename(index_right)
right_df = right_df.reset_index()
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
tree_idx_right = not tree_idx_right
r_idx = np.empty((0, 0))
l_idx = np.empty((0, 0))
# get rtree spatial index. If tree_idx does not exist, it is due to either a
# failure to generate the index (e.g., if the column is empty), or the
# other dataframe is empty so it wasn't necessary to generate it.
if tree_idx_right and tree_idx:
idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
# indexes of overlapping boundaries
if idxmatch.shape[0] > 0:
r_idx = np.concatenate(idxmatch.values)
l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
elif not tree_idx_right and tree_idx:
# tree_idx_df == 'left'
idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
if idxmatch.shape[0] > 0:
# indexes of overlapping boundaries
l_idx = np.concatenate(idxmatch.values)
r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
if len(r_idx) > 0 and len(l_idx) > 0:
if compat.USE_PYGEOS:
import pygeos
predicate_d = {
"intersects": pygeos.intersects,
"contains": pygeos.contains,
"within": pygeos.contains,
}
check_predicates = predicate_d[op]
else:
# Vectorize predicate operations
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {
"intersects": find_intersects,
"contains": find_contains,
"within": find_contains,
}
check_predicates = np.vectorize(predicate_d[op])
if compat.USE_PYGEOS:
res = check_predicates(
left_df.geometry[l_idx].values.data,
right_df[right_df.geometry.name][r_idx].values.data,
)
else:
res = check_predicates(
left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
right_df[right_df.geometry.name][r_idx],
)
result = pd.DataFrame(np.column_stack([l_idx, r_idx, res]))
result.columns = ["_key_left", "_key_right", "match_bool"]
result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
"match_bool", axis=1
)
else:
# when output from the join has no overlapping geometries
result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(
columns={"_key_left": "_key_right", "_key_right": "_key_left"}
)
if how == "inner":
result = result.set_index("_key_left")
joined = (
left_df.merge(result, left_index=True, right_index=True)
.merge(
right_df.drop(right_df.geometry.name, axis=1),
left_on="_key_right",
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
elif how == "left":
result = result.set_index("_key_left")
joined = (
left_df.merge(result, left_index=True, right_index=True, how="left")
.merge(
right_df.drop(right_df.geometry.name, axis=1),
how="left",
left_on="_key_right",
right_index=True,
suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
)
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
if isinstance(index_left, list):
joined.index.names = left_index_name
else:
joined.index.name = left_index_name
else: # how == 'right':
joined = (
left_df.drop(left_df.geometry.name, axis=1)
.merge(
result.merge(
right_df, left_on="_key_right", right_index=True, how="right"
),
left_index=True,
right_on="_key_left",
how="right",
)
.set_index(index_right)
.drop(["_key_left", "_key_right"], axis=1)
)
if isinstance(index_right, list):
joined.index.names = right_index_name
else:
joined.index.name = right_index_name
return joined
| 36.140625 | 84 | 0.590899 | [
"BSD-3-Clause"
] | anathnathphy67/geopandas | geopandas/tools/sjoin.py | 9,252 | Python |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/build.py."""
# pylint: disable=invalid-name
import StringIO
import collections
import json
import os
import random
import subprocess
import sys
import tempfile
import threading
# pylint: disable=relative-import
import build
from core.tests import test_utils
# pylint: enable=relative-import
TEST_DIR = os.path.join('core', 'tests', 'build', '')
TEST_SOURCE_DIR = os.path.join('core', 'tests', 'build_sources')
MOCK_ASSETS_DEV_DIR = os.path.join(TEST_SOURCE_DIR, 'assets', '')
MOCK_ASSETS_OUT_DIR = os.path.join(TEST_DIR, 'static', 'assets', '')
MOCK_EXTENSIONS_DEV_DIR = os.path.join(TEST_SOURCE_DIR, 'extensions', '')
MOCK_EXTENSIONS_COMPILED_JS_DIR = os.path.join(
TEST_SOURCE_DIR, 'local_compiled_js', 'extensions', '')
MOCK_TEMPLATES_DEV_DIR = os.path.join(TEST_SOURCE_DIR, 'templates', '')
MOCK_TEMPLATES_COMPILED_JS_DIR = os.path.join(
TEST_SOURCE_DIR, 'local_compiled_js', 'templates', '')
MOCK_COMPILED_JS_DIR = os.path.join(TEST_SOURCE_DIR, 'compiled_js_dir', '')
MOCK_TSC_OUTPUT_LOG_FILEPATH = os.path.join(
TEST_SOURCE_DIR, 'mock_tsc_output_log.txt')
INVALID_INPUT_FILEPATH = os.path.join(
TEST_DIR, 'invalid', 'path', 'to', 'input.js')
INVALID_OUTPUT_FILEPATH = os.path.join(
TEST_DIR, 'invalid', 'path', 'to', 'output.js')
EMPTY_DIR = os.path.join(TEST_DIR, 'empty', '')
# Override Pylint's protected access rule due to multiple private functions in
# the file.
# pylint: disable=protected-access
class BuildTests(test_utils.GenericTestBase):
"""Test the build methods."""
def tearDown(self):
super(BuildTests, self).tearDown()
build.safe_delete_directory_tree(TEST_DIR)
build.safe_delete_directory_tree(EMPTY_DIR)
def test_minify(self):
"""Tests _minify with an invalid filepath."""
with self.assertRaises(subprocess.CalledProcessError) as called_process:
build._minify(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH)
# `returncode` is the exit status of the child process.
self.assertEqual(called_process.exception.returncode, 1)
def test_minify_and_create_sourcemap(self):
"""Tests _minify_and_create_sourcemap with an invalid filepath."""
with self.assertRaises(subprocess.CalledProcessError) as called_process:
build._minify_and_create_sourcemap(
INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH)
# `returncode` is the exit status of the child process.
self.assertEqual(called_process.exception.returncode, 1)
def test_ensure_files_exist(self):
"""Test _ensure_files_exist raises exception with a non-existent
filepath.
"""
non_existent_filepaths = [INVALID_INPUT_FILEPATH]
# Exception will be raised at first file determined to be non-existent.
with self.assertRaisesRegexp(
OSError, ('File %s does not exist.') % non_existent_filepaths[0]):
build._ensure_files_exist(non_existent_filepaths)
    def test_join_files(self):
        """Determine that third_party.js contains the content of the first
        10 JS files in /third_party/static.
        """
# Prepare a file_stream object from StringIO.
third_party_js_stream = StringIO.StringIO()
# Get all filepaths from manifest.json.
dependency_filepaths = build.get_dependencies_filepaths()
# Join and write all JS files in /third_party/static to file_stream.
build._join_files(dependency_filepaths['js'], third_party_js_stream)
counter = 0
# Only checking first 10 files.
JS_FILE_COUNT = 10
for js_filepath in dependency_filepaths['js']:
if counter == JS_FILE_COUNT:
break
with open(js_filepath, 'r') as js_file:
# Assert that each line is copied over to file_stream object.
for line in js_file:
self.assertIn(line, third_party_js_stream.getvalue())
counter += 1
def test_generate_copy_tasks_for_fonts(self):
"""Test _generate_copy_tasks_for_fonts ensures that the number of copy
tasks matches the number of font files.
"""
copy_tasks = collections.deque()
# Get all filepaths from manifest.json.
dependency_filepaths = build.get_dependencies_filepaths()
# Setup a sandbox folder for copying fonts.
test_target = os.path.join('target', 'fonts', '')
self.assertEqual(len(copy_tasks), 0)
copy_tasks += build._generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], test_target)
# Asserting the same number of copy tasks and number of font files.
self.assertEqual(len(copy_tasks), len(dependency_filepaths['fonts']))
def test_insert_hash(self):
"""Test _insert_hash returns correct filenames with provided hashes."""
self.assertEqual(
build._insert_hash('file.js', '123456'), 'file.123456.js')
self.assertEqual(
build._insert_hash(
'path/to/file.js', '654321'), 'path/to/file.654321.js')
self.assertEqual(
build._insert_hash('file.min.js', 'abcdef'), 'file.min.abcdef.js')
self.assertEqual(
build._insert_hash(
'path/to/file.min.js', 'fedcba'), 'path/to/file.min.fedcba.js')
    def test_get_file_count(self):
        """Test get_file_count returns the correct number of files, excluding
        files with extensions in FILE_EXTENSIONS_TO_IGNORE and files that
        should not be built.
"""
all_inclusive_file_count = 0
for _, _, files in os.walk(MOCK_EXTENSIONS_DEV_DIR):
all_inclusive_file_count += len(files)
ignored_file_count = 0
for _, _, files in os.walk(MOCK_EXTENSIONS_DEV_DIR):
for filename in files:
if not build.should_file_be_built(filename) or any(
filename.endswith(p)
for p in build.FILE_EXTENSIONS_TO_IGNORE):
ignored_file_count += 1
self.assertEqual(
all_inclusive_file_count - ignored_file_count,
build.get_file_count(MOCK_EXTENSIONS_DEV_DIR))
def test_compare_file_count(self):
"""Test _compare_file_count raises exception when there is a
        mismatched file count between two lists of directories.
"""
# Test when both lists contain single directory.
build.ensure_directory_exists(EMPTY_DIR)
source_dir_file_count = build.get_file_count(EMPTY_DIR)
assert source_dir_file_count == 0
target_dir_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
# Ensure that ASSETS_DEV_DIR has at least 1 file.
assert target_dir_file_count > 0
with self.assertRaisesRegexp(
ValueError, (
'%s files in first dir list != %s files in second dir list') %
(source_dir_file_count, target_dir_file_count)):
build._compare_file_count([EMPTY_DIR], [MOCK_ASSETS_DEV_DIR])
# Test when one of the lists contain multiple directories.
MOCK_EXTENSIONS_DIR_LIST = [
MOCK_EXTENSIONS_DEV_DIR, MOCK_EXTENSIONS_COMPILED_JS_DIR]
target_dir_file_count = build.get_file_count(
MOCK_EXTENSIONS_DEV_DIR) + build.get_file_count(
MOCK_EXTENSIONS_COMPILED_JS_DIR)
# Ensure that MOCK_EXTENSIONS_DIR has at least 1 file.
assert target_dir_file_count > 0
with self.assertRaisesRegexp(
ValueError, (
'%s files in first dir list != %s files in second dir list') %
(source_dir_file_count, target_dir_file_count)):
build._compare_file_count([EMPTY_DIR], MOCK_EXTENSIONS_DIR_LIST)
# Reset EMPTY_DIRECTORY to clean state.
build.safe_delete_directory_tree(EMPTY_DIR)
def test_verify_filepath_hash(self):
"""Test _verify_filepath_hash raises exception:
1) When there is an empty hash dict.
2) When a filename is expected to contain hash but does not.
3) When there is a hash in filename that cannot be found in
hash dict.
"""
# Final filepath example: base.240933e7564bd72a4dde42ee23260c5f.html.
file_hashes = dict()
base_filename = 'base.html'
with self.assertRaisesRegexp(ValueError, 'Hash dict is empty'):
build._verify_filepath_hash(base_filename, file_hashes)
# Generate a random hash dict for base.html.
file_hashes = {base_filename: random.getrandbits(128)}
with self.assertRaisesRegexp(
ValueError, '%s is expected to contain MD5 hash' % base_filename):
build._verify_filepath_hash(base_filename, file_hashes)
bad_filepath = 'README'
with self.assertRaisesRegexp(
ValueError, 'Filepath has less than 2 partitions after splitting'):
build._verify_filepath_hash(bad_filepath, file_hashes)
hashed_base_filename = build._insert_hash(
base_filename, random.getrandbits(128))
with self.assertRaisesRegexp(
KeyError,
'Hash from file named %s does not match hash dict values' %
hashed_base_filename):
build._verify_filepath_hash(hashed_base_filename, file_hashes)
def test_process_html(self):
"""Test process_html removes whitespaces and adds hash to filepaths."""
BASE_HTML_SOURCE_PATH = os.path.join(
MOCK_TEMPLATES_DEV_DIR, 'base.html')
BASE_JS_RELATIVE_PATH = os.path.join('pages', 'Base.js')
BASE_JS_SOURCE_PATH = os.path.join(
MOCK_TEMPLATES_COMPILED_JS_DIR, BASE_JS_RELATIVE_PATH)
build._ensure_files_exist([BASE_HTML_SOURCE_PATH, BASE_JS_SOURCE_PATH])
# Prepare a file_stream object from StringIO.
minified_html_file_stream = StringIO.StringIO()
# Obtain actual file hashes of /templates to add hash to all filepaths
# within the HTML file. The end result will look like:
# E.g <script ... App.js></script>
# --> <script ... App.[hash].js></script>.
# Only need to hash Base.js.
with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)):
file_hashes = build.get_file_hashes(MOCK_TEMPLATES_DEV_DIR)
file_hashes.update(
build.get_file_hashes(MOCK_TEMPLATES_COMPILED_JS_DIR))
# Assert that base.html has white spaces and has original filepaths.
with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file:
source_base_file_content = source_base_file.read()
self.assertRegexpMatches(
source_base_file_content, r'\s{2,}',
msg='No white spaces detected in %s unexpectedly'
% BASE_HTML_SOURCE_PATH)
# Look for templates/pages/Base.js in source_base_file_content.
self.assertIn(BASE_JS_RELATIVE_PATH, source_base_file_content)
# Build base.html file.
with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file:
build.process_html(
source_base_file, minified_html_file_stream, file_hashes)
minified_html_file_content = minified_html_file_stream.getvalue()
self.assertNotRegexpMatches(
minified_html_file_content, r'\s{2,}',
msg='All white spaces must be removed from %s' %
BASE_HTML_SOURCE_PATH)
# Assert that hashes are inserted into filenames in base.html.
# Final filepath in base.html example:
# /build/templates/head/pages/Base.081ce90f17ecdf07701d83cb860985c2.js.
final_filename = build._insert_hash(
BASE_JS_RELATIVE_PATH, file_hashes[BASE_JS_RELATIVE_PATH])
# Look for templates/pages/Base.081ce90f17ecdf07701d83cb860985c2.js in
# minified_html_file_content.
self.assertIn(final_filename, minified_html_file_content)
def test_should_file_be_built(self):
"""Test should_file_be_built returns the correct boolean value for
filepath that should be built.
"""
service_js_filepath = os.path.join(
'local_compiled_js', 'core', 'pages', 'AudioService.js')
generated_parser_js_filepath = os.path.join(
'core', 'expressions', 'ExpressionParserService.js')
compiled_generated_parser_js_filepath = os.path.join(
'local_compiled_js', 'core', 'expressions',
'ExpressionParserService.js')
service_ts_filepath = os.path.join('core', 'pages', 'AudioService.ts')
spec_js_filepath = os.path.join('core', 'pages', 'AudioServiceSpec.js')
protractor_filepath = os.path.join('extensions', 'protractor.js')
python_controller_filepath = os.path.join('base.py')
pyc_test_filepath = os.path.join(
'core', 'controllers', 'base.pyc')
python_test_filepath = os.path.join(
'core', 'tests', 'base_test.py')
self.assertFalse(build.should_file_be_built(spec_js_filepath))
self.assertFalse(build.should_file_be_built(protractor_filepath))
self.assertTrue(build.should_file_be_built(service_js_filepath))
self.assertFalse(build.should_file_be_built(service_ts_filepath))
self.assertFalse(build.should_file_be_built(python_test_filepath))
self.assertFalse(build.should_file_be_built(pyc_test_filepath))
self.assertTrue(build.should_file_be_built(python_controller_filepath))
# Swapping out constants to check if the reverse is true.
# ALL JS files that ends with ...Service.js should not be built.
with self.swap(
build, 'JS_FILENAME_SUFFIXES_TO_IGNORE', ('Service.js',)):
self.assertFalse(build.should_file_be_built(service_js_filepath))
self.assertTrue(build.should_file_be_built(spec_js_filepath))
with self.swap(
build, 'JS_FILEPATHS_NOT_TO_BUILD', (
'core/expressions/ExpressionParserService.js',)):
self.assertFalse(
build.should_file_be_built(generated_parser_js_filepath))
self.assertTrue(
build.should_file_be_built(
compiled_generated_parser_js_filepath))
def test_hash_should_be_inserted(self):
"""Test hash_should_be_inserted returns the correct boolean value
for filepath that should be hashed.
"""
with self.swap(
build, 'FILEPATHS_NOT_TO_RENAME', (
'*.py', 'path/to/fonts/*', 'path/to/third_party.min.js.map',
'path/to/third_party.min.css.map')):
self.assertFalse(build.hash_should_be_inserted(
'path/to/fonts/fontawesome-webfont.svg'))
self.assertFalse(build.hash_should_be_inserted(
'path/to/third_party.min.css.map'))
self.assertFalse(build.hash_should_be_inserted(
'path/to/third_party.min.js.map'))
self.assertTrue(build.hash_should_be_inserted(
'path/to/wrongFonts/fonta.eot'))
self.assertTrue(build.hash_should_be_inserted(
'rich_text_components/Video/protractor.js'))
self.assertFalse(build.hash_should_be_inserted(
'main.py'))
self.assertFalse(build.hash_should_be_inserted(
'extensions/domain.py'))
def test_generate_copy_tasks_to_copy_from_source_to_target(self):
"""Test generate_copy_tasks_to_copy_from_source_to_target queues up
the same number of copy tasks as the number of files in the directory.
"""
assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
copy_tasks = collections.deque()
self.assertEqual(len(copy_tasks), 0)
copy_tasks += build.generate_copy_tasks_to_copy_from_source_to_target(
MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, assets_hashes)
self.assertEqual(len(copy_tasks), total_file_count)
def test_is_file_hash_provided_to_frontend(self):
"""Test is_file_hash_provided_to_frontend returns the correct boolean
value for filepath that should be provided to frontend.
"""
with self.swap(
build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
('path/to/file.js', 'path/to/file.html', 'file.js')):
self.assertTrue(
build.is_file_hash_provided_to_frontend('path/to/file.js'))
self.assertTrue(
build.is_file_hash_provided_to_frontend('path/to/file.html'))
self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))
with self.swap(
build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
('path/to/*', '*.js', '*_end.html')):
self.assertTrue(
build.is_file_hash_provided_to_frontend('path/to/file.js'))
self.assertTrue(
build.is_file_hash_provided_to_frontend('path/to/file.html'))
self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))
self.assertFalse(
build.is_file_hash_provided_to_frontend('path/file.css'))
self.assertTrue(
build.is_file_hash_provided_to_frontend('good_end.html'))
self.assertFalse(
build.is_file_hash_provided_to_frontend('bad_end.css'))
def test_get_filepaths_by_extensions(self):
"""Test get_filepaths_by_extensions only returns filepaths in
directory with given extensions.
"""
filepaths = []
build.ensure_directory_exists(MOCK_ASSETS_DEV_DIR)
extensions = ('.json', '.svg',)
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(
MOCK_ASSETS_DEV_DIR, extensions)
for filepath in filepaths:
self.assertTrue(any(filepath.endswith(p) for p in extensions))
file_count = 0
for _, _, filenames in os.walk(MOCK_ASSETS_DEV_DIR):
for filename in filenames:
if any(filename.endswith(p) for p in extensions):
file_count += 1
self.assertEqual(len(filepaths), file_count)
filepaths = []
extensions = ('.pdf', '.viminfo', '.idea',)
self.assertEqual(len(filepaths), 0)
filepaths = build.get_filepaths_by_extensions(
MOCK_ASSETS_DEV_DIR, extensions)
self.assertEqual(len(filepaths), 0)
def test_get_file_hashes(self):
"""Test get_file_hashes gets hashes of all files in directory,
        excluding files with extensions in FILE_EXTENSIONS_TO_IGNORE.
"""
# Prevent getting hashes of HTML files.
with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)):
file_hashes = dict()
self.assertEqual(len(file_hashes), 0)
file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
self.assertGreater(len(file_hashes), 0)
# Assert that each hash's filepath exists and does not include files
# with extensions in FILE_EXTENSIONS_TO_IGNORE.
for filepath in file_hashes:
abs_filepath = os.path.join(MOCK_EXTENSIONS_DEV_DIR, filepath)
self.assertTrue(os.path.isfile(abs_filepath))
self.assertFalse(filepath.endswith('.html'))
def test_filter_hashes(self):
"""Test filter_hashes filters the provided hash correctly."""
# Set constant to provide everything to frontend.
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
hashes = {'path/to/file.js': '123456',
'path/file.min.js': '123456'}
filtered_hashes = build.filter_hashes(hashes)
self.assertEqual(
filtered_hashes['/path/to/file.js'],
hashes['path/to/file.js'])
self.assertEqual(
filtered_hashes['/path/file.min.js'],
hashes['path/file.min.js'])
with self.swap(
build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
('test_path/*', 'path/to/file.js')):
hashes = {'path/to/file.js': '123456',
'test_path/to/file.html': '123456',
'test_path/to/file.js': 'abcdef',
'path/path/file.js': 'zyx123',
'file.html': '321xyz'}
filtered_hashes = build.filter_hashes(hashes)
self.assertTrue(filtered_hashes.has_key('/path/to/file.js'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.html'))
self.assertTrue(filtered_hashes.has_key('/test_path/to/file.js'))
self.assertFalse(filtered_hashes.has_key('/path/path/file.js'))
self.assertFalse(filtered_hashes.has_key('/file.html'))
def test_get_hashes_json_file_contents(self):
"""Test get_hashes_json_file_contents parses provided hash dict
correctly to JSON format.
"""
# Set constant to provide everything to frontend.
with self.swap(build, 'FILEPATHS_PROVIDED_TO_FRONTEND', ('*',)):
hashes = {'path/file.js': '123456'}
self.assertEqual(
build.get_hashes_json_file_contents(hashes),
'var hashes = JSON.parse(\'{"/path/file.js": "123456"}\');')
hashes = {'file.js': '123456', 'file.min.js': '654321'}
self.assertEqual(
build.get_hashes_json_file_contents(hashes),
('var hashes = JSON.parse(\'{"/file.min.js": "654321", '
'"/file.js": "123456"}\');'))
def test_execute_tasks(self):
"""Test _execute_tasks joins all threads after executing all tasks."""
build_tasks = collections.deque()
TASK_COUNT = 2
count = TASK_COUNT
while count:
task = threading.Thread(
target=build._minify,
args=(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH))
build_tasks.append(task)
count -= 1
self.assertEqual(threading.active_count(), 1)
build._execute_tasks(build_tasks)
with self.assertRaisesRegexp(
OSError, 'threads can only be started once'):
build._execute_tasks(build_tasks)
# Assert that all threads are joined.
self.assertEqual(threading.active_count(), 1)
def test_generate_build_tasks_to_build_all_files_in_directory(self):
"""Test generate_build_tasks_to_build_all_files_in_directory queues up
the same number of build tasks as the number of files in the source
directory.
"""
asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
tasks = collections.deque()
self.assertEqual(len(tasks), 0)
# Build all files.
tasks = build.generate_build_tasks_to_build_all_files_in_directory(
MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, asset_hashes)
total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
self.assertEqual(len(tasks), total_file_count)
def test_generate_build_tasks_to_build_files_from_filepaths(self):
"""Test generate_build_tasks_to_build_files_from_filepaths queues up a
        number of build tasks corresponding to the number of changed files.
"""
new_filename = 'manifest.json'
recently_changed_filenames = [
os.path.join(MOCK_ASSETS_DEV_DIR, new_filename)]
asset_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
build_tasks = collections.deque()
self.assertEqual(len(build_tasks), 0)
build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(
MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR,
recently_changed_filenames, asset_hashes)
self.assertEqual(len(build_tasks), len(recently_changed_filenames))
build_tasks.clear()
svg_filepaths = build.get_filepaths_by_extensions(
MOCK_ASSETS_DEV_DIR, ('.svg',))
# Make sure there is at least 1 SVG file.
self.assertGreater(len(svg_filepaths), 0)
self.assertEqual(len(build_tasks), 0)
build_tasks += build.generate_build_tasks_to_build_files_from_filepaths(
MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, svg_filepaths,
asset_hashes)
self.assertEqual(len(build_tasks), len(svg_filepaths))
def test_generate_build_tasks_to_build_directory(self):
"""Test generate_build_tasks_to_build_directory queues up a
corresponding number of build tasks according to the given scenario.
"""
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': MOCK_EXTENSIONS_DEV_DIR,
'compiled_js_dir': MOCK_EXTENSIONS_COMPILED_JS_DIR,
'staging_dir': os.path.join(
TEST_DIR, 'backend_prod_files', 'extensions', ''),
'out_dir': os.path.join(TEST_DIR, 'build', 'extensions', '')
}
file_hashes = build.get_file_hashes(MOCK_EXTENSIONS_DEV_DIR)
compiled_js_file_hashes = build.get_file_hashes(
MOCK_EXTENSIONS_COMPILED_JS_DIR)
build_dir_tasks = collections.deque()
build_all_files_tasks = (
build.generate_build_tasks_to_build_all_files_in_directory(
MOCK_EXTENSIONS_DEV_DIR,
EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
file_hashes))
build_all_files_tasks += (
build.generate_build_tasks_to_build_all_files_in_directory(
MOCK_EXTENSIONS_COMPILED_JS_DIR,
EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
compiled_js_file_hashes))
self.assertGreater(len(build_all_files_tasks), 0)
# Test for building all files when staging dir does not exist.
self.assertEqual(len(build_dir_tasks), 0)
build_dir_tasks += build.generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS, file_hashes)
self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
build.safe_delete_directory_tree(TEST_DIR)
build_dir_tasks.clear()
# Test for building only new files when staging dir exists.
build.ensure_directory_exists(
EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
self.assertEqual(len(build_dir_tasks), 0)
source_hashes = file_hashes
source_hashes.update(compiled_js_file_hashes)
build_dir_tasks += build.generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS, source_hashes)
self.assertEqual(len(build_dir_tasks), len(build_all_files_tasks))
build.safe_delete_directory_tree(TEST_DIR)
# Build all files and save to final directory.
build.ensure_directory_exists(
EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'])
build._execute_tasks(build_dir_tasks)
self.assertEqual(threading.active_count(), 1)
build._execute_tasks(
build.generate_copy_tasks_to_copy_from_source_to_target(
EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'], file_hashes))
build_dir_tasks.clear()
# Test for only building files that need to be rebuilt.
self.assertEqual(len(build_dir_tasks), 0)
build_dir_tasks += build.generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS, build_dir_tasks)
file_extensions_to_always_rebuild = ('.html', '.py',)
always_rebuilt_filepaths = build.get_filepaths_by_extensions(
MOCK_EXTENSIONS_DEV_DIR, file_extensions_to_always_rebuild)
self.assertGreater(len(always_rebuilt_filepaths), 0)
self.assertEqual(len(build_dir_tasks), len(always_rebuilt_filepaths))
build.safe_delete_directory_tree(TEST_DIR)
def test_get_recently_changed_filenames(self):
"""Test get_recently_changed_filenames detects file recently added."""
# Create an empty folder.
build.ensure_directory_exists(EMPTY_DIR)
# Get hashes from ASSETS_DEV_DIR to simulate a folder with built files.
assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
recently_changed_filenames = []
self.assertEqual(len(recently_changed_filenames), 0)
recently_changed_filenames = build.get_recently_changed_filenames(
assets_hashes, EMPTY_DIR)
# Since all HTML and Python files are already built, they are ignored.
with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html', '.py',)):
self.assertEqual(
len(recently_changed_filenames), build.get_file_count(
MOCK_ASSETS_DEV_DIR))
build.safe_delete_directory_tree(EMPTY_DIR)
def test_generate_delete_tasks_to_remove_deleted_files(self):
"""Test generate_delete_tasks_to_remove_deleted_files queues up the
        same number of deletion tasks as the number of deleted files.
"""
delete_tasks = collections.deque()
# The empty dict means that all files should be removed.
file_hashes = dict()
self.assertEqual(len(delete_tasks), 0)
delete_tasks += build.generate_delete_tasks_to_remove_deleted_files(
file_hashes, MOCK_TEMPLATES_DEV_DIR)
self.assertEqual(
len(delete_tasks), build.get_file_count(MOCK_TEMPLATES_DEV_DIR))
def test_compiled_js_dir_validation(self):
"""Test that build.COMPILED_JS_DIR is validated correctly with
outDir in build.TSCONFIG_FILEPATH.
"""
build.require_compiled_js_dir_to_be_valid()
out_dir = ''
with open(build.TSCONFIG_FILEPATH) as f:
config_data = json.load(f)
out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')
with self.assertRaisesRegexp(
Exception,
'COMPILED_JS_DIR: %s does not match the output directory '
'in %s: %s' % (
MOCK_COMPILED_JS_DIR, build.TSCONFIG_FILEPATH,
out_dir)), self.swap(
build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR):
build.require_compiled_js_dir_to_be_valid()
def test_compiled_js_dir_is_deleted_before_compilation(self):
"""Test that compiled_js_dir is deleted before a fresh compilation."""
def mock_check_call(unused_cmd):
pass
def mock_require_compiled_js_dir_to_be_valid():
pass
with self.swap(
build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(
build, 'require_compiled_js_dir_to_be_valid',
mock_require_compiled_js_dir_to_be_valid):
if not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)):
os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
with self.swap(subprocess, 'check_call', mock_check_call):
build.compile_typescript_files('.')
self.assertFalse(
os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
def test_compiled_js_dir_is_deleted_before_watch_mode_compilation(self):
"""Test that compiled_js_dir is deleted before a fresh watch mode
compilation.
"""
# pylint: disable=unused-argument
def mock_call(unused_cmd, shell, stdout):
pass
def mock_popen(unused_cmd, stdout):
pass
# pylint: enable=unused-argument
def mock_require_compiled_js_dir_to_be_valid():
pass
with self.swap(
build, 'COMPILED_JS_DIR', MOCK_COMPILED_JS_DIR), self.swap(
build, 'require_compiled_js_dir_to_be_valid',
mock_require_compiled_js_dir_to_be_valid):
if not os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)):
os.mkdir(os.path.dirname(MOCK_COMPILED_JS_DIR))
with self.swap(subprocess, 'Popen', mock_popen), self.swap(
subprocess, 'call', mock_call), self.swap(
build, 'TSC_OUTPUT_LOG_FILEPATH',
MOCK_TSC_OUTPUT_LOG_FILEPATH):
build.compile_typescript_files_continuously('.')
self.assertFalse(
os.path.exists(os.path.dirname(MOCK_COMPILED_JS_DIR)))
def test_generate_app_yaml(self):
mock_dev_yaml_filepath = 'mock_app_dev.yaml'
mock_yaml_filepath = 'mock_app.yaml'
app_dev_yaml_filepath_swap = self.swap(
build, 'APP_DEV_YAML_FILEPATH', mock_dev_yaml_filepath)
app_yaml_filepath_swap = self.swap(
build, 'APP_YAML_FILEPATH', mock_yaml_filepath)
app_dev_yaml_temp_file = tempfile.NamedTemporaryFile()
app_dev_yaml_temp_file.name = mock_dev_yaml_filepath
with open(mock_dev_yaml_filepath, 'w') as tmp:
tmp.write('Some content in mock_app_dev.yaml')
app_yaml_temp_file = tempfile.NamedTemporaryFile()
app_yaml_temp_file.name = mock_yaml_filepath
with open(mock_yaml_filepath, 'w') as tmp:
tmp.write('Initial content in mock_app.yaml')
with app_dev_yaml_filepath_swap, app_yaml_filepath_swap:
build.generate_app_yaml()
with open(mock_yaml_filepath, 'r') as yaml_file:
content = yaml_file.read()
self.assertEqual(
content,
'# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
'Some content in mock_app_dev.yaml')
app_yaml_temp_file.close()
app_dev_yaml_temp_file.close()
def test_safe_delete_file(self):
temp_file = tempfile.NamedTemporaryFile()
temp_file.name = 'some_file.txt'
with open('some_file.txt', 'w') as tmp:
tmp.write('Some content.')
self.assertTrue(os.path.isfile('some_file.txt'))
build.safe_delete_file('some_file.txt')
self.assertFalse(os.path.isfile('some_file.txt'))
def test_minify_third_party_libs(self):
def _mock_safe_delete_file(unused_filepath):
"""Mocks build.safe_delete_file()."""
pass
self.assertFalse(os.path.isfile(
'core/tests/data/third_party/css/third_party.min.css'))
self.assertFalse(os.path.isfile(
'core/tests/data/third_party/js/third_party.min.js'))
self.assertFalse(os.path.isfile(
'core/tests/data/third_party/js/third_party.min.js.map'))
with self.swap(build, 'safe_delete_file', _mock_safe_delete_file):
build.minify_third_party_libs('core/tests/data/third_party')
self.assertTrue(os.path.isfile(
'core/tests/data/third_party/css/third_party.min.css'))
self.assertTrue(os.path.isfile(
'core/tests/data/third_party/js/third_party.min.js'))
self.assertTrue(os.path.isfile(
'core/tests/data/third_party/js/third_party.min.js.map'))
self.assertLess(
os.path.getsize(
'core/tests/data/third_party/css/third_party.min.css'),
os.path.getsize('core/tests/data/third_party/css/third_party.css'))
self.assertLess(
os.path.getsize(
'core/tests/data/third_party/js/third_party.min.js'),
os.path.getsize('core/tests/data/third_party/js/third_party.js'))
build.safe_delete_file(
'core/tests/data/third_party/css/third_party.min.css')
build.safe_delete_file(
'core/tests/data/third_party/js/third_party.min.js')
build.safe_delete_file(
'core/tests/data/third_party/js/third_party.min.js.map')
def test_build_with_prod_env(self):
check_function_calls = {
'build_using_webpack_gets_called': False,
'ensure_files_exist_gets_called': False,
'compile_typescript_files_gets_called': False
}
expected_check_function_calls = {
'build_using_webpack_gets_called': True,
'ensure_files_exist_gets_called': True,
'compile_typescript_files_gets_called': True
}
def mock_build_using_webpack():
check_function_calls['build_using_webpack_gets_called'] = True
def mock_ensure_files_exist(unused_filepaths):
check_function_calls['ensure_files_exist_gets_called'] = True
def mock_compile_typescript_files(unused_project_dir):
check_function_calls['compile_typescript_files_gets_called'] = True
ensure_files_exist_swap = self.swap(
build, '_ensure_files_exist', mock_ensure_files_exist)
build_using_webpack_swap = self.swap(
build, 'build_using_webpack', mock_build_using_webpack)
compile_typescript_files_swap = self.swap(
build, 'compile_typescript_files', mock_compile_typescript_files)
args_swap = self.swap(sys, 'argv', ['build.py', '--prod_env'])
with ensure_files_exist_swap, build_using_webpack_swap, (
compile_typescript_files_swap), args_swap:
build.build()
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_build_with_watcher(self):
check_function_calls = {
'ensure_files_exist_gets_called': False,
'compile_typescript_files_continuously_gets_called': False
}
expected_check_function_calls = {
'ensure_files_exist_gets_called': True,
'compile_typescript_files_continuously_gets_called': True
}
def mock_ensure_files_exist(unused_filepaths):
check_function_calls['ensure_files_exist_gets_called'] = True
def mock_compile_typescript_files_continuously(unused_project_dir):
check_function_calls[
'compile_typescript_files_continuously_gets_called'] = True
ensure_files_exist_swap = self.swap(
build, '_ensure_files_exist', mock_ensure_files_exist)
compile_typescript_files_continuously_swap = self.swap(
build, 'compile_typescript_files_continuously',
mock_compile_typescript_files_continuously)
args_swap = self.swap(sys, 'argv', ['build.py', '--enable_watcher'])
with ensure_files_exist_swap, (
compile_typescript_files_continuously_swap), args_swap:
build.build()
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_cannot_minify_third_party_libs_in_dev_mode(self):
check_function_calls = {
'ensure_files_exist_gets_called': False,
'compile_typescript_files_gets_called': False
}
expected_check_function_calls = {
'ensure_files_exist_gets_called': True,
'compile_typescript_files_gets_called': True
}
def mock_ensure_files_exist(unused_filepaths):
check_function_calls['ensure_files_exist_gets_called'] = True
def mock_compile_typescript_files(unused_project_dir):
check_function_calls['compile_typescript_files_gets_called'] = True
ensure_files_exist_swap = self.swap(
build, '_ensure_files_exist', mock_ensure_files_exist)
compile_typescript_files_swap = self.swap(
build, 'compile_typescript_files', mock_compile_typescript_files)
args_swap = self.swap(
sys, 'argv', ['build.py', '--minify_third_party_libs_only'])
assert_raises_regexp_context_manager = self.assertRaisesRegexp(
Exception,
'minify_third_party_libs_only should not be set in non-prod mode.')
with ensure_files_exist_swap, compile_typescript_files_swap, (
assert_raises_regexp_context_manager), args_swap:
build.build()
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_build_using_webpack_command(self):
def mock_check_call(cmd, **unused_kwargs):
self.assertEqual(
cmd,
'%s --config %s'
% (build.WEBPACK_FILE, build.WEBPACK_PROD_CONFIG))
with self.swap(subprocess, 'check_call', mock_check_call):
build.build_using_webpack()
# pylint: enable=protected-access
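# Illustrative sketch (editor's addition, not part of the original tests): a
# minimal reference implementation of the filename-hashing behaviour exercised
# by test_insert_hash above. It mirrors the expected outputs in that test, not
# necessarily Oppia's actual implementation of build._insert_hash.
def _example_insert_hash(filepath, file_hash):
    """Returns filepath with file_hash inserted before its final extension."""
    base, extension = os.path.splitext(filepath)
    return '%s.%s%s' % (base, file_hash, extension)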
| 44.825137 | 80 | 0.665464 | [
"Apache-2.0"
] | muarachmann/oppia | scripts/build_test.py | 41,015 | Python |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import pytest
from assertpy import assert_that
from cfn_stacks_factory import CfnStack
from remote_command_executor import RemoteCommandExecutor
from troposphere import Template
from troposphere.route53 import HostedZone, HostedZoneVPCs
from utils import generate_stack_name
from tests.common.mpi_common import _test_mpi
from tests.common.schedulers_common import get_scheduler_commands
from tests.common.utils import fetch_instance_slots
@pytest.mark.usefixtures("os")
def test_hit_no_cluster_dns_mpi(scheduler, region, instance, pcluster_config_reader, clusters_factory, test_datadir):
logging.info("Testing HIT cluster with cluster DNS disabled.")
scaledown_idletime = 3
max_queue_size = 3
min_queue_size = 1
slots_per_instance = fetch_instance_slots(region, instance)
cluster_config = pcluster_config_reader(
scaledown_idletime=scaledown_idletime, max_queue_size=max_queue_size, min_queue_size=min_queue_size
)
cluster = clusters_factory(cluster_config)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
# Assert that compute hostname cannot be pinged directly
compute_nodes = scheduler_commands.get_compute_nodes()
result = remote_command_executor.run_remote_command("ping -c 3 {}".format(compute_nodes[0]), raise_on_error=False)
assert_that(result.failed).is_true()
# Assert compute hostname is the same as nodename
_test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes)
# This verifies that the job completes correctly
_test_mpi(
remote_command_executor,
slots_per_instance,
scheduler,
region,
cluster.cfn_name,
scaledown_idletime,
verify_scaling=False,
)
@pytest.mark.usefixtures("os", "instance")
def test_existing_hosted_zone(
hosted_zone_factory,
pcluster_config_reader,
clusters_factory,
vpc_stack,
cfn_stacks_factory,
key_name,
scheduler,
region,
instance,
):
    """Test cluster DNS when an existing hosted_zone_id is provided in the config file."""
num_computes = 2
hosted_zone_id, domain_name = hosted_zone_factory()
cluster_config = pcluster_config_reader(existing_hosted_zone=hosted_zone_id, queue_size=num_computes)
cluster = clusters_factory(cluster_config, upper_case_cluster_name=True)
remote_command_executor = RemoteCommandExecutor(cluster)
scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
# Test run mpi job
_test_mpi(
remote_command_executor,
slots_per_instance=fetch_instance_slots(region, instance),
scheduler=scheduler,
region=region,
stack_name=cluster.cfn_name,
scaledown_idletime=3,
verify_scaling=False,
)
# Assert compute hostname is the same as nodename
compute_nodes = scheduler_commands.get_compute_nodes()
_test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes)
# Test domain name matches expected domain name
resolv_conf = remote_command_executor.run_remote_command("cat /etc/resolv.conf").stdout
assert_that(resolv_conf).contains(cluster.cfn_name.lower() + "." + domain_name)
@pytest.fixture(scope="class")
def hosted_zone_factory(vpc_stack, cfn_stacks_factory, request, region):
"""Create a hosted zone stack."""
hosted_zone_stack_name = generate_stack_name(
"integ-tests-hosted-zone", request.config.getoption("stackname_suffix")
)
domain_name = hosted_zone_stack_name + ".com"
def create_hosted_zone():
hosted_zone_template = Template()
hosted_zone_template.set_version("2010-09-09")
hosted_zone_template.set_description("Hosted zone stack created for testing existing DNS")
hosted_zone_template.add_resource(
HostedZone(
"HostedZoneResource",
Name=domain_name,
VPCs=[HostedZoneVPCs(VPCId=vpc_stack.cfn_outputs["VpcId"], VPCRegion=region)],
)
)
hosted_zone_stack = CfnStack(
name=hosted_zone_stack_name,
region=region,
template=hosted_zone_template.to_json(),
)
cfn_stacks_factory.create_stack(hosted_zone_stack)
return hosted_zone_stack.cfn_resources["HostedZoneResource"], domain_name
yield create_hosted_zone
if not request.config.getoption("no_delete"):
cfn_stacks_factory.delete_stack(hosted_zone_stack_name, region)
def _test_hostname_same_as_nodename(scheduler_commands, remote_command_executor, compute_nodes):
result = scheduler_commands.submit_command("hostname > /shared/compute_hostname")
job_id = scheduler_commands.assert_job_submitted(result.stdout)
scheduler_commands.wait_job_completed(job_id)
hostname = remote_command_executor.run_remote_command("cat /shared/compute_hostname").stdout
assert_that(compute_nodes).contains(hostname)
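# Illustrative sketch (editor's addition, not part of the original tests): one
# way to list the record names Route53 holds for the hosted zone created above,
# e.g. when debugging cluster DNS. Assumes standard boto3 credentials.
def _list_hosted_zone_record_names(hosted_zone_id, region):
    import boto3
    route53 = boto3.client("route53", region_name=region)
    response = route53.list_resource_record_sets(HostedZoneId=hosted_zone_id)
    return [record_set["Name"] for record_set in response["ResourceRecordSets"]]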
| 40.143885 | 118 | 0.757706 | [
"Apache-2.0"
] | Chen188/aws-parallelcluster | tests/integration-tests/tests/dns/test_dns.py | 5,580 | Python |
import csv
import os
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
survey_path = os.path.join(dir_path, '../data/test_two_entries.csv')
NUM_QUESTIONS = 8
RESPONSE_PERSON = ['pat', 'jeremy', 'zach']
TASTE_PROFILE_TYPES = ['deliciousness', 'heaviness', 'reliability', 'frequency', 'between']
i = 0
person_responses = []
with open(survey_path) as f:
data = csv.reader(f, delimiter=',', quotechar='|')
for row in data:
if i == 1:
sando_type_row = row
if i > 1:
person_responses.append(row)
i += 1
num_sando_types = int(
(len(sando_type_row) - 3)
/ NUM_QUESTIONS
)
end_index = 2 + num_sando_types
sando_types = sando_type_row[2:end_index]
global_taste_profile = {}
j = 0
for response in person_responses:
taste_profile = {}
name = RESPONSE_PERSON[j]
## Loop through deliciousness, heaviness, etc.
## Pull out deliciousness, etc. scores and store in taste_profile[type]
for data_type in TASTE_PROFILE_TYPES:
start_index = 2 + (1 + TASTE_PROFILE_TYPES.index(data_type)) * num_sando_types
end_index = start_index + num_sando_types
raw_profile = np.array(response[start_index:end_index])
        if data_type in ['deliciousness', 'heaviness', 'reliability']:
            # Use the builtin float/int: the np.float/np.int aliases were
            # removed from modern NumPy.
            float_profile = raw_profile.astype(float) * 0.01
            taste_profile[data_type] = float_profile
        else:
            int_profile = raw_profile.astype(int)
            taste_profile[data_type] = int_profile
profile_csv_path = os.path.join(dir_path, '../data/users/profiles', (name + '.csv'))
with open(profile_csv_path, 'w') as f:
profile_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
header = ['sando_type']
for data_type in TASTE_PROFILE_TYPES:
header.append(data_type)
profile_writer.writerow(header)
## Loop through sando types and dump to CSV
for sando in sando_types:
sando_index = sando_types.index(sando)
sando_row = [sando]
for data_type in TASTE_PROFILE_TYPES:
sando_row.append(taste_profile[data_type][sando_index])
profile_writer.writerow(sando_row)
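# Illustrative sketch (editor's addition, not part of the original script): a
# minimal reader for the per-person profile CSVs written above; it returns one
# dict per sandwich row, keyed by the header columns written above.
def read_profile(profile_csv_path):
    with open(profile_csv_path) as f:
        return list(csv.DictReader(f))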
| 33.147059 | 95 | 0.658829 | [
"MIT"
] | zchuruk/sandwichbot | sandwichbot/survey_processor.py | 2,254 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
r"""TensorFlow AmoebaNet Example.
GCP Run Example
python amoeba_net.py --data_dir=gs://cloud-tpu-datasets/imagenet-data --model_dir=gs://cloud-tpu-ckpts/models/ameoba_net_x/ \
--drop_connect_keep_prob=1.0 --cell_name=evol_net_x --num_cells=12 --reduction_size=256 --image_size=299 --num_epochs=48 \
--train_batch_size=256 --num_epochs_per_eval=4.0 --lr_decay_value=0.89 --lr_num_epochs_per_decay=1 --alsologtostderr \
--tpu=huangyp-tpu-0
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import itertools
import math
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
from PIL import Image
import tensorflow as tf
import amoeba_net_model as model_lib
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the GCE zone from metadata.')
# General Parameters
flags.DEFINE_integer(
'num_shards', 8,
'Number of shards (TPU cores).')
flags.DEFINE_integer(
'distributed_group_size', 1,
    help='Size of the distributed batch norm group. '
    'Default is normalization over local examples only. '
    'When set to a value greater than 1, it will enable '
    'a distributed batch norm. To enable a global batch norm, '
    'set distributed_group_size to FLAGS.num_shards.')
flags.DEFINE_bool(
'use_tpu', True,
'Use TPUs rather than CPU or GPU.')
flags.DEFINE_string(
'data_dir', '',
'Directory where input data is stored')
flags.DEFINE_string(
'model_dir', None,
'Directory where model output is stored')
flags.DEFINE_string(
'export_dir', None,
'The directory where the exported SavedModel will be stored.')
flags.DEFINE_bool(
'export_to_tpu', False,
help='Whether to export additional metagraph with "serve, tpu" tags'
' in addition to "serve" only metagraph.')
flags.DEFINE_integer(
'iterations_per_loop', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer(
'train_batch_size', 256,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
'eval_batch_size', 256,
'Global (not per-shard) batch size for evaluation')
flags.DEFINE_float(
'num_epochs', 48.,
    'Number of epochs to use for training.')
flags.DEFINE_float(
'num_epochs_per_eval', 1.,
'Number of training epochs to run between evaluations.')
flags.DEFINE_string(
'mode', 'train_and_eval',
'Mode to run: train, eval, train_and_eval, or predict')
flags.DEFINE_integer(
'save_checkpoints_steps', None,
'Interval (in steps) at which the model data '
'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
'enable_hostcall', True,
'Skip the host_call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --enable_hostcall=True, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the TPU-side computation.')
# Model specific parameters
flags.DEFINE_bool('use_aux_head', True, 'Include aux head or not.')
flags.DEFINE_float(
'aux_scaling', 0.4, 'Scaling factor of aux_head')
flags.DEFINE_float(
'batch_norm_decay', 0.9, 'Batch norm decay.')
flags.DEFINE_float(
'batch_norm_epsilon', 1e-5, 'Batch norm epsilon.')
flags.DEFINE_float(
'dense_dropout_keep_prob', None, 'Dense dropout keep probability.')
flags.DEFINE_float(
'drop_connect_keep_prob', 1.0, 'Drop connect keep probability.')
flags.DEFINE_string(
'drop_connect_version', None, 'Drop connect version.')
flags.DEFINE_string(
'cell_name', 'amoeba_net_d', 'Which network to run.')
flags.DEFINE_integer(
'num_cells', 12, 'Total number of cells.')
flags.DEFINE_integer(
'reduction_size', 256, 'Default cell reduction size.')
flags.DEFINE_integer(
'stem_reduction_size', 32, 'Stem filter size.')
flags.DEFINE_float(
'weight_decay', 4e-05, 'Weight decay for slim model.')
flags.DEFINE_integer(
'num_label_classes', 1001, 'The number of classes that images fit into.')
# Training hyper-parameters
flags.DEFINE_float(
'lr', 0.64, 'Learning rate.')
flags.DEFINE_string(
'optimizer', 'rmsprop',
'Optimizer (one of sgd, rmsprop, momentum)')
flags.DEFINE_float(
'moving_average_decay', 0.9999,
'moving average decay rate')
flags.DEFINE_float(
'lr_decay_value', 0.9,
'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
'lr_num_epochs_per_decay', 1,
'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_string(
'lr_decay_method', 'exponential',
'Method of decay: exponential, cosine, constant, stepwise')
flags.DEFINE_float(
'lr_warmup_epochs', 3.0,
'Learning rate increased from zero linearly to lr for the first '
'lr_warmup_epochs.')
flags.DEFINE_float('gradient_clipping_by_global_norm', 0,
'gradient_clipping_by_global_norm')
flags.DEFINE_integer(
'image_size', 299, 'Size of image, assuming image height and width.')
flags.DEFINE_integer(
'num_train_images', 1281167, 'The number of images in the training set.')
flags.DEFINE_integer(
'num_eval_images', 50000, 'The number of images in the evaluation set.')
flags.DEFINE_bool(
'use_bp16', True, 'If True, use bfloat16 for activations')
flags.DEFINE_integer(
'eval_timeout', 60*60*24,
'Maximum seconds between checkpoints before evaluation terminates.')
# Inference configuration.
flags.DEFINE_bool(
    'inference_with_all_cores', True, 'Whether to round-robin '
    'among all cores visible to the host for TPU inference.')
flags.DEFINE_bool(
'add_warmup_requests', True,
    'Whether to add warmup requests into the exported saved model dir, '
'especially for TPU inference.')
flags.DEFINE_string('model_name', 'amoeba_net',
'Serving model name used for the model server.')
flags.DEFINE_multi_integer(
'inference_batch_sizes', [8],
'Known inference batch sizes used to warm up for each core.')
FLAGS = flags.FLAGS
def build_run_config():
"""Return RunConfig for TPU estimator."""
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
eval_steps = FLAGS.num_eval_images // FLAGS.eval_batch_size
iterations_per_loop = (eval_steps if FLAGS.mode == 'eval'
else FLAGS.iterations_per_loop)
save_checkpoints_steps = FLAGS.save_checkpoints_steps or iterations_per_loop
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=None,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=FLAGS.num_shards,
per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
))
return run_config
def build_image_serving_input_receiver_fn(shape,
dtype=tf.float32):
"""Returns a input_receiver_fn for raw images during serving."""
def _preprocess_image(encoded_image):
"""Preprocess a single raw image."""
image = tf.image.decode_image(encoded_image, channels=shape[-1])
image.set_shape(shape)
return tf.cast(image, dtype)
def serving_input_receiver_fn():
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
return tf.estimator.export.TensorServingInputReceiver(
features=images, receiver_tensors=image_bytes_list)
return serving_input_receiver_fn
def _encode_image(image_array, fmt='PNG'):
  """Encodes a (numpy) image array to an encoded image string.
Args:
image_array: (numpy) image array
fmt: image format to use
Returns:
encoded image string
"""
pil_image = Image.fromarray(image_array)
image_io = io.BytesIO()
pil_image.save(image_io, format=fmt)
return image_io.getvalue()
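# Illustrative helper (editor's addition, not part of the original script):
# decodes a string produced by _encode_image back into a (numpy) image array,
# e.g. to sanity-check the payload placed into warmup requests.
def _decode_image(image_bytes):
  """Decodes an encoded image string back to a (numpy) image array."""
  return np.array(Image.open(io.BytesIO(image_bytes)))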
def write_warmup_requests(savedmodel_dir,
model_name,
image_size,
batch_sizes=None,
num_requests=8):
"""Writes warmup requests for inference into a tfrecord file.
Args:
savedmodel_dir: string, the file to the exported model folder.
model_name: string, a model name used inside the model server.
image_size: int, size of image, assuming image height and width.
batch_sizes: list, a list of batch sizes to create different input requests.
num_requests: int, number of requests per batch size.
Raises:
ValueError: if batch_sizes is not a valid integer list.
"""
if not isinstance(batch_sizes, list) or not batch_sizes:
raise ValueError('batch sizes should be a valid non-empty list.')
extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra')
tf.gfile.MkDir(extra_assets_dir)
with tf.python_io.TFRecordWriter(
os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer:
for batch_size in batch_sizes:
for _ in range(num_requests):
request = predict_pb2.PredictRequest()
image = np.uint8(np.random.rand(image_size, image_size, 3) * 255)
request.inputs['input'].CopyFrom(
tf.make_tensor_proto(
[_encode_image(image)] * batch_size, shape=[batch_size]))
request.model_spec.name = model_name
request.model_spec.signature_name = 'serving_default'
log = prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=request))
writer.write(log.SerializeToString())
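# Illustrative sketch (editor's addition, not part of the original script):
# reads the warmup records written above back out of the exported model's
# assets.extra directory, which can be useful for verifying the export.
def read_warmup_requests(savedmodel_dir):
  """Yields PredictionLog protos from the exported warmup-request file."""
  warmup_path = os.path.join(
      savedmodel_dir, 'assets.extra', 'tf_serving_warmup_requests')
  for record in tf.python_io.tf_record_iterator(warmup_path):
    log = prediction_log_pb2.PredictionLog()
    log.ParseFromString(record)
    yield log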
# TODO(ereal): simplify this.
def override_with_flags(hparams):
"""Overrides parameters with flag values."""
override_flag_names = [
'aux_scaling',
'train_batch_size',
'batch_norm_decay',
'batch_norm_epsilon',
'dense_dropout_keep_prob',
'drop_connect_keep_prob',
'drop_connect_version',
'eval_batch_size',
'gradient_clipping_by_global_norm',
'lr',
'lr_decay_method',
'lr_decay_value',
'lr_num_epochs_per_decay',
'moving_average_decay',
'image_size',
'num_cells',
'reduction_size',
'stem_reduction_size',
'num_epochs',
'num_epochs_per_eval',
'optimizer',
'enable_hostcall',
'use_aux_head',
'use_bp16',
'use_tpu',
'lr_warmup_epochs',
'weight_decay',
'num_shards',
'distributed_group_size',
'num_train_images',
'num_eval_images',
'num_label_classes',
]
for flag_name in override_flag_names:
flag_value = getattr(FLAGS, flag_name, 'INVALID')
if flag_value == 'INVALID':
tf.logging.fatal('Unknown flag %s.' % str(flag_name))
if flag_value is not None:
_set_or_add_hparam(hparams, flag_name, flag_value)
def build_hparams():
"""Build tf.Hparams for training Amoeba Net."""
hparams = model_lib.build_hparams(FLAGS.cell_name)
override_with_flags(hparams)
return hparams
def _terminate_eval():
tf.logging.info('Timeout passed with no new checkpoints ... terminating eval')
return True
def _get_next_checkpoint():
return tf.contrib.training.checkpoints_iterator(
FLAGS.model_dir,
timeout=FLAGS.eval_timeout,
timeout_fn=_terminate_eval)
def _set_or_add_hparam(hparams, name, value):
if getattr(hparams, name, None) is None:
hparams.add_hparam(name, value)
else:
hparams.set_hparam(name, value)
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = tf.train.NewCheckpointReader(
tf.train.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(tf.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def main(_):
mode = FLAGS.mode
data_dir = FLAGS.data_dir
model_dir = FLAGS.model_dir
hparams = build_hparams()
  estimator_params = {}
train_steps_per_epoch = int(
math.ceil(hparams.num_train_images / float(hparams.train_batch_size)))
eval_steps = hparams.num_eval_images // hparams.eval_batch_size
eval_batch_size = (None if mode == 'train' else
hparams.eval_batch_size)
model = model_lib.AmoebaNetEstimatorModel(hparams, model_dir)
if hparams.use_tpu:
run_config = build_run_config()
image_classifier = tf.contrib.tpu.TPUEstimator(
model_fn=model.model_fn,
use_tpu=True,
config=run_config,
      params=estimator_params,
predict_batch_size=eval_batch_size,
train_batch_size=hparams.train_batch_size,
eval_batch_size=eval_batch_size,
export_to_tpu=FLAGS.export_to_tpu,
experimental_exported_model_uses_all_cores=FLAGS
.inference_with_all_cores)
else:
save_checkpoints_steps = (FLAGS.save_checkpoints_steps or
FLAGS.iterations_per_loop)
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
save_checkpoints_steps=save_checkpoints_steps)
image_classifier = tf.estimator.Estimator(
model_fn=model.model_fn,
config=run_config,
        params=estimator_params)
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = model_lib.InputPipeline(
is_training=True, data_dir=data_dir, hparams=hparams)
imagenet_eval = model_lib.InputPipeline(
is_training=False, data_dir=data_dir, hparams=hparams)
if hparams.moving_average_decay < 1:
eval_hooks = [model_lib.LoadEMAHook(model_dir,
hparams.moving_average_decay)]
else:
eval_hooks = []
if mode == 'eval':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting to evaluate.')
try:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
tf.logging.info('Evaluation results: %s' % eval_results)
except tf.errors.NotFoundError:
# skip checkpoint if it gets deleted prior to evaluation
        tf.logging.info(
            'Checkpoint %s no longer exists ... skipping', checkpoint)
elif mode == 'train_and_eval':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
tf.logging.info('Starting training at step=%d.' % current_step)
train_steps_per_eval = int(
hparams.num_epochs_per_eval * train_steps_per_epoch)
# Final Evaluation if training is finished.
if current_step >= hparams.num_epochs * train_steps_per_epoch:
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
while current_step < hparams.num_epochs * train_steps_per_epoch:
image_classifier.train(
input_fn=imagenet_train.input_fn, steps=train_steps_per_eval)
current_step += train_steps_per_eval
tf.logging.info('Starting evaluation at step=%d.' % current_step)
eval_results = image_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
elif mode == 'predict':
for checkpoint in _get_next_checkpoint():
tf.logging.info('Starting prediction ...')
time_hook = model_lib.SessionTimingHook()
eval_hooks.append(time_hook)
result_iter = image_classifier.predict(
input_fn=imagenet_eval.input_fn,
hooks=eval_hooks,
checkpoint_path=checkpoint,
yield_single_examples=False)
results = list(itertools.islice(result_iter, eval_steps))
tf.logging.info('Inference speed = {} images per second.'.format(
time_hook.compute_speed(len(results) * eval_batch_size)))
elif mode == 'train':
current_step = _load_global_step_from_checkpoint_dir(model_dir)
total_step = int(hparams.num_epochs * train_steps_per_epoch)
if current_step < total_step:
tf.logging.info('Starting training ...')
image_classifier.train(
input_fn=imagenet_train.input_fn,
steps=total_step-current_step)
else:
tf.logging.info('Mode not found.')
if FLAGS.export_dir is not None:
tf.logging.info('Starting exporting saved model ...')
serving_shape = [hparams.image_size, hparams.image_size, 3]
export_path = image_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=build_image_serving_input_receiver_fn(
serving_shape),
as_text=True)
if FLAGS.add_warmup_requests:
write_warmup_requests(
export_path,
FLAGS.model_name,
hparams.image_size,
batch_sizes=FLAGS.inference_batch_sizes)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
| 35.579655 | 125 | 0.710579 | [
"MIT"
] | boristown/tpu | models/official/amoeba_net/amoeba_net.py | 18,537 | Python |
# Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
import csv
import os
import os.path as osp
from datumaro.components.annotation import (
AnnotationType, Bbox, Label, LabelCategories, Points,
)
from datumaro.components.converter import Converter
from datumaro.components.extractor import DatasetItem, Extractor, Importer
from datumaro.util.image import find_images
class VggFace2Path:
ANNOTATION_DIR = "bb_landmark"
IMAGE_EXT = '.jpg'
BBOXES_FILE = 'loose_bb_'
LANDMARKS_FILE = 'loose_landmark_'
LABELS_FILE = 'labels.txt'
IMAGES_DIR_NO_LABEL = 'no_label'
class VggFace2Extractor(Extractor):
def __init__(self, path):
subset = None
if osp.isdir(path):
self._path = path
elif osp.isfile(path):
subset = osp.splitext(osp.basename(path).split('_')[2])[0]
self._path = osp.dirname(path)
else:
raise Exception("Can't read annotations from '%s'" % path)
annotation_files = [p for p in os.listdir(self._path)
if (osp.basename(p).startswith(VggFace2Path.BBOXES_FILE) or \
osp.basename(p).startswith(VggFace2Path.LANDMARKS_FILE)) and \
p.endswith('.csv')]
if len(annotation_files) < 1:
raise Exception("Can't find annotations in the directory '%s'" % path)
super().__init__()
self._dataset_dir = osp.dirname(self._path)
self._subsets = {subset} if subset else set(
osp.splitext(f.split('_')[2])[0] for f in annotation_files
)
self._categories = {}
self._items = []
self._load_categories()
for subset in self._subsets:
self._items.extend(list(self._load_items(subset).values()))
def __iter__(self):
return iter(self._items)
def categories(self):
return self._categories
def _load_categories(self):
label_cat = LabelCategories()
path = osp.join(self._dataset_dir, VggFace2Path.LABELS_FILE)
if osp.isfile(path):
with open(path, encoding='utf-8') as labels_file:
lines = [s.strip() for s in labels_file]
for line in lines:
objects = line.split()
label = objects[0]
class_name = None
if 1 < len(objects):
class_name = objects[1]
label_cat.add(label, parent=class_name)
else:
for subset in self._subsets:
subset_path = osp.join(self._dataset_dir, subset)
if osp.isdir(subset_path):
for images_dir in sorted(os.listdir(subset_path)):
if osp.isdir(osp.join(subset_path, images_dir)) and \
images_dir != VggFace2Path.IMAGES_DIR_NO_LABEL:
label_cat.add(images_dir)
self._categories[AnnotationType.label] = label_cat
def _load_items(self, subset):
def _get_label(path):
label_name = path.split('/')[0]
label = None
if label_name != VggFace2Path.IMAGES_DIR_NO_LABEL:
label = \
self._categories[AnnotationType.label].find(label_name)[0]
return label
items = {}
image_dir = osp.join(self._dataset_dir, subset)
if osp.isdir(image_dir):
images = {
osp.splitext(osp.relpath(p, image_dir))[0].replace('\\', '/'): p
for p in find_images(image_dir, recursive=True)
}
else:
images = {}
landmarks_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.LANDMARKS_FILE + subset + '.csv')
if osp.isfile(landmarks_path):
with open(landmarks_path, encoding='utf-8') as content:
landmarks_table = list(csv.DictReader(content))
for row in landmarks_table:
item_id = row['NAME_ID']
label = None
if '/' in item_id:
label = _get_label(item_id)
if item_id not in items:
items[item_id] = DatasetItem(id=item_id, subset=subset,
image=images.get(row['NAME_ID']))
annotations = items[item_id].annotations
if [a for a in annotations if a.type == AnnotationType.points]:
raise Exception("Item %s: an image can have only one "
"set of landmarks" % item_id)
if len([p for p in row if row[p] == '']) == 0 and len(row) == 11:
annotations.append(Points(
[float(row[p]) for p in row if p != 'NAME_ID'],
label=label)
)
elif label is not None:
annotations.append(Label(label=label))
bboxes_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.BBOXES_FILE + subset + '.csv')
if osp.isfile(bboxes_path):
with open(bboxes_path, encoding='utf-8') as content:
bboxes_table = list(csv.DictReader(content))
for row in bboxes_table:
item_id = row['NAME_ID']
label = None
if '/' in item_id:
label = _get_label(item_id)
if item_id not in items:
items[item_id] = DatasetItem(id=item_id, subset=subset,
image=images.get(row['NAME_ID']))
annotations = items[item_id].annotations
if [a for a in annotations if a.type == AnnotationType.bbox]:
raise Exception("Item %s: an image can have only one "
"bbox" % item_id)
if len([p for p in row if row[p] == '']) == 0 and len(row) == 5:
annotations.append(Bbox(float(row['X']), float(row['Y']),
float(row['W']), float(row['H']), label=label))
return items
class VggFace2Importer(Importer):
@classmethod
def find_sources(cls, path):
if osp.isdir(path):
annotation_dir = osp.join(path, VggFace2Path.ANNOTATION_DIR)
if osp.isdir(annotation_dir):
return [{
'url': annotation_dir, 'format': VggFace2Extractor.NAME,
}]
elif osp.isfile(path):
if (osp.basename(path).startswith(VggFace2Path.LANDMARKS_FILE) or \
osp.basename(path).startswith(VggFace2Path.BBOXES_FILE)) and \
path.endswith('.csv'):
return [{
'url': path, 'format': VggFace2Extractor.NAME,
}]
return []
class VggFace2Converter(Converter):
DEFAULT_IMAGE_EXT = VggFace2Path.IMAGE_EXT
def apply(self):
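        # Writes the VGG Face2 layout: a labels.txt file at the top level,
        # per-subset images grouped into per-label directories, and CSV
        # annotation files (loose_landmark_<subset>.csv and
        # loose_bb_<subset>.csv) under the bb_landmark directory.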
def _get_name_id(item_parts, label_name):
if 1 < len(item_parts) and item_parts[0] == label_name:
return '/'.join([label_name, *item_parts[1:]])
else:
return '/'.join([label_name, *item_parts])
save_dir = self._save_dir
os.makedirs(save_dir, exist_ok=True)
labels_path = osp.join(save_dir, VggFace2Path.LABELS_FILE)
labels_file = ''
for label in self._extractor.categories()[AnnotationType.label]:
labels_file += '%s' % label.name
if label.parent:
labels_file += ' %s' % label.parent
labels_file += '\n'
with open(labels_path, 'w', encoding='utf-8') as f:
f.write(labels_file)
label_categories = self._extractor.categories()[AnnotationType.label]
for subset_name, subset in self._extractor.subsets().items():
bboxes_table = []
landmarks_table = []
for item in subset:
item_parts = item.id.split('/')
if item.has_image and self._save_images:
labels = set(p.label for p in item.annotations
if getattr(p, 'label') != None)
if labels:
for label in labels:
image_dir = label_categories[label].name
if 1 < len(item_parts) and image_dir == item_parts[0]:
image_dir = ''
self._save_image(item, subdir=osp.join(subset_name,
image_dir))
else:
image_dir = VggFace2Path.IMAGES_DIR_NO_LABEL
if 1 < len(item_parts) and image_dir == item_parts[0]:
image_dir = ''
self._save_image(item, subdir=osp.join(subset_name,
image_dir))
landmarks = [a for a in item.annotations
if a.type == AnnotationType.points]
if 1 < len(landmarks):
raise Exception("Item (%s, %s): an image can have only one "
"set of landmarks" % (item.id, item.subset))
if landmarks:
if landmarks[0].label is not None and \
label_categories[landmarks[0].label].name:
name_id = _get_name_id(item_parts,
label_categories[landmarks[0].label].name)
else:
name_id = _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)
points = landmarks[0].points
if len(points) != 10:
landmarks_table.append({'NAME_ID': name_id})
else:
landmarks_table.append({'NAME_ID': name_id,
'P1X': points[0], 'P1Y': points[1],
'P2X': points[2], 'P2Y': points[3],
'P3X': points[4], 'P3Y': points[5],
'P4X': points[6], 'P4Y': points[7],
'P5X': points[8], 'P5Y': points[9]})
bboxes = [a for a in item.annotations
if a.type == AnnotationType.bbox]
if 1 < len(bboxes):
raise Exception("Item (%s, %s): an image can have only one "
"bbox" % (item.id, item.subset))
if bboxes:
if bboxes[0].label is not None and \
label_categories[bboxes[0].label].name:
name_id = _get_name_id(item_parts,
label_categories[bboxes[0].label].name)
else:
name_id = _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)
bboxes_table.append({'NAME_ID': name_id, 'X': bboxes[0].x,
'Y': bboxes[0].y, 'W': bboxes[0].w, 'H': bboxes[0].h})
labels = [a for a in item.annotations
if a.type == AnnotationType.label]
for label in labels:
if label.label is not None and \
label_categories[label.label].name:
name_id = _get_name_id(item_parts,
                            label_categories[label.label].name)
else:
name_id = _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)
landmarks_table.append({'NAME_ID': name_id})
if not landmarks and not bboxes and not labels:
landmarks_table.append({'NAME_ID': _get_name_id(item_parts,
VggFace2Path.IMAGES_DIR_NO_LABEL)})
landmarks_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.LANDMARKS_FILE + subset_name + '.csv')
os.makedirs(osp.dirname(landmarks_path), exist_ok=True)
with open(landmarks_path, 'w', encoding='utf-8', newline='') as file:
columns = ['NAME_ID', 'P1X', 'P1Y', 'P2X', 'P2Y',
'P3X', 'P3Y', 'P4X', 'P4Y', 'P5X', 'P5Y']
writer = csv.DictWriter(file, fieldnames=columns)
writer.writeheader()
writer.writerows(landmarks_table)
if bboxes_table:
bboxes_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR,
VggFace2Path.BBOXES_FILE + subset_name + '.csv')
os.makedirs(osp.dirname(bboxes_path), exist_ok=True)
with open(bboxes_path, 'w', encoding='utf-8', newline='') as file:
columns = ['NAME_ID', 'X', 'Y', 'W', 'H']
writer = csv.DictWriter(file, fieldnames=columns)
writer.writeheader()
writer.writerows(bboxes_table)
| 43.41 | 86 | 0.522 | [
"MIT"
] | openvinotoolkit/datumaro | datumaro/plugins/vgg_face2_format.py | 13,023 | Python |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
from mo.graph.graph import Graph
from mo.pipeline.common import get_ir_version
from mo.utils import class_registration
def unified_pipeline(argv: argparse.Namespace):
graph = Graph(cmd_params=argv, name=argv.model_name, ir_version=get_ir_version(argv))
class_registration.apply_replacements(graph, [
class_registration.ClassType.LOADER,
class_registration.ClassType.FRONT_REPLACER,
class_registration.ClassType.MIDDLE_REPLACER,
class_registration.ClassType.BACK_REPLACER
])
return graph
| 31.85 | 89 | 0.78022 | [
"Apache-2.0"
] | AkillesAILimited/openvino | model-optimizer/mo/pipeline/unified.py | 637 | Python |
from pyFilter.py_filter import PyFilter
if __name__ == "__main__":
p = PyFilter()
try:
p.run()
except KeyboardInterrupt:
print("\nClosing PyFilter")
finally:
p.make_persistent(loop=False) # Save any outstanding bans without the constant loop
if p.settings["database"] == "sqlite":
p.database_connection.sqlite_connection.close()
print("Closed sqlite connection")
| 31.214286 | 92 | 0.647597 | [
"MIT"
] | Jason2605/PyFilter | run.py | 437 | Python |
import codecs
import json
from tqdm import tqdm
import copy
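# Merge NER attribute predictions from several model output files
# (dialog_chinese-macbert, macbert2-f-f, macbert2-f, mcbert, medbert): the
# macbert2-f predictions serve as the base, and each entity's 'attr' field is
# selectively overridden by the other models according to the hand-tuned
# priority rules in the loop below.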
submit_result2 = []
with codecs.open('dialog_chinese-macbert.txt', mode='r', encoding='utf8') as f:
    reader = f.readlines()
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result2.append(dialogue_)
submit_result4 = []
with codecs.open('macbert2-f-f.txt', mode='r', encoding='utf8') as f:
    reader = f.readlines()
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result4.append(dialogue_)
submit_result3 = []
with codecs.open('macbert2-f.txt', mode='r', encoding='utf8') as f:
    reader = f.readlines()
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result3.append(dialogue_)
submit_result5 = []
with codecs.open('mcbert.txt', mode='r', encoding='utf8') as f:
    reader = f.readlines()
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result5.append(dialogue_)
submit_result6 = []
with codecs.open('medbert.txt', mode='r', encoding='utf8') as f:
    reader = f.readlines()
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
submit_result6.append(dialogue_)
submit_result = []
with codecs.open('macbert2-f.txt', mode='r', encoding='utf8') as f:
    reader = f.readlines()
data_list = []
for dialogue_idx_, dialogue_ in enumerate(tqdm(reader)):
dialogue_ = json.loads(dialogue_)
for content_idx_, contents_ in enumerate(dialogue_['dialog_info']):
terms_ = contents_['ner']
if len(terms_) != 0:
idx_ = 0
for _ner_idx, term_ in enumerate(terms_):
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性' and dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性' and dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result3[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性':
if submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result2[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性':
if submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result4[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阴性':
if submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '不标注':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
# elif submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他':
# dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result5[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
elif dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] != submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']:
if dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '阳性':
if submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] == '其他':
dialogue_['dialog_info'][content_idx_]['ner'][_ner_idx]['attr'] = submit_result6[dialogue_idx_]['dialog_info'][content_idx_]['ner'][_ner_idx]['attr']
submit_result.append(dialogue_)
with open('./result.txt', 'w', encoding='utf-8') as output_data:
for json_content in submit_result:
        output_data.write(json.dumps(json_content, ensure_ascii=False) + '\n')
| 51.537815 | 268 | 0.612098 | [
"Apache-2.0"
] | DataArk/CHIP2021-Task1-Top1 | predict/ensemble.py | 6,187 | Python |
"""hvac init."""
| 8.5 | 16 | 0.470588 | [
"Apache-2.0"
] | jokajak/infinity_tracker | hvac/__init__.py | 17 | Python |
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.randn(10,3),columns=['a','b','c'],index=list('abcdefghij'))
print(df)
df.iloc[::2, 0] = np.nan; df.iloc[::4, 1] = np.nan; df.iloc[::3, 2] = np.nan
df = df.dropna(subset=['a','b'])  # drop rows where column 'a' or 'b' is NaN
bins = np.arange(-3,3,0.1)
bins = [-100,0,100]
indices = np.digitize(df.a,bins)
'''
bins describes a set of contiguous intervals, e.g. 0:[-1,2), 1:[2,7), 2:[7,9), 3:[9,10),
which is written as the array [-1,2,7,9,10].
np.digitize() returns, for each element of its first argument, the index of the
interval in bins that the value falls into.
'''
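# For example, with bins = [-100, 0, 100]:
#   np.digitize([-0.5, 0.3], bins) -> array([1, 2])
# i.e. negative values fall into interval 1 ([-100, 0)) and non-negative
# values into interval 2 ([0, 100)).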
groups = df.groupby(indices)
print('#'*20)
for i,group in groups:
print(i,len(group))
print(group)
print('#'*20)
print(groups.mean())
| 26.25 | 87 | 0.631746 | [
"MIT"
] | UpSea/midProjects | BasicOperations/05_Pandas/05_Pandas_02_groupby.py | 726 | Python |
"""All functions return a Component so you can easily pipe or compose them.
There are two types of functions:
- decorators: return the original component
- containers: return a new component
"""
from functools import lru_cache
import numpy as np
from omegaconf import OmegaConf
from pydantic import validate_arguments
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.text_rectangular import text_rectangular_multi_layer
from gdsfactory.functools_ import partial
from gdsfactory.port import auto_rename_ports
from gdsfactory.types import (
Anchor,
Axis,
ComponentFactory,
ComponentOrFactory,
Float2,
Layer,
List,
Optional,
Strs,
)
cache = lru_cache(maxsize=None)
def add_port(component: Component, **kwargs) -> Component:
"""Return Component with a new port."""
component.add_port(**kwargs)
return component
@cell
def add_text(
component: ComponentOrFactory,
text: str = "",
text_offset: Float2 = (0, 0),
text_anchor: Anchor = "cc",
text_factory: ComponentFactory = text_rectangular_multi_layer,
) -> Component:
"""Return component inside a new component with text geometry.
Args:
component:
text: text string.
text_offset: relative to component anchor. Defaults to center (cc).
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
component = component() if callable(component) else component
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
t = component_new << text_factory(text)
t.move((np.array(text_offset) + getattr(ref.size_info, text_anchor)))
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def add_texts(
components: List[ComponentOrFactory],
prefix: str = "",
index0: int = 0,
**kwargs,
) -> List[Component]:
"""Return a list of Component with text labels.
Args:
components: list of components
prefix: Optional prefix for the labels
index0: defaults to 0 (0, for first component, 1 for second ...)
keyword Args:
text_offset: relative to component size info anchor. Defaults to center.
text_anchor: relative to component (ce cw nc ne nw sc se sw center cc).
text_factory: function to add text labels.
"""
return [
add_text(component, text=f"{prefix}{i+index0}", **kwargs)
for i, component in enumerate(components)
]
@cell
def rotate(
component: ComponentOrFactory,
angle: float = 90,
) -> Component:
"""Return rotated component inside a new component.
Most times you just need to place a reference and rotate it.
This rotate function just encapsulates the rotated reference into a new component.
Args:
component:
angle: in degrees
"""
component = component() if callable(component) else component
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.rotate(angle)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
rotate90 = partial(rotate, angle=90)
rotate90n = partial(rotate, angle=-90)
rotate180 = partial(rotate, angle=180)
@cell
def mirror(component: Component, p1: Float2 = (0, 1), p2: Float2 = (0, 0)) -> Component:
"""Return new Component with a mirrored reference.
Args:
p1: first point to define mirror axis
p2: second point to define mirror axis
"""
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.mirror(p1=p1, p2=p2)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
@cell
def move(
component: Component,
origin=(0, 0),
destination=None,
axis: Optional[Axis] = None,
) -> Component:
"""Return new Component with a moved reference to the original component.
Args:
origin: of component
destination:
axis: x or y axis
"""
component_new = Component()
component_new.component = component
ref = component_new.add_ref(component)
ref.move(origin=origin, destination=destination, axis=axis)
component_new.add_ports(ref.ports)
component_new.copy_child_info(component)
return component_new
def move_port_to_zero(component: Component, port_name: str = "o1"):
"""Return a container that contains a reference to the original component.
    where the new component has port_name at (0, 0).
"""
if port_name not in component.ports:
raise ValueError(
f"port_name = {port_name!r} not in {list(component.ports.keys())}"
)
return move(component, -component.ports[port_name].midpoint)
def update_info(component: Component, **kwargs) -> Component:
"""Return Component with updated info."""
component.info.update(**kwargs)
return component
@validate_arguments
def add_settings_label(
component: Component, layer_label: Layer = (66, 0), settings: Optional[Strs] = None
) -> Component:
"""Add a settings label to a component.
Args:
component:
layer_label:
settings: tuple or list of settings. if None, adds all changed settings
"""
d = (
{setting: component.get_setting(setting) for setting in settings}
if settings
else component.info.changed
)
component.add_label(text=OmegaConf.to_yaml(d), layer=layer_label)
return component
__all__ = (
"add_port",
"add_text",
"add_settings_label",
"auto_rename_ports",
"cache",
"mirror",
"move",
"move_port_to_zero",
"rotate",
"update_info",
)
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.mmi1x2(
length_mmi=10,
decorator=gf.partial(add_settings_label, settings=["name", "length_mmi"]),
)
# c.show()
cr = c.rotate()
cr.pprint()
cr.show()
# cm = move(c, destination=(20, 20))
# cm.show()
# cm = mirror(c)
# cm.show()
# cm = c.mirror()
# cm.show()
# cm2 = move_port_to_zero(cm)
# cm2.show()
# cm3 = add_text(c, "hi")
# cm3.show()
# cr = rotate(component=c)
# cr.show()
# print(component_rotated)
# component_rotated.pprint
# component_netlist = component.get_netlist()
# component.pprint_netlist()
| 26.166667 | 88 | 0.676069 | [
"MIT"
] | jorgepadilla19/gdsfactory | gdsfactory/functions.py | 6,594 | Python |
#!/usr/bin/python3 -B
exec(open("../index.py").read())
from waitress import serve
serve(application, host='0.0.0.0', port=8080, threads=1, channel_timeout=1)
| 26.833333 | 76 | 0.695652 | [
"MIT"
] | shark555/websnake_demo | scripts/serve.py | 161 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/2/19 11:06
# @User : zhunishengrikuaile
# @File : TrainTicket.py
# @Email : [email protected]
# @MyBlog : WWW.SHUJIAN.ORG
# @NetName : 書劍
# @Software: Baidu image recognition API wrapper
# Train ticket recognition
import os
import base64
import requests
from bin.AccessToken.AccessToken import AccessToken
from config.config import LOCALHOST_PATH, URL_LIST_URL
ACCESS_TOKEN = AccessToken().getToken()['access_token']
TRAIN_TICKET_URL = URL_LIST_URL['TRAIN_TICKET'] + '?access_token={}'.format(ACCESS_TOKEN)
class TrainTicketSuper(object):
pass
class TrainTicket(TrainTicketSuper):
'''
    Obtain the ID via the asynchronous interface.
@image
'''
def __init__(self, image=None):
self.HEADER = {
'Content-Type': 'application/x-www-form-urlencoded',
}
self.IMAGE_CONFIG = {
}
if image is not None:
imagepath = os.path.exists(LOCALHOST_PATH['PATH'] + image)
if imagepath == True:
images = LOCALHOST_PATH['PATH'] + image
with open(images, 'rb') as images:
self.IMAGE_CONFIG['image'] = base64.b64encode(images.read())
def postTrainTicket(self):
if self.IMAGE_CONFIG.get('image', None) == None:
return 'image参数不能为空!'
trainTicket = requests.post(url=TRAIN_TICKET_URL, headers=self.HEADER,
data=self.IMAGE_CONFIG)
return trainTicket.json()
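# Minimal usage sketch (the image file name is only an example; it must exist
# under LOCALHOST_PATH['PATH']):
#   ticket = TrainTicket(image='ticket.jpg')
#   print(ticket.postTrainTicket())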
| 28.019231 | 89 | 0.621139 | [
"Apache-2.0"
] | haodaohong/zimt8 | utils/BaiduTextApi/BaiduTextApi/bin/TrainTicket/TrainTicket.py | 1,509 | Python |
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
#
# License: BSD (3-clause)
from warnings import warn
from copy import deepcopy
import os.path as op
import numpy as np
from scipy import linalg
from ..externals.six import BytesIO
from datetime import datetime as dt
from .open import fiff_open
from .tree import dir_tree_find, copy_tree
from .constants import FIFF
from .tag import read_tag
from .proj import read_proj, write_proj
from .ctf import read_ctf_comp, write_ctf_comp
from .channels import read_bad_channels
from .write import (start_file, end_file, start_block, end_block,
write_string, write_dig_point, write_float, write_int,
write_coord_trans, write_ch_info, write_name_list,
write_julian)
from ..utils import logger, verbose
def _summarize_str(st):
"""Aux function"""
return st[:56][::-1].split(',', 1)[-1][::-1] + ', ...'
class Info(dict):
""" Info class to nicely represent info dicts
"""
def __repr__(self):
"""Summarize info instead of printing all"""
strs = ['<Info | %s non-empty fields']
non_empty = 0
for k, v in self.items():
if k in ['bads', 'ch_names']:
entr = (', '.join(b for ii, b in enumerate(v) if ii < 10)
if v else '0 items')
if len(entr) >= 56:
                    # get rid of half-printed ch names
entr = _summarize_str(entr)
elif k == 'filename' and v:
path, fname = op.split(v)
entr = path[:10] + '.../' + fname
elif k == 'projs' and v:
entr = ', '.join(p['desc'] + ': o%s' %
{0: 'ff', 1: 'n'}[p['active']] for p in v)
if len(entr) >= 56:
entr = _summarize_str(entr)
elif k == 'meas_date' and np.iterable(v):
                # first entry in meas_date is meaningful
entr = dt.fromtimestamp(v[0]).strftime('%Y-%m-%d %H:%M:%S')
else:
this_len = (len(v) if hasattr(v, '__len__') else
('%s' % v if v is not None else None))
entr = (('%d items' % this_len) if isinstance(this_len, int)
else ('%s' % this_len if this_len else ''))
if entr:
non_empty += 1
entr = ' | ' + entr
strs.append('%s : %s%s' % (k, str(type(v))[7:-2], entr))
strs_non_empty = sorted(s for s in strs if '|' in s)
strs_empty = sorted(s for s in strs if '|' not in s)
st = '\n '.join(strs_non_empty + strs_empty)
st += '\n>'
st %= non_empty
return st
def _anonymize(self):
if self.get('subject_info') is not None:
del self['subject_info']
def read_fiducials(fname):
"""Read fiducials from a fiff file
Returns
-------
pts : list of dicts
List of digitizer points (each point in a dict).
coord_frame : int
The coordinate frame of the points (one of
mne.fiff.FIFF.FIFFV_COORD_...)
"""
fid, tree, _ = fiff_open(fname)
with fid:
isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)
isotrak = isotrak[0]
pts = []
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if kind == FIFF.FIFF_DIG_POINT:
tag = read_tag(fid, pos)
pts.append(tag.data)
elif kind == FIFF.FIFF_MNE_COORD_FRAME:
tag = read_tag(fid, pos)
coord_frame = tag.data[0]
if coord_frame == FIFF.FIFFV_COORD_UNKNOWN:
err = ("No coordinate frame was found in the file %r, it is probably "
"not a valid fiducials file." % fname)
raise ValueError(err)
# coord_frame is not stored in the tag
for pt in pts:
pt['coord_frame'] = coord_frame
return pts, coord_frame
def write_fiducials(fname, pts, coord_frame=0):
"""Write fiducials to a fiff file
Parameters
----------
fname : str
Destination file name.
pts : iterator of dict
Iterator through digitizer points. Each point is a dictionary with
the keys 'kind', 'ident' and 'r'.
coord_frame : int
The coordinate frame of the points (one of
mne.fiff.FIFF.FIFFV_COORD_...)
"""
pts_frames = set((pt.get('coord_frame', coord_frame) for pt in pts))
bad_frames = pts_frames - set((coord_frame,))
if len(bad_frames) > 0:
err = ("Points have coord_frame entries that are incompatible with "
"coord_frame=%i: %s." % (coord_frame, str(tuple(bad_frames))))
raise ValueError(err)
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_ISOTRAK)
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame)
for pt in pts:
write_dig_point(fid, pt)
end_block(fid, FIFF.FIFFB_ISOTRAK)
end_file(fid)
@verbose
def read_info(fname, verbose=None):
"""Read measurement info from a file
Parameters
----------
fname : str
File name.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
info : instance of mne.fiff.meas_info.Info
Info on dataset.
"""
f, tree, _ = fiff_open(fname)
with f as fid:
info = read_meas_info(fid, tree)[0]
return info
@verbose
def read_meas_info(fid, tree, verbose=None):
"""Read the measurement info
Parameters
----------
fid : file
Open file descriptor.
tree : tree
FIF tree structure.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
info : instance of mne.fiff.meas_info.Info
Info on dataset.
meas : dict
Node in tree that contains the info.
"""
# Find the desired blocks
meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
if len(meas) == 0:
raise ValueError('Could not find measurement data')
if len(meas) > 1:
        raise ValueError('Cannot read more than 1 measurement data')
meas = meas[0]
meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
if len(meas_info) == 0:
raise ValueError('Could not find measurement info')
if len(meas_info) > 1:
        raise ValueError('Cannot read more than 1 measurement info')
meas_info = meas_info[0]
# Read measurement info
dev_head_t = None
ctf_head_t = None
meas_date = None
highpass = None
lowpass = None
nchan = None
sfreq = None
chs = []
experimenter = None
description = None
proj_id = None
proj_name = None
line_freq = None
p = 0
for k in range(meas_info['nent']):
kind = meas_info['directory'][k].kind
pos = meas_info['directory'][k].pos
if kind == FIFF.FIFF_NCHAN:
tag = read_tag(fid, pos)
nchan = int(tag.data)
elif kind == FIFF.FIFF_SFREQ:
tag = read_tag(fid, pos)
sfreq = float(tag.data)
elif kind == FIFF.FIFF_CH_INFO:
tag = read_tag(fid, pos)
chs.append(tag.data)
p += 1
elif kind == FIFF.FIFF_LOWPASS:
tag = read_tag(fid, pos)
lowpass = float(tag.data)
elif kind == FIFF.FIFF_HIGHPASS:
tag = read_tag(fid, pos)
highpass = float(tag.data)
elif kind == FIFF.FIFF_MEAS_DATE:
tag = read_tag(fid, pos)
meas_date = tag.data
elif kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, pos)
cand = tag.data
if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
cand['to'] == FIFF.FIFFV_COORD_HEAD:
dev_head_t = cand
elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
cand['to'] == FIFF.FIFFV_COORD_HEAD:
ctf_head_t = cand
elif kind == FIFF.FIFF_EXPERIMENTER:
tag = read_tag(fid, pos)
experimenter = tag.data
elif kind == FIFF.FIFF_DESCRIPTION:
tag = read_tag(fid, pos)
description = tag.data
elif kind == FIFF.FIFF_PROJ_ID:
tag = read_tag(fid, pos)
proj_id = tag.data
elif kind == FIFF.FIFF_PROJ_NAME:
tag = read_tag(fid, pos)
proj_name = tag.data
elif kind == FIFF.FIFF_LINE_FREQ:
tag = read_tag(fid, pos)
line_freq = float(tag.data)
# Check that we have everything we need
if nchan is None:
        raise ValueError('Number of channels is not defined')
if sfreq is None:
raise ValueError('Sampling frequency is not defined')
if len(chs) == 0:
raise ValueError('Channel information not defined')
if len(chs) != nchan:
raise ValueError('Incorrect number of channel definitions found')
if dev_head_t is None or ctf_head_t is None:
hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
if len(hpi_result) == 1:
hpi_result = hpi_result[0]
for k in range(hpi_result['nent']):
kind = hpi_result['directory'][k].kind
pos = hpi_result['directory'][k].pos
if kind == FIFF.FIFF_COORD_TRANS:
tag = read_tag(fid, pos)
cand = tag.data
if cand['from'] == FIFF.FIFFV_COORD_DEVICE and \
cand['to'] == FIFF.FIFFV_COORD_HEAD:
dev_head_t = cand
elif cand['from'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
cand['to'] == FIFF.FIFFV_COORD_HEAD:
ctf_head_t = cand
# Locate the Polhemus data
isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK)
dig = None
if len(isotrak) == 0:
logger.info('Isotrak not found')
elif len(isotrak) > 1:
warn('Multiple Isotrak found')
else:
isotrak = isotrak[0]
dig = []
for k in range(isotrak['nent']):
kind = isotrak['directory'][k].kind
pos = isotrak['directory'][k].pos
if kind == FIFF.FIFF_DIG_POINT:
tag = read_tag(fid, pos)
dig.append(tag.data)
dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
# Locate the acquisition information
acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
acq_pars = None
acq_stim = None
if len(acqpars) == 1:
acqpars = acqpars[0]
for k in range(acqpars['nent']):
kind = acqpars['directory'][k].kind
pos = acqpars['directory'][k].pos
if kind == FIFF.FIFF_DACQ_PARS:
tag = read_tag(fid, pos)
acq_pars = tag.data
elif kind == FIFF.FIFF_DACQ_STIM:
tag = read_tag(fid, pos)
acq_stim = tag.data
# Load the SSP data
projs = read_proj(fid, meas_info)
# Load the CTF compensation data
comps = read_ctf_comp(fid, meas_info, chs)
# Load the bad channel list
bads = read_bad_channels(fid, meas_info)
#
# Put the data together
#
if tree['id'] is not None:
info = Info(file_id=tree['id'])
else:
info = Info(file_id=None)
subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT)
if len(subject_info) == 1:
subject_info = subject_info[0]
si = dict()
for k in range(subject_info['nent']):
kind = subject_info['directory'][k].kind
pos = subject_info['directory'][k].pos
if kind == FIFF.FIFF_SUBJ_ID:
tag = read_tag(fid, pos)
si['id'] = int(tag.data)
elif kind == FIFF.FIFF_SUBJ_HIS_ID:
tag = read_tag(fid, pos)
si['his_id'] = str(tag.data)
elif kind == FIFF.FIFF_SUBJ_LAST_NAME:
tag = read_tag(fid, pos)
si['last_name'] = str(tag.data)
elif kind == FIFF.FIFF_SUBJ_FIRST_NAME:
tag = read_tag(fid, pos)
si['first_name'] = str(tag.data)
elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY:
tag = read_tag(fid, pos)
si['birthday'] = tag.data
elif kind == FIFF.FIFF_SUBJ_SEX:
tag = read_tag(fid, pos)
si['sex'] = int(tag.data)
elif kind == FIFF.FIFF_SUBJ_HAND:
tag = read_tag(fid, pos)
si['hand'] = int(tag.data)
else:
si = None
info['subject_info'] = si
# Load extra information blocks
read_extra_meas_info(fid, tree, info)
# Make the most appropriate selection for the measurement id
if meas_info['parent_id'] is None:
if meas_info['id'] is None:
if meas['id'] is None:
if meas['parent_id'] is None:
info['meas_id'] = info['file_id']
else:
info['meas_id'] = meas['parent_id']
else:
info['meas_id'] = meas['id']
else:
info['meas_id'] = meas_info['id']
else:
info['meas_id'] = meas_info['parent_id']
info['experimenter'] = experimenter
info['description'] = description
info['proj_id'] = proj_id
info['proj_name'] = proj_name
if meas_date is None:
info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]
else:
info['meas_date'] = meas_date
info['nchan'] = nchan
info['sfreq'] = sfreq
info['highpass'] = highpass if highpass is not None else 0
info['lowpass'] = lowpass if lowpass is not None else info['sfreq'] / 2.0
info['line_freq'] = line_freq
# Add the channel information and make a list of channel names
# for convenience
info['chs'] = chs
info['ch_names'] = [ch['ch_name'] for ch in chs]
#
# Add the coordinate transformations
#
info['dev_head_t'] = dev_head_t
info['ctf_head_t'] = ctf_head_t
if dev_head_t is not None and ctf_head_t is not None:
head_ctf_trans = linalg.inv(ctf_head_t['trans'])
dev_ctf_trans = np.dot(head_ctf_trans, info['dev_head_t']['trans'])
info['dev_ctf_t'] = {'from': FIFF.FIFFV_COORD_DEVICE,
'to': FIFF.FIFFV_MNE_COORD_CTF_HEAD,
'trans': dev_ctf_trans}
else:
info['dev_ctf_t'] = None
    # All kinds of auxiliary stuff
info['dig'] = dig
info['bads'] = bads
info['projs'] = projs
info['comps'] = comps
info['acq_pars'] = acq_pars
info['acq_stim'] = acq_stim
return info, meas
def read_extra_meas_info(fid, tree, info):
"""Read extra blocks from fid"""
# current method saves them into a BytesIO file instance for simplicity
# this and its partner, write_extra_meas_info, could be made more
    # comprehensive (i.e., actually parse and read the data instead of
# just storing it for later)
blocks = [FIFF.FIFFB_EVENTS, FIFF.FIFFB_HPI_RESULT, FIFF.FIFFB_HPI_MEAS,
FIFF.FIFFB_PROCESSING_HISTORY]
info['orig_blocks'] = blocks
fid_str = BytesIO()
fid_str = start_file(fid_str)
start_block(fid_str, FIFF.FIFFB_MEAS_INFO)
for block in blocks:
nodes = dir_tree_find(tree, block)
copy_tree(fid, tree['id'], nodes, fid_str)
info['orig_fid_str'] = fid_str
def write_extra_meas_info(fid, info):
"""Write otherwise left out blocks of data"""
# uses BytesIO fake file to read the appropriate blocks
if 'orig_blocks' in info and info['orig_blocks'] is not None:
# Blocks from the original
blocks = info['orig_blocks']
fid_str, tree, _ = fiff_open(info['orig_fid_str'])
for block in blocks:
nodes = dir_tree_find(tree, block)
copy_tree(fid_str, tree['id'], nodes, fid)
def write_meas_info(fid, info, data_type=None, reset_range=True):
"""Write measurement info into a file id (from a fif file)
Parameters
----------
fid : file
Open file descriptor
info : instance of mne.fiff.meas_info.Info
The measurement info structure
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
Note
----
Tags are written in a particular order for compatibility with maxfilter
"""
# Measurement info
start_block(fid, FIFF.FIFFB_MEAS_INFO)
# Extra measurement info
write_extra_meas_info(fid, info)
# Polhemus data
if info['dig'] is not None:
start_block(fid, FIFF.FIFFB_ISOTRAK)
for d in info['dig']:
write_dig_point(fid, d)
end_block(fid, FIFF.FIFFB_ISOTRAK)
# megacq parameters
if info['acq_pars'] is not None or info['acq_stim'] is not None:
start_block(fid, FIFF.FIFFB_DACQ_PARS)
if info['acq_pars'] is not None:
write_string(fid, FIFF.FIFF_DACQ_PARS, info['acq_pars'])
if info['acq_stim'] is not None:
write_string(fid, FIFF.FIFF_DACQ_STIM, info['acq_stim'])
end_block(fid, FIFF.FIFFB_DACQ_PARS)
# Coordinate transformations if the HPI result block was not there
if info['dev_head_t'] is not None:
write_coord_trans(fid, info['dev_head_t'])
if info['ctf_head_t'] is not None:
write_coord_trans(fid, info['ctf_head_t'])
# Projectors
write_proj(fid, info['projs'])
# CTF compensation info
write_ctf_comp(fid, info['comps'])
# Bad channels
if len(info['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, info['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# General
if info.get('experimenter') is not None:
write_string(fid, FIFF.FIFF_EXPERIMENTER, info['experimenter'])
if info.get('description') is not None:
write_string(fid, FIFF.FIFF_DESCRIPTION, info['description'])
if info.get('proj_id') is not None:
write_int(fid, FIFF.FIFF_PROJ_ID, info['proj_id'])
if info.get('proj_name') is not None:
write_string(fid, FIFF.FIFF_PROJ_NAME, info['proj_name'])
if info.get('meas_date') is not None:
write_int(fid, FIFF.FIFF_MEAS_DATE, info['meas_date'])
write_int(fid, FIFF.FIFF_NCHAN, info['nchan'])
write_float(fid, FIFF.FIFF_SFREQ, info['sfreq'])
write_float(fid, FIFF.FIFF_LOWPASS, info['lowpass'])
write_float(fid, FIFF.FIFF_HIGHPASS, info['highpass'])
if info.get('line_freq') is not None:
write_float(fid, FIFF.FIFF_LINE_FREQ, info['line_freq'])
if data_type is not None:
write_int(fid, FIFF.FIFF_DATA_PACK, data_type)
# Channel information
for k, c in enumerate(info['chs']):
# Scan numbers may have been messed up
c = deepcopy(c)
c['scanno'] = k + 1
# for float/double, the "range" param is unnecessary
if reset_range is True:
c['range'] = 1.0
write_ch_info(fid, c)
# Subject information
if info.get('subject_info') is not None:
start_block(fid, FIFF.FIFFB_SUBJECT)
si = info['subject_info']
if si.get('id') is not None:
write_int(fid, FIFF.FIFF_SUBJ_ID, si['id'])
if si.get('his_id') is not None:
write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si['his_id'])
if si.get('last_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si['last_name'])
if si.get('first_name') is not None:
write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si['first_name'])
if si.get('birthday') is not None:
write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si['birthday'])
if si.get('sex') is not None:
write_int(fid, FIFF.FIFF_SUBJ_SEX, si['sex'])
if si.get('hand') is not None:
write_int(fid, FIFF.FIFF_SUBJ_HAND, si['hand'])
end_block(fid, FIFF.FIFFB_SUBJECT)
end_block(fid, FIFF.FIFFB_MEAS_INFO)
def write_info(fname, info, data_type=None, reset_range=True):
"""Write measurement info in fif file.
Parameters
----------
fname : str
The name of the file. Should end by -info.fif.
info : instance of mne.fiff.meas_info.Info
The measurement info structure
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), or 16 (mne.fiff.FIFF.FIFFT_DAU_PACK16) for
raw data.
reset_range : bool
If True, info['chs'][k]['range'] will be set to unity.
"""
fid = start_file(fname)
start_block(fid, FIFF.FIFFB_MEAS)
write_meas_info(fid, info, data_type, reset_range)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
| 34.535714 | 79 | 0.58414 | [
"BSD-3-Clause"
] | Anevar/mne-python | mne/fiff/meas_info.py | 21,274 | Python |
import ctypes
import pytest
c_lib = ctypes.CDLL('../solutions/0709-to-lower/to-lower.so')
@pytest.mark.parametrize('string, ans',
[(b"Hello", b"hello"),
(b"here", b"here"),
(b"LOVELY", b"lovely")])
def test_to_lower(string, ans):
c_lib.toLowerCase(string)
assert string == ans
| 27.769231 | 61 | 0.542936 | [
"MIT"
] | msztylko/2020ify-leetcoding | tests/test_0709.py | 361 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import numpy as np
import tensorflow as tf
from niftynet.layer.activation import ActiLayer
from niftynet.layer.convolution import ConvolutionalLayer
from niftynet.layer.deconvolution import DeconvolutionalLayer
from niftynet.layer.fully_connected import FullyConnectedLayer
from niftynet.layer.gan_blocks import BaseDiscriminator
from niftynet.layer.gan_blocks import BaseGenerator
from niftynet.layer.gan_blocks import GANImageBlock
class SimulatorGAN(GANImageBlock):
"""
implementation of
Hu et al., "Freehand Ultrasound Image Simulation with Spatially-Conditioned
Generative Adversarial Networks", MICCAI RAMBO 2017
https://arxiv.org/abs/1707.05392
"""
def __init__(self, name='simulator_GAN'):
super(SimulatorGAN, self).__init__(
generator=ImageGenerator(name='generator'),
discriminator=ImageDiscriminator(name='discriminator'),
clip=None,
name=name)
class ImageGenerator(BaseGenerator):
def __init__(self, name):
super(ImageGenerator, self).__init__(name=name)
self.initializers = {'w': tf.random_normal_initializer(0, 0.02),
'b': tf.constant_initializer(0.001)}
self.noise_channels_per_layer = 0
self.with_conditionings = [True, True, True, True, False]
def layer_op(self, random_source, image_size, conditioning, is_training):
keep_prob_ph = 1 # not passed in as a placeholder
add_noise = self.noise_channels_per_layer
if conditioning is not None:
conditioning_channels = conditioning.shape.as_list()[-1]
conditioning_channels = conditioning_channels + add_noise
else:
conditioning_channels = add_noise
# feature channels design pattern
ch = [512]
sz = image_size[:-1]
for i in range(4):
# compute output n_feature_channels of i-th layer
new_ch = ch[-1] + conditioning_channels * self.with_conditionings[i]
new_ch = round(new_ch / 2)
ch.append(new_ch)
# compute output spatial size of i-th layer
sz = [int(round(spatial_len / 2)) for spatial_len in sz]
ch.append(1) # last layer single channel image
# resizing utilities
spatial_rank = len(image_size) - 1
if spatial_rank == 3:
def resize_func(x, sz):
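                # Resize a 5-D tensor (batch, x, y, z, channels) to the target
                # spatial size using two successive 2-D bilinear resizes: first
                # over the (x, y) plane, then over the flattened (x*y, z) plane.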
sz_x = x.shape.as_list()
r1 = tf.image.resize_images(
tf.reshape(x, sz_x[:3] + [-1]), sz[0:2])
r2 = tf.image.resize_images(
tf.reshape(r1, [sz_x[0], sz[0] * sz[1], sz_x[3], -1]),
[sz[0] * sz[1], sz[2]])
resized_3d = tf.reshape(r2, [sz_x[0]] + sz + [sz_x[-1]])
return resized_3d
elif spatial_rank == 2:
resize_func = tf.image.resize_bilinear
def concat_cond(x, with_conditioning):
noise = []
if add_noise:
feature_shape = x.shape.as_list()[0:-1]
noise_shape = feature_shape + [add_noise]
noise = [tf.random_normal(noise_shape, 0.0, 0.1)]
if with_conditioning and conditioning is not None:
with tf.name_scope('concat_conditioning'):
spatial_shape = x.shape.as_list()[1:-1]
resized_cond = resize_func(conditioning, spatial_shape)
return tf.concat([x, resized_cond] + noise, axis=-1)
return x
def conv(ch, x):
with tf.name_scope('conv'):
conv_layer = ConvolutionalLayer(
n_output_chns=ch,
kernel_size=3,
feature_normalization='batch',
with_bias=False,
acti_func='relu',
w_initializer=self.initializers['w'])
return conv_layer(x, is_training=is_training)
def up(ch, x):
with tf.name_scope('up'):
deconv_layer = DeconvolutionalLayer(
n_output_chns=ch,
kernel_size=3,
stride=2,
feature_normalization='batch',
with_bias=False,
acti_func='relu',
w_initializer=self.initializers['w'])
return deconv_layer(x, is_training=is_training)
def up_block(ch, x, with_conditioning):
with tf.name_scope('up_block'):
u = up(ch, x)
cond = concat_cond(u, with_conditioning)
return conv(cond.shape.as_list()[-1], cond)
def noise_to_image(sz, ch, rand_tensor, with_conditioning):
batch_size = rand_tensor.shape.as_list()[0]
output_shape = [batch_size] + sz + [ch]
with tf.name_scope('noise_to_image'):
g_no_0 = np.prod(sz) * ch
fc_layer = FullyConnectedLayer(
n_output_chns=g_no_0,
feature_normalization=None,
with_bias=True,
w_initializer=self.initializers['w'],
b_initializer=self.initializers['b'])
g_h1p = fc_layer(rand_tensor, keep_prob=keep_prob_ph)
g_h1p = tf.reshape(g_h1p, output_shape)
g_h1p = concat_cond(g_h1p, with_conditioning)
return conv(ch + conditioning_channels, g_h1p)
def final_image(n_chns, x):
with tf.name_scope('final_image'):
if add_noise > 0:
feature_shape = x.shape.as_list()[0:-1]
noise_shape = feature_shape + [add_noise]
noise = tf.random_normal(noise_shape, 0, .1)
x = tf.concat([x, noise], axis=3)
conv_layer = ConvolutionalLayer(
n_output_chns=n_chns,
kernel_size=3,
acti_func='tanh',
feature_normalization=None,
with_bias=True,
w_initializer=self.initializers['w'],
b_initializer=self.initializers['b'])
x_sample = conv_layer(
x, is_training=is_training, keep_prob=keep_prob_ph)
return tf.image.resize_images(x_sample, image_size[:-1])
# let the tensors flow...
flow = random_source
for (idx, chns) in enumerate(ch):
if idx == 0: # first layer fully-connected
flow = noise_to_image(
sz, chns, flow, self.with_conditionings[idx])
elif idx == len(ch) - 1: # final conv without bn
return final_image(chns, flow)
else: # upsampling block
flow = up_block(chns, flow, self.with_conditionings[idx])
class ImageDiscriminator(BaseDiscriminator):
def __init__(self, name):
super(ImageDiscriminator, self).__init__(name=name)
w_init = tf.random_normal_initializer(0, 0.02)
b_init = tf.constant_initializer(0.001)
# w_init = tf.contrib.layers.variance_scaling_initializer()
# b_init = tf.constant_initializer(0)
self.initializers = {'w': w_init, 'b': b_init}
self.chns = [32, 64, 128, 256, 512, 1024, 1]
def layer_op(self, image, conditioning, is_training):
batch_size = image.shape.as_list()[0]
def down(ch, x):
with tf.name_scope('downsample'):
conv_layer = ConvolutionalLayer(
n_output_chns=ch,
kernel_size=3,
stride=2,
feature_normalization='batch',
acti_func='selu',
w_initializer=self.initializers['w'])
return conv_layer(x, is_training=is_training)
def convr(ch, x):
conv_layer = ConvolutionalLayer(
n_output_chns=ch,
kernel_size=3,
feature_normalization='batch',
acti_func='selu',
w_initializer=self.initializers['w'])
return conv_layer(x, is_training=is_training)
def conv(ch, x, s):
conv_layer = ConvolutionalLayer(
n_output_chns=ch,
kernel_size=3,
feature_normalization='batch',
w_initializer=self.initializers['w'])
acti_layer = ActiLayer(func='selu')
# combining two flows
res_flow = conv_layer(x, is_training=is_training) + s
return acti_layer(res_flow)
def down_block(ch, x):
with tf.name_scope('down_resnet'):
s = down(ch, x)
r = convr(ch, s)
return conv(ch, r, s)
def feature_block(ch, image):
with tf.name_scope('feature'):
conv_layer = ConvolutionalLayer(
n_output_chns=ch,
kernel_size=5,
with_bias=True,
feature_normalization=None,
acti_func='selu',
w_initializer=self.initializers['w'],
b_initializer=self.initializers['b'])
d_h1s = conv_layer(image, is_training=is_training)
d_h1r = convr(ch, d_h1s)
return conv(ch, d_h1r, d_h1s)
def fully_connected(ch, features):
with tf.name_scope('fully_connected'):
# with bn?
fc_layer = FullyConnectedLayer(
n_output_chns=ch, feature_normalization=None, with_bias=True)
return fc_layer(features, is_training=is_training)
if conditioning is not None:
image = tf.concat([image, conditioning], axis=-1)
# let the tensors flow...
flow = image
for (idx, n_chns) in enumerate(self.chns):
if idx == 0: # first layer
flow = feature_block(n_chns, flow)
elif idx == len(self.chns) - 1: # last layer
return fully_connected(n_chns, flow)
else:
flow = down_block(n_chns, flow)
| 40.671937 | 81 | 0.558212 | [
"Apache-2.0"
] | LucasFidon/NiftyNet-RobustOptim | niftynet/network/simulator_gan.py | 10,290 | Python |
# Challenge 42 - Lesson 12: redo Challenge 35 and also report the triangle type.
# A/ Equilateral.
# B/ Isosceles.
# C/ Scalene.
print('\033[32mATENÇÃO! VAMOS MONTAR UM TRIÂNGULO!!!\033[m')
a = int(input('Digite a primeira medida: '))
b = int(input('Digite a segunda medida: '))
c = int(input('Digite a terceira medida: '))
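# Triangle inequality: three lengths form a triangle only if each side is
# smaller than the sum of the other two.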
if a < b + c and b < a + c and c < b + a:
print('\033[34mTemos um triangulo!\033[m')
if a==b==c:
print('Este triângulo é \033[32mEQUILATERO\033[m! Pois possui todos os lados iguais.')
elif a==b or a==c or b==c:
print('Neste caso este triângulo possui dois lados iguais, portanto é \033[32mISÓSCELES\033[m!')
else:
print('Todos os lados são diferentes portante temos um triângulo \033[32mESCALENO\033[m!')
else:
    print('\033[31mNão temos um triângulo!\033[m')
| 39.47619 | 104 | 0.659831 | [
"MIT"
] | duartecgustavo/Python---Estudos- | desafios/Mundo 2/Ex042.py | 842 | Python |
"""
pyAutoSpec
Spectral learning for WFA/MPS
"""
from .wfa import Wfa, SpectralLearning
from .mps import Mps
from .plots import parallel_plot
from .function_wfa import FunctionWfa
from .function_mps import FunctionMps
from .dataset_mps import DatasetMps
from .image_wfa import ImageWfa
__all__ = ["Wfa", "Mps", "parallel_plot", "SpectralLearning", "FunctionWfa", "FunctionMps", "DatasetMps", "ImageWfa"]
| 25.4375 | 117 | 0.77887 | [
"MIT"
] | lucamarx/pyAutoSpec | pyautospec/__init__.py | 407 | Python |
# -*- coding: utf-8 -*-
import sys
from os.path import dirname, abspath, normpath, join, realpath
from os import listdir, remove, system
import json
from datetime import datetime
begin = len(normpath(abspath(join(dirname(__file__), "../.."))))
end = len(normpath(abspath(join(dirname(__file__), ".."))))
MAIN_DIR = dirname(realpath(__file__))
package_name = MAIN_DIR[begin + 1 : end]
# Add the directory to the python path
sys.path.append(MAIN_DIR[:begin])
exec(
"from "
+ package_name
+ ".Generator.ClassGenerator.class_generator import generate_class"
)
exec("from " + package_name + ".Generator.read_fct import read_all")
exec("from " + package_name + ".definitions import MAIN_DIR, DOC_DIR, INT_DIR")
# List of the main packages (to sort the classes)
PACKAGE_LIST = ["Geometry", "Machine", "Material", "Slot", "Import"]
def generate_code(root_path, gen_dict=None):
"""Generate pyleecan Classes code according to doc in root_path
Parameters
----------
root_path : str
Path to the main folder of Pyleecan
gen_dict : dict
        Generation dictionary (contains all the csv data)
Returns
-------
None
"""
CLASS_DIR = join(root_path, "Classes")
FUNC_DIR = join(root_path, "Functions")
DOC_DIR = join(root_path, "Generator", "ClassesRef")
print("Reading classes csv in: " + DOC_DIR)
print("Saving generated files in: " + CLASS_DIR)
path = __file__[__file__.index(package_name) :]
path = path.replace("\\", "/")
    # Delete all the previous class files
print("Deleting old class files...")
for file_name in listdir(CLASS_DIR):
if file_name[0] != "_":
remove(join(CLASS_DIR, file_name))
    # A file to import every class quickly
import_file = open(join(CLASS_DIR, "import_all.py"), "w")
import_file.write("# -*- coding: utf-8 -*-\n\n")
import_file.write('"""File generated by generate_code() - \n')
import_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
# A file to select the constructor according to a string
load_file = open(join(FUNC_DIR, "load_switch.py"), "w")
load_file.write("# -*- coding: utf-8 -*-\n")
load_file.write('"""File generated by generate_code() - \n')
load_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file.write("from ..Classes.import_all import *\n\n")
load_file.write("load_switch = {\n")
# Read all the csv files
if gen_dict is None:
gen_dict = read_all(DOC_DIR)
# Generate all the class files (sorted to remove "commit noise")
for class_name, _ in iter(sorted(list(gen_dict.items()))):
import_file.write(
"from ..Classes." + class_name + " import " + class_name + "\n"
)
load_file.write(' "' + class_name + '": ' + class_name + ",\n")
print("Generation of " + class_name + " class")
generate_class(gen_dict, class_name, CLASS_DIR)
import_file.close()
load_file.write("}\n")
load_file.close()
print("Generation of load_switch.py")
print("Generation of import_all.py")
# Save gen_dict
class_dict_file = join(CLASS_DIR, "Class_Dict.json")
with open(class_dict_file, "w") as json_file:
json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(",", ": "))
if __name__ == "__main__":
gen_dict = read_all(DOC_DIR, is_internal=False, in_path=INT_DIR)
generate_code(MAIN_DIR, gen_dict)
# Run black
try:
import black
system('"{}" -m black .'.format(sys.executable))
if black.__version__.split(".")[0] != "20":
print("\n############################################")
print(
"WARNING: The official version of black for pyleecan is 20, please update your black version"
)
print("############################################\n")
except ImportError:
print("/!\\ Please install and run black (version 20) /!\\")
now = datetime.now()
print("End at: ", now.strftime("%H:%M:%S"))
| 34.888889 | 109 | 0.626654 | [
"Apache-2.0"
] | IrakozeFD/pyleecan | pyleecan/Generator/run_generate_classes.py | 4,082 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Single NN
@author: xuping
"""
import numpy as np
import scipy.io
#from threeNN import sigmoid
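# The threeNN module is not available here, so a standard logistic sigmoid is
# defined inline to keep the script self-contained.
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))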
def layer_sizes(X, Y):
n_in = X.shape[0]
n_out = Y.shape[0]
return(n_in, n_out)
def initialize_parameters(dim):
np.random.seed(3)
W = np.random.randn(dim, dim)*0.01
b = np.zeros((dim, 1))
return W,b
def prop(W,b,X,Y,lambd):
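    # Forward pass: A = sigmoid(W X + b); the cost is the mean squared error
    # plus an L2 penalty lambd/(2m) * ||W||^2.
    # Backward pass: dZ = 2(A - Y) * sigmoid'(Z),
    # dW = (1/m) dZ X^T + (lambd/m) W, db = (1/m) sum(dZ).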
m = X.shape[1]
#forward
A = sigmoid(np.dot(W, X) + b)
cost = 1./m*np.sum(np.sum(np.square(A-Y),axis=0,keepdims=True)) + lambd/(2*m)*np.sum(np.sum(W*W))
#cost = 1./m*np.sum(np.sum(np.square(A-Y)))
#backward
Z = np.dot(W, X) + b
dZ = 2*(A-Y)*sigmoid(Z)*(1-sigmoid(Z))
dW = 1./m*np.dot(dZ, X.T) + lambd/m*W
#dW = 1./m*np.dot(dZ, X.T)
db = 1./m*np.sum(dZ,axis=1,keepdims=True)
grads = {"dW":dW, "db":db}
return grads, cost
def nn_model(X,Y,num_iterations, lambd, learning_rate, print_cost=True):
#np.random.seed(3)
costs = []
W, b = initialize_parameters(X.shape[0])
for i in range(num_iterations):
grads, cost = prop(W,b,X,Y,lambd)
dW = grads["dW"]
db = grads["db"]
W = W-learning_rate*dW
b = b-learning_rate*db
if print_cost and i%1000==0:
print("cost after iteration %i: %f" %(i, cost))
costs.append(cost)
parameters={"W":W, "b":b}
grads={"dW":dW, "db":db}
return parameters, costs
def predict(parameters, X):
W=parameters["W"]
b=parameters["b"]
A = sigmoid(np.dot(W, X) + b)
return A
def load_data():
data=scipy.io.loadmat('U_Train.mat')
X = data['ud']
Y10 = data['tauR10']
Y5 = data['tauR5']
Y6 = data['tauR6']
return X, Y5, Y6, Y10
if __name__ == "__main__":
#load data
X, Y5, Y6, Y10 = load_data()
X5 = X[:5, :]
X6 = X[:6, :]
X10 = X[:10, :]
num_iterations = 30000
lambd = 10
learning_rate = 3
"""
X=X6
Y=Y6
np.random.seed(3)
dim=X.shape[0]
W = np.random.randn(dim, dim)*0.01
b = np.zeros((dim, 1))
Z = np.dot(W, X) + b
A = sigmoid(Z)
cost = A-Y
#dZ = 2*(A-Y)*sigmoid(Z)*(1-sigmoid(Z))
#dW = 1/m*np.dot(dZ, X.T)
#db = 1/m*np.sum(dZ,axis=1,keepdims=True)
"""
#parameters5, cost5 = nn_model(X5, Y5, num_iterations, lambd, learning_rate, print_cost=True)
parameters6, cost6 = nn_model(X6, Y6, num_iterations, lambd, learning_rate, print_cost=True)
#parameters10, cost10 = nn_model(X10, Y10, num_iterations, lambd, learning_rate, print_cost=True)
#W5=parameters5["W"]
#b5=parameters5["b"]
W6=parameters6["W"]
b6=parameters6["b"]
#W10=parameters10["W"]
#b10=parameters10["b"]
#scipy.io.savemat('weights6.mat',{'W6':W6})
#scipy.io.savemat('bias.mat',{'b6':b6})
| 24.260504 | 101 | 0.553516 | [
"MIT"
] | xupingxie/deep-learning-models | NN_buildingblock/SingleNN.py | 2,887 | Python |
import os
import math
import numpy as np
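# Walk the extracted Kinetics frame folders: every class directory under
# train_256 contributes its non-empty video folders to data_list together with
# an integer class label; only the first 100 classes are kept, and the
# resulting path/label lists are saved as .npy files.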
root_path = '/home/project/I3D/data/Kinetics/train_256'
num_frames = 16
data_list = []
id_list = []
label_list = []
erro_data = []
label = 0
id = 0
for file_path in sorted(os.listdir(root_path)):
for video_path in sorted(os.listdir(os.path.join(root_path, file_path))):
frame_num = len(os.listdir(os.path.join(root_path, file_path, video_path)))
print('Process: ' + os.path.join(root_path, file_path, video_path), frame_num)
if frame_num > 0:
data_list.append(os.path.join(root_path, file_path, video_path))
id_list.append(id)
label_list.append(label)
id += 1
else:
erro_data.append(os.path.join(root_path, file_path, video_path))
label += 1
if label == 100:
break
print(erro_data)
print(len(data_list))
print(len(id_list))
print(len(label_list))
np.save('./train_data_list_%d.npy'%label, data_list)
np.save('./train_label_list_%d.npy'%label, label_list)
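# A minimal reload sketch (the file names assume the loop above stopped at label == 100):
#   data_list = np.load('./train_data_list_100.npy')
#   label_list = np.load('./train_label_list_100.npy')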
| 28.111111 | 86 | 0.666996 | [
"Apache-2.0"
] | LossNAN/3D-Resnet-tensorflow | experiments/kinetics-400/data_list/gen_train_list.py | 1,012 | Python |
# the TestEnv environment is used to simply simulate the network
from flow.envs import TestEnv
# the Experiment class is used for running simulations
from flow.core.experiment import Experiment
# the base network class
from flow.networks import Network
from flow.envs.base import Env
# all other imports are standard
from flow.core.params import VehicleParams, SumoCarFollowingParams, SumoLaneChangeParams
from flow.controllers import IDMController
from flow.core.params import InFlows
from flow.core.params import NetParams
from flow.core.params import TrafficLightParams
from flow.core.params import InitialConfig
from flow.core.params import EnvParams
from flow.controllers import IDMController, RLController, StaticLaneChanger
from gym.spaces.box import Box
import numpy as np
import collections
# create some default parameters parameters
HORIZON = 3000
env_params = EnvParams(
horizon=HORIZON,
sims_per_step=1,
warmup_steps=0,
additional_params={
"max_accel": 3,
"max_decel": -2,
"target_velocity": 20,
"lane_change_duration": 4,
"num_rl": 5,
})
initial_config = InitialConfig(edges_distribution=['highway_0'])
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(IDMController, {
"noise": 0.2
}),
# lane_change_controller=(StaticLaneChanger, {}),
car_following_params=SumoCarFollowingParams(
speed_mode="obey_safe_speed",
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode=1621,
model="SL2015",
lc_impatience="0.1",
lc_time_to_impatience="1.0"
))
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
lane_change_controller=(StaticLaneChanger, {}),
# routing_controller=(HighwayRouter, {}),
car_following_params=SumoCarFollowingParams(
speed_mode="obey_safe_speed",
),
lane_change_params=SumoLaneChangeParams(
lane_change_mode=256,
model="SL2015",
lc_impatience="0.1",
lc_time_to_impatience="1.0"
),
num_vehicles=0)
from flow.core.params import SumoParams
sim_params = SumoParams(
sim_step=0.2,
render=True,
lateral_resolution=1.0,
restart_instance=True,
)
import os
inflow = InFlows()
inflow.add(veh_type="human",
edge="WC",
# depart_lane="best",
depart_lane=1,
arrivalLane=0,
probability=0.1,
depart_speed="random",
)
inflow.add(veh_type="human",
edge="WC",
# depart_lane="best",
depart_lane=0,
arrivalLane=1,
probability=0.1,
depart_speed="random",
)
inflow.add(veh_type="human",
edge="EC",
# depart_lane="best",
# vehs_per_hour=2000,
depart_lane=1,
arrivalLane=0,
probability=0.1,
depart_speed="random",
)
inflow.add(veh_type="human",
edge="EC",
# depart_lane="best",
# vehs_per_hour=2000,
depart_lane=0,
arrivalLane=1,
probability=0.1,
depart_speed="random",
)
inflow.add(
veh_type="rl",
edge="WC",
vehs_per_hour=100,
depart_lane="free",
depart_speed=5)
net_params = NetParams(
template={
"net":"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedcrossing.net.xml",
# features associated with the routes vehicles take
"vtype": "/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedcrossing.add.xml",
        # must be consistent with specify_routes below
"rou":"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/data/pedcrossing.rou.xml",
"trip":"/home/rong/Safe-RL-for-Driving/traci_pedestrian_crossing/pedestrians.trip.xml"
},
inflows=inflow,
)
# specify the edges vehicles can originate on
initial_config = InitialConfig(
edges_distribution=["WC"]
)
tl_logic = TrafficLightParams(baseline=False)
phases = [{"duration": "100000", "state": "GGGGr"},
{"duration": "4", "state": "yyyyr"},
{"duration": "10", "state": "rrrrG"},
{"duration": "10", "state": "rrrrr"}]
tl_logic.add("C", phases=phases, programID="custom", offset="0")
# specify the routes for vehicles in the network
class PedCrossing(Network):
def specify_routes(self, net_params):
return {'EC': ['EC', 'CW'],
'WC': ['WC', 'CE']}
class MoveXYPedEnv(Env):
def __init__(self, env_params, sim_params, network, simulator='traci'):
super().__init__(env_params, sim_params, network, simulator)
        # environment-related state (traffic light / pedestrian crossing logic)
self.activeRequest = False
self.greenTimeSoFar = 0
# minimum green time for the vehicles
self.MIN_GREEN_TIME = 15
# the first phase in tls plan. see 'pedcrossing.tll.xml'
self.VEHICLE_GREEN_PHASE = 0
self.PEDESTRIAN_GREEN_PHASE = 2
# the id of the traffic light (there is only one). This is identical to the
# id of the controlled intersection (by default)
self.TLSID = 'C'
# pedestrian edges at the controlled intersection
self.WALKINGAREAS = [':C_w0', ':C_w1']
self.CROSSINGS = [':C_c0']
        # moveToXY-related state
self.num_lanes = max(self.k.network.num_lanes(edge)
for edge in self.k.network.get_edge_list())
self.visible = []
self.stuck = False
# variables used to sort vehicles by their initial position plus
# distance traveled
self.prev_pos = dict()
self.absolute_position = dict()
# maximum number of controlled vehicles
self.num_rl = env_params.additional_params["num_rl"]
# queue of rl vehicles waiting to be controlled
self.rl_queue = collections.deque()
# names of the rl vehicles controlled at any step
self.rl_veh = []
# used for visualization: the vehicles behind and after RL vehicles
# (ie the observed vehicles) will have a different color
self.leader = []
self.follower = []
@property
def action_space(self):
"""See class definition."""
max_decel = self.env_params.additional_params["max_decel"]
max_accel = self.env_params.additional_params["max_accel"]
lb = [1, -0.2] * self.num_rl
ub = [2, 0.2] * self.num_rl
# print("num_rl_vehicles:", self.num_rl)
return Box(np.array(lb), np.array(ub), dtype=np.float32)
@property
def observation_space(self):
"""See class definition."""
# print("observation sapce shape: ", 4 * self.num_rl *
# self.num_lanes + self.num_rl)
return Box(
low=-1000,
high=3000,
shape=(4 * self.num_rl *
self.num_lanes + 2 * self.num_rl, ),
dtype=np.float32)
def compute_reward(self, rl_actions, **kwargs):
"""See class definition."""
reward = 0
        # reward RL vehicles for moving forward and penalize them for stopping
rl_velocity = np.array(self.k.vehicle.get_speed(self.rl_veh))
target_vel = self.env_params.additional_params['target_velocity']
max_cost = np.array([target_vel] * self.num_rl)
max_cost = np.linalg.norm(max_cost)
cost = rl_velocity - target_vel
cost = np.linalg.norm(cost)
# epsilon term (to deal with ZeroDivisionError exceptions)
eps = np.finfo(np.float32).eps
reward += max(max_cost - cost, 0) / (max_cost + eps)
gain = 0.5
thresh = 0.3
penalize = len(rl_velocity[rl_velocity < thresh])
reward -= gain * penalize
# punish excessive lane changes by reducing the reward by a set value
# every time an rl car changes lanes (10% of max reward)
for veh_id in self.rl_veh:
if self.k.vehicle.get_last_lc(veh_id) == self.time_counter:
reward -= 10
if self.stuck:
reward -= 100
# print("reward: ", reward)
return reward
def _apply_rl_actions(self, actions):
"""See class definition."""
acceleration = actions[::2]
direction = actions[1::2]
# represents vehicles that are allowed to change lanes
# non_lane_changing_veh = []
# non_lane_changing_veh = \
# [self.time_counter <=
# self.env_params.additional_params["lane_change_duration"]
# + self.k.vehicle.get_last_lc(veh_id)
# for veh_id in self.rl_veh]
# # vehicle that are not allowed to change have their directions set to 0
# print(non_lane_changing_veh)
# direction[non_lane_changing_veh] = \
# np.array([0] * sum(non_lane_changing_veh))
for i, veh_id in enumerate(self.rl_veh):
if self.time_counter <= self.env_params.additional_params["lane_change_duration"]\
+ self.k.vehicle.get_last_lc(veh_id):
direction[i] = 0
x, y = self.k.vehicle.kernel_api.vehicle.getPosition(veh_id)
print(x, y)
print("edgeID", self.k.vehicle.get_edge(veh_id))
print("lane", self.k.vehicle.get_lane(veh_id))
self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=veh_id,
edgeID="highway_1",
lane=1,
x=x+acceleration[i],
y=y+direction[i],
keepRoute=2)
for x in np.nditer(direction, op_flags=['readwrite']):
if x > 0.7:
x[...] = 1
elif x < -0.7:
x[...] = -1
else:
x[...] = 0
# print("actions:", actions)
# print("veh id: ", self.rl_veh)
# print("acceleration: ", acceleration)
# print("direction", direction)
# self.k.vehicle.apply_acceleration(self.rl_veh, acc=acceleration)
# self.k.vehicle.apply_lane_change(self.rl_veh, direction=direction)
def get_state(self):
"""See class definition."""
obs = [
0
for _ in range(4 * self.num_rl * self.num_lanes + 2 * self.num_rl)
]
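        # Layout: for RL vehicle i, obs[4*num_lanes*i : 4*num_lanes*(i+1)] holds the
        # per-lane headways, tailways, leader speeds and follower speeds; the last
        # 2*num_rl entries hold each RL vehicle's absolute position and speed
        # (filled from the end of the vector).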
# print("rl veh id: ", self.rl_veh)
self.visible = []
self.update_veh_id()
speeds = []
for i, rl_id in enumerate(self.rl_veh):
# x, y = self.k.vehicle.kernel_api.vehicle.getPosition(rl_id)
# print(x, y)
# print("edgeID", self.k.vehicle.get_edge(rl_id))
# print("lane", self.k.vehicle.get_lane(rl_id))
# self.k.vehicle.kernel_api.vehicle.moveToXY(vehID=[rl_id, rl_id], edgeID="highway_1", lane=1, x=600, y=134)
# add the speed for the ego rl vehicle
x = self.k.vehicle.get_x_by_id(rl_id)
if x == -1001:
continue
speed = self.k.vehicle.get_speed(rl_id)
obs[-2*i - 1] = speed
speeds.append(speed)
obs[-2*i - 2] = x
# if rl_id not in self.k.vehicle.get_ids():
# print("not in:", rl_id)
# self.additional_command()
# normalizers
max_length = self.k.network.length()
max_speed = self.k.network.max_speed()
            # default to 1 (the normalized maximum) since the absence of a
            # vehicle implies a large headway
headway = [1] * self.num_lanes
tailway = [1] * self.num_lanes
vel_in_front = [0] * self.num_lanes
vel_behind = [0] * self.num_lanes
lane_leaders = self.k.vehicle.get_lane_leaders(rl_id)
lane_followers = self.k.vehicle.get_lane_followers(rl_id)
lane_headways = self.k.vehicle.get_lane_headways(rl_id)
lane_tailways = self.k.vehicle.get_lane_tailways(rl_id)
headway[0:len(lane_headways)] = lane_headways
tailway[0:len(lane_tailways)] = lane_tailways
for j, lane_leader in enumerate(lane_leaders):
if lane_leader != '':
lane_headways[j] /= max_length
vel_in_front[j] = self.k.vehicle.get_speed(lane_leader) \
/ max_speed
self.visible.extend([lane_leader])
for j, lane_follower in enumerate(lane_followers):
if lane_follower != '':
lane_headways[j] /= max_length
vel_behind[j] = self.k.vehicle.get_speed(lane_follower) \
/ max_speed
self.visible.extend([lane_follower])
# add the headways, tailways, and speed for all lane leaders
# and followers
obs[4*self.num_lanes*i:4*self.num_lanes*(i+1)] = \
np.concatenate((headway, tailway, vel_in_front, vel_behind))
# if len(speeds) > 3:
# self.stuck = True
# for speed in speeds:
# if speed != 0:
# self.stuck = False
obs = np.array(obs)
# print("observation: ", obs)
# print("observation shape: ", obs.shape)
np.clip(obs, -1000, 3000, out=obs)
return obs
def additional_command(self):
        # traffic light handling
        # decide whether there is a waiting pedestrian and switch if the green
# phase for the vehicles exceeds its minimum duration
if not self.activeRequest:
self.activeRequest = self.checkWaitingPersons()
if self.k.kernel_api.trafficlight.getPhase(self.TLSID) == self.VEHICLE_GREEN_PHASE:
self.greenTimeSoFar += 1
if self.greenTimeSoFar > self.MIN_GREEN_TIME:
# check whether someone has pushed the button
if self.activeRequest:
# switch to the next phase
self.k.kernel_api.trafficlight.setPhase(
self.TLSID, self.VEHICLE_GREEN_PHASE + 1)
# reset state
self.activeRequest = False
        # moveToXY-related bookkeeping
# specify observed vehicles
for veh_id in self.leader + self.follower:
self.k.vehicle.set_observed(veh_id)
# update the "absolute_position" variable
for veh_id in self.k.vehicle.get_ids():
this_pos = self.k.vehicle.get_x_by_id(veh_id)
if this_pos == -1001:
# in case the vehicle isn't in the network
self.absolute_position[veh_id] = -1001
else:
change = this_pos - self.prev_pos.get(veh_id, this_pos)
self.absolute_position[veh_id] = \
(self.absolute_position.get(veh_id, this_pos) + change) \
% self.k.network.length()
self.prev_pos[veh_id] = this_pos
return
def update_veh_id(self):
# add rl vehicles that just entered the network into the rl queue
for veh_id in self.k.vehicle.get_rl_ids():
if veh_id not in list(self.rl_queue) + self.rl_veh:
self.rl_queue.append(veh_id)
# remove rl vehicles that exited the network
for veh_id in list(self.rl_queue):
if veh_id not in self.k.vehicle.get_rl_ids() or veh_id not in self.k.vehicle.get_ids():
self.rl_queue.remove(veh_id)
for veh_id in self.rl_veh:
if veh_id not in self.k.vehicle.get_rl_ids() or veh_id not in self.k.vehicle.get_ids():
# print("rm veh_id", veh_id)
self.rl_veh.remove(veh_id)
        # fill up rl_veh until there are enough controlled vehicles
while len(self.rl_queue) > 0 and len(self.rl_veh) < self.num_rl:
rl_id = self.rl_queue.popleft()
self.rl_veh.append(rl_id)
# print("add rl_veh:", rl_id)
# print("update_veh_id, self.rl_veh:", self.rl_veh)
def checkWaitingPersons(self):
"""check whether a person has requested to cross the street"""
# check both sides of the crossing
for edge in self.WALKINGAREAS:
peds = self.k.kernel_api.edge.getLastStepPersonIDs(edge)
# check who is waiting at the crossing
# we assume that pedestrians push the button upon
# standing still for 1s
for ped in peds:
if (self.k.kernel_api.person.getWaitingTime(ped) == 1 and
self.k.kernel_api.person.getNextEdge(ped) in self.CROSSINGS):
numWaiting = self.k.kernel_api.trafficlight.getServedPersonCount(self.TLSID, self.PEDESTRIAN_GREEN_PHASE)
print("%s: pedestrian %s pushes the button (waiting: %s)" %
(self.k.kernel_api.simulation.getTime(), ped, numWaiting))
return True
return False
def step(self, rl_actions):
"""Advance the environment by one step.
Assigns actions to autonomous and human-driven agents (i.e. vehicles,
traffic lights, etc...). Actions that are not assigned are left to the
control of the simulator. The actions are then used to advance the
simulator by the number of time steps requested per environment step.
Results from the simulations are processed through various classes,
such as the Vehicle and TrafficLight kernels, to produce standardized
methods for identifying specific network state features. Finally,
results from the simulator are used to generate appropriate
observations.
Parameters
----------
rl_actions : array_like
            a list of actions provided by the RL algorithm
Returns
-------
observation : array_like
agent's observation of the current environment
reward : float
amount of reward associated with the previous state/action pair
done : bool
indicates whether the episode has ended
info : dict
contains other diagnostic information from the previous action
"""
for _ in range(self.env_params.sims_per_step):
self.time_counter += 1
self.step_counter += 1
# perform acceleration actions for controlled human-driven vehicles
if len(self.k.vehicle.get_controlled_ids()) > 0:
accel = []
for veh_id in self.k.vehicle.get_controlled_ids():
action = self.k.vehicle.get_acc_controller(
veh_id).get_action(self)
accel.append(action)
self.k.vehicle.apply_acceleration(
self.k.vehicle.get_controlled_ids(), accel)
# perform lane change actions for controlled human-driven vehicles
if len(self.k.vehicle.get_controlled_lc_ids()) > 0:
direction = []
for veh_id in self.k.vehicle.get_controlled_lc_ids():
target_lane = self.k.vehicle.get_lane_changing_controller(
veh_id).get_action(self)
direction.append(target_lane)
self.k.vehicle.apply_lane_change(
self.k.vehicle.get_controlled_lc_ids(),
direction=direction)
# perform (optionally) routing actions for all vehicles in the
# network, including RL and SUMO-controlled vehicles
routing_ids = []
routing_actions = []
for veh_id in self.k.vehicle.get_ids():
if self.k.vehicle.get_routing_controller(veh_id) \
is not None:
routing_ids.append(veh_id)
route_contr = self.k.vehicle.get_routing_controller(
veh_id)
routing_actions.append(route_contr.choose_route(self))
self.k.vehicle.choose_routes(routing_ids, routing_actions)
self.apply_rl_actions(rl_actions)
self.additional_command()
# advance the simulation in the simulator by one step
self.k.simulation.simulation_step()
# store new observations in the vehicles and traffic lights class
self.k.update(reset=False)
# update the colors of vehicles
if self.sim_params.render:
self.k.vehicle.update_vehicle_colors()
# crash encodes whether the simulator experienced a collision
crash = self.k.simulation.check_collision()
# stop collecting new simulation steps if there is a collision
if crash:
break
# render a frame
self.render()
states = self.get_state()
# collect information of the state of the network based on the
# environment class used
self.state = np.asarray(states).T
        # collect the new observation associated with the action
next_observation = np.copy(states)
# test if the environment should terminate due to a collision or the
# time horizon being met
done = (self.time_counter >= self.env_params.warmup_steps +
self.env_params.horizon) or self.stuck
if done:
print("done")
if self.stuck:
print("stuck")
else:
print("time up")
# compute the info for each agent
infos = {}
# compute the reward
if self.env_params.clip_actions:
rl_clipped = self.clip_actions(rl_actions)
reward = self.compute_reward(rl_clipped, fail=crash)
else:
reward = self.compute_reward(rl_actions, fail=crash)
return next_observation, reward, done, infos
def reset(self):
"""See parent class.
This also includes updating the initial absolute position and previous
position.
"""
self.rl_queue.clear()
self.rl_veh.clear()
obs = super().reset()
print("reset")
for veh_id in self.k.vehicle.get_ids():
self.absolute_position[veh_id] = self.k.vehicle.get_x_by_id(veh_id)
self.prev_pos[veh_id] = self.k.vehicle.get_x_by_id(veh_id)
self.leader = []
self.follower = []
return obs
if __name__ == "__main__":
flow_params = dict(
exp_tag='template',
env_name=MoveXYPedEnv,
network=PedCrossing,
simulator='traci',
sim=sim_params,
env=env_params,
net=net_params,
veh=vehicles,
initial=initial_config,
tls=tl_logic,
)
# number of time steps
flow_params['env'].horizon = 10000
exp = Experiment(flow_params)
# run the sumo simulation
_ = exp.run(1)
| 37.053312 | 125 | 0.588158 | [
"Apache-2.0"
] | KarlRong/Safe-RL-for-Driving | traci_pedestrian_crossing/movexy_ped.py | 22,992 | Python |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 14:52:03 2020
@author: DELL
"""
import pandas as pd
data = pd.read_csv('http://187.191.75.115/gobmx/salud/datos_abiertos/datos_abiertos_covid19.zip', encoding = 'latin-1')  # 'ANSI' is not a registered Python codec; latin-1 reads the Windows-ANSI encoded source
res = data[data['ENTIDAD_RES'] == 31]
res.to_csv('data_yuc_actualizado.csv', index = False) | 19.8125 | 116 | 0.690852 | [
"MIT"
] | Luisbaduy97/COVID-YUCATAN | datos_yuc_actualizado.py | 317 | Python |
#!/usr/bin/env python
import unittest
from ct.proto import client_pb2
from ct.proto import test_message_pb2
from ct.serialization import tls_message
valid_test_message = test_message_pb2.TestMessage()
valid_test_message.uint_8 = 0
valid_test_message.uint_16 = 258
valid_test_message.uint_24 = 197637
valid_test_message.uint_32 = 101124105
valid_test_message.uint_48 = 11042563100175
valid_test_message.uint_64 = 255
valid_test_message.fixed_bytes = "\xff\x00"
valid_test_message.var_bytes = "hello"
valid_test_message.var_bytes2 = "world"
valid_test_message.vector_bytes.append("hello")
valid_test_message.vector_bytes.append("world")
valid_test_message.vector_uint32.append(1)
valid_test_message.vector_uint32.append(255)
valid_test_message.test_enum = test_message_pb2.TestMessage.ENUM_1
valid_test_message.select_uint32 = 2
valid_test_message.embedded_message.uint_32 = 3
valid_test_message.repeated_message.add().uint_32 = 4
valid_test_message.repeated_message.add().uint_32 = 256
# Test vectors are given as a list of serialized, hex-encoded components.
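# For example, uint_16 = 258 (0x0102) appears below as the hex string "0102".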
serialized_valid_test_message = [
"00", # 0: uint_8
"0102", # 1: uint_16
"030405", # 2: uint_24
"06070809", # 3: uint_32
"0a0b0c0d0e0f", # 4: uint_48
"00000000000000ff", # 5: uint_64
"ff00", # 6: fixed_bytes
"05" + "hello".encode("hex"), # 7: var_bytes
"0005" + "world".encode("hex"), # 8: var_bytes2
"0c" + "05" + "hello".encode("hex") + "05" +
"world".encode("hex"), # 9: vector_bytes
"0800000001000000ff", # 10: vector_uint32
"0001", # 11: test_enum
"00000002", # 12: select_uint32
"0003", # 13: embedded_message.uint_32
"0400040100", # 14: repeated_message
]
class TLSReaderTest(unittest.TestCase):
def verify_decode(self, test_vector, test_message):
serialized = "".join(test_vector).decode("hex")
message = test_message_pb2.TestMessage()
tls_message.decode(serialized, message)
self.assertEqual(test_message, message,
msg = "%s vs %s" % (test_message, message))
def verify_decode_fail(self, test_vector):
serialized = "".join(test_vector).decode("hex")
message = test_message_pb2.TestMessage()
self.assertRaises(tls_message.TLSDecodingError,
tls_message.decode, serialized, message)
def test_decode_valid(self):
self.verify_decode(serialized_valid_test_message, valid_test_message)
pass
def test_decode_valid_select(self):
test_vector = serialized_valid_test_message[:]
test_vector[11] = "0000"
test_vector[12] = ""
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.test_enum = test_message_pb2.TestMessage.ENUM_0
test_message.ClearField("select_uint32")
self.verify_decode(test_vector, test_message)
def test_decode_invalid_select_fails(self):
test_vector = serialized_valid_test_message[:]
test_vector[11] = "0000"
self.verify_decode_fail(test_vector)
def test_decode_too_short_fails(self):
test_vector = serialized_valid_test_message[:]
# var_bytes2 has a min length of 4
test_vector[8] = "bit".encode("hex")
self.verify_decode_fail(test_vector)
def test_decode_empty(self):
test_vector = serialized_valid_test_message[:]
# var_bytes has no min length
test_vector[7] = "00"
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.var_bytes = ""
self.verify_decode(test_vector, test_message)
def test_decode_too_long_fails(self):
test_vector = serialized_valid_test_message[:]
# var_bytes has a max length of 16
test_vector[7] = "16" + "Iamtoolongformyowngood".encode("hex")
self.verify_decode_fail(test_vector)
def test_decode_repeated_too_short_fails(self):
test_vector = serialized_valid_test_message[:]
# repeated_uint32 has a min total length of 4
test_vector[10] = "00"
self.verify_decode_fail(test_vector)
def test_decode_repeated_too_long_fails(self):
test_vector = serialized_valid_test_message[:]
# repeated_uint32 has a max total length of 8
test_vector[10] = "0c" + "00"*12
self.verify_decode_fail(test_vector)
def test_decode_repeated_invalid_contents_fails(self):
test_vector = serialized_valid_test_message[:]
# repeated_uint32 must be a multiple of 4
test_vector[10] = "02" + "0000"
self.verify_decode_fail(test_vector)
def test_read_longer_buffer(self):
test_vector = serialized_valid_test_message[:]
test_vector.append("somegarbageintheend".encode("hex"))
serialized = "".join(test_vector).decode("hex")
message = test_message_pb2.TestMessage()
reader = tls_message.TLSReader(serialized)
reader.read(message)
self.assertEqual(valid_test_message, message,
msg = "%s vs %s" % (valid_test_message, message))
self.assertFalse(reader.finished())
class TLSWriterTest(unittest.TestCase):
def verify_encode(self, test_message, test_vector):
serialized = tls_message.encode(test_message)
self.assertEqual("".join(test_vector), serialized.encode("hex"))
def verify_encode_fails(self, test_message):
self.assertRaises(tls_message.TLSEncodingError,
tls_message.encode, test_message)
def test_encode(self):
self.verify_encode(valid_test_message, serialized_valid_test_message)
def test_encode_ignores_skipped_fields(self):
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.skip_uint32 = 42
self.verify_encode(test_message, serialized_valid_test_message)
def test_encode_ignores_bad_select(self):
test_vector = serialized_valid_test_message[:]
test_vector[11] = "0000"
test_vector[12] = ""
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.test_enum = test_message_pb2.TestMessage.ENUM_0
self.verify_encode(test_message, test_vector)
def test_encode_too_large_value_fails(self):
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.uint_8 = 65000
self.verify_encode_fails(test_message)
def test_encode_bad_length_fails(self):
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.fixed_bytes = "hello"
self.verify_encode_fails(test_message)
def test_encode_too_short_fails(self):
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.var_bytes2 = "sho"
self.verify_encode_fails(test_message)
def test_encode_too_long_fails(self):
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.var_bytes = "Iamtoolongformyowngood"
self.verify_encode_fails(test_message)
def test_encode_repeated_too_long_fails(self):
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.vector_uint32.extend([1, 2, 3, 4])
self.verify_encode_fails(test_message)
def test_encode_repeated_too_short_fails(self):
test_message = test_message_pb2.TestMessage()
test_message.CopyFrom(valid_test_message)
test_message.ClearField("vector_uint32")
self.verify_encode_fails(test_message)
class SCTEncodingTest(unittest.TestCase):
def setUp(self):
sct_proto = client_pb2.SignedCertificateTimestamp()
sct_proto.version = client_pb2.V1
sct_proto.id.key_id = (
"a4b90990b418581487bb13a2cc67700a3c359804f91bdfb8e377cd0ec80ddc10"
).decode('hex')
sct_proto.timestamp = 1365427532443
sct_proto.signature.hash_algorithm = client_pb2.DigitallySigned.SHA256
sct_proto.signature.sig_algorithm = client_pb2.DigitallySigned.ECDSA
sct_proto.signature.signature = (
"304502210089de897f603e590b1aa0d7c4236c2f697e90602795f7a469215fda5e"
"460123fc022065ab501ce3dbaf49bd563d1c9ff0ac76120bc11f65a44122b3cd8b"
"89fc77a48c").decode("hex")
self._sct_proto = sct_proto
def test_correctly_encodes_sct(self):
sct = tls_message.encode(self._sct_proto)
expected_sct = ("00a4b90990b418581487bb13a2cc67700a3c359804f91bdfb8e377"
"cd0ec80ddc100000013de9d2b29b000004030047304502210089de"
"897f603e590b1aa0d7c4236c2f697e90602795f7a469215fda5e46"
"0123fc022065ab501ce3dbaf49bd563d1c9ff0ac76120bc11f65a4"
"4122b3cd8b89fc77a48c").decode("hex")
self.assertEqual(sct, expected_sct)
def test_correctly_encodes_sct_list_one_sct(self):
# Taken from the C++ serializer test, to ensure this encoder
# produces results compatible with the C++ one.
single_sct = ("0069616d617075626c69636b657973686174776f6669766573697864"
"696765737400000000000004d20000040300097369676e6174757265"
).decode("hex")
sct_list = client_pb2.SignedCertificateTimestampList()
sct_list.sct_list.append(single_sct)
encoded_sct_list = tls_message.encode(sct_list)
self.assertEqual(encoded_sct_list[:4],
"003a0038".decode("hex"))
self.assertEqual(encoded_sct_list[4:], single_sct)
def test_correctly_encodes_sct_list_multiple_scts(self):
first_sct = tls_message.encode(self._sct_proto)
sct_proto_2 = client_pb2.SignedCertificateTimestamp()
sct_proto_2.CopyFrom(self._sct_proto)
sct_proto_2.timestamp = 1365427530000
second_sct = tls_message.encode(sct_proto_2)
sct_list = client_pb2.SignedCertificateTimestampList()
sct_list.sct_list.extend([first_sct, second_sct])
encoded_sct_list = tls_message.encode(sct_list)
# First 2 bytes are list length prefix - 240 bytes in total
# Next 2 bytes are the length of the first SCT: 118
self.assertEqual(encoded_sct_list[:4],
"00f00076".decode("hex"))
first_sct_end = len(first_sct) + 4
# The actual SCT
self.assertEqual(encoded_sct_list[4:first_sct_end], first_sct)
# Next 2 bytes are the length of the second SCT (118 again)
self.assertEqual(encoded_sct_list[first_sct_end:first_sct_end+2],
"0076".decode("hex"))
# The 2nd SCT
self.assertEqual(encoded_sct_list[first_sct_end+2:], second_sct)
if __name__ == "__main__":
unittest.main()
| 39.521583 | 80 | 0.70638 | [
"Apache-2.0"
] | DavadDi/archon | vendor/github.com/google/certificate-transparency/python/ct/serialization/tls_message_test.py | 10,987 | Python |
class Solution:
def intToRoman(self, num: int) -> str:
romans = ["M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"]
values = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
res = ''
for i in range(len(romans)):
while (num - values[i] >= 0):
res += romans[i]
num -= values[i]
return res | 31.615385 | 88 | 0.403893 | [
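
# A small usage sketch (the loop greedily subtracts the largest value first):
#   Solution().intToRoman(1994)  # -> "MCMXCIV" (1000 + 900 + 90 + 4)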
"MIT"
] | geemaple/algorithm | leetcode/12.integer-to-roman.py | 411 | Python |
__author__ = 'Eduardo Mendes'
__email__ = '[email protected]'
__version__ = '0'
| 21.75 | 38 | 0.758621 | [
"MIT"
] | dunossauro/qaninja-liveclass | qaclass-bdd/__init__.py | 87 | Python |
# coding: utf-8
import codecs
import re
import json
from budget2013_common import *
class Budget2013_37_SubTable1Item(object):
def __init__(self):
self._no = None
self._purpose = None
self._principal = None
self._value = None
self._regress = None
self._check = None
self._other = []
@property
def no(self):
return self._no
@no.setter
def no(self, value):
self._no = value
@property
def purpose(self):
return self._purpose
@purpose.setter
def purpose(self, value):
self._purpose = value
@property
def principal(self):
return self._principal
@principal.setter
def principal(self, value):
self._principal = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
@property
def regress(self):
return self._regress
@regress.setter
def regress(self, value):
self._regress = value
@property
def check(self):
return self._check
@check.setter
def check(self, value):
self._check = value
@property
def other(self):
return self._other
@other.setter
def other(self, value):
self._other = value
class JsonEncoder_Budget2013_37_SubTable1Item(json.JSONEncoder):
def default(self, o):
return {
"no": o.no,
"purpose": o.purpose,
"principal": o.principal,
"value": o.value,
"regress": o.regress,
"check": o.check,
"other": o.other
}
class Budget2013_37_SubTable1(object):
def __init__(self):
self._caption = None
self._headers = []
self._items = []
self._notes = []
@property
def caption(self):
return self._caption
@caption.setter
def caption(self, value):
self._caption = value
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value):
self._headers = value
@property
def items(self):
return self._items
@items.setter
def items(self, value):
self._items = value
@property
def notes(self):
return self._notes
@notes.setter
def notes(self, value):
self._notes = value
class JsonEncoder_Budget2013_37_SubTable1(json.JSONEncoder):
def default(self, o):
item_encoder = JsonEncoder_Budget2013_37_SubTable1Item()
return {
"caption": o.caption,
"headers": o.headers,
"items": [item_encoder.default(item) for item in o.items],
"notes": o.notes
}
class Budget2013_37_SubTable2(object):
def __init__(self):
self._caption = None
self._headers = []
self._items = []
@property
def caption(self):
return self._caption
@caption.setter
def caption(self, value):
self._caption = value
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value):
self._headers = value
@property
def items(self):
return self._items
@items.setter
def items(self, value):
self._items = value
class JsonEncoder_Budget2013_37_SubTable2Item(json.JSONEncoder):
def default(self, o):
return {
"name": o["name"],
"value": o["value"]
}
class JsonEncoder_Budget2013_37_SubTable2(json.JSONEncoder):
def default(self, o):
item_encoder = JsonEncoder_Budget2013_37_SubTable2Item()
return {
"caption": o.caption,
"headers": o.headers,
"items": [item_encoder.default(item) for item in o.items]
}
class Budget2013_37(object):
def __init__(self):
self._caption = None
self._subtable1 = Budget2013_37_SubTable1()
self._subtable2 = Budget2013_37_SubTable2()
@property
def caption(self):
return self._caption
@caption.setter
def caption(self, value):
self._caption = value
@property
def subtable1(self):
return self._subtable1
@property
def subtable2(self):
return self._subtable2
class JsonEncoder_Budget2013_37(json.JSONEncoder):
def default(self, o):
subtable1_encoder = JsonEncoder_Budget2013_37_SubTable1()
subtable2_encoder = JsonEncoder_Budget2013_37_SubTable2()
return {
"caption": o.caption,
"subtable1": subtable1_encoder.default(o.subtable1),
"subtable2": subtable2_encoder.default(o.subtable2)
}
def check_document(document):
total_value = 0.0
for item in document.subtable1.items[:-1]:
total_value += item.value
if total_value != document.subtable1.items[-1].value:
print total_value, document.subtable1.items[-1].value
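        # The message below ("Сумма не сходится.") means "The totals do not add up."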
raise Exception(u"Сумма не сходится.")
def get_document(input_file_name):
with codecs.open(input_file_name, "r", encoding = "utf-8-sig") as input_file:
input_data = input_file.readlines()
document = Budget2013_37()
line_index = 0
# caption
caption_lines = []
while line_index < len(input_data):
caption_line = input_data[line_index].strip()
line_index += 1
if not caption_line:
break
caption_lines.append(caption_line)
document.caption = join_lines(caption_lines)
# subtable1 caption
caption_lines = []
while line_index < len(input_data):
caption_line = input_data[line_index].strip()
line_index += 1
if not caption_line:
break
caption_lines.append(caption_line)
document.subtable1.caption = join_lines(caption_lines)
# subtable1 headers
headers = input_data[line_index].strip()
line_index += 2
document.subtable1.headers = headers.split(";")
# subtable1 data
while not input_data[line_index].strip().startswith(u"ИТОГО"):
item = Budget2013_37_SubTable1Item()
# no + purpose
purpose_lines = []
while line_index < len(input_data):
purpose_line = input_data[line_index].strip()
line_index += 1
if not purpose_line:
break
purpose_lines.append(purpose_line)
purpose = join_lines(purpose_lines)
m = re.compile(u"^(\\d+) (.*)").match(purpose)
item.no = int(m.group(1))
item.purpose = m.group(2)
# principal
principal_lines = []
while line_index < len(input_data):
principal_line = input_data[line_index].strip()
line_index += 1
if not principal_line:
break
principal_lines.append(principal_line)
item.principal = join_lines(principal_lines)
# value
item.value = float(input_data[line_index].strip().replace(",", ".").replace(" ", ""))
line_index += 2
# regress
s = input_data[line_index].strip()
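        # In the source data, u"Нет" means "No" and u"Есть" means "Yes".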
if s == u"Нет":
item.regress = False
elif s == u"Есть":
item.regress = True
else:
print s
raise Exception(u"Unknown regress: " + s)
line_index += 2
# check
s = input_data[line_index].strip()
if s == u"Нет":
item.check = False
elif s == u"Есть":
item.check = True
else:
print s
raise Exception(u"Unknown check: " + s)
line_index += 2
# other
other_lines = []
while line_index < len(input_data):
other_line = input_data[line_index].strip()
line_index += 1
if not other_line:
break
if re.compile("^\\d+\\. ").match(other_line):
if other_lines:
o = join_lines(other_lines)
item.other.append(o)
other_lines = []
other_lines.append(other_line)
if other_lines:
o = join_lines(other_lines)
item.other.append(o)
other_lines = []
document.subtable1.items.append(item)
    # ИТОГО ("TOTAL") row
s = input_data[line_index].strip()
m = re.compile(u"^(ИТОГО)\\*? (.*)").match(s)
item = Budget2013_37_SubTable1Item()
item.purpose = m.group(1)
item.value = float(m.group(2).replace(",", ".").replace(" ", ""))
document.subtable1.items.append(item)
line_index += 2
# notes
notes_lines = []
while line_index < len(input_data):
notes_line = input_data[line_index].rstrip()
line_index += 1
if not notes_line:
break
m = re.compile("^\\*? (.*)").match(notes_line)
if m:
if notes_lines:
note = join_lines(notes_lines)
document.subtable1.notes.append(note)
notes_lines = []
notes_lines.append(m.group(1))
else:
notes_lines.append(notes_line.lstrip())
if notes_lines:
note = join_lines(notes_lines)
document.subtable1.notes.append(note)
notes_lines = []
line_index += 1
# subtable2 caption
caption_lines = []
while line_index < len(input_data):
caption_line = input_data[line_index].strip()
line_index += 1
if not caption_line:
break
caption_lines.append(caption_line)
document.subtable2.caption = join_lines(caption_lines)
# subtable2 headers
headers = input_data[line_index].strip()
line_index += 1
document.subtable2.headers = headers.split(";")
#subtable2 data
while line_index < len(input_data):
data_line = input_data[line_index].strip()
line_index += 1
if not data_line:
break
m = re.compile("([\\d ,]+)$").search(data_line)
value = float(m.group(1).replace(",", ".").replace(" ", ""))
name = data_line[:len(data_line) - len(m.group(1)) - 1].strip()
item = {"name": name, "value": value}
document.subtable2.items.append(item)
check_document(document)
return document
def do_write_text_document(output_file, document):
output_file.write(document.caption + "\r\n\r\n")
output_file.write(document.subtable1.caption + "\r\n\r\n")
output_file.write(u" ".join(document.subtable1.headers) + "\r\n\r\n")
for item in document.subtable1.items[:-1]:
output_file.write(unicode(item.no) + " " + item.purpose + " " +
item.principal + " " + unicode(item.value) + " " +
unicode(item.regress) + " " + unicode(item.check) + "\r\n")
if item.other:
for o in item.other:
output_file.write(o + "\r\n");
output_file.write("\r\n")
output_file.write(document.subtable1.items[-1].purpose + " " + unicode(document.subtable1.items[-1].value) + "\r\n\r\n")
for note in document.subtable1.notes:
output_file.write(note + "\r\n")
output_file.write("\r\n")
output_file.write(document.subtable2.caption + "\r\n\r\n")
output_file.write(u" ".join(document.subtable2.headers) + "\r\n\r\n")
for item in document.subtable2.items:
output_file.write(item["name"] + " " + unicode(item["value"]) + "\r\n")
if __name__ == "__main__":
parser = get_default_argument_parser()
args = parser.parse_args()
input_file_name = args.input_file_name
output_pickle_file_name = args.output_pickle_file_name
output_text_file_name = args.output_text_file_name
output_json_file_name = args.output_json_file_name
output_json_pretty_file_name = args.output_json_pretty_file_name
if (not output_pickle_file_name) and (not output_text_file_name) and (not output_json_file_name) and (not output_json_pretty_file_name):
raise Exception("No output file specified")
document = get_document(input_file_name)
if output_pickle_file_name:
write_pickle_document(document, output_pickle_file_name)
if output_text_file_name:
write_text_document(document, output_text_file_name, do_write_text_document)
if output_json_file_name:
write_json_document(document, output_json_file_name, JsonEncoder_Budget2013_37)
if output_json_pretty_file_name:
write_json_pretty_document(document, output_json_pretty_file_name, JsonEncoder_Budget2013_37)
| 25.50591 | 137 | 0.700806 | [
"MIT"
] | capocannoniere/budget | federal/2013/code/budget2013_37.py | 10,833 | Python |
"""
Baseline CNN, losss function and metrics
Also customizes knowledge distillation (KD) loss function here
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
"""
This is the standard way to define your own network in PyTorch. You typically choose the components
(e.g. LSTMs, linear layers etc.) of your network in the __init__ function. You then apply these layers
on the input step-by-step in the forward function. You can use torch.nn.functional to apply functions
such as F.relu, F.sigmoid, F.softmax, F.max_pool2d. Be careful to ensure your dimensions are correct after each
step. You are encouraged to have a look at the network in pytorch/nlp/model/net.py to get a better sense of how
you can go about defining your own network.
The documentation for all the various components available to you is here: http://pytorch.org/docs/master/nn.html
"""
class studentB(nn.Module):
def __init__(self, params):
"""
        We define a convolutional network that predicts the class of an input image. The components
required are:
Args:
params: (Params) contains num_channels
"""
super(studentB, self).__init__()
self.num_channels = params.num_channels
        # each of the convolution layers below has the arguments (input_channels, output_channels, filter_size,
# stride, padding). We also include batch normalisation layers that help stabilise training.
# For more details on how to use these layers, check out the documentation.
self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
self.bn1 = nn.BatchNorm2d(32)
self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0)
self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.bn2 = nn.BatchNorm2d(64)
self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0)
self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0)
self.bn3 = nn.BatchNorm2d(128)
# 2 fully connected layers to transform the output of the convolution layers to the final output
self.fc1 = nn.Linear(4*4*128, 500)
self.fcbn1 = nn.BatchNorm1d(500)
self.fc2 = nn.Linear(500, 10)
self.dropout_rate = params.dropout_rate
def forward(self, s):
"""
This function defines how we use the components of our network to operate on an input batch.
Args:
            s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32.
        Returns:
            out: (Variable) dimension batch_size x 10 with the unnormalized class scores (logits) for each image.
Note: the dimensions after each step are provided
"""
# -> batch_size x 3 x 32 x 32
# we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3
s = self.bn1(self.conv1(s)) # batch_size x 32 x 32 x 32
s = F.relu(F.max_pool2d(s, 2)) # batch_size x 32 x 16 x 16
s = self.conv2_1(s)
s = self.conv2_2(s)
s = self.conv2_3(s)
        s = self.bn2(s)                                  # batch_size x 64 x 16 x 16
        s = F.relu(F.max_pool2d(s, 2))                   # batch_size x 64 x 8 x 8
s = self.conv3_1(s)
s = self.conv3_2(s)
s = self.conv3_3(s)
        s = self.bn3(s)                                  # batch_size x 128 x 8 x 8
        s = F.relu(F.max_pool2d(s, 2))                   # batch_size x 128 x 4 x 4
# flatten the output for each image
s = s.view(-1, 4*4*128) # batch_size x 4*4*num_channels*4
# apply 2 fully connected layers with dropout
s = F.dropout(F.relu(self.fcbn1(self.fc1(s))),
            p=self.dropout_rate, training=self.training)    # batch_size x 500
s = self.fc2(s) # batch_size x 10
return s
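# A minimal shape-check sketch (assumes a `params` object exposing num_channels and
# dropout_rate, as studentB requires; the tensor sizes are illustrative only):
#   model = studentB(params)
#   logits = model(torch.randn(8, 3, 32, 32))   # expected shape: (8, 10)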
def loss_fn(outputs, labels):
"""
Compute the cross entropy loss given outputs and labels.
Args:
        outputs: (Variable) dimension batch_size x 10 - output of the model
        labels: (Variable) dimension batch_size, where each element is a value in [0, 1, ..., 9]
Returns:
loss (Variable): cross entropy loss for all images in the batch
Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions. This example
demonstrates how you can easily define a custom loss function.
"""
return nn.CrossEntropyLoss()(outputs, labels)
def loss_fn_kd(outputs, labels, teacher_outputs, params):
"""
Compute the knowledge-distillation (KD) loss given outputs, labels.
"Hyperparameters": temperature and alpha
NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
and student expects the input tensor to be log probabilities! See Issue #2
"""
alpha = params.alpha
T = params.temperature
KD_loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1),
F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \
F.cross_entropy(outputs, labels) * (1. - alpha)
return KD_loss
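# In equation form (a sketch of the objective implemented above), with student
# logits z_s, teacher logits z_t, temperature T and mixing weight alpha:
#   KD_loss = alpha * T^2 * KL( softmax(z_t / T) || softmax(z_s / T) )
#             + (1 - alpha) * CrossEntropy(z_s, labels)
# nn.KLDivLoss expects log-probabilities as its first argument, hence log_softmax.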
def accuracy(outputs, labels):
"""
Compute the accuracy, given the outputs and labels for all images.
Args:
outputs: (np.ndarray) output of the model
labels: (np.ndarray) [0, 1, ..., num_classes-1]
Returns: (float) accuracy in [0,1]
"""
outputs = np.argmax(outputs, axis=1)
return np.sum(outputs==labels)/float(labels.size)
# maintain all metrics required in this dictionary- these are used in the training and evaluation loops
metrics = {
'accuracy': accuracy,
# could add more metrics such as accuracy for each token type
} | 40.058442 | 119 | 0.625385 | [
"MIT"
] | eungbean/knowledge-distillation-cifar10 | model/studentB.py | 6,169 | Python |
from typing import TypedDict
from cff.models.cloudfront_event import CloudFrontEvent
class Record(TypedDict):
"""Record of an event that raised a Lambda event."""
cf: CloudFrontEvent
"""The CloudFront event that raised this Lambda event."""
| 23.363636 | 61 | 0.747082 | [
"MIT"
] | cariad/cff | cff/models/record.py | 257 | Python |