ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a599505489f42533ddbf825d0b317b08670d3e1 |
# -*- coding: utf-8 -*-
import os
import json
from django.utils import timezone
from nose.tools import * # noqa: F403
from api.citations.utils import render_citation
from osf_tests.factories import UserFactory, PreprintFactory
from tests.base import OsfTestCase
from osf.models import OSFUser
class Node:
_id = '2nthu'
csl = {'publisher': 'Open Science Framework', 'author': [{'given': u'Henrique', 'family': u'Harman'}],
'URL': 'localhost:5000/2nthu', 'issued': {'date-parts': [[2016, 12, 6]]},
'title': u'The study of chocolate in its many forms', 'type': 'webpage', 'id': u'2nthu'}
visible_contributors = ''
class TestCiteprocpy(OsfTestCase):
def setUp(self):
super(TestCiteprocpy, self).setUp()
self.user = UserFactory(fullname='Henrique Harman')
def test_failing_citations(self):
node = Node()
node.visible_contributors = OSFUser.objects.filter(fullname='Henrique Harman')
url_data_path = os.path.join(os.path.dirname(__file__), '../website/static/citeprocpy_test_data.json')
with open(url_data_path) as url_test_data:
data = json.load(url_test_data)['fails']
matches = []
for k, v in data.items():
try:
citeprocpy = render_citation(node, k)
except (TypeError, AttributeError):
citeprocpy = ''
if citeprocpy == v:
matches.append(k)
assert(len(matches) == 0)
def test_passing_citations(self):
node = Node()
node.visible_contributors = OSFUser.objects.filter(fullname='Henrique Harman')
url_data_path = os.path.join(os.path.dirname(__file__), '../website/static/citeprocpy_test_data.json')
with open(url_data_path) as url_test_data:
data = json.load(url_test_data)['passes']
not_matches = []
citation = []
for k, v in data.items():
try:
citeprocpy = render_citation(node, k)
except (TypeError, AttributeError):
citeprocpy = ''
if citeprocpy != v:
not_matches.append(k)
citation.append(citeprocpy)
assert(len(not_matches) == 0)
class TestCiteprocpyMLA(OsfTestCase):
MLA_DATE_FORMAT = '%-d {month} %Y'
# MLA month abbreviations here
# http://www.pomfret.ctschool.net/computer_classes/documents/mla-abbreviationsofmonths.pdf
MLA_MONTH_MAP = {
1: 'Jan.',
2: 'Feb.',
3: 'Mar.',
4: 'Apr.',
5: 'May',
6: 'June',
7: 'July',
8: 'Aug.',
9: 'Sept.',
10: 'Oct.',
11: 'Nov.',
12: 'Dec.',
}
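# Example: datetime.date(2016, 12, 6) formatted with MLA_DATE_FORMAT and the map above
# renders as '6 Dec. 2016' ('%-d' leaves the day without a leading zero).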
def setUp(self):
super(TestCiteprocpyMLA, self).setUp()
self.user = UserFactory(fullname='John Tordoff')
self.second_contrib = UserFactory(fullname='Carson Wentz')
self.third_contrib = UserFactory(fullname='Nick Foles')
self.preprint = PreprintFactory(creator=self.user, title='My Preprint')
date = timezone.now().date()
self.formated_date = date.strftime(self.MLA_DATE_FORMAT).format(month=self.MLA_MONTH_MAP[date.month])
def test_render_citations_mla_one_author(self):
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
# test_suffix
self.user.suffix = 'Junior'
self.user.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, Junior. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
# test_no_middle_names
self.user.suffix = ''
self.user.middle_names = ''
self.user.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
def test_citation_no_repeated_periods(self):
self.preprint.title = 'A Study of Coffee.'
self.preprint.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
def test_citation_osf_provider(self):
self.preprint.title = 'A Study of Coffee.'
self.preprint.save()
self.preprint.provider.name = 'Open Science Framework'
self.preprint.provider.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John. “{}” {}, {}. Web.'.format(
self.preprint.title,
'OSF Preprints',
self.formated_date)
)
def test_two_authors(self):
self.preprint.add_contributor(self.second_contrib)
self.preprint.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, and Carson Wentz. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
def test_three_authors(self):
self.preprint.add_contributor(self.second_contrib)
self.preprint.add_contributor(self.third_contrib)
self.preprint.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, et al. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
# first name suffix
self.user.suffix = 'Jr.'
self.user.save()
citation = render_citation(self.preprint, 'modern-language-association')
assert_equal(citation, u'Tordoff, John, Jr., et al. “{}.” {}, {}. Web.'.format(
self.preprint.title,
self.preprint.provider.name,
self.formated_date)
)
py | 1a59952ebdf65b9706f88f7b8a179655018dc8ea |
from rx import Observable
from pymongo import MongoClient
from rx.concurrency.newthreadscheduler import Scheduler, Disposable
if __name__ == '__main__':
client = MongoClient('mongodb://localhost')
movies = client.cinema.movies
Observable.from_(movies.find().limit(10)) \
.subscribe_on(Scheduler.new_thread) \
.subscribe(lambda d: print(d['title']))
py | 1a599615125892d97bbbac367a462d63ce00a44c |
#!/usr/bin/python3
# Tested with Python 3.8.6
#------------------------------------------------------------------------------
# find_bpl_hotspots.py
#------------------------------------------------------------------------------
# Author: Isabel J. Rodriguez
# 2021.01.23
#------------------------------------------------------------------------------
"""
Scrape data from the Bklyn Reach website and generate a csv file containing
relevant information from participating libraries in the BPL system.
INPUTS
------
NONE
Uses the existing Bklyn Reach url: https://www.bklynlibrary.org/reach/
OUTPUTS
-------
Output file:
"bpl_wifi.csv"
Data included:
LIBRARY
ADDRESS
WI-FI PROGRAM
AVAILABILITY
LIBRARY WEBSITE
"""
# Standard Python library imports
import csv
import sys
import time
# Companion scripts
from write_to_csv import write_to_csv
from exception_handler import exception_handler
from soupify_webpage import parse_html
# Geolocator
from geopy.geocoders import Nominatim
def pull_wifi_data():
# fetch html
bpl_reach_url= 'https://www.bklynlibrary.org/reach/'
webpage_soup = parse_html(bpl_reach_url)
# parse html content
containers = webpage_soup.findAll("div", {"class" : "panel-body"})
# containers[0] has all active participating libraries
# containers[1] has libraries listed as having a program 'coming soon'
list_active = containers[0].ul.findAll("li")
return list_active
def geolocate_coordinates(street_address=None):
if street_address is not None:
try:
geolocator = Nominatim(user_agent="bpl_wifi")
location = geolocator.geocode(street_address)
print(location.address)
latitude = str(location.latitude)
longitude = str(location.longitude)
except AttributeError:
latitude = 'NaN'
longitude = 'NaN'
return latitude, longitude
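# Note: geolocate_coordinates returns stringified coordinates, or ('NaN', 'NaN') when
# Nominatim cannot resolve the address (geocode returns None and raises AttributeError).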
def pull_address_data(url=None):
"""
Libraries with active extended wi-fi programs have their websites listed.
Access websites and pull street address and zip code. If a street address
intersection is given e.g.,
"16 Brighton First Rd. at Brighton Beach Ave."
remove the intersection and return e.g., "16 Brighton First Rd."
"""
if url is not None:
webpage_soup = parse_html(url)
street_container = webpage_soup.findAll("div", {"class":"street-block"})
zip_container = webpage_soup.findAll("div", {"class":"addressfield-container-inline locality-block country-US"})
street_address = street_container[0].div.text
zip_code = zip_container[0].findAll("span", {"class":"postal-code"})[0].text
# clean address data
split_address = street_address.split()
stopwords = ['at', '(near', '(Near', '(at', '(@']
# remove street intersection
for stopword in stopwords:
if stopword in split_address:
street_address = split_address[:split_address.index(stopword)]
street_address = ' '.join(street_address)
else:
pass
# addresses with spelled-out street numbers decrease geocoding accuracy,
# so replace the word with the numeral (e.g., First --> 1st)
# this is done on a case-by-case basis here but could be generalized
if 'First' in street_address:
street_address = street_address.replace("First", "1st")
else:
pass
if 'Fourth' in street_address:
street_address = street_address.replace("Fourth", "4th")
# grab geolocation data
latitude, longitude = geolocate_coordinates(street_address=street_address + ', Brooklyn')
return street_address, zip_code, latitude, longitude
def store_data(list_active):
"""
Create a dictionary to store information for Brooklyn Public
Libraries participating in the Bklyn Reach extended wi-fi program.
"""
# Bklyn Reach service details
wifi_range = '300 feet'
wifi_availability = '24/7'
wifi_program = 'Bklyn Reach'
city = 'Brooklyn'
state = 'New York'
# create a storage container for BPL data
bp_libraries = {list_active[i].text: {'STREET ADDRESS' : '',
'CITY' : city,
'STATE' : state,
'ZIP CODE' : '',
'LATITUDE' : '',
'LONGITUDE' : '',
'WI-FI PROGRAM': wifi_program,
'AVAILABILITY': wifi_availability,
'WI-FI RANGE' : wifi_range,
'LIBRARY WEBSITE': '' }
for i in range(len(list_active))}
print("Compiling data...")
for i in range (len(list_active)):
nested_dict = bp_libraries[list_active[i].text]
street_address, zip_code, latitude, longitude = pull_address_data(list_active[i].a["href"])
nested_dict['STREET ADDRESS'] = street_address
nested_dict['ZIP CODE'] = zip_code
nested_dict['LATITUDE'] = latitude
nested_dict['LONGITUDE'] = longitude
nested_dict['LIBRARY WEBSITE'] = list_active[i].a["href"]
return bp_libraries
def write_data_to_csv(bp_libraries,
output_filename=None,
output_folder=None):
"""
Pull data from storage dictionary into a list of lists,
and write to csv.
ARGUMENTS
---------
bp_libraries : dict
output_filename : str
e.g., "bpl_wifi.csv"
output_folder : str
RETURNS
-------
None
"""
output = []
# Order and sort data into output container
for key, val in bp_libraries.items():
output.append([key,
val['STREET ADDRESS'],
val['CITY'],
val['STATE'],
val['ZIP CODE'],
val['LATITUDE'],
val['LONGITUDE'],
val['WI-FI PROGRAM'],
val['AVAILABILITY'],
val['LIBRARY WEBSITE']])
output.sort(key=lambda header: header[0])
print("Compilation complete. Writing out to a csv file.")
write_to_csv(output_filename=output_filename,
output_folder=output_folder,
output=output)
@exception_handler
def main(output_filename=None):
"""
Pipeline that scrapes the Bklyn Reach page, then processes, sorts, and
writes the results to an output csv file.
ARGUMENTS
---------
output_filename : str
e.g., "wifi.csv"
RETURNS
-------
None
"""
list_active = pull_wifi_data()
bp_libraries = store_data(list_active)
write_data_to_csv(bp_libraries,
output_filename=output_filename,
output_folder=output_folder)
if __name__ == "__main__":
date = time.strftime("%m%d%Y")
output_folder = "../output/"
output_filename = "bpl_wifi_{}.csv".format(date)
main(output_filename)
py | 1a5997130b8b7affab53d19b284a1bbc29695650 |
"""
LC 621
You are given a list of tasks that need to be run, in any order, on a server. Each task will take one CPU interval to execute but once a task has finished, it has a cooling period during which it can’t be run again. If the cooling period for all tasks is ‘K’ intervals, find the minimum number of CPU intervals that the server needs to finish all tasks.
If at any time the server can’t execute any task then it must stay idle.
Example 1:
Input: [a, a, a, b, c, c], K=2
Output: 7
Explanation: a -> c -> b -> a -> c -> idle -> a
Example 2:
Input: [a, b, a], K=3
Output: 5
Explanation: a -> b -> idle -> idle -> a
"""
from collections import deque
from heapq import *
def schedule_tasks(tasks, k):
if len(tasks) <= 1:
return len(tasks)
if k == 0:
return len(tasks)
# count
freqs = {}
for task in tasks:
if task in freqs:
freqs[task] += 1
else:
freqs[task] = 1
freqs = list(freqs.values())
max_f = max(freqs)
n_max = freqs.count(max_f)
# only the most frequent tasks matter:
# if idle gaps are needed, they happen around one of the most frequent tasks
# the tail holds the n_max most frequent tasks
# the front has max_f - 1 groups, with k + 1 slots in each
return max(len(tasks), n_max + (k + 1) * (max_f - 1))
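# worked example (example 1 above): tasks = [a, a, a, b, c, c], k = 2
#   max_f = 3 (task 'a'), n_max = 1 -> n_max + (k + 1) * (max_f - 1) = 1 + 3 * 2 = 7 >= 6, so 7
# worked example (example 2 above): tasks = [a, b, a], k = 3
#   max_f = 2, n_max = 1 -> 1 + 4 * 1 = 5 >= 3, so 5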
def main():
print("Minimum intervals needed to execute all tasks: " +
str(schedule_tasks(['a', 'a', 'a', 'b', 'c', 'c'], 2)))
print("Minimum intervals needed to execute all tasks: " +
str(schedule_tasks(['a', 'b', 'a'], 3)))
main()
"""
# the number of distinct task letters is O(1), so the frequency map is O(1)
Time O(N)
Space O(1)
"""
py | 1a59984f16d7f0d4f82362527ad08c93011569d1 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'PrePartido.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(741, 596)
self.label = QtGui.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(100, 90, 71, 17))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(550, 90, 41, 17))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.tablaJugVis = QtGui.QTableWidget(Dialog)
self.tablaJugVis.setGeometry(QtCore.QRect(20, 110, 341, 351))
self.tablaJugVis.setColumnCount(4)
self.tablaJugVis.setObjectName(_fromUtf8("tablaJugVis"))
self.tablaJugVis.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tablaJugVis.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tablaJugVis.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tablaJugVis.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tablaJugVis.setHorizontalHeaderItem(3, item)
self.tablaJugLocal = QtGui.QTableWidget(Dialog)
self.tablaJugLocal.setGeometry(QtCore.QRect(385, 110, 341, 351))
self.tablaJugLocal.setObjectName(_fromUtf8("tablaJugLocal"))
self.tablaJugLocal.setColumnCount(4)
self.tablaJugLocal.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tablaJugLocal.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tablaJugLocal.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tablaJugLocal.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tablaJugLocal.setHorizontalHeaderItem(3, item)
self.botonAceptar = QtGui.QPushButton(Dialog)
self.botonAceptar.setGeometry(QtCore.QRect(630, 550, 97, 29))
self.botonAceptar.setObjectName(_fromUtf8("botonAceptar"))
self.botonCancelar = QtGui.QPushButton(Dialog)
self.botonCancelar.setGeometry(QtCore.QRect(510, 550, 97, 29))
self.botonCancelar.setObjectName(_fromUtf8("botonCancelar"))
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(20, 480, 141, 17))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(190, 30, 251, 21))
font = QtGui.QFont()
font.setPointSize(14)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(Dialog)
self.label_5.setGeometry(QtCore.QRect(480, 480, 141, 17))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.labelPromBRVis = QtGui.QLabel(Dialog)
self.labelPromBRVis.setGeometry(QtCore.QRect(210, 480, 64, 17))
self.labelPromBRVis.setText(_fromUtf8(""))
self.labelPromBRVis.setObjectName(_fromUtf8("labelPromBRVis"))
self.labelPromBRLocal = QtGui.QLabel(Dialog)
self.labelPromBRLocal.setGeometry(QtCore.QRect(670, 480, 64, 17))
self.labelPromBRLocal.setText(_fromUtf8(""))
self.labelPromBRLocal.setObjectName(_fromUtf8("labelPromBRLocal"))
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label.setText(_translate("Dialog", "Visitante", None))
self.label_2.setText(_translate("Dialog", "Local", None))
item = self.tablaJugVis.horizontalHeaderItem(0)
item.setText(_translate("Dialog", "Nombre", None))
item = self.tablaJugVis.horizontalHeaderItem(1)
item.setText(_translate("Dialog", "Peso", None))
item = self.tablaJugVis.horizontalHeaderItem(2)
item.setText(_translate("Dialog", "Bioritmo", None))
item = self.tablaJugVis.horizontalHeaderItem(3)
item.setText(_translate("Dialog", "Posicion", None))
item = self.tablaJugLocal.horizontalHeaderItem(0)
item.setText(_translate("Dialog", "Nombre", None))
item = self.tablaJugLocal.horizontalHeaderItem(1)
item.setText(_translate("Dialog", "Peso", None))
item = self.tablaJugLocal.horizontalHeaderItem(2)
item.setText(_translate("Dialog", "Bioritmo", None))
item = self.tablaJugLocal.horizontalHeaderItem(3)
item.setText(_translate("Dialog", "Posicion", None))
self.botonAceptar.setText(_translate("Dialog", "Aceptar", None))
self.botonCancelar.setText(_translate("Dialog", "Cancelar", None))
self.label_3.setText(_translate("Dialog", "Promedio Bioritmos:", None))
self.label_4.setText(_translate("Dialog", "Jugadores en el Partido", None))
self.label_5.setText(_translate("Dialog", "Promedio Bioritmos:", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
py | 1a5998b91ef8a9364f3c7543eb841619e1096d44 |
from slack_sdk import WebClient
from typing import Optional
import os
from pprint import pprint
# from slackmostreacted import mostreacted
# from slackmostreacted import Post
from slackmostreacted.slack_utils import most_reacted_messages, post_most_reaction_award, search_channel
from slackmostreacted.slack_utils import list_messages
from slackmostreacted.slack_utils import Channel
def test_connect_bot() -> None:
# channel: Channel = search_channel("tmc-zatsudan")
# print(list_messages(channel.id))
# pprint(most_reacted_messages(channel_name="tmc-zatsudan", k=5))
messages = most_reacted_messages("test_award")
post_most_reaction_award("test_award", awarded_message=messages[0])
def test_search_name(slack_webclient_mock: WebClient) -> None:
assert search_channel("test-channel-1", client=slack_webclient_mock).id == "C1"
def test_most_reacted_post() -> None:
# channel = "mychannel"
# post: Post = mostreacted(channel)
pass
py | 1a5998c1f94241efa659478343c0aeb287067889 |
"""Fixtures for pywemo."""
import asyncio
import contextlib
from unittest.mock import create_autospec, patch
import pytest
import pywemo
from homeassistant.components.wemo import CONF_DISCOVERY, CONF_STATIC
from homeassistant.components.wemo.const import DOMAIN
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
MOCK_HOST = "127.0.0.1"
MOCK_PORT = 50000
MOCK_NAME = "WemoDeviceName"
MOCK_SERIAL_NUMBER = "WemoSerialNumber"
MOCK_FIRMWARE_VERSION = "WeMo_WW_2.00.XXXXX.PVT-OWRT"
MOCK_INSIGHT_CURRENT_WATTS = 0.01
MOCK_INSIGHT_TODAY_KWH = 3.33
MOCK_INSIGHT_STATE_THRESHOLD_POWER = 8.0
@pytest.fixture(name="pywemo_model")
def pywemo_model_fixture():
"""Fixture containing a pywemo class name used by pywemo_device_fixture."""
return "LightSwitch"
@pytest.fixture(name="pywemo_registry", autouse=True)
async def async_pywemo_registry_fixture():
"""Fixture for SubscriptionRegistry instances."""
registry = create_autospec(pywemo.SubscriptionRegistry, instance=True)
registry.callbacks = {}
registry.semaphore = asyncio.Semaphore(value=0)
def on_func(device, type_filter, callback):
registry.callbacks[device.name] = callback
registry.semaphore.release()
registry.on.side_effect = on_func
registry.is_subscribed.return_value = False
with patch("pywemo.SubscriptionRegistry", return_value=registry):
yield registry
@pytest.fixture(name="pywemo_discovery_responder", autouse=True)
def pywemo_discovery_responder_fixture():
"""Fixture for the DiscoveryResponder instance."""
with patch("pywemo.ssdp.DiscoveryResponder", autospec=True):
yield
@contextlib.contextmanager
def create_pywemo_device(pywemo_registry, pywemo_model):
"""Create a WeMoDevice instance."""
cls = getattr(pywemo, pywemo_model)
device = create_autospec(cls, instance=True)
device.host = MOCK_HOST
device.port = MOCK_PORT
device.name = MOCK_NAME
device.serialnumber = MOCK_SERIAL_NUMBER
device.model_name = pywemo_model.replace("LongPress", "")
device.udn = f"uuid:{device.model_name}-1_0-{device.serialnumber}"
device.firmware_version = MOCK_FIRMWARE_VERSION
device.get_state.return_value = 0 # Default to Off
device.supports_long_press.return_value = cls.supports_long_press()
if issubclass(cls, pywemo.Insight):
device.standby_state = pywemo.StandbyState.OFF
device.current_power_watts = MOCK_INSIGHT_CURRENT_WATTS
device.today_kwh = MOCK_INSIGHT_TODAY_KWH
device.threshold_power_watts = MOCK_INSIGHT_STATE_THRESHOLD_POWER
device.on_for = 1234
device.today_on_time = 5678
device.total_on_time = 9012
if issubclass(cls, pywemo.Maker):
device.has_sensor = 1
device.sensor_state = 1
device.switch_mode = 1
device.switch_state = 0
url = f"http://{MOCK_HOST}:{MOCK_PORT}/setup.xml"
with patch("pywemo.setup_url_for_address", return_value=url), patch(
"pywemo.discovery.device_from_description", return_value=device
):
yield device
@pytest.fixture(name="pywemo_device")
def pywemo_device_fixture(pywemo_registry, pywemo_model):
"""Fixture for WeMoDevice instances."""
with create_pywemo_device(pywemo_registry, pywemo_model) as pywemo_device:
yield pywemo_device
@pytest.fixture(name="wemo_entity_suffix")
def wemo_entity_suffix_fixture():
"""Fixture to select a specific entity for wemo_entity."""
return ""
async def async_create_wemo_entity(hass, pywemo_device, wemo_entity_suffix):
"""Create a hass entity for a wemo device."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_DISCOVERY: False,
CONF_STATIC: [f"{MOCK_HOST}:{MOCK_PORT}"],
},
},
)
await hass.async_block_till_done()
entity_registry = er.async_get(hass)
for entry in entity_registry.entities.values():
if entry.entity_id.endswith(wemo_entity_suffix or pywemo_device.name.lower()):
return entry
return None
@pytest.fixture(name="wemo_entity")
async def async_wemo_entity_fixture(hass, pywemo_device, wemo_entity_suffix):
"""Fixture for a Wemo entity in hass."""
return await async_create_wemo_entity(hass, pywemo_device, wemo_entity_suffix)
py | 1a5999e65c7705dccfb20443922a2c320851bf49 |
"""Authentication and authorization."""
py | 1a599a19a8d89f7aef8bc18111cb828a30450581 |
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Exceptions for zope.testrunner
"""
class DocTestFailureException(AssertionError):
"""Use custom exception for doctest unit test failures"""
py | 1a599b769a2fba1b4a4651d68860246f6697bd50 |
# -*- coding: utf-8 -*-
'''
Created on March 27, 2016
@author: fly
'''
# example 1
def log(func):
def wraper():
print('Info:starting {}'.format(func.__name__))
func()
print('Info:finishing {}'.format(func.__name__))
return wraper
@log
def run():
print('Runing run...')
#end example 1
#example 2
from time import sleep, time
def timer(cls):
def wraper():
s = time()
obj = cls()
e = time()
print('Cost {:.3f}s to init.'.format(e-s))
return obj
return wraper
@timer
class Obj():
def __init__(self):
print("hello")
sleep(3)
print('obj')
if __name__=='__main__':
run()
Obj()
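# Expected console output when this module is run directly (timing is approximate):
#   Info:starting run
#   Runing run...
#   Info:finishing run
#   hello
#   obj
#   Cost 3.001s to init.   (exact figure varies slightly per run)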
py | 1a599beb6e6815a7224ac2ce1efac40cb8de41c3 |
try:
import os, sys
except:
print("ExceptionERROR: Missing fundamental packages (required: os, sys).")
code_icon = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\templates\\code_icon.ico"
dir2ra = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\"
dir2co = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\StrandingRisk\\"
dir2conditions = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\01_Conditions\\"
dir2flows = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\00_Flows\\"
dir2gs = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\GetStarted\\"
dir2lf = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\LifespanDesign\\"
dir2map = os.path.abspath(os.path.join(os.path.dirname(__file__), "..\\..")) + "\\02_Maps\\"
dir2map_templates = os.path.abspath(os.path.join(os.path.dirname(__file__), "..\\..")) + "\\02_Maps\\templates\\"
dir2ml = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\MaxLifespan\\"
dir2mt = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\ModifyTerrain\\"
dir2oxl = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\openpyxl\\"
dir2pm = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\ProjectMaker\\"
dir2rb = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\ModifyTerrain\\RiverBuilder\\"
dir2ripy = os.path.dirname(__file__) + "\\"
dir2templates = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\templates\\"
dir2sh = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\SHArC\\"
dir2va = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\VolumeAssessment\\"
ft2ac = float(1 / 43560)
m2ft = 0.3048
empty_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\templates\\oups.txt"
xlsx_aqua = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\templates\\Fish.xlsx"
xlsx_dummy = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\templates\\empty.xlsx"
xlsx_mu = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) + "\\templates\\morphological_units.xlsx"
xlsx_reaches = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\ModifyTerrain\\.templates\\computation_extents.xlsx"
xlsx_thresholds = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\LifespanDesign\\.templates\\threshold_values.xlsx"
xlsx_volumes = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\VolumeAssessment\\.templates\\volumes_template.xlsx"
xlsx_connectivity = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..')) + "\\StrandingRisk\\.templates\\disconnected_area_template.xlsx"
py | 1a599bf0d78706b914fd5287157d7254a93390a1 |
# -*- coding: utf-8 -*-
# Scrapy settings for xskt project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'xskt'
SPIDER_MODULES = ['xskt.spiders']
NEWSPIDER_MODULE = 'xskt.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'xskt (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'xskt.middlewares.XsktSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'xskt.middlewares.XsktDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'xskt.pipelines.XsktPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
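# Illustrative example (values assumed, settings documented above): a deliberately slow,
# polite crawl could enable, for instance:
#DOWNLOAD_DELAY = 1
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0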
py | 1a599db434164e6a0b7cdccb873da05f1e3d52d3 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .certificate_item import CertificateItem
class DeletedCertificateItem(CertificateItem):
"""The deleted certificate item containing metadata about the deleted
certificate.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Certificate identifier.
:type id: str
:param attributes: The certificate management attributes.
:type attributes: ~azure.keyvault.models.CertificateAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param x509_thumbprint: Thumbprint of the certificate.
:type x509_thumbprint: bytes
:param recovery_id: The url of the recovery object, used to identify and
recover the deleted certificate.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the certificate is scheduled to
be purged, in UTC
:vartype scheduled_purge_date: datetime
:ivar deleted_date: The time when the certificate was deleted, in UTC
:vartype deleted_date: datetime
"""
_validation = {
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'x509_thumbprint': {'key': 'x5t', 'type': 'base64'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(self, *, id: str=None, attributes=None, tags=None, x509_thumbprint: bytes=None, recovery_id: str=None, **kwargs) -> None:
super(DeletedCertificateItem, self).__init__(id=id, attributes=attributes, tags=tags, x509_thumbprint=x509_thumbprint, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
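# Illustrative sketch (hypothetical values): the model can also be built client-side from
# known fields, since the generated constructor accepts them as keyword arguments, e.g.
#   item = DeletedCertificateItem(
#       id='https://myvault.vault.azure.net/certificates/cert1',
#       recovery_id='https://myvault.vault.azure.net/deletedcertificates/cert1',
#   )
# scheduled_purge_date and deleted_date stay None until populated by the service.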
py | 1a599e9b7f41422d5c062f56fb8b2b35b63163ea |
from __init__ import *
import sys
sys.path.insert(0, ROOT)
from fractions import Fraction
from compiler import *
from constructs import *
def interpolate(G, U, l, pipe_data, name):
z = pipe_data['z']
y = pipe_data['y']
x = pipe_data['x']
extent = pipe_data['extent']
interior = pipe_data['interior']
ghosts = pipe_data['ghosts']
inner_box = interior[l]['inner_box']
UU = Function(([z, y, x], [extent[l], extent[l], extent[l]]),
Double, str(name))
zz = z/2
yy = y/2
xx = x/2
def z1(xx):
return G(zz , yy+1, xx) + G(zz , yy , xx)
def z2(xx):
return G(zz+1, yy , xx) + G(zz , yy , xx)
def z3(xx):
return G(zz+1, yy+1, xx) + G(zz+1, yy, xx) \
+ G(zz , yy+1, xx) + G(zz , yy, xx)
expr_000 = G(zz, yy, xx)
expr_001 = 0.500 * (G(zz, yy, xx) + G(zz, yy, xx+1))
expr_010 = 0.500 * z1(xx)
expr_011 = 0.250 * (z1(xx) + z1(xx+1))
expr_100 = 0.500 * z2(xx)
expr_101 = 0.250 * (z2(xx) + z2(xx+1))
expr_110 = 0.250 * z3(xx)
expr_111 = 0.125 * (z3(xx) + z3(xx+1))
even_x = Condition(x%2, '==', 0)
even_y = Condition(y%2, '==', 0)
even_z = Condition(z%2, '==', 0)
if U == None:
correct = 0.0
else:
correct = U(z, y, x)
UU.defn = [ correct + \
Select(even_z,
Select(even_y,
Select(even_x,
expr_000,
expr_001),
Select(even_x,
expr_010,
expr_011)),
Select(even_y,
Select(even_x,
expr_100,
expr_101),
Select(even_x,
expr_110,
expr_111))) ]
return UU
py | 1a599ed9fac727280e8c9a8f35309e9f4146c6f0 |
"""Arkane Studios made a number of Source Engine powered projects.
Few made it to release."""
from . import dark_messiah_multiplayer
from . import dark_messiah_singleplayer
scripts = [dark_messiah_multiplayer, dark_messiah_singleplayer]
py | 1a599f317269766437803a0493f1346f0814a375 |
# -*- coding: utf-8 -*-
import os
import sys
import math
sys.dont_write_bytecode = True
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.proto import caffe_pb2
sys.path.append('../')
from PyLib.LayerParam.MultiBoxLossLayerParam import *
from PyLib.NetLib.ConvBNLayer import *
from PyLib.NetLib.InceptionLayer import *
from PyLib.NetLib.MultiScaleLayer import *
from PyLib.NetLib.VggNet import VGG16_BaseNet_ChangeChannel
from PyLib.NetLib.YoloNet import YoloNetPart
from BaseNet import *
from AddC6 import *
from DetectorHeader import *
from DAP_Param import *
import numpy as np
from solverParam import truncvalues
# ##############################################################################
# ------------------------------------------------------------------------------
# Final Network
flag_train_withperson = True
def InceptionOfficialLayer(net, from_layer, out_layer, channels_1=1,channels_3=[],channels_5=[],channels_ave=1,inter_bn = True,leaky=False):
fea_layer = from_layer
concatlayers = []
mid_layer = "{}/incep/1x1".format(out_layer)
ConvBNUnitLayer(net, fea_layer, mid_layer, use_bn=inter_bn, use_relu=True, num_output=channels_1, kernel_size=1,
pad=0,stride=1, use_scale=True, leaky=leaky)
concatlayers.append(net[mid_layer])
start_layer = mid_layer
mid_layer = "{}/incep/1_reduce".format(out_layer)
ConvBNUnitLayer(net, fea_layer, mid_layer, use_bn=inter_bn, use_relu=True,num_output=channels_3[0], kernel_size=1, pad=0,
stride=1, use_scale=True, leaky=leaky)
start_layer = mid_layer
mid_layer = "{}/incep/3x3".format(out_layer)
ConvBNUnitLayer(net, start_layer, mid_layer, use_bn=inter_bn, use_relu=True, num_output=channels_3[1], kernel_size=3, pad=1,
stride=1, use_scale=True, leaky=leaky)
concatlayers.append(net[mid_layer])
mid_layer = "{}/incep/2_reduce".format(out_layer)
ConvBNUnitLayer(net, fea_layer, mid_layer, use_bn=inter_bn, use_relu=True, num_output=channels_5[0], kernel_size=1, pad=0,
stride=1, use_scale=True, leaky=leaky)
start_layer = mid_layer
mid_layer = "{}/incep/5x5".format(out_layer)
ConvBNUnitLayer(net, start_layer, mid_layer, use_bn=inter_bn, use_relu=True, num_output=channels_5[1], kernel_size=5, pad=2,
stride=1, use_scale=True, leaky=leaky)
concatlayers.append(net[mid_layer])
mid_layer = "{}/incep/pool".format(out_layer)
net[mid_layer] = L.Pooling(net[fea_layer], pool=P.Pooling.AVE, kernel_size=3, stride=1, pad=1)
start_layer = mid_layer
mid_layer = "{}/incep/pool_1x1".format(out_layer)
ConvBNUnitLayer(net, start_layer, mid_layer, use_bn=inter_bn, use_relu=True, num_output=channels_ave, kernel_size=1,
pad=0,stride=1, use_scale=True, leaky=leaky)
concatlayers.append(net[mid_layer])
# incep
layer_name = "{}/incep".format(out_layer)
name = "{}/incep".format(out_layer)
net[name] = L.Concat(*concatlayers, name=layer_name, axis=1)
return net
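# Example call (mirroring the first inception stage built in FaceBoxAlikeNet below):
# with n_chan = 256 and divide_scale = 4 this creates "conv4_1/incep" on top of "pool3":
#   net = InceptionOfficialLayer(net, "pool3", "conv4_1", channels_1=64,
#                                channels_3=[32, 64], channels_5=[32, 64],
#                                channels_ave=64, inter_bn=True, leaky=False)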
def FaceBoxAlikeNet(net, train=True, data_layer="data", gt_label="label", \
net_width=512, net_height=288):
lr = 1
decay = 1
from_layer = data_layer
num_channels = [32,64,128]
k_sizes = [5,3,3]
strides = [2,1,1]
for i in xrange(len(num_channels)):
add_layer = "conv{}".format(i+1)
ConvBNUnitLayer(net, from_layer, add_layer, use_bn=True, use_relu=True, leaky=False,
num_output=num_channels[i], kernel_size=k_sizes[i], pad=(k_sizes[i]-1)/2, stride=strides[i], use_scale=True,
n_group=1, lr_mult=lr, decay_mult=decay)
from_layer = add_layer
# if not i == len(num_channels) - 1:
add_layer = "pool{}".format(i+1)
net[add_layer] = L.Pooling(net[from_layer], pool=P.Pooling.MAX, kernel_size=3, stride=2, pad=0)
from_layer = add_layer
layer_cnt = len(num_channels)
num_channels = [256,256,256,256]
divide_scale = 4
for i in xrange(len(num_channels)):
n_chan = num_channels[i]
add_layer = "conv{}_{}".format(layer_cnt+1,i + 1)
net = InceptionOfficialLayer(net, from_layer, add_layer, channels_1=n_chan/divide_scale, channels_3=[n_chan/8, n_chan/4],
channels_5=[n_chan/8, n_chan/4], channels_ave=n_chan/divide_scale, inter_bn=True, leaky=False)
from_layer = "conv{}_{}/incep".format(layer_cnt+1,i + 1)
layer_cnt += 1
num_channels = [256,256,256]
for i in xrange(len(num_channels)):
if i == 0:
stride = 2
else:
stride = 1
add_layer = "conv{}_{}".format(layer_cnt+1,i + 1)
ConvBNUnitLayer(net, from_layer, add_layer, use_bn=True, use_relu=True, leaky=False,
num_output=num_channels[i], kernel_size=3, pad=1, stride=stride,
use_scale=True, n_group=1, lr_mult=lr, decay_mult=decay)
from_layer = add_layer
layer_cnt += 1
num_channels = [256,256,256,256]
for i in xrange(len(num_channels)):
if i == 0:
stride = 2
else:
stride = 1
add_layer = "conv{}_{}".format(layer_cnt+1,i + 1)
ConvBNUnitLayer(net, from_layer, add_layer, use_bn=True, use_relu=True, leaky=False,
num_output=num_channels[i], kernel_size=3, pad=1, stride=stride,
use_scale=True, n_group=1, lr_mult=lr, decay_mult=decay)
from_layer = add_layer
lr_detnetperson = 1.0
# Create SSD Header for SSD1
if flag_train_withperson:
mbox_1_layers = SsdDetectorHeaders(net, \
net_width=net_width, net_height=net_height, data_layer=data_layer, \
from_layers=ssd_Param_1.get('feature_layers', []), \
num_classes=ssd_Param_1.get("num_classes", 2), \
boxsizes=ssd_Param_1.get("anchor_boxsizes", []), \
aspect_ratios=ssd_Param_1.get("anchor_aspect_ratios", []), \
prior_variance=ssd_Param_1.get("anchor_prior_variance",
[0.1, 0.1, 0.2, 0.2]), \
flip=ssd_Param_1.get("anchor_flip", True), \
clip=ssd_Param_1.get("anchor_clip", True), \
normalizations=ssd_Param_1.get("interlayers_normalizations", []), \
use_batchnorm=ssd_Param_1.get("interlayers_use_batchnorm", True), \
inter_layer_channels=ssd_Param_1.get("interlayers_channels_kernels", []), \
use_focus_loss=ssd_Param_1.get("bboxloss_using_focus_loss", False), \
use_dense_boxes=ssd_Param_1.get('bboxloss_use_dense_boxes', False), \
stage=1, lr_mult=lr_detnetperson)
# make Loss or Detout for SSD1
if train:
loss_param = get_loss_param(normalization=ssd_Param_1.get("bboxloss_normalization", P.Loss.VALID))
mbox_1_layers.append(net[gt_label])
use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes', False)
if use_dense_boxes:
bboxloss_param = {
'gt_labels': ssd_Param_1.get('gt_labels', []),
'target_labels': ssd_Param_1.get('target_labels', []),
'num_classes': ssd_Param_1.get("num_classes", 2),
'alias_id': ssd_Param_1.get("alias_id", 0),
'loc_loss_type': ssd_Param_1.get("bboxloss_loc_loss_type", P.MultiBoxLoss.SMOOTH_L1),
'conf_loss_type': ssd_Param_1.get("bboxloss_conf_loss_type", P.MultiBoxLoss.LOGISTIC),
'loc_weight': ssd_Param_1.get("bboxloss_loc_weight", 1),
'conf_weight': ssd_Param_1.get("bboxloss_conf_weight", 1),
'overlap_threshold': ssd_Param_1.get("bboxloss_overlap_threshold", 0.5),
'neg_overlap': ssd_Param_1.get("bboxloss_neg_overlap", 0.5),
'size_threshold': ssd_Param_1.get("bboxloss_size_threshold", 0.0001),
'do_neg_mining': ssd_Param_1.get("bboxloss_do_neg_mining", True),
'neg_pos_ratio': ssd_Param_1.get("bboxloss_neg_pos_ratio", 3),
'using_focus_loss': ssd_Param_1.get("bboxloss_using_focus_loss", False),
'gama': ssd_Param_1.get("bboxloss_focus_gama", 2),
'use_difficult_gt': ssd_Param_1.get("bboxloss_use_difficult_gt", False),
'code_type': ssd_Param_1.get("bboxloss_code_type", P.PriorBox.CENTER_SIZE),
'use_prior_for_matching': True,
'encode_variance_in_target': False,
'flag_noperson': ssd_Param_1.get('flag_noperson', False),
}
net["mbox_1_loss"] = L.DenseBBoxLoss(*mbox_1_layers, dense_bbox_loss_param=bboxloss_param, \
loss_param=loss_param,
include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
propagate_down=[True, True, False, False])
else:
bboxloss_param = {
'gt_labels': ssd_Param_1.get('gt_labels', []),
'target_labels': ssd_Param_1.get('target_labels', []),
'num_classes': ssd_Param_1.get("num_classes", 2),
'alias_id': ssd_Param_1.get("alias_id", 0),
'loc_loss_type': ssd_Param_1.get("bboxloss_loc_loss_type", P.MultiBoxLoss.SMOOTH_L1),
'conf_loss_type': ssd_Param_1.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX),
'loc_weight': ssd_Param_1.get("bboxloss_loc_weight", 1),
'conf_weight': ssd_Param_1.get("bboxloss_conf_weight", 1),
'overlap_threshold': ssd_Param_1.get("bboxloss_overlap_threshold", 0.5),
'neg_overlap': ssd_Param_1.get("bboxloss_neg_overlap", 0.5),
'size_threshold': ssd_Param_1.get("bboxloss_size_threshold", 0.0001),
'do_neg_mining': ssd_Param_1.get("bboxloss_do_neg_mining", True),
'neg_pos_ratio': ssd_Param_1.get("bboxloss_neg_pos_ratio", 3),
'using_focus_loss': ssd_Param_1.get("bboxloss_using_focus_loss", False),
'gama': ssd_Param_1.get("bboxloss_focus_gama", 2),
'use_difficult_gt': ssd_Param_1.get("bboxloss_use_difficult_gt", False),
'code_type': ssd_Param_1.get("bboxloss_code_type", P.PriorBox.CENTER_SIZE),
'match_type': P.MultiBoxLoss.PER_PREDICTION,
'share_location': True,
'use_prior_for_matching': True,
'background_label_id': 0,
'encode_variance_in_target': False,
'map_object_to_agnostic': False,
}
net["mbox_1_loss"] = L.BBoxLoss(*mbox_1_layers, bbox_loss_param=bboxloss_param, \
loss_param=loss_param,
include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
propagate_down=[True, True, False, False])
else:
if ssd_Param_1.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
reshape_name = "mbox_1_conf_reshape"
net[reshape_name] = L.Reshape(mbox_1_layers[1], \
shape=dict(dim=[0, -1, ssd_Param_1.get("num_classes", 2)]))
softmax_name = "mbox_1_conf_softmax"
net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
flatten_name = "mbox_1_conf_flatten"
net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
mbox_1_layers[1] = net[flatten_name]
elif ssd_Param_1.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
sigmoid_name = "mbox_1_conf_sigmoid"
net[sigmoid_name] = L.Sigmoid(mbox_1_layers[1])
mbox_1_layers[1] = net[sigmoid_name]
else:
raise ValueError("Unknown conf loss type.")
# Det-out param
det_out_param = {
'num_classes': ssd_Param_1.get("num_classes", 2),
'target_labels': ssd_Param_1.get('detout_target_labels', []),
'alias_id': ssd_Param_1.get("alias_id", 0),
'conf_threshold': ssd_Param_1.get("detout_conf_threshold", 0.01),
'nms_threshold': ssd_Param_1.get("detout_nms_threshold", 0.45),
'size_threshold': ssd_Param_1.get("detout_size_threshold", 0.0001),
'top_k': ssd_Param_1.get("detout_top_k", 30),
'share_location': True,
'code_type': P.PriorBox.CENTER_SIZE,
'background_label_id': 0,
'variance_encoded_in_target': False,
}
use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes', False)
if use_dense_boxes:
net.detection_out_1 = L.DenseDetOut(*mbox_1_layers, \
detection_output_param=det_out_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
else:
net.detection_out_1 = L.DetOut(*mbox_1_layers, \
detection_output_param=det_out_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
# make Loss & Detout for SSD2
if use_ssd2_for_detection:
mbox_2_layers = SsdDetectorHeaders(net, \
net_width=net_width, net_height=net_height, data_layer=data_layer, \
from_layers=ssd_Param_2.get('feature_layers', []), \
num_classes=ssd_Param_2.get("num_classes", 2), \
boxsizes=ssd_Param_2.get("anchor_boxsizes", []), \
aspect_ratios=ssd_Param_2.get("anchor_aspect_ratios", []), \
prior_variance=ssd_Param_2.get("anchor_prior_variance",
[0.1, 0.1, 0.2, 0.2]), \
flip=ssd_Param_2.get("anchor_flip", True), \
clip=ssd_Param_2.get("anchor_clip", True), \
normalizations=ssd_Param_2.get("interlayers_normalizations", []), \
use_batchnorm=ssd_Param_2.get("interlayers_use_batchnorm", True), \
inter_layer_channels=ssd_Param_2.get("interlayers_channels_kernels", []), \
use_focus_loss=ssd_Param_2.get("bboxloss_using_focus_loss", False), \
use_dense_boxes=ssd_Param_2.get('bboxloss_use_dense_boxes', False), \
stage=2)
# make Loss or Detout for SSD1
if train:
loss_param = get_loss_param(normalization=ssd_Param_2.get("bboxloss_normalization", P.Loss.VALID))
mbox_2_layers.append(net[gt_label])
use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes', False)
if use_dense_boxes:
bboxloss_param = {
'gt_labels': ssd_Param_2.get('gt_labels', []),
'target_labels': ssd_Param_2.get('target_labels', []),
'num_classes': ssd_Param_2.get("num_classes", 2),
'alias_id': ssd_Param_2.get("alias_id", 0),
'loc_loss_type': ssd_Param_2.get("bboxloss_loc_loss_type", P.MultiBoxLoss.SMOOTH_L1),
'conf_loss_type': ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.LOGISTIC),
'loc_weight': ssd_Param_2.get("bboxloss_loc_weight", 1),
'conf_weight': ssd_Param_2.get("bboxloss_conf_weight", 1),
'overlap_threshold': ssd_Param_2.get("bboxloss_overlap_threshold", 0.5),
'neg_overlap': ssd_Param_2.get("bboxloss_neg_overlap", 0.5),
'size_threshold': ssd_Param_2.get("bboxloss_size_threshold", 0.0001),
'do_neg_mining': ssd_Param_2.get("bboxloss_do_neg_mining", True),
'neg_pos_ratio': ssd_Param_2.get("bboxloss_neg_pos_ratio", 3),
'using_focus_loss': ssd_Param_2.get("bboxloss_using_focus_loss", False),
'gama': ssd_Param_2.get("bboxloss_focus_gama", 2),
'use_difficult_gt': ssd_Param_2.get("bboxloss_use_difficult_gt", False),
'code_type': ssd_Param_2.get("bboxloss_code_type", P.PriorBox.CENTER_SIZE),
'use_prior_for_matching': True,
'encode_variance_in_target': False,
'flag_noperson': ssd_Param_2.get('flag_noperson', False),
}
net["mbox_2_loss"] = L.DenseBBoxLoss(*mbox_2_layers, dense_bbox_loss_param=bboxloss_param, \
loss_param=loss_param,
include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
propagate_down=[True, True, False, False])
else:
bboxloss_param = {
'gt_labels': ssd_Param_2.get('gt_labels', []),
'target_labels': ssd_Param_2.get('target_labels', []),
'num_classes': ssd_Param_2.get("num_classes", 2),
'alias_id': ssd_Param_2.get("alias_id", 0),
'loc_loss_type': ssd_Param_2.get("bboxloss_loc_loss_type", P.MultiBoxLoss.SMOOTH_L1),
'conf_loss_type': ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX),
'loc_weight': ssd_Param_2.get("bboxloss_loc_weight", 1),
'conf_weight': ssd_Param_2.get("bboxloss_conf_weight", 1),
'overlap_threshold': ssd_Param_2.get("bboxloss_overlap_threshold", 0.5),
'neg_overlap': ssd_Param_2.get("bboxloss_neg_overlap", 0.5),
'size_threshold': ssd_Param_2.get("bboxloss_size_threshold", 0.0001),
'do_neg_mining': ssd_Param_2.get("bboxloss_do_neg_mining", True),
'neg_pos_ratio': ssd_Param_2.get("bboxloss_neg_pos_ratio", 3),
'using_focus_loss': ssd_Param_2.get("bboxloss_using_focus_loss", False),
'gama': ssd_Param_2.get("bboxloss_focus_gama", 2),
'use_difficult_gt': ssd_Param_2.get("bboxloss_use_difficult_gt", False),
'code_type': ssd_Param_2.get("bboxloss_code_type", P.PriorBox.CENTER_SIZE),
'match_type': P.MultiBoxLoss.PER_PREDICTION,
'share_location': True,
'use_prior_for_matching': True,
'background_label_id': 0,
'encode_variance_in_target': False,
'map_object_to_agnostic': False,
}
net["mbox_2_loss"] = L.BBoxLoss(*mbox_2_layers, bbox_loss_param=bboxloss_param, \
loss_param=loss_param,
include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
propagate_down=[True, True, False, False])
else:
if ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
reshape_name = "mbox_2_conf_reshape"
net[reshape_name] = L.Reshape(mbox_2_layers[1], \
shape=dict(dim=[0, -1, ssd_Param_2.get("num_classes", 2)]))
softmax_name = "mbox_2_conf_softmax"
net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
flatten_name = "mbox_2_conf_flatten"
net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
mbox_2_layers[1] = net[flatten_name]
elif ssd_Param_2.get("bboxloss_conf_loss_type", P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
sigmoid_name = "mbox_2_conf_sigmoid"
net[sigmoid_name] = L.Sigmoid(mbox_2_layers[1])
mbox_2_layers[1] = net[sigmoid_name]
else:
raise ValueError("Unknown conf loss type.")
# Det-out param
det_out_param = {
'num_classes': ssd_Param_2.get("num_classes", 2),
'target_labels': ssd_Param_2.get('detout_target_labels', []),
'alias_id': ssd_Param_2.get("alias_id", 0),
'conf_threshold': ssd_Param_2.get("detout_conf_threshold", 0.01),
'nms_threshold': ssd_Param_2.get("detout_nms_threshold", 0.45),
'size_threshold': ssd_Param_2.get("detout_size_threshold", 0.0001),
'top_k': ssd_Param_2.get("detout_top_k", 30),
'share_location': True,
'code_type': P.PriorBox.CENTER_SIZE,
'background_label_id': 0,
'variance_encoded_in_target': False,
}
use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes', False)
if use_dense_boxes:
net.detection_out_2 = L.DenseDetOut(*mbox_2_layers, \
detection_output_param=det_out_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
else:
net.detection_out_2 = L.DetOut(*mbox_2_layers, \
detection_output_param=det_out_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
# EVAL in TEST MODE
if not train:
det_eval_param = {
'gt_labels': eval_Param.get('eval_gt_labels', []),
'num_classes': eval_Param.get("eval_num_classes", 2),
'evaluate_difficult_gt': eval_Param.get("eval_difficult_gt", False),
'boxsize_threshold': eval_Param.get("eval_boxsize_threshold", [0, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25]),
'iou_threshold': eval_Param.get("eval_iou_threshold", [0.9, 0.75, 0.5]),
'background_label_id': 0,
}
if use_ssd2_for_detection:
det_out_layers = []
if flag_train_withperson:
det_out_layers.append(net['detection_out_1'])
det_out_layers.append(net['detection_out_2'])
name = 'det_out'
net[name] = L.Concat(*det_out_layers, axis=2)
net.det_accu = L.DetEval(net[name], net[gt_label], \
detection_evaluate_param=det_eval_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
else:
net.det_accu = L.DetEval(net['detection_out_1'], net[gt_label], \
detection_evaluate_param=det_eval_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
return net
py | 1a59a1b642e22768f387a065a21ca503410d4ec7 |
__all__=['common']
py | 1a59a2135252e98f8e908225a1d5136e40049562 |
import sys
if './' not in sys.path: sys.path.append('./')
from screws.freeze.base import FrozenOnly
from objects.CSCG._3d.forms.trace._2tr.discretize.vector.standard import _3dCSCG_2Trace_Discretize_StandardVector
from objects.CSCG._3d.forms.trace._2tr.discretize.vector.boundary_wise import _3dCSCG_2Trace_Discretize_BoundaryWiseVector
from objects.CSCG._3d.forms.trace._2tr.discretize.scalar.standard import _3dCSCG_2Trace_Discretize_StandardScalar
from objects.CSCG._3d.forms.trace._2tr.discretize.scalar.boundary_wise import _3dCSCG_2Trace_Discretize_BoundaryWiseScalar
class _3dCSCG_2Trace_Discretize(FrozenOnly):
""""""
def __init__(self, tf):
self._tf_ = tf
self._standard_vector_ = _3dCSCG_2Trace_Discretize_StandardVector(tf)
self._boundary_wise_vector_ = _3dCSCG_2Trace_Discretize_BoundaryWiseVector(tf)
self._standard_scalar_ = _3dCSCG_2Trace_Discretize_StandardScalar(tf)
self._boundary_wise_scalar_ = _3dCSCG_2Trace_Discretize_BoundaryWiseScalar(tf)
self._freeze_self_()
def __call__(self, update_cochain=True, target='func', **kwargs):
"""
Do the discretization.
:param bool update_cochain: Whether to update the cochain of the trace form.
:param target:
:param kwargs: Keywords arguments to be passed to particular discretization schemes.
:return: The cochain corresponding to the particular discretization scheme.
"""
SELF = self._tf_
if target == 'func':
if SELF.TW.func.body.__class__.__name__ == '_3dCSCG_ScalarField':
if SELF.func.ftype == 'standard':
return self._standard_scalar_(
update_cochain=update_cochain, **kwargs)
else:
raise Exception(f'3dCSCG 2-trace can not (target func) discretize '
f'_3dCSCG_ScalarField of ftype {SELF.func.ftype}.')
elif SELF.TW.func.body.__class__.__name__ == '_3dCSCG_VectorField':
if SELF.func.ftype == 'standard': # we will discretize the norm component of the vector.
return self._standard_vector_(
update_cochain=update_cochain, **kwargs)
else:
raise Exception(f'3dCSCG 2-trace can not (target func) discretize '
f'_3dCSCG_VectorField of ftype {SELF.func.ftype}.')
else:
raise NotImplementedError(f'3dCSCG 2-trace can not (target func) '
f'discretize {SELF.TW.func.body.__class__}.')
elif target == 'BC': # We target at the BC, so we do not update the cochain!
if SELF.TW.BC.body.__class__.__name__ == '_3dCSCG_ScalarField':
if SELF.BC.ftype == 'standard':
return self._standard_scalar_(
update_cochain=False, target='BC', **kwargs)
elif SELF.BC.ftype == 'boundary-wise':
return self._boundary_wise_scalar_(
**kwargs) # must be False update_cochain and 'BC' target.
else:
raise Exception(f'3dCSCG 2-trace can not (target BC) discretize '
f'_3dCSCG_ScalarField of ftype {SELF.BC.ftype}.')
elif SELF.TW.BC.body.__class__.__name__ == '_3dCSCG_VectorField':
if SELF.BC.ftype == 'standard': # we will discretize the norm flux of the vector.
return self._standard_vector_(
update_cochain=False, target='BC', **kwargs)
elif SELF.BC.ftype == 'boundary-wise': # we will discretize the norm flux of the vector.
return self._boundary_wise_vector_(
**kwargs) # must be False update_cochain and 'BC' target.
else:
raise Exception(f'3dCSCG 2-trace can not (target BC) discretize '
f'_3dCSCG_VectorField of ftype {SELF.BC.ftype}.')
else:
raise NotImplementedError(f'3dCSCG 2-trace can not (target BC) '
f'discretize {SELF.TW.BC.body.__class__}.')
else:
raise NotImplementedError(f"target={target} not implemented "
f"for 3d CSCG 2-trace form discretization.")
if __name__ == '__main__':
# mpiexec -n 5 python _3dCSCG\forms\trace\_2_trace\discretize\main.py
from objects.CSCG._3d.master import MeshGenerator, SpaceInvoker, FormCaller
mesh = MeshGenerator('crazy', c=0.)([2,2,2])
space = SpaceInvoker('polynomials')([('Lobatto',5), ('Lobatto',5), ('Lobatto',5)])
FC = FormCaller(mesh, space)
py | 1a59a218da4ad725fd3b2b5be23909d67105d46c |
# This file is part of the Hotwire Shell user interface.
#
# Copyright (C) 2007 Colin Walters <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os, sys, logging, time, inspect, locale, gettext
import gtk, gobject, pango
from hotwire.externals.singletonmixin import Singleton
import hotwire_ui.widgets as hotwidgets
from hotwire_ui.odisp import MultiObjectsDisplay
from hotwire_ui.pixbufcache import PixbufCache
from hotwire.command import CommandQueue
from hotwire.async import QueueIterator
from hotwire.logutil import log_except
from hotwire_ui.oinspect import InspectWindow, ObjectInspectLink, ClassInspectorSidebar
_logger = logging.getLogger("hotwire.ui.Command")
class CommandStatusDisplay(gtk.HBox):
def __init__(self, cmdname):
super(CommandStatusDisplay, self).__init__(spacing=4)
self.__cmdname = cmdname
self.__text = gtk.Label()
self.pack_start(self.__text, expand=False)
self.__progress = gtk.ProgressBar()
self.__progress_visible = False
def set_status(self, text, progress):
if self.__cmdname:
text = self.__cmdname + ' ' + text
self.__text.set_text(text)
if progress >= 0:
if not self.__progress_visible:
self.__progress_visible = True
self.pack_start(self.__progress, expand=False)
self.__progress.show()
self.__progress.set_fraction(progress/100.0)
class CommandExecutionHeader(gtk.VBox):
__gsignals__ = {
"action" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
"expand-inspector" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_BOOLEAN,)),
"setvisible" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
"complete" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
}
def __init__(self, context, pipeline, odisp, overview_mode=True, **args):
super(CommandExecutionHeader, self).__init__(**args)
self.__context = context
self.__pipeline = pipeline
self.__overview_mode = overview_mode
self.__primary_complete = False
self.__complete_unseen = False
self.__last_view_time = None
self.__visible = True
self.__prev_pipeline_state = None
self.__cancelled = False
self.__undone = False
self.__exception = False
self.__mouse_hovering = False
self.__throbber_pixbuf_done = PixbufCache.getInstance().get('throbber-done.gif', size=None)
self.__throbber_pixbuf_ani = PixbufCache.getInstance().get('throbber.gif', size=None, animation=True)
self.__tooltips = gtk.Tooltips()
self.__pipeline.connect("state-changed", self.__on_pipeline_state_change)
self.__pipeline.connect("metadata", self.__on_pipeline_metadata)
self.__main_hbox = gtk.HBox()
self.pack_start(self.__main_hbox, expand=True)
self.__cmdstatus_vbox = gtk.VBox()
self.__main_hbox.pack_start(self.__cmdstatus_vbox, expand=True)
self.__titlebox_ebox = gtk.EventBox()
self.__titlebox_ebox.set_visible_window(False)
if overview_mode:
self.__titlebox_ebox.add_events(gtk.gdk.BUTTON_PRESS_MASK
| gtk.gdk.ENTER_NOTIFY_MASK
| gtk.gdk.LEAVE_NOTIFY_MASK)
self.__titlebox_ebox.connect("enter_notify_event", self.__on_enter)
self.__titlebox_ebox.connect("leave_notify_event", self.__on_leave)
self.__titlebox_ebox.connect("button-press-event", lambda eb, e: self.__on_button_press(e))
self.__titlebox = gtk.HBox()
self.__titlebox_ebox.add(self.__titlebox)
self.__cmdstatus_vbox.pack_start(hotwidgets.Align(self.__titlebox_ebox), expand=False)
self.__pipeline_str = self.__pipeline.__str__()
self.__title = gtk.Label()
self.__title.set_alignment(0, 0.5)
#self.__title.set_selectable(True)
self.__title.set_ellipsize(True)
self.__state_image = gtk.Image()
self.__titlebox.pack_start(self.__state_image, expand=False)
self.__titlebox.pack_start(hotwidgets.Align(self.__title, padding_left=4), expand=True)
self.__statusbox = gtk.HBox()
self.__cmdstatus_vbox.pack_start(self.__statusbox, expand=False)
self.__status_left = gtk.Label()
self.__status_right = gtk.Label()
self.__statusbox.pack_start(hotwidgets.Align(self.__status_left, padding_left=4), expand=False)
self.__action = hotwidgets.Link()
self.__action.connect("clicked", self.__on_action)
self.__statusbox.pack_start(hotwidgets.Align(self.__action), expand=False)
self.__statusbox.pack_start(hotwidgets.Align(self.__status_right), expand=False)
self.__undoable = self.__pipeline.get_undoable() and (not self.__pipeline.get_idempotent())
status_cmds = list(pipeline.get_status_commands())
self.__pipeline_status_visible = False
if status_cmds:
self.__cmd_statuses = gtk.HBox(spacing=8)
show_cmd_name = len(status_cmds) > 1
for cmdname in status_cmds:
self.__cmd_statuses.pack_start(CommandStatusDisplay(show_cmd_name and cmdname or None), expand=True)
self.__statusbox.pack_start(hotwidgets.Align(self.__cmd_statuses), expand=False)
else:
self.__cmd_statuses = None
self.__cmd_status_show_cmd = False
self.__objects = odisp
self.__objects.connect("primary-complete", self.__on_primary_complete)
self.__objects.connect("changed", lambda o: self.__update_titlebox())
self.__exception_box = gtk.HBox()
self.__exception_link = hotwidgets.Link()
self.__exception_link.set_alignment(0.0, 0.5)
self.__exception_link.set_ellipsize(True)
self.__exception_link.connect('clicked', self.__on_exception_clicked)
self.__exception_box.pack_start(self.__exception_link, expand=True)
self.__cmdstatus_vbox.pack_start(hotwidgets.Align(self.__exception_box, padding_left=4), expand=False)
if overview_mode:
self.__cmdstatus_vbox.pack_start(gtk.HSeparator(), expand=False)
self.__otype_expander = None
else:
self.__otype_expander = gtk.Expander('')
self.__otype_expander.unset_flags(gtk.CAN_FOCUS);
self.__otype_expander.set_use_markup(True)
self.__otype_expander.connect('notify::expanded', self.__on_otype_expander_toggled)
self.__main_hbox.pack_start(self.__otype_expander, expand=False)
def __on_otype_expander_toggled(self, *args):
self.emit('expand-inspector', self.__otype_expander.get_property('expanded'))
def __on_exception_clicked(self, link):
w = gtk.Dialog(_('Exception - Hotwire'), parent=link.get_toplevel(),
flags=0, buttons=(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
w.set_has_separator(False)
w.set_border_width(5)
w.set_size_request(640, 480)
scroll = gtk.ScrolledWindow()
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
view = gtk.TextView()
view.set_wrap_mode(True)
scroll.add(view)
w.vbox.pack_start(hotwidgets.Border(scroll), expand=True)
view.get_buffer().set_property('text', self.__pipeline.get_exception_info()[3])
w.show_all()
w.run()
w.destroy()
def set_inspector_expander_active(self, active):
self.__otype_expander.set_property('expanded', active)
def get_pipeline(self):
return self.__pipeline
def get_state(self):
return self.__pipeline.get_state()
def set_unseen(self, unseen):
self.__complete_unseen = unseen
_logger.debug("marking %s as unseen=%s", self.__pipeline, unseen)
self.__update_titlebox()
def update_viewed_time(self):
self.__last_view_time = time.time()
def get_viewed_time(self):
return self.__last_view_time
def get_visible(self):
return self.__visible
def scroll_up(self, full=False):
if self.__objects:
self.__objects.scroll_up(full)
def scroll_down(self, full=False):
if self.__objects:
self.__objects.scroll_down(full)
def disconnect(self):
self.__pipeline.disconnect()
def get_output_type(self):
return self.__pipeline.get_output_type()
def get_output(self):
# Can't just return objects directly as this can be
# called from other threads
# TODO make this actually async
queue = CommandQueue()
gobject.idle_add(self.__enqueue_output, queue)
for obj in QueueIterator(queue):
yield obj
def __enqueue_output(self, queue):
for obj in self.__objects.get_objects():
queue.put(obj)
queue.put(None)
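# Illustrative note (added): get_output() is safe to consume from a non-GUI
# thread, e.g.
#   for obj in header.get_output():
#       process(obj)          # 'header' and 'process' are placeholder names
# because __enqueue_output runs on the GTK main loop via gobject.idle_add and
# hands the objects over through the thread-safe CommandQueue.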
def __on_primary_complete(self, od):
self.__primary_complete = True
self.__on_pipeline_state_change(self.__pipeline)
@log_except(_logger)
def __on_action(self, *args):
_logger.debug("emitting action")
self.emit('action')
def get_objects_widget(self):
return self.__objects
def __update_titlebox(self):
if self.__mouse_hovering:
self.__title.set_markup('<tt><u>%s</u></tt>' % (gobject.markup_escape_text(self.__pipeline_str),))
else:
self.__title.set_markup('<tt>%s</tt>' % (gobject.markup_escape_text(self.__pipeline_str),))
if self.__objects:
ocount = self.__objects.get_ocount() or 0
status_str = self.__objects.get_status_str()
if status_str is None:
status_str = _('%d objects') % (ocount,)
else:
status_str = None
if self.__objects:
self.__tooltips.set_tip(self.__titlebox_ebox, self.__pipeline_str)
def set_status_action(status_text_left, action_text='', status_markup=False):
if action_text:
status_text_left += " ("
if status_text_left:
if status_markup:
self.__status_left.set_markup(status_text_left)
else:
self.__status_left.set_text(status_text_left)
else:
self.__status_left.set_text('')
if action_text:
self.__action.set_text(action_text)
self.__action.show()
else:
self.__action.set_text('')
self.__action.hide()
status_right_start = action_text and ')' or ''
status_right_end = self.__pipeline_status_visible and '; ' or ''
if status_str:
if status_text_left:
status_str_fmt = ', '
else:
status_str_fmt = ''
status_str_fmt += status_str
else:
status_str_fmt = ''
self.__status_right.set_text(status_right_start + status_str_fmt + status_right_end)
def _color(text, color):
return '<span foreground="%s">%s</span>' % (color,gobject.markup_escape_text(text))
def _markupif(tag, text, b):
if b:
return '<%s>%s</%s>' % (tag, text, tag)
return text
state = self.get_state()
if state == 'waiting':
set_status_action(_('Waiting...'))
elif state == 'cancelled':
set_status_action(_markupif('b', _color(_('Cancelled'), "red"), self.__complete_unseen), '', status_markup=True)
elif state == 'undone':
set_status_action(_markupif('b', _color(_('Undone'), "red"), self.__complete_unseen), '', status_markup=True)
elif state == 'exception':
set_status_action(_markupif('b', _('Exception'), self.__complete_unseen), '', status_markup=True)
elif state == 'executing':
set_status_action(_('Executing'), None)
elif state == 'complete':
set_status_action(_markupif('b', _('Complete'), self.__complete_unseen), None, status_markup=True)
if self.__otype_expander is not None:
otype = self.__objects.get_output_common_supertype()
if otype is not None:
self.__otype_expander.get_property('label-widget').set_markup('<b>%s</b> %s' % (_('Type:'), gobject.markup_escape_text(otype.__name__)))
def __on_pipeline_metadata(self, pipeline, cmdidx, cmd, key, flags, meta):
_logger.debug("got pipeline metadata idx=%d key=%s flags=%s", cmdidx, key, flags)
if key == 'hotwire.fileop.basedir':
self.__handle_basedir(cmdidx, meta)
return
if key == 'hotwire.status':
self.__handle_status(cmdidx, meta)
return
def __handle_basedir(self, cmdidx, meta):
_logger.debug("got basedir %s", meta)
def __handle_status(self, cmdidx, meta):
self.__pipeline_status_visible = True
statusdisp = self.__cmd_statuses.get_children()[cmdidx]
statusdisp.set_status(*meta)
self.__update_titlebox()
def __isexecuting(self):
state = self.__pipeline.get_state()
return (state == 'executing' or (state == 'complete' and not self.__primary_complete))
def __on_pipeline_state_change(self, pipeline):
state = self.__pipeline.get_state()
_logger.debug("state change to %s for pipeline %s", state, self.__pipeline_str)
isexecuting = self.__isexecuting()
self.__update_titlebox()
if state != 'exception':
self.__exception_box.hide()
if isexecuting:
self.__state_image.set_from_animation(self.__throbber_pixbuf_ani)
elif state == 'complete':
self.__state_image.set_from_pixbuf(self.__throbber_pixbuf_done)
elif state == 'cancelled':
self.__state_image.set_from_stock('gtk-dialog-error', gtk.ICON_SIZE_MENU)
elif state == 'undone':
self.__state_image.set_from_stock('gtk-dialog-warning', gtk.ICON_SIZE_MENU)
elif state == 'exception':
self.__state_image.set_from_stock('gtk-dialog-error', gtk.ICON_SIZE_MENU)
self.__exception_box.show()
excinfo = self.__pipeline.get_exception_info()
self.__exception_link.set_text("%s: %s" % (excinfo[0], excinfo[1]))
else:
raise Exception("Unknown state %s" % (state,))
self.emit("complete")
@log_except(_logger)
def __on_button_press(self, e):
if self.__overview_mode and e.button == 1:
self.emit('setvisible')
return True
elif (not self.__overview_mode) and e.button in (1,3):
menu = gtk.Menu()
def makemenu(name):
return self.__context.get_ui().get_action('/Menubar/WidgetMenuAdditions/ControlMenu/' + name).create_menu_item()
for action in ['Cancel', 'Undo']:
menu.append(makemenu(action))
menu.append(gtk.SeparatorMenuItem())
menu.append(self.__context.get_ui().get_action('/Menubar/FileMenu/FileDetachAdditions/DetachPipeline').create_menu_item())
menu.append(gtk.SeparatorMenuItem())
for action in ['RemovePipeline', 'UndoRemovePipeline']:
menu.append(makemenu(action))
menu.show_all()
menu.popup(None, None, None, e.button, e.time)
return True
return False
@log_except(_logger)
def __on_enter(self, w, c):
self.__talk_to_the_hand(True)
@log_except(_logger)
def __on_leave(self, w, c):
self.__talk_to_the_hand(False)
def __talk_to_the_hand(self, hand):
display = self.get_display()
cursor = None
if hand:
cursor = gtk.gdk.Cursor(display, gtk.gdk.HAND2)
self.window.set_cursor(cursor)
self.__mouse_hovering = hand
self.__update_titlebox()
class CommandExecutionDisplay(gtk.VBox):
def __init__(self, context, pipeline, odisp):
super(CommandExecutionDisplay, self).__init__()
self.odisp = odisp
self.cmd_header = CommandExecutionHeader(context, pipeline, odisp, overview_mode=False)
self.pack_start(self.cmd_header, expand=False)
self.pack_start(odisp, expand=True)
def cancel(self):
self.odisp.cancel()
self.cmd_header.get_pipeline().cancel()
def undo(self):
self.cmd_header.get_pipeline().undo()
class CommandExecutionHistory(gtk.VBox):
__gsignals__ = {
"show-command" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"command-action" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
def __init__(self, context):
super(CommandExecutionHistory, self).__init__()
self.__context = context
self.__cmd_overview = gtk.VBox()
self.__cmd_overview_scroll = scroll = gtk.ScrolledWindow()
scroll.set_property('hscrollbar-policy', gtk.POLICY_NEVER)
scroll.add_with_viewport(self.__cmd_overview)
self.pack_start(scroll, expand=True)
def add_pipeline(self, pipeline, odisp):
cmd = CommandExecutionHeader(self.__context, pipeline, odisp)
cmd.connect('action', self.__handle_cmd_action)
cmd.show_all()
cmd.connect("setvisible", self.__handle_cmd_show)
self.__cmd_overview.pack_start(cmd, expand=False)
@log_except(_logger)
def __handle_cmd_action(self, cmd):
self.emit('command-action', cmd)
def get_overview_list(self):
return self.__cmd_overview.get_children()
def remove_overview(self, oview):
self.__cmd_overview.remove(oview)
def get_scroll(self):
return self.__cmd_overview_scroll
def scroll_to_bottom(self):
vadjust = self.__cmd_overview_scroll.get_vadjustment()
vadjust.value = max(vadjust.lower, vadjust.upper - vadjust.page_size)
@log_except(_logger)
def __handle_cmd_show(self, cmd):
self.emit("show-command", cmd)
class CommandExecutionControl(gtk.VBox):
# This may be a sucky policy, but it's less sucky than what came before.
COMPLETE_CMD_EXPIRATION_SECS = 5 * 60
__gsignals__ = {
"new-window" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
__gproperties__ = {
'pipeline-count' : (gobject.TYPE_INT, '', '',
0, 4096, 0, gobject.PARAM_READWRITE),
'executing-pipeline-count' : (gobject.TYPE_INT, '', '',
0, 4096, 0, gobject.PARAM_READWRITE),
'unseen-pipeline-count' : (gobject.TYPE_INT, '', '',
0, 4096, 0, gobject.PARAM_READWRITE)
}
def __init__(self, context):
super(CommandExecutionControl, self).__init__()
self.__ui_string = """
<ui>
<menubar name='Menubar'>
<menu action='FileMenu'>
<placeholder name='FileDetachAdditions'>
<menuitem action='DetachPipeline'/>
</placeholder>
</menu>
<menu action='EditMenu'>
<placeholder name='EditMenuAdditions'>
<menuitem action='Copy'/>
<separator/>
<menuitem action='Search'/>
<menuitem action='Input'/>
</placeholder>
</menu>
<menu action='ViewMenu'>
<menuitem action='Overview'/>
<separator/>
<menuitem action='Inspector'/>
<separator/>
<placeholder name='ViewMenuAdditions'/>
<menuitem action='PreviousCommand'/>
<menuitem action='NextCommand'/>
<separator/>
<menuitem action='PreviousUnseenCommand'/>
<menuitem action='LastCommand'/>
</menu>
<placeholder name='WidgetMenuAdditions'>
<menu action='ControlMenu'>
<menuitem action='Cancel'/>
<menuitem action='Undo'/>
<separator/>
<menuitem action='RemovePipeline'/>
<menuitem action='UndoRemovePipeline'/>
</menu>
</placeholder>
</menubar>
<accelerator action='ScrollHome'/>
<accelerator action='ScrollEnd'/>
<accelerator action='ScrollPgUp'/>
<accelerator action='ScrollPgDown'/>
</ui>"""
self.__actions = [
('DetachPipeline', gtk.STOCK_JUMP_TO, _('Detach _Pipeline'), '<control><shift>N', _('Create window from output'), self.__to_window_cb),
('Copy', gtk.STOCK_COPY, _('_Copy'), '<control>c', _('Copy output'), self.__copy_cb),
('Cancel', gtk.STOCK_CANCEL, _('_Cancel'), '<control><shift>c', _('Cancel current command'), self.__cancel_cb),
('Undo', gtk.STOCK_UNDO, _('_Undo'), None, _('Undo current command'), self.__undo_cb),
('Search', gtk.STOCK_FIND, _('_Search'), '<control>s', _('Search output'), self.__search_cb),
('Input', gtk.STOCK_EDIT, _('_Input'), '<control>i', _('Send input'), self.__input_cb),
('ScrollHome', None, _('Output _Top'), '<control>Home', _('Scroll to output top'), self.__view_home_cb),
('ScrollEnd', None, _('Output _Bottom'), '<control>End', _('Scroll to output bottom'), self.__view_end_cb),
('ScrollPgUp', None, _('Output Page _Up'), 'Page_Up', _('Scroll output up'), self.__view_up_cb),
('ScrollPgDown', None, _('Output Page _Down'), 'Page_Down', _('Scroll output down'), self.__view_down_cb),
('ControlMenu', None, _('_Control')),
('RemovePipeline', gtk.STOCK_REMOVE, _('_Remove Pipeline'), '<control><shift>K', _('Remove current pipeline view'), self.__remove_pipeline_cb),
('UndoRemovePipeline', gtk.STOCK_UNDO, _('U_ndo Remove Pipeline'), '<control><shift>J', _('Undo removal of current pipeline view'), self.__undo_remove_pipeline_cb),
('PreviousCommand', gtk.STOCK_GO_UP, _('_Previous'), '<control>Up', _('View previous command'), self.__view_previous_cb),
('NextCommand', gtk.STOCK_GO_DOWN, _('_Next'), '<control>Down', _('View next command'), self.__view_next_cb),
('PreviousUnseenCommand', gtk.STOCK_GO_UP, _('Previous _Unseen'), '<control><shift>Up', _('View most recent unseen command'), self.__view_previous_unseen_cb),
('LastCommand', gtk.STOCK_GOTO_BOTTOM, _('Last'), '<control><shift>Down', _('View most recent command'), self.__view_last_cb),
]
self.__toggle_actions = [
('Overview', None, _('_Overview'), '<control><shift>o', _('Toggle overview'), self.__overview_cb),
('Inspector', None, _('_Inspector'), '<control><shift>I', _('Toggle inspector'), self.__inspector_cb),
]
self.__action_group = gtk.ActionGroup('HotwireActions')
self.__action_group.add_actions(self.__actions)
self.__action_group.add_toggle_actions(self.__toggle_actions)
self.__action_group.get_action('Overview').set_active(False)
self.__context = context
# Holds a reference to the signal handler id for the "changed" signal on the current odisp
# so we know when to reload any metadata
self.__odisp_changed_connection = None
self.__header = gtk.HBox()
def create_arrow_button(action_name):
action = self.__action_group.get_action(action_name)
icon = action.create_icon(gtk.ICON_SIZE_MENU)
button = gtk.Button(label='x')
button.connect('clicked', lambda *args: action.activate())
action.connect("notify::sensitive", lambda *args: button.set_sensitive(action.get_sensitive()))
button.set_property('image', icon)
button.set_focus_on_click(False)
return button
self.__header_label = create_arrow_button('PreviousCommand')
self.__header.pack_start(self.__header_label, expand=False)
self.__header_exec_label = gtk.Label()
self.__header.pack_start(self.__header_exec_label, expand=False)
self.pack_start(self.__header, expand=False)
self.__cmd_paned = gtk.HPaned()
self.pack_start(self.__cmd_paned, expand=True)
self.__cmd_notebook = gtk.Notebook()
self.__cmd_paned.pack1(self.__cmd_notebook, resize=True)
self.__cmd_notebook.connect('switch-page', self.__on_page_switch)
self.__cmd_notebook.set_show_tabs(False)
self.__cmd_notebook.set_show_border(False)
self.__inspector = ClassInspectorSidebar()
self.__cmd_paned.pack2(self.__inspector, resize=False)
self.__cmd_overview = CommandExecutionHistory(self.__context)
self.__cmd_overview.show_all()
self.__cmd_overview.set_no_show_all(True)
self.__cmd_overview.connect('show-command', self.__on_show_command)
self.__cmd_overview.connect('command-action', self.__handle_cmd_overview_action)
self.pack_start(self.__cmd_overview, expand=True)
self.__footer = gtk.HBox()
self.__footer_label = create_arrow_button('NextCommand')
self.__footer.pack_start(self.__footer_label, expand=False)
self.__footer_exec_label = gtk.Label()
self.__footer.pack_start(self.__footer_exec_label, expand=False)
self.pack_start(self.__footer, expand=False)
self.__complete_unseen_pipelines = set()
self.__history_visible = False
self.__inspector_visible = False
self.__prevcmd_count = 0
self.__prevcmd_executing_count = 0
self.__nextcmd_count = 0
self.__nextcmd_executing_count = 0
self.__idle_command_gc_id = 0
self.__actively_destroyed_pipeline_box = []
self.__sync_visible()
self.__sync_cmd_sensitivity()
def get_ui(self):
return (self.__ui_string, self.__action_group, None)
def __get_complete_commands(self):
for child in self.__iter_cmds():
if child.get_state() != 'executing':
yield child
def __iter_cmds(self):
for child in self.__cmd_notebook.get_children():
yield child.cmd_header
def add_cmd_widget(self, cmd):
pipeline = cmd.cmd_header.get_pipeline()
pipeline.connect('state-changed', self.__on_pipeline_state_change)
self.__cmd_overview.add_pipeline(pipeline, cmd.odisp)
pgnum = self.__cmd_notebook.append_page(cmd)
self.__cmd_notebook.set_current_page(pgnum)
self.__sync_visible()
self.__sync_display()
gobject.idle_add(lambda: self.__sync_display())
def add_pipeline(self, pipeline):
_logger.debug("adding child %s", pipeline)
pipeline.connect('state-changed', self.__on_pipeline_state_change)
odisp = MultiObjectsDisplay(self.__context, pipeline)
cmd = CommandExecutionDisplay(self.__context, pipeline, odisp)
cmd.cmd_header.connect('action', self.__handle_cmd_action)
cmd.cmd_header.connect('expand-inspector', self.__on_expand_inspector)
cmd.show_all()
pgnum = self.__cmd_notebook.append_page(cmd)
self.__cmd_notebook.set_current_page(pgnum)
self.__cmd_overview.add_pipeline(pipeline, odisp)
self.__sync_visible()
self.__sync_display(pgnum)
self.notify('pipeline-count')
# Garbage-collect old commands at this point
gobject.idle_add(self.__command_gc)
@log_except(_logger)
def __command_gc(self):
curtime = time.time()
changed = False
for cmd in self.__iter_cmds():
pipeline = cmd.get_pipeline()
if pipeline in self.__complete_unseen_pipelines:
continue
compl_time = pipeline.get_completion_time()
if not compl_time:
continue
lastview_time = cmd.get_viewed_time()
if curtime - lastview_time > self.COMPLETE_CMD_EXPIRATION_SECS:
changed = True
self.remove_pipeline(pipeline, destroy=True)
for cmdview in self.__actively_destroyed_pipeline_box:
cmdview.destroy()
self.__actively_destroyed_pipeline_box = []
self.__sync_cmd_sensitivity()
def __mark_pipeline_unseen(self, pipeline, unseen):
(cmdview, overview) = self.__get_widgets_for_pipeline(pipeline)
cmdview.cmd_header.set_unseen(unseen)
overview.set_unseen(unseen)
self.notify('unseen-pipeline-count')
def __get_widgets_for_pipeline(self, pipeline):
cmdview, overview = (None, None)
for child in self.__cmd_notebook.get_children():
if not child.cmd_header.get_pipeline() == pipeline:
continue
cmdview = child
for child in self.__cmd_overview.get_overview_list():
if not child.get_pipeline() == pipeline:
continue
overview = child
return (cmdview, overview)
def remove_pipeline(self, pipeline, disconnect=True, destroy=False):
if disconnect:
pipeline.disconnect()
try:
self.__complete_unseen_pipelines.remove(pipeline)
except KeyError, e:
pass
(cmdview, overview) = self.__get_widgets_for_pipeline(pipeline)
self.__cmd_notebook.remove(cmdview)
self.__cmd_overview.remove_overview(overview)
if destroy:
cmdview.destroy()
overview.destroy()
return None
self.notify('pipeline-count')
return (cmdview, overview)
@log_except(_logger)
def __handle_cmd_complete(self, *args):
self.__sync_cmd_sensitivity()
@log_except(_logger)
def __handle_cmd_overview_action(self, oview, cmd):
self.__handle_cmd_action(cmd)
@log_except(_logger)
def __handle_cmd_action(self, cmd):
pipeline = cmd.get_pipeline()
_logger.debug("handling action for %s", pipeline)
if pipeline.validate_state_transition('cancelled'):
_logger.debug("doing cancel")
pipeline.cancel()
elif pipeline.validate_state_transition('undone'):
_logger.debug("doing undo")
pipeline.undo()
else:
raise ValueError("Couldn't do action %s from state %s" % (action,cmd.cmd_header.get_pipeline().get_state()))
@log_except(_logger)
def __on_show_command(self, overview, cmd):
_logger.debug("showing command %s", cmd)
target = None
for child in self.__cmd_notebook.get_children():
if child.cmd_header.get_pipeline() == cmd.get_pipeline():
target = child
break
if target:
pgnum = self.__cmd_notebook.page_num(target)
self.__cmd_notebook.set_current_page(pgnum)
self.__action_group.get_action("Overview").activate()
from hotwire_ui.shell import locate_current_shell
hw = locate_current_shell(self)
hw.grab_focus()
def get_current(self):
cmd = self.get_current_cmd(full=True)
return cmd and cmd.odisp
def get_current_cmd(self, full=False, curpage=None):
if curpage is not None:
page = curpage
else:
page = self.__cmd_notebook.get_current_page()
if page < 0:
return None
cmd = self.__cmd_notebook.get_nth_page(page)
if full:
return cmd
return cmd.cmd_header
def __copy_cb(self, a):
_logger.debug("doing copy cmd")
cmd = self.get_current_cmd(full=True)
cmd.odisp.do_copy()
def __cancel_cb(self, a):
_logger.debug("doing cancel cmd")
cmd = self.get_current_cmd(full=True)
cmd.cancel()
def __undo_cb(self, a):
_logger.debug("doing undo cmd")
cmd = self.get_current_cmd(full=True)
cmd.undo()
def __search_cb(self, a):
cmd = self.get_current_cmd(full=True)
top = self.get_toplevel()
lastfocused = top.get_focus()
cmd.odisp.start_search(lastfocused)
def __input_cb(self, a):
cmd = self.get_current_cmd(full=True)
top = self.get_toplevel()
lastfocused = top.get_focus()
cmd.odisp.start_input(lastfocused)
def __view_previous_cb(self, a):
self.open_output(True)
def __view_next_cb(self, a):
self.open_output(False)
def __view_previous_unseen_cb(self, a):
target = None
for cmd in self.__iter_cmdslice(False):
pipeline = cmd.odisp.get_pipeline()
if pipeline in self.__complete_unseen_pipelines:
target = cmd
break
if target:
pgnum = self.__cmd_notebook.page_num(target)
self.__cmd_notebook.set_current_page(pgnum)
def __view_last_cb(self, a):
self.__cmd_notebook.set_current_page(self.__cmd_notebook.get_n_pages()-1)
def __view_home_cb(self, a):
self.__do_scroll(True, True)
def __view_end_cb(self, a):
self.__do_scroll(False, True)
def __view_up_cb(self, a):
self.__do_scroll(True, False)
def __view_down_cb(self, a):
self.__do_scroll(False, False)
def __to_window_cb(self, a):
cmd = self.get_current_cmd(full=True)
pipeline = cmd.cmd_header.get_pipeline()
#pipeline.disconnect('state-changed', self.__on_pipeline_state_change)
(cmdview, overview) = self.remove_pipeline(pipeline, disconnect=False)
self.emit('new-window', cmdview)
self.__sync_display()
def __remove_pipeline_cb(self, a):
cmd = self.get_current_cmd(full=True)
pipeline = cmd.cmd_header.get_pipeline()
_logger.debug("doing remove of %s", pipeline)
(cmdview, overview) = self.remove_pipeline(pipeline, disconnect=False)
overview.destroy()
self.__actively_destroyed_pipeline_box.append(cmdview)
self.__sync_display()
def __undo_remove_pipeline_cb(self, a):
cmd = self.__actively_destroyed_pipeline_box.pop()
_logger.debug("undoing remove of %s", cmd)
pgnum = self.__cmd_notebook.append_page(cmd)
self.__cmd_notebook.set_current_page(pgnum)
self.__cmd_overview.add_pipeline(cmd.cmd_header.get_pipeline(), cmd.odisp)
self.__sync_display(pgnum)
def __overview_cb(self, a):
self.__toggle_history_expanded()
def __on_expand_inspector(self, header, expand):
if self.__inspector_visible == (not not expand):
return
self.__action_group.get_action('Inspector').set_active(expand)
def __inspector_cb(self, a):
self.__inspector_visible = not self.__inspector_visible
self.__sync_inspector_expanded()
def __sync_inspector_expanded(self, nth=None):
self.__sync_visible()
curcmd = self.get_current_cmd(True, curpage=nth)
curcmd.cmd_header.set_inspector_expander_active(self.__inspector_visible)
def __vadjust(self, scroll, pos, full):
adjustment = scroll.get_vadjustment()
if not full:
val = scroll.get_vadjustment().page_increment
if not pos:
val = 0 - val;
newval = adjustment.value + val
else:
if pos:
newval = adjustment.upper
else:
newval = adjustment.lower
newval = max(min(newval, adjustment.upper-adjustment.page_size), adjustment.lower)
adjustment.value = newval
def __do_scroll(self, prev, full):
if self.__history_visible:
scroll = self.__cmd_overview.get_scroll()
self.__vadjust(scroll, not prev, full)
return
cmd = self.get_current_cmd()
if prev:
cmd.scroll_up(full)
else:
cmd.scroll_down(full)
def __toggle_history_expanded(self):
self.__history_visible = not self.__history_visible
_logger.debug("history visible: %s", self.__history_visible)
self.__sync_visible()
self.__sync_cmd_sensitivity()
self.__sync_display()
if self.__history_visible:
self.__cmd_overview.scroll_to_bottom()
def __sync_visible(self):
if self.__history_visible:
self.__cmd_overview.show()
self.__cmd_paned.hide()
self.__header.hide()
self.__footer.hide()
else:
self.__cmd_overview.hide()
self.__cmd_paned.show()
if self.__inspector_visible:
self.__inspector.show()
else:
self.__inspector.hide()
if self.__nextcmd_count > 0:
self.__header.show()
self.__footer.show()
@log_except(_logger)
def __on_pipeline_state_change(self, pipeline):
_logger.debug("handling state change to %s", pipeline.get_state())
if pipeline.is_complete():
self.__complete_unseen_pipelines.add(pipeline)
self.__mark_pipeline_unseen(pipeline, True)
self.__sync_display()
def __sync_cmd_sensitivity(self, curpage=None):
actions = map(self.__action_group.get_action, ['Copy', 'Cancel', 'PreviousCommand', 'NextCommand', 'Undo',
'Input', 'RemovePipeline', 'DetachPipeline',
'PreviousUnseenCommand', 'LastCommand', 'UndoRemovePipeline'])
if self.__history_visible:
for action in actions:
action.set_sensitive(False)
cmd = None
return
else:
undoidx = 10
actions[undoidx].set_sensitive(len(self.__actively_destroyed_pipeline_box) > 0)
cmd = self.get_current_cmd(full=True, curpage=curpage)
if not cmd:
for action in actions[:undoidx]:
action.set_sensitive(False)
return
pipeline = cmd.cmd_header.get_pipeline()
_logger.debug("sync sensitivity page %s pipeline: %s", curpage, cmd.cmd_header.get_pipeline().get_state())
cancellable = not not (pipeline.validate_state_transition('cancelled'))
undoable = not not (pipeline.validate_state_transition('undone'))
_logger.debug("cancellable: %s undoable: %s", cancellable, undoable)
actions[1].set_sensitive(cancellable)
actions[4].set_sensitive(undoable)
actions[5].set_sensitive(pipeline.get_state() == 'executing' and cmd.odisp.supports_input() or False)
actions[6].set_sensitive(pipeline.is_complete())
actions[7].set_sensitive(True)
actions[2].set_sensitive(self.__prevcmd_count > 0)
actions[3].set_sensitive(self.__nextcmd_count > 0)
actions[8].set_sensitive(len(self.__complete_unseen_pipelines) > 0)
npages = self.__cmd_notebook.get_n_pages()
if curpage is None:
curpage = self.__cmd_notebook.get_current_page()
actions[9].set_sensitive(npages > 0 and curpage < npages-1)
def __sync_display(self, nth=None):
def set_label(container, label, n, label_exec, n_exec, n_done):
if n <= 0 or self.__history_visible:
container.hide_all()
return
container.show_all()
label.set_label(gettext.ngettext(' %d pipeline' % (n,), ' %d pipelines' % (n,), n))
if n_exec > 0 and n_done > 0:
label_exec.set_markup(_(' %d executing, <b>%d complete</b>') % (n_exec, n_done))
elif n_done > 0:
label_exec.set_markup(_(' <b>%d complete</b>') % (n_done,))
elif n_exec > 0:
label_exec.set_label(_(' %d executing') % (n_exec,))
else:
label_exec.set_label('')
# FIXME - this is a bit of a hackish place to put this
curcmd = self.get_current_cmd(True, curpage=nth)
if curcmd:
current = curcmd.cmd_header
pipeline = current.get_pipeline()
_logger.debug("sync display, current=%s", pipeline)
if pipeline in self.__complete_unseen_pipelines:
self.__complete_unseen_pipelines.remove(pipeline)
self.__mark_pipeline_unseen(pipeline, False)
current.update_viewed_time()
self.__prevcmd_count = 0
self.__prevcmd_executing_count = 0
self.__prevcmd_complete_count = 0
self.__nextcmd_count = 0
self.__nextcmd_executing_count = 0
self.__nextcmd_complete_count = 0
for cmd in self.__iter_cmdslice(False, nth):
self.__prevcmd_count += 1
pipeline = cmd.odisp.get_pipeline()
if pipeline.get_state() == 'executing':
self.__prevcmd_executing_count += 1
if pipeline in self.__complete_unseen_pipelines:
self.__prevcmd_complete_count += 1
for cmd in self.__iter_cmdslice(True, nth):
self.__nextcmd_count += 1
pipeline = cmd.odisp.get_pipeline()
if pipeline.get_state() == 'executing':
self.__nextcmd_executing_count += 1
if pipeline in self.__complete_unseen_pipelines:
self.__nextcmd_complete_count += 1
self.notify('executing-pipeline-count')
# The idea here is to not take up the vertical space if we're viewing the last command.
if self.__nextcmd_count == 0:
self.__header.hide()
else:
set_label(self.__header, self.__header_label, self.__prevcmd_count, self.__header_exec_label, self.__prevcmd_executing_count, self.__prevcmd_complete_count)
set_label(self.__footer, self.__footer_label, self.__nextcmd_count, self.__footer_exec_label, self.__nextcmd_executing_count, self.__nextcmd_complete_count)
self.__sync_cmd_sensitivity(curpage=nth)
if curcmd:
if self.__odisp_changed_connection is not None:
(o, id) = self.__odisp_changed_connection
o.disconnect(id)
odisp = curcmd.odisp
self.__odisp_changed_connection = (odisp, odisp.connect("changed", self.__sync_odisp))
self.__sync_odisp(odisp)
@log_except(_logger)
def __sync_odisp(self, odisp):
self.__inspector.set_otype(odisp.get_output_common_supertype())
def __iter_cmdslice(self, is_end, nth_src=None):
if nth_src is not None:
nth = nth_src
else:
nth = self.__cmd_notebook.get_current_page()
n_pages = self.__cmd_notebook.get_n_pages()
if is_end:
r = xrange(nth+1, n_pages)
else:
r = xrange(0, nth)
for i in r:
yield self.__cmd_notebook.get_nth_page(i)
def __on_page_switch(self, notebook, page, nth):
self.__sync_display(nth=nth)
def open_output(self, do_prev=False, dry_run=False):
nth = self.__cmd_notebook.get_current_page()
n_pages = self.__cmd_notebook.get_n_pages()
_logger.debug("histmode: %s do_prev: %s nth: %s n_pages: %s", self.__history_visible, do_prev, nth, n_pages)
if do_prev and nth > 0:
target_nth = nth - 1
elif (not do_prev) and nth < n_pages-1:
target_nth = nth + 1
else:
return False
if dry_run:
return True
self.__cmd_notebook.set_current_page(target_nth)
from hotwire_ui.shell import locate_current_shell
hw = locate_current_shell(self)
hw.grab_focus()
def do_get_property(self, property):
if property.name == 'pipeline-count':
return self.__cmd_notebook.get_n_pages()
elif property.name == 'unseen-pipeline-count':
return len(self.__complete_unseen_pipelines)
elif property.name == 'executing-pipeline-count':
return self.__prevcmd_executing_count + self.__nextcmd_executing_count
else:
raise AttributeError('unknown property %s' % property.name)
def create_overview_button(self):
return OverviewButton(self, self.__action_group.get_action('Overview'))
def create_unseen_button(self):
return UnseenNotifyButton(self, self.__action_group.get_action('PreviousUnseenCommand'))
class OverviewButton(gtk.ToggleButton):
def __init__(self, outputs, overview_action):
super(OverviewButton, self).__init__()
self.__outputs = outputs
self.__tooltips = gtk.Tooltips()
self.__image = gtk.Image()
self.__image.set_property('pixbuf', PixbufCache.getInstance().get('throbber-done.gif', size=None))
self.set_property('image', self.__image)
self.set_focus_on_click(False)
outputs.connect('notify::pipeline-count', self.__on_pipeline_count_changed)
self.__cached_unseen_count = 0
self.__orig_bg = self.style.bg[gtk.STATE_NORMAL]
self.__idle_flash_count = 0
self.__idle_flash_id = 0
outputs.connect('notify::executing-pipeline-count', self.__on_pipeline_count_changed)
self.__on_pipeline_count_changed()
self.__overview_action = overview_action
overview_action.connect('notify::active', self.__on_overview_active_changed)
self.connect('notify::active', self.__on_self_active_changed)
def __on_pipeline_count_changed(self, *args):
(count, unseen_count, executing_count) = map(self.__outputs.get_property,
('pipeline-count', 'unseen-pipeline-count', 'executing-pipeline-count'))
self.set_label(_('%d (%d)') % (count, executing_count))
self.__tooltips.set_tip(self, _('%d total, %d executing, %d complete') % (count, executing_count, unseen_count))
def __start_idle_flash(self):
self.__idle_flash_count = 4
if self.__idle_flash_id == 0:
self.__idle_flash_id = gobject.timeout_add(250, self.__idle_flash)
@log_except(_logger)
def __idle_flash(self):
self.__idle_flash_count -= 1
if self.__idle_flash_count % 2 == 1:
self.style.bg[gtk.STATE_NORMAL] = "yellow"
else:
self.style.bg[gtk.STATE_NORMAL] = self.__orig_bg
if self.__idle_flash_count == 0:
self.__idle_flash_id = 0
return False
else:
return True
def __on_self_active_changed(self, *args):
ostate = self.__overview_action.get_active()
selfstate = self.get_property('active')
if ostate != selfstate:
self.__overview_action.set_active(selfstate)
def __on_overview_active_changed(self, *args):
self.set_active(self.__overview_action.get_active())
class UnseenNotifyButton(gtk.Button):
def __init__(self, outputs, prevunseen_action):
super(UnseenNotifyButton, self).__init__()
self.__tooltips = gtk.Tooltips()
self.__image = gtk.Image()
self.__image.set_from_stock(gtk.STOCK_GO_UP, gtk.ICON_SIZE_MENU)
self.set_property('image', self.__image)
self.set_focus_on_click(False)
self.__outputs = outputs
outputs.connect('notify::pipeline-count', self.__on_pipeline_count_changed)
self.__prev_unseen_action = prevunseen_action
outputs.connect('notify::unseen-pipeline-count', self.__on_pipeline_count_changed)
self.connect('clicked', self.__on_clicked)
def __on_pipeline_count_changed(self, *args):
unseen_count = self.__outputs.get_property('unseen-pipeline-count')
self.set_label(_('%d complete') % (unseen_count,))
if unseen_count > 0:
self.show()
else:
self.hide()
def __on_clicked(self, self2):
self.__prev_unseen_action.activate()
self.hide()
|
py
|
1a59a36e388a251b1b4299141c7b2f1219123352
|
from django.utils.text import slugify
from django.contrib.auth.models import AbstractUser
from django.db import models
from app.core.models import BaseModel
from .managers import UserManager
class Account(AbstractUser):
"""
Extension of Django's built-in authentication user model.
Uses Django's default auth framework for ease of use.
Inherited
---------
AbstractUser : class obj
Django auth model that provides the default user fields and permissions.
"""
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
profile_picture = models.ImageField(null=True, blank=True)
username = models.CharField(max_length=255)
first_name = models.CharField(max_length=150)
last_name = models.CharField(max_length=150)
preferred_name = models.CharField(max_length=128, null=True, blank=True)
secondary_email = models.EmailField(
max_length=64,
unique=True,
null=True,
blank=True,
)
region = models.CharField(max_length=64, null=True, blank=True)
country = models.CharField(max_length=40, null=True, blank=True)
longitude = models.FloatField(default=0)
latitude = models.FloatField(default=0)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def __str__(self):
return self.preferred_name or self.email
@property
def slug(self):
return slugify(self.preferred_name)
class AccountAddress(BaseModel):
"""
AccountAddress model stores the addresses of a given user, to be used
by our Courier model.
Inherited
---------
core.Basemodel : class obj
Contains the basic information of when this object was created etc.
It also helps deal with deletion records by setting is_active to
False.
"""
account = models.ForeignKey(to=Account, on_delete=models.CASCADE)
number_bulding = models.CharField(max_length=64, null=True, blank=True)
street = models.CharField(max_length=64, null=True, blank=True)
area = models.CharField(max_length=128, null=True, blank=True)
zipcode = models.IntegerField(default=0)
province = models.CharField(max_length=32, null=True, blank=True)
country = models.CharField(max_length=56, null=True, blank=True)
def __str__(self):
return '{} {}'.format(self.account.preferred_name, self.country)
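# Hedged usage sketch (added for illustration; it assumes the custom UserManager
# implements Django's usual create_user() interface, which is not shown here):
#
#   user = Account.objects.create_user(email="[email protected]", password="s3cr3t")
#   AccountAddress.objects.create(account=user, country="Philippines", zipcode=1000)
#   str(user)   # -> the preferred_name if set, otherwise the email address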
|
py
|
1a59a3c40dba032f402afe15a2b038e803004055
|
__all__ = [
"cfpq",
]
|
py
|
1a59a4d1030d904bb35d033a361a9669a0a2f3a7
|
#!/usr/bin/python2.7
import os, sys, shutil, platform, time
OUTPUT = ""
SOURCE = None
VERSIONS = 1
__OUTPUT = "bin/conf0%s"
COMPILER = "gcc"
INCLUDE = [ ]
__SOURCE = [
"src/0%s/*.c",
"src/0%s/lib/fs/*.c",
]
FLAGS = [ "-Wall", "-Wextra","--std=c99" ]
LINK = [ "m" ]
DEFINE = [ ]
EXTRA = ""
if platform.system() == "Windows":
OUTPUT += ".exe"
LINK += [ "mingw32" ]
FLAGS += [ "-mwindows" ]
if platform.system() == "Linux":
LINK += [ ]
if platform.system() == "Darwin":
LINK += [ ]
FLAGS += [ ]
DEFINE += [ ]
def fmt(fmt, dic):
for k in dic:
fmt = fmt.replace("{" + k + "}", str(dic[k]))
return fmt
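# Illustrative example of the substitution performed by fmt():
#   fmt("{cc} -o {out}", {"cc": "gcc", "out": "bin/demo"}) -> "gcc -o bin/demo"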
def clearup():
if os.path.isfile(OUTPUT):
os.remove(OUTPUT)
def main():
global OUTPUT, FLAGS, SOURCE, LINK
for i in range(VERSIONS + 1):
OUTPUT = __OUTPUT % i
SOURCE = [ ]
for j in range(len(__SOURCE)):
SOURCE += [ __SOURCE[j] % i ]
print "[conf0%d] initing..." % i
starttime = time.time()
clearup()
# Handle args
build = "debug" if "debug" in sys.argv else "release"
verbose = "verbose" in sys.argv
# Handle build type
if build == "debug":
FLAGS += [ "-g" ]
else:
FLAGS += [ "-O3" ]
print ("[conf0%d] building (" % i) + build + ")..."
# Make sure there aren't any temp files left over from a previous build
# Create directories
outdir = os.path.dirname(OUTPUT)
if not os.path.exists(outdir):
os.makedirs(outdir)
# Build
cmd = fmt(
"{compiler} -o {output} {flags} {source} {include} {link} {define} " +
"{extra}",
{
"compiler" : COMPILER,
"output" : OUTPUT,
"source" : " ".join(SOURCE),
"include" : " ".join(map(lambda x:"-I" + x, INCLUDE)),
"link" : " ".join(map(lambda x:"-l" + x, LINK)),
"define" : " ".join(map(lambda x:"-D" + x, DEFINE)),
"flags" : " ".join(FLAGS),
"extra" : EXTRA,
})
if verbose:
print cmd
print "[conf0%d] compiling..." % i
res = os.system(cmd)
if build == "release" and os.path.isfile(OUTPUT):
print "[conf0%d] stripping..." % i
os.system("strip %s" % OUTPUT)
if res == 0:
print "[conf0%d] done (%.2fs)" % (i, time.time() - starttime)
else:
print "[conf0%d] done with errors" % i
sys.exit(res)
if __name__ == "__main__":
main()
|
py
|
1a59a4da036df034978fc9e321dde8fcc872d7e4
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
# CHANGED manage.py will use development settings by
# default. Change the DJANGO_SETTINGS_MODULE environment variable
# to use an environment-specific settings file.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "appstore.settings.development")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
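# Illustrative override (the production settings module name below is an assumption):
#   DJANGO_SETTINGS_MODULE=appstore.settings.production python manage.py migrate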
|
py
|
1a59a524967bdb49d16f31ae1f6d78e6330e720d
|
import unittest
import context
from fizzbuzz import MainClass
#
# Example based on code here: https://docs.python.org/2/library/unittest.html
#
class TestFizzBuzz(unittest.TestCase):
def setUp(self):
self.fizzbuzz = MainClass()
def test_fizz_number(self):
result = self.fizzbuzz.fizz(4)
self.assertIsNotNone(result)
if __name__ == '__main__':
unittest.main()
|
py
|
1a59a548b43ed8e1ceab5088ad8d214abcff3945
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Siconos is a program dedicated to modeling, simulation and control
# of non smooth dynamical systems.
#
# Copyright 2021 INRIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# -----------------------------------------------------------------------
#
# DiodeBridge : sample of an electrical circuit involving :
# - a linear dynamical system consisting of an LC oscillator (1 µF , 10 mH)
# - a non smooth system (a 1000 Ohm resistor supplied through a 4
# diodes bridge) in parallel with the oscillator
#
# Expected behavior :
#
# The initial state (Vc = 10 V , IL = 0) of the oscillator provides
# an initial energy.
# The period is 2*pi*sqrt(L*C) ~ 0.628 ms.
# The non smooth system is a full wave rectifier :
# each phase (positive and negative) of the oscillation allows current to flow
# through the resistor in a constant direction, resulting in an energy loss :
# the oscillation damps.
#
# State variables :
# - the voltage across the capacitor (or inductor)
# - the current through the inductor
#
# Since there is only one dynamical system, the interaction is defined by :
# - complementarity laws between diodes current and
# voltage. Depending on the diode position in the bridge, y stands
# for the reverse voltage across the diode or for the diode
# current (see figure in the template file)
# - a linear time invariant relation between the state variables and
# y and lambda (derived from Kirchhoff laws)
#
# -----------------------------------------------------------------------
t0 = 0.0
T = 5.0e-3 # Total simulation time
h_step = 1.0e-6 # Time step
Lvalue = 1e-2 # inductance
Cvalue = 1e-6 # capacitance
Rvalue = 1e3 # resistance
Vinit = 10.0 # initial voltage
Modeltitle = "DiodeBridge"
withPlot = True
if (withPlot):
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import subplot, title, plot, grid, savefig
from siconos.kernel import FirstOrderLinearDS, FirstOrderLinearTIR, \
ComplementarityConditionNSL, Interaction,\
NonSmoothDynamicalSystem, EulerMoreauOSI, TimeDiscretisation, LCP, \
TimeStepping
#
# dynamical system
#
init_state = [Vinit, 0]
A = [[0, -1.0/Cvalue],
[1.0/Lvalue, 0 ]]
LSDiodeBridge = FirstOrderLinearDS(init_state, A)
#
# Interactions
#
C = [[0., 0.],
[0, 0.],
[-1., 0.],
[1., 0.]]
D = [[1./Rvalue, 1./Rvalue, -1., 0.],
[1./Rvalue, 1./Rvalue, 0., -1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]]
B = [[0., 0., -1./Cvalue, 1./Cvalue],
[0., 0., 0., 0. ]]
LTIRDiodeBridge = FirstOrderLinearTIR(C, B)
LTIRDiodeBridge.setDPtr(D)
nslaw = ComplementarityConditionNSL(4)
InterDiodeBridge = Interaction(nslaw, LTIRDiodeBridge)
#
# Model
#
DiodeBridge = NonSmoothDynamicalSystem(t0, T)
DiodeBridge.setTitle(Modeltitle)
# add the dynamical system in the non smooth dynamical system
DiodeBridge.insertDynamicalSystem(LSDiodeBridge)
# link the interaction and the dynamical system
DiodeBridge.link(InterDiodeBridge, LSDiodeBridge)
#
# Simulation
#
# (1) OneStepIntegrators
theta = 0.5
gamma = 0.5
aOSI = EulerMoreauOSI(theta, gamma)
aOSI.setUseGammaForRelation(True)
# (2) Time discretisation
aTiDisc = TimeDiscretisation(t0, h_step)
# (3) Non smooth problem
aLCP = LCP()
# (4) Simulation setup with (1) (2) (3)
aTS = TimeStepping(DiodeBridge, aTiDisc, aOSI, aLCP)
# end of model definition
#
# computation
#
k = 0
h = aTS.timeStep()
print("Timestep : ", h)
# Number of time steps
N = int((T - t0) / h)
print("Number of steps : ", N)
# Get the values to be plotted
# ->saved in a matrix dataPlot
from numpy import zeros
dataPlot = zeros([N, 10])
x = LSDiodeBridge.x()
print("Initial state : ", x)
y = InterDiodeBridge.y(0)
print("First y : ", y)
lambda_ = InterDiodeBridge.lambda_(0)
# For the initial time step:
# time
dataPlot[k, 0] = t0
# inductor voltage
dataPlot[k, 1] = x[0]
# inductor current
dataPlot[k, 2] = x[1]
# diode R1 current
dataPlot[k, 3] = y[0]
# diode R1 voltage
dataPlot[k, 4] = - lambda_[0]
# diode F2 voltage
dataPlot[k, 5] = - lambda_[1]
# diode F1 current
dataPlot[k, 6] = lambda_[2]
k += 1
while (k < N):
aTS.computeOneStep()
#aLCP.display()
dataPlot[k, 0] = aTS.nextTime()
# inductor voltage
dataPlot[k, 1] = x[0]
# inductor current
dataPlot[k, 2] = x[1]
# diode R1 current
dataPlot[k, 3] = y[0]
# diode R1 voltage
dataPlot[k, 4] = - lambda_[0]
# diode F2 voltage
dataPlot[k, 5] = - lambda_[1]
# diode F1 current
dataPlot[k, 6] = lambda_[2]
k += 1
aTS.nextStep()
# comparison with reference file
from siconos.kernel import SimpleMatrix, getMatrix
from numpy.linalg import norm
ref = getMatrix(SimpleMatrix("DiodeBridge.ref"))
error = norm(dataPlot[:,0:6] - ref[:,0:6])
print("error = " , error)
#assert (error < 1e-09)
withRef = True
if (withPlot):
#
# plots
#
subplot(411)
title('inductor voltage')
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 1])
if (withRef):
plot(ref[0:k - 1, 0], ref[0:k - 1, 1])
grid()
subplot(412)
title('inductor current')
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 2])
if (withRef):
plot(ref[0:k - 1, 0], ref[0:k - 1, 2])
grid()
subplot(413)
title('diode R1 (blue) and F2 (green) voltage')
plot(dataPlot[0:k - 1, 0], -dataPlot[0:k - 1, 4])
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 5])
if (withRef):
plot(ref[0:k - 1, 0], -ref[0:k - 1, 4])
plot(ref[0:k - 1, 0], ref[0:k - 1, 5])
grid()
subplot(414)
title('resistor current')
plot(dataPlot[0:k - 1, 0], dataPlot[0:k - 1, 3] + dataPlot[0:k - 1, 6] )
if (withRef):
plot(dataPlot[0:k - 1, 0], ref[0:k - 1, 3] + ref[0:k - 1, 6] )
grid()
savefig("diode_brige_tgs.png")
|
py
|
1a59a5db4e239fef5b4f32f54c5a805605c5cb7e
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: network.py
"""
import argparse
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.framework as framework
from paddle.fluid.executor import Executor
from source.utils.utils import str2bool
from source.utils.utils import id_to_text
from source.utils.utils import init_embedding
from source.utils.utils import build_data_feed
from source.utils.utils import load_id2str_dict
from source.inputters.corpus import KnowledgeCorpus
from source.models.knowledge_seq2seq import knowledge_seq2seq
# Parse command-line arguments into the model configuration
def model_config():
""" model config """
parser = argparse.ArgumentParser()
# Data command-line argument group
data_arg = parser.add_argument_group("Data")
data_arg.add_argument("--data_dir", type=str, default="./data/")
data_arg.add_argument("--data_prefix", type=str, default="demo")
data_arg.add_argument("--save_dir", type=str, default="./models/")
data_arg.add_argument("--vocab_path", type=str, default="./data/vocab.txt")
data_arg.add_argument("--embed_file", type=str,
default="./data/sgns.weibo.300d.txt")
# Network command-line argument group
net_arg = parser.add_argument_group("Network")
# **** word embedding dimension
net_arg.add_argument("--embed_size", type=int, default=300)
net_arg.add_argument("--hidden_size", type=int, default=800)
net_arg.add_argument("--bidirectional", type=str2bool, default=True)
# vocab_size below is re-set from the loaded vocab at training time
net_arg.add_argument("--vocab_size", type=int, default=30004)
# filter parameters for knowledge triples: each entity name length must be >= min_len and <= max_len
net_arg.add_argument("--min_len", type=int, default=1)
net_arg.add_argument("--max_len", type=int, default=500)
net_arg.add_argument("--num_layers", type=int, default=1)
net_arg.add_argument("--attn", type=str, default='dot',
choices=['none', 'mlp', 'dot', 'general'])
# Training / Testing command-line argument group
train_arg = parser.add_argument_group("Training")
# TODO: run_train.sh splits training into stage0 and stage1
train_arg.add_argument("--stage", type=int, default="0")
train_arg.add_argument("--run_type", type=str, default="train")
train_arg.add_argument("--init_model", type=str, default="")
train_arg.add_argument("--init_opt_state", type=str, default="")
train_arg.add_argument("--optimizer", type=str, default="Adam")
train_arg.add_argument("--lr", type=float, default=0.0005)
train_arg.add_argument("--grad_clip", type=float, default=5.0)
train_arg.add_argument("--dropout", type=float, default=0.3)
train_arg.add_argument("--num_epochs", type=int, default=13)
# if stage0 trains for x epochs, stage1 starts from epoch x+1 by default; x defaults to 5 if not specified.
train_arg.add_argument("--pretrain_epoch", type=int, default=5)
train_arg.add_argument("--use_bow", type=str2bool, default=True)
train_arg.add_argument("--use_posterior", type=str2bool, default=False)
# Generation
gen_arg = parser.add_argument_group("Generation")
gen_arg.add_argument("--beam_size", type=int, default=10)
gen_arg.add_argument("--max_dec_len", type=int, default=30)
gen_arg.add_argument("--length_average", type=str2bool, default=True)
gen_arg.add_argument("--output", type=str, default="./output/test.result")
gen_arg.add_argument("--model_path", type=str, default="./models/best_model/")
gen_arg.add_argument("--unk_id", type=int, default=1)
gen_arg.add_argument("--bos_id", type=int, default=2)
gen_arg.add_argument("--eos_id", type=int, default=3)
# MISC
misc_arg = parser.add_argument_group("Misc")
misc_arg.add_argument("--use_gpu", type=str2bool, default=True)
misc_arg.add_argument("--log_steps", type=int, default=300)
misc_arg.add_argument("--valid_steps", type=int, default=1000)
misc_arg.add_argument("--batch_size", type=int, default=1)
config = parser.parse_args()
return config
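# Illustrative invocation (added; flag names come from the parser above, but the
# values and the way the script is normally launched -- via run_train.sh -- are
# assumptions):
#   python -u network.py --run_type train --stage 0 --use_gpu True --batch_size 32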
def trace_fianl_result(final_score, final_ids, final_index, topk=1, EOS=3):
""" trace fianl result """
col_size = final_score.shape[1]
row_size = final_score.shape[0]
found_eos_num = 0
i = row_size - 1
beam_size = col_size
score = final_score[-1]
row_array = [row_size - 1] * beam_size
col_array = [e for e in range(col_size)]
while i >= 0:
for j in range(col_size - 1, -1, -1):
if final_ids[i, j] == EOS:
repalce_idx = beam_size - (found_eos_num % beam_size) - 1
score[repalce_idx] = final_score[i, j]
found_eos_num += 1
row_array[repalce_idx] = i
col_array[repalce_idx] = j
i -= 1
topk_index = np.argsort(score,)[-topk:]
trace_result = []
trace_score = []
for index in reversed(topk_index):
start_i = row_array[index]
start_j = col_array[index]
ids = []
for k in range(start_i, -1, -1):
ids.append(final_ids[k, start_j])
start_j = final_index[k, start_j]
ids = ids[::-1]
trace_result.append(ids)
trace_score.append(score[index])
return trace_result, trace_score
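# Illustrative call (added; the arrays are made up and only show the expected
# [dec_len, beam_size] shapes of the beam-search outputs being back-traced):
#   demo_score = np.array([[0.0, -0.1], [-0.5, -0.2], [-1.0, -0.3]], dtype='float32')
#   demo_ids   = np.array([[7, 9], [8, 3], [3, 5]], dtype='int64')
#   demo_index = np.array([[0, 0], [0, 1], [1, 1]], dtype='int64')
#   ids, scores = trace_fianl_result(demo_score, demo_ids, demo_index, topk=1, EOS=3)
#   # ids[0] is the best beam's token id sequence, ending with EOS (3).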
def load():
""" load model for predict """
config = model_config()
config.vocab_size = len(open(config.vocab_path).readlines())
final_score, final_ids, final_index = knowledge_seq2seq(config)
final_score.persistable = True
final_ids.persistable = True
final_index.persistable = True
main_program = fluid.default_main_program()
if config.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
fluid.io.load_params(executor=exe, dirname=config.model_path, main_program=main_program)
processors = KnowledgeCorpus(
data_dir=config.data_dir,
data_prefix=config.data_prefix,
vocab_path=config.vocab_path,
min_len=config.min_len,
max_len=config.max_len)
# load dict
id_dict_array = load_id2str_dict(config.vocab_path)
model_handle = [exe, place, final_score, final_ids, final_index, processors, id_dict_array]
return model_handle
def predict(model_handle, text):
""" predict for text by model_handle """
batch_size = 1
[exe, place, final_score, final_ids, final_index, processors, id_dict_array] = model_handle
data_generator = processors.preprocessing_for_lines([text], batch_size=batch_size)
results = []
for batch_id, data in enumerate(data_generator()):
data_feed, sent_num = build_data_feed(data, place, batch_size=batch_size)
out = exe.run(feed=data_feed,
fetch_list=[final_score.name, final_ids.name, final_index.name])
batch_score = out[0]
batch_ids = out[1]
batch_pre_index = out[2]
batch_score_arr = np.split(batch_score, batch_size, axis=1)
batch_ids_arr = np.split(batch_ids, batch_size, axis=1)
batch_pre_index_arr = np.split(batch_pre_index, batch_size, axis=1)
index = 0
for (score, ids, pre_index) in zip(batch_score_arr, batch_ids_arr, batch_pre_index_arr):
trace_ids, trace_score = trace_fianl_result(score, ids, pre_index, topk=1, EOS=3)
results.append(id_to_text(trace_ids[0][:-1], id_dict_array))
index += 1
if index >= sent_num:
break
return results[0]
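# Hedged usage sketch (added; the exact line format expected by
# KnowledgeCorpus.preprocessing_for_lines is not shown here, so the input string
# below is a placeholder):
#   model_handle = load()
#   reply = predict(model_handle, "<one preprocessed dialogue line>")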
# for stage0 -- train bow loss: init from random and the embedding file (if there is no embedding file, init the embedding layer randomly)
# for stage1 -- train overall loss: load from the stage0 pretrain or a previously saved checkpoint
def init_model(config, param_name_list, place):
""" init model """
stage = config.stage
if stage == 0:
for name in param_name_list:
t = fluid.global_scope().find_var(name).get_tensor()
init_scale = 0.05
np_t = np.asarray(t)
if str(name) == 'embedding':
np_para = init_embedding(config.embed_file, config.vocab_path,
init_scale, np_t.shape)
else:
np_para = np.random.uniform(-init_scale, init_scale, np_t.shape).astype('float32')
t.set(np_para.astype('float32'), place)
else:
model_init_file = config.init_model
try:
model_init = np.load(model_init_file)
        except Exception:
print("load init model failed", model_init_file)
raise Exception("load init model failed")
print("load init model")
for name in param_name_list:
t = fluid.global_scope().find_var(name).get_tensor()
t.set(model_init[str(name)].astype('float32'), place)
# load opt state
opt_state_init_file = config.init_opt_state
if opt_state_init_file != "":
print("begin to load opt state")
opt_state_data = np.load(opt_state_init_file)
for k, v in opt_state_data.items():
t = fluid.global_scope().find_var(str(k)).get_tensor()
t.set(v, place)
print("set opt state finished")
print("init model parameters finshed")
def train_loop(config,
train_generator, valid_generator,
main_program, inference_program,
model_handle, param_name_list, opt_var_name_list):
""" model train loop """
stage = config.stage
[exe, place, bow_loss, kl_loss, nll_loss, final_loss] = model_handle
    # total number of training steps
total_step = 0
start_epoch = 0 if stage == 0 else config.pretrain_epoch
end_epoch = config.pretrain_epoch if stage == 0 else config.num_epochs
print("stage"+str(stage)+"--- start epoch/end epoch: ", start_epoch, end_epoch)
best_score = float('inf')
for epoch_idx in range(start_epoch, end_epoch):
total_bow_loss = 0
total_kl_loss = 0
total_nll_loss = 0
total_final_loss = 0
sample_num = 0
for batch_id, data in enumerate(train_generator()):
data_feed = build_data_feed(data, place,
batch_size=config.batch_size,
is_training=True,
bow_max_len=config.max_len,
pretrain_epoch=epoch_idx < config.pretrain_epoch)
if data_feed is None:
break
out = exe.run(main_program, feed=data_feed,
fetch_list=[bow_loss.name, kl_loss.name, nll_loss.name, final_loss.name])
total_step += 1
total_bow_loss += out[0]
total_kl_loss += out[1]
total_nll_loss += out[2]
total_final_loss += out[3]
sample_num += 1
if batch_id > 0 and batch_id % config.log_steps == 0:
print("epoch %d step %d | "
"bow loss %0.6f kl loss %0.6f nll loss %0.6f total loss %0.6f" % \
(epoch_idx, batch_id,
total_bow_loss / sample_num, total_kl_loss / sample_num, \
total_nll_loss / sample_num, total_final_loss / sample_num))
total_bow_loss = 0
total_kl_loss = 0
total_nll_loss = 0
total_final_loss = 0
sample_num = 0
            # During training, run validation every config.valid_steps batches (steps) and save the best model so far
if batch_id > 0 and batch_id % config.valid_steps == 0:
eval_bow_loss, eval_kl_loss, eval_nll_loss, eval_total_loss = \
vaild_loop(config, valid_generator, inference_program, model_handle)
# save model
if stage != 0:
param_path = config.save_dir + "/" + str(total_step)
fluid.io.save_params(executor=exe, dirname=param_path,
main_program=main_program)
if eval_nll_loss < best_score:
# save to best
best_model_path = config.save_dir + "/best_model"
print("save to best", eval_nll_loss, best_model_path)
fluid.io.save_params(executor=exe, dirname=best_model_path,
main_program=main_program)
best_score = eval_nll_loss
eval_bow_loss, eval_kl_loss, eval_nll_loss, eval_total_loss = \
vaild_loop(config, valid_generator, inference_program, model_handle)
if stage != 0:
param_path = config.save_dir + "/" + str(total_step)
fluid.io.save_params(executor=exe, dirname=param_path,
main_program=main_program)
if eval_nll_loss < best_score:
best_model_path = config.save_dir + "/best_model"
print("save to best", eval_nll_loss, best_model_path)
fluid.io.save_params(executor=exe, dirname=best_model_path,
main_program=main_program)
best_score = eval_nll_loss
if stage == 0:
# save last model and opt_stat to npz for next stage init
save_model_file = config.save_dir + "/model_stage_0"
save_opt_state_file = config.save_dir + "/opt_state_stage_0"
model_stage_0 = {}
for name in param_name_list:
t = np.asarray(fluid.global_scope().find_var(name).get_tensor())
model_stage_0[name] = t
np.savez(save_model_file, **model_stage_0)
opt_state_stage_0 = {}
for name in opt_var_name_list:
t_data = np.asarray(fluid.global_scope().find_var(name).get_tensor())
opt_state_stage_0[name] = t_data
np.savez(save_opt_state_file, **opt_state_stage_0)
def vaild_loop(config, valid_generator, inference_program, model_handle):
""" model vaild loop """
[exe, place, bow_loss, kl_loss, nll_loss, final_loss] = model_handle
valid_num = 0.0
total_valid_bow_loss = 0.0
total_valid_kl_loss = 0.0
total_valid_nll_loss = 0.0
total_valid_final_loss = 0.0
for batch_id, data in enumerate(valid_generator()):
data_feed = build_data_feed(data, place,
batch_size=config.batch_size,
is_training=True,
bow_max_len=config.max_len,
pretrain_epoch=False)
if data_feed is None:
continue
val_fetch_outs = \
exe.run(inference_program,
feed=data_feed,
fetch_list=[bow_loss.name, kl_loss.name, nll_loss.name, final_loss.name])
total_valid_bow_loss += val_fetch_outs[0] * config.batch_size
total_valid_kl_loss += val_fetch_outs[1] * config.batch_size
total_valid_nll_loss += val_fetch_outs[2] * config.batch_size
total_valid_final_loss += val_fetch_outs[3] * config.batch_size
valid_num += config.batch_size
print("valid dataset: bow loss %0.6f kl loss %0.6f nll loss %0.6f total loss %0.6f" % \
(total_valid_bow_loss / valid_num, total_valid_kl_loss / valid_num, \
total_valid_nll_loss / valid_num, total_valid_final_loss / valid_num))
return [total_valid_bow_loss / valid_num, total_valid_kl_loss / valid_num, \
total_valid_nll_loss / valid_num, total_valid_final_loss / valid_num]
def test(config):
""" test """
batch_size = config.batch_size
config.vocab_size = len(open(config.vocab_path).readlines())
final_score, final_ids, final_index = knowledge_seq2seq(config)
final_score.persistable = True
final_ids.persistable = True
final_index.persistable = True
main_program = fluid.default_main_program()
if config.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
fluid.io.load_params(executor=exe, dirname=config.model_path,
main_program=main_program)
print("laod params finsihed")
# test data generator
processors = KnowledgeCorpus(
data_dir=config.data_dir,
data_prefix=config.data_prefix,
vocab_path=config.vocab_path,
min_len=config.min_len,
max_len=config.max_len)
test_generator = processors.data_generator(
batch_size=config.batch_size,
phase="test",
shuffle=False)
# load dict
id_dict_array = load_id2str_dict(config.vocab_path)
out_file = config.output
fout = open(out_file, 'w')
for batch_id, data in enumerate(test_generator()):
data_feed, sent_num = build_data_feed(data, place, batch_size=batch_size)
if data_feed is None:
break
out = exe.run(feed=data_feed,
fetch_list=[final_score.name, final_ids.name, final_index.name])
batch_score = out[0]
batch_ids = out[1]
batch_pre_index = out[2]
batch_score_arr = np.split(batch_score, batch_size, axis=1)
batch_ids_arr = np.split(batch_ids, batch_size, axis=1)
batch_pre_index_arr = np.split(batch_pre_index, batch_size, axis=1)
index = 0
for (score, ids, pre_index) in zip(batch_score_arr, batch_ids_arr, batch_pre_index_arr):
trace_ids, trace_score = trace_fianl_result(score, ids, pre_index, topk=1, EOS=3)
fout.write(id_to_text(trace_ids[0][:-1], id_dict_array))
fout.write('\n')
index += 1
if index >= sent_num:
break
fout.close()
def train(config):
""" model training """
config.vocab_size = len(open(config.vocab_path).readlines())
    # Build the network: Bi-GRU utterance encoder + Bi-GRU knowledge encoder + hierarchical GRU decoder
bow_loss, kl_loss, nll_loss, final_loss= knowledge_seq2seq(config)
    # Persistable variables are not deleted at the end of each iteration
bow_loss.persistable = True
kl_loss.persistable = True
nll_loss.persistable = True
final_loss.persistable = True
    # Ops and variables added through the fluid.layers API are stored in the default main program
main_program = fluid.default_main_program()
inference_program = fluid.default_main_program().clone(for_test=True)
    # Apply gradient clipping to the specified parameters
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=config.grad_clip))
optimizer = fluid.optimizer.Adam(learning_rate=config.lr)
if config.stage == 0:
print("stage 0")
optimizer.minimize(bow_loss)
else:
print("stage 1")
optimizer.minimize(final_loss)
    # Optimizer training parameters such as the learning rate
opt_var_name_list = optimizer.get_opti_var_name_list()
if config.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
exe = Executor(place)
    # default_startup_program() returns the default/global startup program used for initialization
exe.run(framework.default_startup_program())
    # block(0) is the outermost block of the program
param_list = main_program.block(0).all_parameters()
param_name_list = [p.name for p in param_list]
    # TODO: init includes
init_model(config, param_name_list, place)
processors = KnowledgeCorpus(
data_dir=config.data_dir,
data_prefix=config.data_prefix,
vocab_path=config.vocab_path,
min_len=config.min_len,
max_len=config.max_len)
    # train_generator is a yield-based generator function
    # It performs the following steps:
    #   - read the stream record file
    #   - tokenize the input text and filter by max/min length
    #   - use the vocabulary to convert the src/tgt/cue strings (chat path + knowledge + ":" + history / response / KG cue) into id sequences
    #   - pad the sequences and return the padded sequences together with their original lengths
train_generator = processors.train_generator(
batch_size=config.batch_size,
phase="train",
shuffle=True)
valid_generator = processors.data_generator(
batch_size=config.batch_size,
phase="dev",
shuffle=False)
model_handle = [exe, place, bow_loss, kl_loss, nll_loss, final_loss]
    # During training, run validation every config.valid_steps batches (steps) and save the best model so far
train_loop(config,
train_generator, valid_generator,
main_program, inference_program,
model_handle, param_name_list, opt_var_name_list)
if __name__ == "__main__":
    # parse command-line arguments into config
config = model_config()
    # mode: train / test
run_type = config.run_type
if run_type == "train":
train(config)
elif run_type == "test":
test(config)
|
py
|
1a59a75690fa532f488106da4e606771223db53a
|
__author__ = "arunrajms"
from django.db import transaction
from django.shortcuts import render
from rest_framework.views import APIView
from django.views.generic.base import View
from rest_framework.response import Response
from rest_framework import status
import json
from models import Switch
from serializer.SwitchSerializer import SwitchSerializer
from serializer.SwitchSerializer import SwitchGetSerializer
from usermanagement.utils import RequestValidator
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
class SwitchList(APIView):
    '''
    List all switches or create a new one.
    '''
    def dispatch(self, request, *args, **kwargs):
        me = RequestValidator(request.META)
        if me.user_is_exist():
            return super(SwitchList, self).dispatch(request, *args, **kwargs)
        else:
            resp = me.invalid_token()
            return JsonResponse(resp, status=status.HTTP_400_BAD_REQUEST)
def get(self, request, format=None):
switch = Switch.objects.all()
serializer = SwitchGetSerializer(switch, many=True)
return Response(serializer.data)
@transaction.atomic
def post(self, request, format=None):
serializer = SwitchSerializer(data=request.data)
if serializer.is_valid():
swi_object = Switch()
swi_object.model = serializer.data['model']
swi_object.name = serializer.data['name']
swi_object.image = serializer.data['image']
swi_object.slots = serializer.data['slots']
swi_object.tier = serializer.data['tier']
swi_object.line_cards = str(serializer.data['line_cards'])
swi_object.save()
serializer = SwitchGetSerializer(swi_object)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
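    # Illustrative JSON body for POST (all field values below are made up;
    # the actual types and choices are defined by SwitchSerializer):
    # {
    #     "model": "N9K", "name": "leaf-1", "image": "switch.png",
    #     "slots": 2, "tier": "leaf", "line_cards": [1, 2]
    # }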
class SwitchDetailList(APIView):
    '''
    Operations on a single switch.
    '''
    def dispatch(self, request, *args, **kwargs):
        me = RequestValidator(request.META)
        if me.user_is_exist():
            return super(SwitchDetailList, self).dispatch(request, *args, **kwargs)
        else:
            resp = me.invalid_token()
            return JsonResponse(resp, status=status.HTTP_400_BAD_REQUEST)
|
py
|
1a59a786b7f9ab772503a2aacce33ffdb416d75b
|
from dataclasses import dataclass
from output.models.ibm_data.instance_invalid.s3_4_2_4.s3_4_2_4ii06_xsd.s3_4_2_4ii06 import C1
__NAMESPACE__ = "http://xstest-tns/schema11_S3_4_2_4"
@dataclass
class Root(C1):
class Meta:
name = "root"
namespace = "http://xstest-tns/schema11_S3_4_2_4"
|
py
|
1a59a7cf0d4666982932fd70bd5210cd3b230e92
|
#!/usr/bin/env python
""" project creation and deletion check for v3 """
# We just want to see any exception that happens
# don't want the script to die under any circumstances
# script must try to clean itself up
# pylint: disable=broad-except
# pylint: disable=invalid-name
# pylint: disable=import-error
import argparse
import time
import logging
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ocutil = OCUtil()
commandDelay = 10 # seconds
def runOCcmd(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
oc_time = time.time()
oc_result = ocutil.run_user_cmd(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - oc_time))
return oc_result
def parse_args():
""" parse the args from the cli """
logger.debug("parse_args()")
parser = argparse.ArgumentParser(description='OpenShift project creation and deletion test')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--namespace', default="ops-project-operation-check",
help='namespace (be careful of using existing namespaces)')
return parser.parse_args()
def send_metrics(status_code_create, status_code_delete):
""" send data to MetricSender"""
logger.debug("send_metrics()")
ms_time = time.time()
ms = MetricSender()
logger.info("Send data to MetricSender")
# 1 means create and delete the project failed
ms.add_metric({'openshift.master.project.create': status_code_create})
ms.add_metric({'openshift.master.project.delete': status_code_delete})
ms.send_metrics()
logger.info("Data sent to Zagg in %s seconds", str(time.time() - ms_time))
def check_project(config):
""" check create and delete project """
logger.info('check_project()')
logger.debug(config)
project = None
try:
project = runOCcmd("get project {}".format(config.namespace))
logger.debug(project)
except Exception:
pass # don't want exception if project not found
if project:
project_exist = 1 # project exists
else:
        project_exist = 0 # project does not exist
return project_exist
def create_project(config):
" create the project "
try:
runOCcmd("new-project {}".format(config.namespace), base_cmd='oc adm')
time.sleep(commandDelay)
except Exception:
logger.exception('error creating new project')
def delete_project(config):
" delete the project "
try:
runOCcmd("delete project {}".format(config.namespace), base_cmd='oc')
time.sleep(commandDelay)
except Exception:
        logger.exception('error deleting project')
def main():
""" check the project operation status """
logger.debug("main()")
args = parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
ocutil.namespace = args.namespace
project_exists = check_project(args)
    # project does not exist.
delete_project_code = 0
if project_exists == 0:
logger.info("project does not exists, going to create it")
create_project(args)
create_project_code = check_project(args)
if create_project_code == 0:
# 0 means project creation failed, no project was created
logger.info('project creation failed')
else:
# project creation succeed, then delete the project
delete_project(args)
delete_project_code = check_project(args)
if delete_project_code == 1:
# 1 means project deletion failed, the project still exists
                # give the deletion a second chance: 10 more seconds to check the
                # project while it is in Terminating status
delete_project_code = check_project(args)
if delete_project_code == 1:
logger.info('project deletion failed in 20s')
else:
delete_project_code = 0
else:
# the project already exists, so I think the project creation failed
create_project_code = 0
#logger.info("{} {}".format(create_project_code, delete_project_code))
if create_project_code == 1 and delete_project_code == 0:
        logger.info('creation and deletion succeeded, no data was sent to zagg')
send_metrics(create_project_code, delete_project_code)
if __name__ == "__main__":
main()
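# Illustrative invocation (the script filename is assumed; the namespace below
# is the default defined in parse_args):
#   python project_operation_check.py -v --namespace ops-project-operation-check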
|
py
|
1a59a80a1f2ce578765cbd74b74e94de2e2d81c1
|
#!/usr/bin/python
import os
import sys
import numpy as np
import biotite
import biotite.structure as struc
import biotite.database.rcsb as rcsb
import biotite.structure.io.pdb as pdb
import biotite.structure.io as strucio
def Nano(angstrom):
'''Convert angstrom to nanometer'''
nano = angstrom / 10
return(nano)
def DiameterA(TheFile):
'''
    Find the diameter of a protein's structure accurately; requires lots
of memory and crashes for big structures. Here we broadcast the array
against itself, calculating all pairwise distances between points.
This is a bad idea, because we have N*(N-1) = (1e6)**2 = 1 trillion
pairs! This will raise a MemoryError for N=1 million, as it requires
half a million gigabytes!!
'''
# Get atom coordinates
atom_array = strucio.load_structure(TheFile)
# Remove all non-amino acids atoms
atom_array = atom_array[struc.filter_amino_acids(atom_array)]
# Coordinates as a NumPy array
coord = atom_array.coord
# Calculate all pairwise difference vectors
diff = coord[:, np.newaxis, :] - coord[np.newaxis, :, :]
# Calculate absolute of difference vectors -> square distances
sq_dist = np.sum(diff*diff, axis=-1)
    # Maximum pairwise (squared) distance -> diameter
    diameter = np.sqrt(np.max(sq_dist))
return(round(diameter, 3))
def Diameter(TheFile):
'''
    Find the diameter of a protein's structure approximately; requires less
    memory and is therefore suitable for big structures
'''
# Get atom coordinates
atom_array = strucio.load_structure(TheFile)
# Remove all non-amino acids atoms
atom_array = atom_array[struc.filter_amino_acids(atom_array)]
# Coordinates as a NumPy array
coord = atom_array.coord
# Find geometric center
center = np.mean(coord, axis=0)
# Find largest distance from center -> diameter
diameter = 2*np.sqrt(np.sum((coord - center)**2, axis=-1)).max()
return(round(diameter, 3))
def main():
directory = sys.argv[1]
filelist = os.listdir(directory)
for File in filelist:
try:
diameter = DiameterA('{}/{}'.format(directory, File))
diameternano = round(Nano(diameter), 3)
print('{} = {} A\t{} nm'.format(File, diameter, diameternano))
        except Exception:
diameter = Diameter('{}/{}'.format(directory, File))
diameternano = round(Nano(diameter), 3)
print('{} = {} A\t{} nm'.format(File, diameter, diameternano))
if __name__ == '__main__': main()
|
py
|
1a59a81393a9eba19fe9e3865d7f5a6454464b97
|
import sys
# package needs to be installed: pip install docker
import docker
import time
import yaml
import os
import random
import subprocess
import signal
import urllib2
import shutil
import xlwt
# package needs to be installed: apt-get install python-pymongo
import pymongo
auto = False
private_registry = "202.114.10.146:9999/"
suffix = "-gear"
apppath = ""
# run parameters
hostPort = 27017
localVolume = "/var/lib/gear/volume"
pwd = os.getcwd()
runEnvironment = ["MONGO_INITDB_ROOT_USERNAME=bian",
"MONGO_INITDB_ROOT_PASSWORD=1122",
"MONGO_INITDB_DATABASE=games", ]
runPorts = {"27017/tcp": hostPort, }
runVolumes = {localVolume: {'bind': '/data/db', 'mode': 'rw'},}
runWorking_dir = ""
runCommand = "echo hello"
waitline = "hello"
# result
result = [["tag", "finishTime", "local data", "pull data"], ]
class Runner:
def __init__(self, images):
self.images_to_pull = images
def check(self):
# detect whether the file exists, if true, delete it
if os.path.exists("./images_run.txt"):
os.remove("./images_run.txt")
def run(self):
self.check()
client = docker.from_env()
        # if no tag is given, all images under this registry will be pulled
repos = self.images_to_pull[0]["repo"]
for repo in repos:
tags = self.images_to_pull[1][repo]
for tag in tags:
private_repo = private_registry + repo + suffix + ":" + tag
if localVolume != "":
if os.path.exists(localVolume) == False:
os.makedirs(localVolume)
print "start running: ", private_repo
# create a random name
runName = '%d' % (random.randint(1,100000000))
# get present time
startTime = time.time()
# get present net data
cnetdata = get_net_data()
# run images
container = client.containers.create(image=private_repo, environment=runEnvironment,
ports=runPorts, volumes=runVolumes, working_dir=runWorking_dir,
command=runCommand, name=runName, detach=True)
container.start()
while True:
if waitline == "":
break
elif container.logs().find(waitline) >= 0:
break
else:
time.sleep(0.1)
pass
# print run time
finishTime = time.time() - startTime
print "finished in " , finishTime, "s"
container_path = os.path.join("/var/lib/gear/private", private_repo)
local_data = subprocess.check_output(['du','-sh', container_path]).split()[0].decode('utf-8')
print "local data: ", local_data
pull_data = get_net_data() - cnetdata
print "pull data: ", pull_data
print "\n"
try:
container.kill()
except:
print "kill fail!"
pass
container.remove(force=True)
# cmd = '%s kill %s' % ("docker", runName)
# rc = os.system(cmd)
# assert(rc == 0)
# record the image and its Running time
result.append([tag, finishTime, local_data, pull_data])
if auto != True:
raw_input("Next?")
else:
time.sleep(5)
if localVolume != "":
shutil.rmtree(localVolume)
class Generator:
def __init__(self, profilePath=""):
self.profilePath = profilePath
def generateFromProfile(self):
if self.profilePath == "":
print "Error: profile path is null"
with open(self.profilePath, 'r') as f:
self.images = yaml.load(f)
return self.images
def get_net_data():
netCard = "/proc/net/dev"
fd = open(netCard, "r")
for line in fd.readlines():
if line.find("enp0s3") >= 0:
field = line.split()
data = float(field[1]) / 1024.0 / 1024.0
fd.close()
return data
if __name__ == "__main__":
if len(sys.argv) == 2:
auto = True
generator = Generator(os.path.split(os.path.realpath(__file__))[0]+"/image_versions.yaml")
images = generator.generateFromProfile()
runner = Runner(images)
runner.run()
# create a workbook sheet
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("run_time")
for row in range(len(result)):
for column in range(len(result[row])):
sheet.write(row, column, result[row][column])
workbook.save(os.path.split(os.path.realpath(__file__))[0]+"/first_run.xls")
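# Illustrative invocation (the script filename is assumed; any second argument
# enables auto mode, per the sys.argv check above):
#   python run_images.py          # interactive: waits for "Next?" between images
#   python run_images.py auto     # auto mode: sleeps 5s between images instead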
|
py
|
1a59a820b4c4e56c566a1e650ad15f7a4077028c
|
import os, re
def parse_file(path):
script_dir = os.path.dirname(__file__)
f = open(os.path.join(script_dir, path), 'r')
passports = []
passport = {}
for line in f:
line = line.strip()
if line:
fields = line.split()
for field in fields:
key, value = field.split(":")
passport[key] = value
else:
passports.append(passport)
passport = {}
if passport:
passports.append(passport)
return passports
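# Illustrative input format handled by parse_file (records are separated by
# blank lines; the values below are made up):
#
#   ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
#   byr:1937 iyr:2017 cid:147 hgt:183cm
#
#   hcl:#cfa07d byr:1929 iyr:2013
#   ecl:amb cid:350 eyr:2023 pid:028048884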
# Complexity: O(n), assuming constant number of keys
def count_valid(passports):
count = 0
required_keys = ['byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid']
for passport in passports:
valid = True
for key in required_keys:
if key not in passport:
valid = False
break
if valid:
count += 1
return count
def validate_height(height):
    if len(height) < 2:
        return False
    units = height[-2:]
    value = height[:-2]
if not value.isdigit():
return False
value = int(value)
if units == 'cm':
return value >= 150 and value <= 193
elif units == 'in':
return value >= 59 and value <= 76
return False
# Complexity: O(n), assuming constant number of keys
def count_valid_with_validation(passports):
count = 0
conditions = {
'byr': lambda x: x.isdigit() and (int(x) >= 1920 and int(x) <= 2002),
'iyr': lambda x: x.isdigit() and (int(x) >= 2010 and int(x) <= 2020),
'eyr': lambda x: x.isdigit() and (int(x) >= 2020 and int(x) <= 2030),
'hgt': validate_height,
'hcl': lambda x: re.match('^#[0-9a-f]{6}$', x),
'ecl': lambda x: x in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'],
'pid': lambda x: x.isdigit() and len(x) == 9
}
for passport in passports:
valid = True
for key in conditions:
if not(key in passport and conditions[key](passport[key])):
valid = False
break
if valid:
count += 1
return count
passports = parse_file("../data/day04.txt")
print(count_valid(passports))
print(count_valid_with_validation(passports))
|
py
|
1a59a85fb94fcc776b6b007f2793c71b461bd5fd
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.18.3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AssociateData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'name': 'str'
}
attribute_map = {
'type': 'type',
'name': 'name'
}
def __init__(self, type=None, name=None): # noqa: E501
"""AssociateData - a model defined in Swagger""" # noqa: E501
self._type = None
self._name = None
self.discriminator = None
if type is not None:
self.type = type
if name is not None:
self.name = name
@property
def type(self):
"""Gets the type of this AssociateData. # noqa: E501
        associate type REQUIRED # noqa: E501
:return: The type of this AssociateData. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this AssociateData.
        associate type REQUIRED # noqa: E501
:param type: The type of this AssociateData. # noqa: E501
:type: str
"""
self._type = type
@property
def name(self):
"""Gets the name of this AssociateData. # noqa: E501
associate name REQUIRED # noqa: E501
:return: The name of this AssociateData. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AssociateData.
associate name REQUIRED # noqa: E501
:param name: The name of this AssociateData. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AssociateData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AssociateData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
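# Illustrative usage of the generated model (values are made up):
#   data = AssociateData(type="agent", name="my-agent")
#   data.to_dict()   # -> {'type': 'agent', 'name': 'my-agent'}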
|
py
|
1a59a9175cdd3289a8ba66cedb37cd268d8cd079
|
# encoding=utf-8
## SOLVED 2013/12/23
## -59231
# Euler discovered the remarkable quadratic formula:
# n² + n + 41
# It turns out that the formula will produce 40 primes for the consecutive
# values n = 0 to 39. However, when n = 40, 40² + 40 + 41 = 40(40 + 1) + 41 is
# divisible by 41, and certainly when n = 41, 41² + 41 + 41 is clearly divisible
# by 41.
# The incredible formula n² − 79n + 1601 was discovered, which produces 80
# primes for the consecutive values n = 0 to 79. The product of the
# coefficients, −79 and 1601, is −126479.
# Considering quadratics of the form:
# n² + an + b, where |a| < 1000 and |b| < 1000
# where |n| is the modulus/absolute value of n
# e.g. |11| = 11 and |−4| = 4
# Find the product of the coefficients, a and b, for the quadratic expression
# that produces the maximum number of primes for consecutive values of n,
# starting with n = 0.
import helpers.prime as prime
def euler():
longest_sequence = 0
product = 0
for a in range(-1000, 1000):
for b in range(-1000, 1000):
length = sequence_length(a, b)
if length > longest_sequence:
longest_sequence = length
product = a * b
return product
def sequence_length(a, b):
def f():
return n ** 2 + a * n + b
n = 0
while f() > 1 and prime.is_prime(f()):
n += 1
return n
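# Hypothetical sanity check (not part of the original solution); both expected
# values come straight from the problem statement quoted above. Running the
# full search in euler() may take a while.
if __name__ == "__main__":
    assert sequence_length(1, 41) == 40
    assert sequence_length(-79, 1601) == 80
    print(euler())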
|
py
|
1a59aa211f09b01a7e358c3944681e10a291e26d
|
# In "and" operator if ONE is false the whole is false
# in "or" operator if ONE is true the whole is true
print("Welcome to the rollercoaster!")
height = int(input("What is your height in cms? "))
bill = 0
if height >= 120:
print("You can ride the rollercoaster")
age = int(input("What is your age? "))
if age < 12:
bill = 5
print("Child tickets are $5")
elif age <= 18:
bill = 7
print("Youth tickets are $7")
elif age >= 45 and age <= 55:
print("Everything is going to be ok. Have a free ride")
else:
bill = 14
print("Adult tickets are $14")
wants_photo = input("Do you want a photo taken? Y or N.")
if wants_photo == "Y":
bill += 3
print(f"Your final bill is $ {bill}")
else:
print("Sorry, your pp isn't grown up to ride the rollercoaster")
|
py
|
1a59ab4ca8620e109f27c734681494f7097308e1
|
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
from dataclasses import dataclass
from pathlib import Path
import math
import bpy
from pxr import Sdf, UsdLux, Tf
from ...utils.image import cache_image_file, cache_image_file_path
from ...utils import BLENDER_DATA_DIR
from ...utils import usd as usd_utils
from ...utils import logging
log = logging.Log('export.world')
OBJ_PRIM_NAME = "World"
LIGHT_PRIM_NAME = "World"
@dataclass(init=False, eq=True)
class ShadingData:
type: str
use_scene_lights: bool = True
use_scene_world: bool = True
has_world: bool = False
studiolight: Path = None
studiolight_rotate_z: float = 0.0
studiolight_background_alpha: float = 0.0
studiolight_intensity: float = 1.0
def __init__(self, context: bpy.types.Context, world: bpy.types.World):
shading = context.area.spaces.active.shading
self.type = shading.type
if self.type == 'RENDERED':
self.use_scene_lights = shading.use_scene_lights_render
self.use_scene_world = shading.use_scene_world_render
else:
self.use_scene_lights = shading.use_scene_lights
self.use_scene_world = shading.use_scene_world
if self.use_scene_world:
self.has_world = bool(world)
else:
if shading.selected_studio_light.path:
self.studiolight = Path(shading.selected_studio_light.path)
else:
self.studiolight = BLENDER_DATA_DIR / "studiolights/world" / shading.studio_light
self.studiolight_rotate_z = shading.studiolight_rotate_z
self.studiolight_background_alpha = shading.studiolight_background_alpha
self.studiolight_intensity = shading.studiolight_intensity
@dataclass(init=False, eq=True, repr=True)
class WorldData:
""" Comparable dataclass which holds all environment settings """
color: tuple = (0.05, 0.05, 0.05)
image: str = None
intensity: float = 1.0
rotation: tuple = (0.0, 0.0, 0.0)
transparency: float = 1.0
@property
def clear_color(self):
color = [c * self.intensity for c in self.color]
color.append(self.transparency)
return tuple(color)
@staticmethod
def init_from_world(world: bpy.types.World):
""" Returns WorldData from bpy.types.World """
data = WorldData()
if not world:
return data
if not world.use_nodes:
data.color = tuple(world.color)
return data
output_node = next((node for node in world.node_tree.nodes
if node.bl_idname == 'ShaderNodeOutputWorld' and node.is_active_output),
None)
if not output_node:
return data
from .nodes import ShaderNodeOutputWorld
node_parser = ShaderNodeOutputWorld(world, output_node)
node_item = node_parser.export()
if not node_item:
return data
node_data = node_item.data
if isinstance(node_data, float):
data.color = (node_data, node_data, node_data)
data.transparency = 1.0
return data
if isinstance(node_data, tuple):
data.color = node_data[:3]
data.transparency = node_data[3]
return data
# node_data is dict here
intensity = node_data.get('intensity', 1.0)
if isinstance(intensity, tuple):
intensity = intensity[0]
data.intensity = intensity
color = node_data.get('color')
if color is None:
image = node_data.get('image')
if image:
data.image = cache_image_file(image)
elif isinstance(color, float):
data.color = (color, color, color)
data.transparency = color
elif isinstance(color, tuple):
data.color = color[:3]
data.transparency = color[3]
else: # dict
image = color.get('image')
if image:
data.image = cache_image_file(image)
rotation = node_data.get('rotation')
if isinstance(rotation, tuple):
data.rotation = rotation[:3]
return data
@staticmethod
def init_from_shading(shading: ShadingData, world):
if shading.use_scene_world:
return WorldData.init_from_world(world)
data = WorldData()
data.intensity = shading.studiolight_intensity
data.rotation = (0.0, 0.0, shading.studiolight_rotate_z)
data.image = cache_image_file_path(shading.studiolight)
return data
@staticmethod
def init_from_stage(stage):
data = WorldData()
light_prim = next((prim for prim in stage.TraverseAll() if
prim.GetTypeName() == 'DomeLight'), None)
if light_prim:
data.color = light_prim.GetAttribute('inputs:color').Get()
data.intensity = light_prim.GetAttribute('inputs:intensity').Get()
data.transparency = light_prim.GetAttribute('inputs:transparency').Get()
return data
def sync(root_prim, world: bpy.types.World, shading: ShadingData = None):
if shading:
data = WorldData.init_from_shading(shading, world)
else:
data = WorldData.init_from_world(world)
stage = root_prim.GetStage()
obj_prim = stage.DefinePrim(root_prim.GetPath().AppendChild(OBJ_PRIM_NAME))
usd_light = UsdLux.DomeLight.Define(stage, obj_prim.GetPath().AppendChild(LIGHT_PRIM_NAME))
light_prim = usd_light.GetPrim()
usd_light.OrientToStageUpAxis()
if data.image:
tex_attr = usd_light.CreateTextureFileAttr()
tex_attr.ClearDefault()
usd_utils.add_delegate_variants(obj_prim, {
'GL': lambda: tex_attr.Set(""),
'RPR': lambda: tex_attr.Set(str(data.image))
})
usd_light.CreateColorAttr(data.color)
usd_light.CreateIntensityAttr(data.intensity)
light_prim.CreateAttribute("inputs:transparency", Sdf.ValueTypeNames.Float).Set(data.transparency)
# set correct Dome light rotation
usd_light.AddRotateXOp().Set(180.0)
usd_light.AddRotateYOp().Set(-90.0 + math.degrees(data.rotation[2]))
def sync_update(root_prim, world: bpy.types.World, shading: ShadingData = None):
stage = root_prim.GetStage()
usd_light = UsdLux.DomeLight.Define(
stage, root_prim.GetPath().AppendChild(OBJ_PRIM_NAME).AppendChild(LIGHT_PRIM_NAME))
    # remove previous settings
usd_light.CreateColorAttr().Clear()
usd_light.CreateIntensityAttr().Clear()
if usd_light.GetTextureFileAttr().Get() is not None:
usd_light.GetTextureFileAttr().Block()
usd_light.ClearXformOpOrder()
sync(root_prim, world, shading)
def get_clear_color(root_prim):
light_prim = root_prim.GetChild(OBJ_PRIM_NAME).GetChild(LIGHT_PRIM_NAME)
color = light_prim.GetAttribute('inputs:color').Get()
intensity = light_prim.GetAttribute('inputs:intensity').Get()
transparency = light_prim.GetAttribute('inputs:transparency').Get()
clear_color = [c * intensity for c in color]
clear_color.append(transparency)
return tuple(clear_color)
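# For reference (illustrative numbers only): with color (1.0, 0.5, 0.25),
# intensity 2.0 and transparency 0.8, get_clear_color() above would yield
# (2.0, 1.0, 0.5, 0.8) -- each channel scaled by intensity, with the alpha
# taken from the transparency attribute.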
|
py
|
1a59abb21e5e08d68ef6066325c2a4be18590724
|
"""A basic implementation of linear regression
Uses gradient descent for finding the minimum, and least squares
as the loss function to be optimized.
"""
import numpy as np
class LinearRegression():
"""Basic implementation of linear regression"""
def fit(self, x, y, lr=0.01, epochs=100):
"""Calculate linear regression for numpy ndarrays
Rows should be data points, and columns should be features
"""
# Add bias
x = _append_bias(x)
# Extract number of features
n_features = x.shape[1]
w = _initialise_weights(n_features)
for i in range(epochs):
error = _mse(y, _activation(x, w))
w = _train_epoch(x, y, w, lr)
print("MSE: {}".format(error))
print("Weights: {}".format(w))
self.w = w
def predict(self, x):
"""Predict based on the weights computed previously"""
x = _append_bias(x)
return _activation(x, self.w)
def _mse(a, b):
"""Compute MSE for 2 vectors"""
return np.mean(np.square(a - b))
def _activation(x, w):
"""Activation function (dot product in this case)"""
return np.dot(x, w)
def _partial_derivative_mse(x, y, w):
"""Calculate partial derivatives for MSE"""
new_w = []
for feature in x:
new_w.append(-2 * np.mean(feature * (y - (_activation(x, w)))))
return new_w
def _train_epoch(x, y, w, lr):
"""Train for one epoch using gradient descent"""
gradient_w = np.zeros(x.shape[1])
for i in range(len(x)):
gradient_w += _partial_derivative_mse(x[i], y[i], w)
return w - lr * gradient_w
def _initialise_weights(n):
return np.random.rand(n)
def _append_bias(x):
"""Append 1 to each data point"""
return np.hstack((x, np.ones((x.shape[0], 1))))
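# Minimal usage sketch (not part of the original module); the data, learning
# rate and epoch count below are made-up illustration values.
if __name__ == "__main__":
    np.random.seed(0)
    x = np.random.rand(100, 2)
    # Target is a known linear function of the features plus a constant bias
    y = 3 * x[:, 0] - 2 * x[:, 1] + 0.5
    model = LinearRegression()
    model.fit(x, y, lr=0.001, epochs=100)
    # Compare a few predictions against the true targets
    print(model.predict(x[:5]))
    print(y[:5])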
|
py
|
1a59ac30b5345e8a0af8b7bf44c71e9bc32293dc
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PoliciesPermissionGrantPoliciesOperations(object):
"""PoliciesPermissionGrantPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~identity_sign_ins.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_excludes(
self,
permission_grant_policy_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum138"]]]
select=None, # type: Optional[List[Union[str, "models.Enum139"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfPermissionGrantConditionSet"]
"""Get excludes from policies.
Get excludes from policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~identity_sign_ins.models.Enum138]
:param select: Select properties to be returned.
:type select: list[str or ~identity_sign_ins.models.Enum139]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfPermissionGrantConditionSet or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~identity_sign_ins.models.CollectionOfPermissionGrantConditionSet]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfPermissionGrantConditionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_excludes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfPermissionGrantConditionSet', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_excludes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/excludes'} # type: ignore
def create_excludes(
self,
permission_grant_policy_id, # type: str
body, # type: "models.MicrosoftGraphPermissionGrantConditionSet"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPermissionGrantConditionSet"
"""Create new navigation property to excludes for policies.
Create new navigation property to excludes for policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param body: New navigation property.
:type body: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPermissionGrantConditionSet, or the result of cls(response)
:rtype: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPermissionGrantConditionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_excludes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphPermissionGrantConditionSet')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPermissionGrantConditionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_excludes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/excludes'} # type: ignore
def get_excludes(
self,
permission_grant_policy_id, # type: str
permission_grant_condition_set_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum140"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPermissionGrantConditionSet"
"""Get excludes from policies.
Get excludes from policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param permission_grant_condition_set_id: key: id of permissionGrantConditionSet.
:type permission_grant_condition_set_id: str
:param select: Select properties to be returned.
:type select: list[str or ~identity_sign_ins.models.Enum140]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPermissionGrantConditionSet, or the result of cls(response)
:rtype: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPermissionGrantConditionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_excludes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
'permissionGrantConditionSet-id': self._serialize.url("permission_grant_condition_set_id", permission_grant_condition_set_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPermissionGrantConditionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_excludes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/excludes/{permissionGrantConditionSet-id}'} # type: ignore
def update_excludes(
self,
permission_grant_policy_id, # type: str
permission_grant_condition_set_id, # type: str
body, # type: "models.MicrosoftGraphPermissionGrantConditionSet"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property excludes in policies.
Update the navigation property excludes in policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param permission_grant_condition_set_id: key: id of permissionGrantConditionSet.
:type permission_grant_condition_set_id: str
:param body: New navigation property values.
:type body: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_excludes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
'permissionGrantConditionSet-id': self._serialize.url("permission_grant_condition_set_id", permission_grant_condition_set_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphPermissionGrantConditionSet')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_excludes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/excludes/{permissionGrantConditionSet-id}'} # type: ignore
def delete_excludes(
self,
permission_grant_policy_id, # type: str
permission_grant_condition_set_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property excludes for policies.
Delete navigation property excludes for policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param permission_grant_condition_set_id: key: id of permissionGrantConditionSet.
:type permission_grant_condition_set_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_excludes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
'permissionGrantConditionSet-id': self._serialize.url("permission_grant_condition_set_id", permission_grant_condition_set_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_excludes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/excludes/{permissionGrantConditionSet-id}'} # type: ignore
def list_includes(
self,
permission_grant_policy_id, # type: str
orderby=None, # type: Optional[List[Union[str, "models.Enum141"]]]
select=None, # type: Optional[List[Union[str, "models.Enum142"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfPermissionGrantConditionSet0"]
"""Get includes from policies.
Get includes from policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~identity_sign_ins.models.Enum141]
:param select: Select properties to be returned.
:type select: list[str or ~identity_sign_ins.models.Enum142]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfPermissionGrantConditionSet0 or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~identity_sign_ins.models.CollectionOfPermissionGrantConditionSet0]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfPermissionGrantConditionSet0"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_includes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfPermissionGrantConditionSet0', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_includes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/includes'} # type: ignore
def create_includes(
self,
permission_grant_policy_id, # type: str
body, # type: "models.MicrosoftGraphPermissionGrantConditionSet"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPermissionGrantConditionSet"
"""Create new navigation property to includes for policies.
Create new navigation property to includes for policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param body: New navigation property.
:type body: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPermissionGrantConditionSet, or the result of cls(response)
:rtype: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPermissionGrantConditionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_includes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphPermissionGrantConditionSet')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPermissionGrantConditionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_includes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/includes'} # type: ignore
def get_includes(
self,
permission_grant_policy_id, # type: str
permission_grant_condition_set_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum143"]]]
expand=None, # type: Optional[List[str]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphPermissionGrantConditionSet"
"""Get includes from policies.
Get includes from policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param permission_grant_condition_set_id: key: id of permissionGrantConditionSet.
:type permission_grant_condition_set_id: str
:param select: Select properties to be returned.
:type select: list[str or ~identity_sign_ins.models.Enum143]
:param expand: Expand related entities.
:type expand: list[str]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphPermissionGrantConditionSet, or the result of cls(response)
:rtype: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphPermissionGrantConditionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_includes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
'permissionGrantConditionSet-id': self._serialize.url("permission_grant_condition_set_id", permission_grant_condition_set_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphPermissionGrantConditionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_includes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/includes/{permissionGrantConditionSet-id}'} # type: ignore
def update_includes(
self,
permission_grant_policy_id, # type: str
permission_grant_condition_set_id, # type: str
body, # type: "models.MicrosoftGraphPermissionGrantConditionSet"
**kwargs # type: Any
):
# type: (...) -> None
"""Update the navigation property includes in policies.
Update the navigation property includes in policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param permission_grant_condition_set_id: key: id of permissionGrantConditionSet.
:type permission_grant_condition_set_id: str
:param body: New navigation property values.
:type body: ~identity_sign_ins.models.MicrosoftGraphPermissionGrantConditionSet
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_includes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
'permissionGrantConditionSet-id': self._serialize.url("permission_grant_condition_set_id", permission_grant_condition_set_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphPermissionGrantConditionSet')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_includes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/includes/{permissionGrantConditionSet-id}'} # type: ignore
def delete_includes(
self,
permission_grant_policy_id, # type: str
permission_grant_condition_set_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete navigation property includes for policies.
Delete navigation property includes for policies.
:param permission_grant_policy_id: key: id of permissionGrantPolicy.
:type permission_grant_policy_id: str
:param permission_grant_condition_set_id: key: id of permissionGrantConditionSet.
:type permission_grant_condition_set_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_includes.metadata['url'] # type: ignore
path_format_arguments = {
'permissionGrantPolicy-id': self._serialize.url("permission_grant_policy_id", permission_grant_policy_id, 'str'),
'permissionGrantConditionSet-id': self._serialize.url("permission_grant_condition_set_id", permission_grant_condition_set_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_includes.metadata = {'url': '/policies/permissionGrantPolicies/{permissionGrantPolicy-id}/includes/{permissionGrantConditionSet-id}'} # type: ignore
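# Hedged usage sketch: `ops` is assumed to be an already-constructed instance of
# the operations class above (client wiring lives in the package's generated
# top-level client and is not shown here); the function name is illustrative.
def _example_iterate_includes(ops, permission_grant_policy_id):
    # list_includes returns an azure.core.paging.ItemPaged that pages lazily;
    # each element is a MicrosoftGraphPermissionGrantConditionSet model.
    for condition_set in ops.list_includes(permission_grant_policy_id):
        yield condition_set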
|
py
|
1a59ac53d37504e0dee7af7621b9f90347a03c3b
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
microcode = '''
def macroop FADD1_R
{
addfp st(0), sti, st(0)
};
def macroop FADD1_M
{
ldfp ufp1, seg, sib, disp
addfp st(0), st(0), ufp1
};
def macroop FADD1_P
{
rdip t7
ldfp ufp1, seg, riprel, disp
addfp st(0), st(0), ufp1
};
def macroop FADD2_R
{
addfp sti, sti, st(0)
};
def macroop FADD2_M
{
ldfp ufp1, seg, sib, disp
addfp st(0), st(0), ufp1
};
def macroop FADD2_P
{
rdip t7
ldfp ufp1, seg, riprel, disp
addfp st(0), st(0), ufp1
};
def macroop FADDP
{
addfp st(1), st(0), st(1), spm=1
};
def macroop FADDP_R
{
addfp sti, sti, st(0), spm=1
};
def macroop FADDP_M
{
fault "std::make_shared<UnimpInstFault>()"
};
def macroop FADDP_P
{
fault "std::make_shared<UnimpInstFault>()"
};
# FIADD
'''
|
py
|
1a59acaef0a93f041438aef4c2a111af3f821883
|
print('\033[1;32;44mOlá, Mundo!\033[m')
|
py
|
1a59affd4b0a6171ec70970f7d6e1155b2b4ce36
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG Website - A Django-powered website for Reaction Mechanism Generator
#
# Copyright (c) 2011 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import os.path
import re
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.forms.models import BaseInlineFormSet, inlineformset_factory
from rmgweb.rmg.models import *
from rmgweb.rmg.forms import *
from rmgpy.molecule.molecule import Molecule
from rmgpy.molecule.group import Group
from rmgpy.thermo import *
from rmgpy.kinetics import *
from rmgpy.data.base import Entry
from rmgpy.data.thermo import ThermoDatabase
from rmgpy.data.kinetics import *
from rmgpy.data.rmg import RMGDatabase
from rmgweb.main.tools import *
from rmgweb.database.views import loadDatabase
################################################################################
def index(request):
"""
The RMG simulation homepage.
"""
return render_to_response('rmg.html', context_instance=RequestContext(request))
def convertChemkin(request):
"""
Allows the user to upload Chemkin and RMG dictionary files to generate a nicely formatted HTML output.
"""
chemkin = Chemkin()
path = ''
chemkin.deleteDir()
if request.method == 'POST':
chemkin.createDir()
form = UploadChemkinForm(request.POST, request.FILES, instance=chemkin)
if form.is_valid():
form.save()
path = 'media/rmg/tools/output.html'
# Generate the output HTML file
chemkin.createOutput()
# Go back to the network's main page
return render_to_response('chemkinUpload.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = UploadChemkinForm(instance=chemkin)
return render_to_response('chemkinUpload.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def convertAdjlists(request):
"""
Allows the user to upload a dictionary txt file and convert it back into old-style adjacency lists in the form of a txt file.
"""
conversion = AdjlistConversion()
path = ''
conversion.deleteDir()
if request.method == 'POST':
conversion.createDir()
form = UploadDictionaryForm(request.POST, request.FILES, instance=conversion)
if form.is_valid():
form.save()
path = 'media/rmg/tools/adjlistConversion/RMG_Dictionary.txt'
# Generate the output dictionary file
conversion.createOutput()
# Go back to the network's main page
return render_to_response('dictionaryUpload.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = UploadDictionaryForm(instance=conversion)
return render_to_response('dictionaryUpload.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def compareModels(request):
"""
Allows the user to compare two RMG models with their Chemkin files and species dictionaries and generate
a pretty HTML diff file.
"""
diff = Diff()
path = ''
diff.deleteDir()
if request.method == 'POST':
diff.createDir()
form = ModelCompareForm(request.POST, request.FILES, instance=diff)
if form.is_valid():
form.save()
path = 'media/rmg/tools/compare/diff.html'
# Generate the output HTML file
diff.createOutput()
return render_to_response('modelCompare.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = ModelCompareForm(instance=diff)
return render_to_response('modelCompare.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def mergeModels(request):
"""
Merge 2 RMG models with their chemkin and species dictionaries.
Produces a merged chemkin file and species dictionary.
"""
model = Diff()
path = ''
model.deleteDir()
if request.method == 'POST':
model.createDir()
form = ModelCompareForm(request.POST, request.FILES, instance = model)
if form.is_valid():
form.save()
model.merge()
path = 'media/rmg/tools/compare'
#[os.path.join(model.path,'chem.inp'), os.path.join(model.path,'species_dictionary.txt'), os.path.join(model.path,'merging_log.txt')]
return render_to_response('mergeModels.html', {'form': form, 'path':path}, context_instance=RequestContext(request))
else:
form = ModelCompareForm(instance=model)
return render_to_response('mergeModels.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def generateFlux(request):
"""
Allows the user to upload a set of RMG condition files and/or Chemkin species concentration output
to generate a flux diagram video.
"""
from generateFluxDiagram import createFluxDiagram
flux = FluxDiagram()
path = ''
flux.deleteDir()
if request.method == 'POST':
flux.createDir()
form = FluxDiagramForm(request.POST, request.FILES,instance=flux)
if form.is_valid():
form.save()
input = os.path.join(flux.path,'input.py')
chemkin = os.path.join(flux.path,'chem.inp')
dict = os.path.join(flux.path,'species_dictionary.txt')
chemkinOutput = ''
if 'ChemkinOutput' in request.FILES:
chemkinOutput = os.path.join(flux.path,'chemkin_output.out')
java = form.cleaned_data['Java']
settings = {}
settings['maximumNodeCount'] = form.cleaned_data['MaxNodes']
settings['maximumEdgeCount'] = form.cleaned_data['MaxEdges']
settings['timeStep'] = form.cleaned_data['TimeStep']
settings['concentrationTolerance'] = form.cleaned_data['ConcentrationTolerance']
settings['speciesRateTolerance'] = form.cleaned_data['SpeciesRateTolerance']
createFluxDiagram(flux.path, input, chemkin, dict, java, settings, chemkinOutput)
# Look at number of subdirectories to determine where the flux diagram videos are
subdirs = [name for name in os.listdir(flux.path) if os.path.isdir(os.path.join(flux.path, name))]
subdirs.remove('species')
return render_to_response('fluxDiagram.html', {'form': form, 'path':subdirs}, context_instance=RequestContext(request))
else:
form = FluxDiagramForm(instance=flux)
return render_to_response('fluxDiagram.html', {'form': form,'path':path}, context_instance=RequestContext(request))
def runPopulateReactions(request):
"""
Allows the user to upload an RMG input file to run Populate Reactions, generating an HTML output and a Chemkin file.
"""
populateReactions = PopulateReactions()
outputPath = ''
chemkinPath = ''
populateReactions.deleteDir()
if request.method == 'POST':
populateReactions.createDir()
form = PopulateReactionsForm(request.POST, request.FILES, instance=populateReactions)
if form.is_valid():
form.save()
outputPath = 'media/rmg/tools/populateReactions/output.html'
chemkinPath = 'media/rmg/tools/populateReactions/chemkin/chem.inp'
# Generate the output HTML file
populateReactions.createOutput()
# Go back to the network's main page
return render_to_response('populateReactionsUpload.html', {'form': form, 'output': outputPath, 'chemkin': chemkinPath}, context_instance=RequestContext(request))
# Otherwise create the form
else:
form = PopulateReactionsForm(instance=populateReactions)
return render_to_response('populateReactionsUpload.html', {'form': form, 'output': outputPath, 'chemkin': chemkinPath}, context_instance=RequestContext(request))
def input(request):
ThermoLibraryFormset = inlineformset_factory(Input, ThermoLibrary, ThermoLibraryForm,
BaseInlineFormSet, extra=1, can_delete=True)
ReactionLibraryFormset = inlineformset_factory(Input, ReactionLibrary, ReactionLibraryForm,
BaseInlineFormSet, extra=1, can_delete=True)
ReactorSpeciesFormset = inlineformset_factory(Input, ReactorSpecies, ReactorSpeciesForm,
BaseInlineFormSet, extra = 1, can_delete=True)
ReactorFormset = inlineformset_factory(Input, Reactor, ReactorForm,
BaseInlineFormSet, extra = 1, can_delete=True)
Input.objects.all().delete()
input = Input()
input.deleteDir()
uploadform = UploadInputForm(instance=input)
form = InputForm(instance=input)
thermolibformset = ThermoLibraryFormset(instance=input)
reactionlibformset = ReactionLibraryFormset(instance=input)
reactorspecformset = ReactorSpeciesFormset(instance=input)
reactorformset = ReactorFormset(instance=input)
upload_error = ''
input_error = ''
if request.method == 'POST':
input.createDir()
# Load an input file into the form by uploading it
if "upload" in request.POST:
uploadform = UploadInputForm(request.POST, request.FILES, instance=input)
if uploadform.is_valid():
uploadform.save()
initial_thermo_libraries, initial_reaction_libraries, initial_reactor_systems, initial_species, initial = input.loadForm(input.loadpath)
# Make the formsets the lengths of the initial data
if initial_thermo_libraries:
ThermoLibraryFormset = inlineformset_factory(Input, ThermoLibrary, ThermoLibraryForm, BaseInlineFormSet,
extra=len(initial_thermo_libraries), can_delete=True)
if initial_reaction_libraries:
ReactionLibraryFormset = inlineformset_factory(Input, ReactionLibrary, ReactionLibraryForm, BaseInlineFormSet,
extra=len(initial_reaction_libraries), can_delete=True)
ReactorSpeciesFormset = inlineformset_factory(Input, ReactorSpecies, ReactorSpeciesForm, BaseInlineFormSet,
extra=len(initial_species), can_delete=True)
ReactorFormset = inlineformset_factory(Input, Reactor, ReactorForm, BaseInlineFormSet,
extra = len(initial_reactor_systems), can_delete=True)
thermolibformset = ThermoLibraryFormset()
reactionlibformset = ReactionLibraryFormset()
reactorspecformset = ReactorSpeciesFormset()
reactorformset = ReactorFormset()
# Load the initial data into the forms
form = InputForm(initial = initial)
for subform, data in zip(thermolibformset.forms, initial_thermo_libraries):
subform.initial = data
for subform, data in zip(reactionlibformset.forms, initial_reaction_libraries):
subform.initial = data
for subform, data in zip(reactorspecformset.forms, initial_species):
subform.initial = data
for subform, data in zip(reactorformset.forms, initial_reactor_systems):
subform.initial = data
else:
upload_error = 'Your input file was invalid. Please try again.'
if "submit" in request.POST:
uploadform = UploadInputForm(request.POST, instance=input)
form = InputForm(request.POST, instance = input)
thermolibformset = ThermoLibraryFormset(request.POST, instance=input)
reactionlibformset = ReactionLibraryFormset(request.POST, instance=input)
reactorspecformset = ReactorSpeciesFormset(request.POST, instance=input)
reactorformset = ReactorFormset(request.POST, instance=input)
if (form.is_valid() and thermolibformset.is_valid() and reactionlibformset.is_valid()
and reactorspecformset.is_valid() and reactorformset.is_valid()):
form.save()
thermolibformset.save()
reactionlibformset.save()
reactorspecformset.save()
reactorformset.save()
posted = Input.objects.all()[0]
input.saveForm(posted, form)
path = 'media/rmg/tools/input/input.py'
return render_to_response('inputResult.html', {'path': path})
else:
# Will need more useful error messages later.
input_error = 'Your form was invalid. Please edit the form and try again.'
return render_to_response('input.html', {'uploadform': uploadform, 'form': form, 'thermolibformset':thermolibformset,
'reactionlibformset':reactionlibformset, 'reactorspecformset':reactorspecformset,
'reactorformset':reactorformset, 'upload_error': upload_error,
'input_error': input_error}, context_instance=RequestContext(request))
def plotKinetics(request):
"""
Allows the user to upload Chemkin files to generate a plot of reaction kinetics.
"""
from rmgpy.quantity import Quantity
from rmgweb.database.forms import RateEvaluationForm
if request.method == 'POST':
chemkin = Chemkin()
chemkin.createDir()
form = UploadChemkinForm(request.POST, request.FILES, instance=chemkin)
rateForm = RateEvaluationForm(request.POST)
eval = []
if rateForm.is_valid():
temperature = Quantity(rateForm.cleaned_data['temperature'], str(rateForm.cleaned_data['temperature_units'])).value_si
pressure = Quantity(rateForm.cleaned_data['pressure'], str(rateForm.cleaned_data['pressure_units'])).value_si
eval = [temperature, pressure]
kineticsDataList = chemkin.getKinetics()
if form.is_valid():
form.save()
kineticsDataList = chemkin.getKinetics()
return render_to_response('plotKineticsData.html', {'kineticsDataList': kineticsDataList,
'plotWidth': 500,
'plotHeight': 400 + 15 * len(kineticsDataList),
'form': rateForm,
'eval':eval },
context_instance=RequestContext(request))
# Otherwise create the form
else:
chemkin = Chemkin()
chemkin.deleteDir()
form = UploadChemkinForm(instance=chemkin)
return render_to_response('plotKinetics.html', {'form': form}, context_instance=RequestContext(request))
def javaKineticsLibrary(request):
"""
Allows the user to upload Chemkin files to generate a Java kinetics library.
"""
from rmgpy.quantity import Quantity
eval = False
if request.method == 'POST':
chemkin = Chemkin()
chemkin.createDir()
form = UploadChemkinForm(request.POST, request.FILES, instance=chemkin)
if form.is_valid():
form.save()
chemkin.createJavaKineticsLibrary()
eval = True
return render_to_response('javaKineticsLibrary.html', {'form': form,
'eval': eval },
context_instance=RequestContext(request))
# Otherwise create the form
else:
chemkin = Chemkin()
chemkin.deleteDir()
form = UploadChemkinForm(instance=chemkin)
return render_to_response('javaKineticsLibrary.html', {'form': form}, context_instance=RequestContext(request))
def evaluateNASA(request):
"""
Creates a webpage form for entering a Chemkin-format NASA polynomial and quickly
obtaining its enthalpy and Cp values.
"""
from rmgpy.chemkin import readThermoEntry
form = NASAForm()
thermo = None
thermoData = None
if request.method == 'POST':
posted = NASAForm(request.POST, error_class=DivErrorList)
initial = request.POST.copy()
if posted.is_valid():
NASA = posted.cleaned_data['NASA']
if NASA != '':
species, thermo, formula = readThermoEntry(str(NASA))
try:
thermoData = thermo.toThermoData()
except:
# if we cannot convert the thermo to thermo data, we will not be able to display the
# H298, S298, and Cp values, but that's ok.
pass
form = NASAForm(initial, error_class=DivErrorList)
return render_to_response('NASA.html', {'form': form, 'thermo':thermo, 'thermoData':thermoData}, context_instance=RequestContext(request))
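# Hedged sketch (illustrative only, not taken from this project): these views
# are wired into a urls.py elsewhere in rmgweb. The pattern strings, names, and
# import style below are assumptions for a pre-2.0 Django URLconf, matching the
# django.core.urlresolvers import used above.
#
#     from django.conf.urls import url
#     from rmgweb.rmg import views
#
#     urlpatterns = [
#         url(r'^$', views.index),
#         url(r'^tools/chemkin/$', views.convertChemkin),
#         url(r'^tools/compare/$', views.compareModels),
#     ]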
|
py
|
1a59b0a16dfd478d4488e0ef683320c55de3333b
|
import tensorflow.keras.backend as K
import tensorflow as tf
class learning_phase_scope(object):
def __init__(self, value):
self.value = value
def __enter__(self):
self.learning_phase_placeholder = K.learning_phase()
K.set_learning_phase(self.value)
def __exit__(self, *args):
K._GRAPH_LEARNING_PHASES[tf.get_default_graph()] = self.learning_phase_placeholder
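# Hedged usage sketch, assuming a TF 1.x-style graph backend (the __exit__ above
# relies on tf.get_default_graph() and K._GRAPH_LEARNING_PHASES, which only
# exist in that mode).
if __name__ == "__main__":
    with learning_phase_scope(1):
        # Inside the scope the Keras learning phase reads back as 1 (training).
        print(K.learning_phase())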
|
py
|
1a59b0db0729402e6dad09242cdfdf6703e1539b
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.resources.types import language_constant
from google.ads.googleads.v6.services.types import language_constant_service
from .transports.base import (
LanguageConstantServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import LanguageConstantServiceGrpcTransport
class LanguageConstantServiceClientMeta(type):
"""Metaclass for the LanguageConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[LanguageConstantServiceTransport]]
_transport_registry["grpc"] = LanguageConstantServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[LanguageConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class LanguageConstantServiceClient(
metaclass=LanguageConstantServiceClientMeta
):
"""Service to fetch language constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LanguageConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LanguageConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> LanguageConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
LanguageConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def language_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified language_constant string."""
return "languageConstants/{criterion_id}".format(
criterion_id=criterion_id,
)
@staticmethod
def parse_language_constant_path(path: str) -> Dict[str, str]:
"""Parse a language_constant path into its component segments."""
m = re.match(r"^languageConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, LanguageConstantServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the language constant service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.LanguageConstantServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, LanguageConstantServiceTransport):
# transport is a LanguageConstantServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = LanguageConstantServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_language_constant(
self,
request: language_constant_service.GetLanguageConstantRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> language_constant.LanguageConstant:
r"""Returns the requested language constant.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetLanguageConstantRequest`):
The request object. Request message for
[LanguageConstantService.GetLanguageConstant][google.ads.googleads.v6.services.LanguageConstantService.GetLanguageConstant].
resource_name (:class:`str`):
Required. Resource name of the
language constant to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.LanguageConstant:
A language.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a language_constant_service.GetLanguageConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, language_constant_service.GetLanguageConstantRequest
):
request = language_constant_service.GetLanguageConstantRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_language_constant
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("LanguageConstantServiceClient",)
|
py
|
1a59b1db27b4aecabe5df03bb55acd39072dbfc0
|
import inspect
import os
import sys
from collections import namedtuple
from functools import lru_cache
from dagster import check, seven
from dagster.core.code_pointer import (
CodePointer,
CustomPointer,
FileCodePointer,
ModuleCodePointer,
get_python_file_from_target,
)
from dagster.core.errors import DagsterInvalidSubsetError, DagsterInvariantViolationError
from dagster.core.origin import PipelinePythonOrigin, RepositoryPythonOrigin, SchedulePythonOrigin
from dagster.core.selector import parse_solid_selection
from dagster.serdes import pack_value, unpack_value, whitelist_for_serdes
from dagster.utils.backcompat import experimental
from .pipeline_base import IPipeline
def get_ephemeral_repository_name(pipeline_name):
check.str_param(pipeline_name, "pipeline_name")
return "__repository__{pipeline_name}".format(pipeline_name=pipeline_name)
@whitelist_for_serdes
class ReconstructableRepository(
namedtuple("_ReconstructableRepository", "pointer container_image")
):
def __new__(
cls,
pointer,
container_image=None,
):
return super(ReconstructableRepository, cls).__new__(
cls,
pointer=check.inst_param(pointer, "pointer", CodePointer),
container_image=check.opt_str_param(container_image, "container_image"),
)
@lru_cache(maxsize=1)
def get_definition(self):
return repository_def_from_pointer(self.pointer)
def get_reconstructable_pipeline(self, name):
return ReconstructablePipeline(self, name)
def get_reconstructable_schedule(self, name):
return ReconstructableSchedule(self, name)
@classmethod
def for_file(cls, file, fn_name, working_directory=None, container_image=None):
if not working_directory:
working_directory = os.getcwd()
return cls(FileCodePointer(file, fn_name, working_directory), container_image)
@classmethod
def for_module(cls, module, fn_name, container_image=None):
return cls(ModuleCodePointer(module, fn_name), container_image)
def get_cli_args(self):
return self.pointer.get_cli_args()
def get_python_origin(self):
return RepositoryPythonOrigin(
executable_path=sys.executable,
code_pointer=self.pointer,
container_image=self.container_image,
)
def get_python_origin_id(self):
return self.get_python_origin().get_id()
@whitelist_for_serdes
class ReconstructablePipeline(
namedtuple(
"_ReconstructablePipeline",
"repository pipeline_name solid_selection_str solids_to_execute",
),
IPipeline,
):
def __new__(
cls,
repository,
pipeline_name,
solid_selection_str=None,
solids_to_execute=None,
):
check.opt_set_param(solids_to_execute, "solids_to_execute", of_type=str)
return super(ReconstructablePipeline, cls).__new__(
cls,
repository=check.inst_param(repository, "repository", ReconstructableRepository),
pipeline_name=check.str_param(pipeline_name, "pipeline_name"),
solid_selection_str=check.opt_str_param(solid_selection_str, "solid_selection_str"),
solids_to_execute=solids_to_execute,
)
@property
def solid_selection(self):
return seven.json.loads(self.solid_selection_str) if self.solid_selection_str else None
@lru_cache(maxsize=1)
def get_definition(self):
from dagster.core.definitions.job import JobDefinition
defn = self.repository.get_definition().get_pipeline(self.pipeline_name)
if isinstance(defn, JobDefinition):
return self.repository.get_definition().get_pipeline(self.pipeline_name)
else:
return (
self.repository.get_definition()
.get_pipeline(self.pipeline_name)
.get_pipeline_subset_def(self.solids_to_execute)
)
def _resolve_solid_selection(self, solid_selection):
# resolve a list of solid selection queries to a frozenset of qualified solid names
# e.g. ['foo_solid+'] to {'foo_solid', 'bar_solid'}
check.list_param(solid_selection, "solid_selection", of_type=str)
solids_to_execute = parse_solid_selection(self.get_definition(), solid_selection)
if len(solids_to_execute) == 0:
raise DagsterInvalidSubsetError(
"No qualified solids to execute found for solid_selection={requested}".format(
requested=solid_selection
)
)
return solids_to_execute
def get_reconstructable_repository(self):
return self.repository
def _subset_for_execution(self, solids_to_execute, solid_selection=None):
if solids_to_execute:
pipe = ReconstructablePipeline(
repository=self.repository,
pipeline_name=self.pipeline_name,
solid_selection_str=seven.json.dumps(solid_selection) if solid_selection else None,
solids_to_execute=frozenset(solids_to_execute),
)
else:
pipe = ReconstructablePipeline(
repository=self.repository,
pipeline_name=self.pipeline_name,
)
return pipe
def subset_for_execution(self, solid_selection):
# take a list of solid queries and resolve the queries to names of solids to execute
check.opt_list_param(solid_selection, "solid_selection", of_type=str)
solids_to_execute = (
self._resolve_solid_selection(solid_selection) if solid_selection else None
)
return self._subset_for_execution(solids_to_execute, solid_selection)
def subset_for_execution_from_existing_pipeline(self, solids_to_execute):
# take a frozenset of resolved solid names from an existing pipeline
# so there's no need to parse the selection
check.opt_set_param(solids_to_execute, "solids_to_execute", of_type=str)
return self._subset_for_execution(solids_to_execute)
def describe(self):
return '"{name}" in repository ({repo})'.format(
repo=self.repository.pointer.describe(), name=self.pipeline_name
)
@staticmethod
def for_file(python_file, fn_name):
return bootstrap_standalone_recon_pipeline(
FileCodePointer(python_file, fn_name, os.getcwd())
)
@staticmethod
def for_module(module, fn_name):
return bootstrap_standalone_recon_pipeline(ModuleCodePointer(module, fn_name))
def to_dict(self):
return pack_value(self)
@staticmethod
def from_dict(val):
check.dict_param(val, "val")
inst = unpack_value(val)
check.invariant(
isinstance(inst, ReconstructablePipeline),
"Deserialized object is not instance of ReconstructablePipeline, got {type}".format(
type=type(inst)
),
)
return inst
def get_python_origin(self):
return PipelinePythonOrigin(self.pipeline_name, self.repository.get_python_origin())
def get_python_origin_id(self):
return self.get_python_origin().get_id()
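# Hedged usage sketch: build a ReconstructablePipeline from a module-level
# pipeline function and narrow it to a solid selection. The module, function,
# and solid names below are hypothetical, and nothing is loaded or executed
# until the sketch function is actually called.
def _example_reconstructable_subset():
    recon = ReconstructablePipeline.for_module("my_project.pipelines", "my_pipeline")
    # 'my_solid+' selects my_solid plus its immediate downstream solids,
    # mirroring the selection syntax described in _resolve_solid_selection above.
    return recon.subset_for_execution(["my_solid+"])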
@whitelist_for_serdes
class ReconstructableSchedule(
namedtuple(
"_ReconstructableSchedule",
"repository schedule_name",
)
):
def __new__(
cls,
repository,
schedule_name,
):
return super(ReconstructableSchedule, cls).__new__(
cls,
repository=check.inst_param(repository, "repository", ReconstructableRepository),
schedule_name=check.str_param(schedule_name, "schedule_name"),
)
def get_python_origin(self):
return SchedulePythonOrigin(self.schedule_name, self.repository.get_python_origin())
def get_python_origin_id(self):
return self.get_python_origin().get_id()
@lru_cache(maxsize=1)
def get_definition(self):
return self.repository.get_definition().get_schedule_def(self.schedule_name)
def reconstructable(target):
"""
Create a :py:class:`~dagster.core.definitions.reconstructable.ReconstructablePipeline` from a
function that returns a :py:class:`~dagster.PipelineDefinition`, or a function decorated with
:py:func:`@pipeline <dagster.pipeline>`
When your pipeline must cross process boundaries, e.g., for execution on multiple nodes or
in different systems (like ``dagstermill``), Dagster must know how to reconstruct the pipeline
on the other side of the process boundary.
This function implements a very conservative strategy for reconstructing pipelines, so that
its behavior is easy to predict, but as a consequence it is not able to reconstruct certain
kinds of pipelines, such as those defined by lambdas, in nested scopes (e.g., dynamically
within a method call), or in interactive environments such as the Python REPL or Jupyter
notebooks.
If you need to reconstruct pipelines constructed in these ways, you should use
:py:func:`~dagster.core.definitions.reconstructable.build_reconstructable_pipeline` instead,
which allows you to specify your own strategy for reconstructing a pipeline.
Examples:
.. code-block:: python
from dagster import PipelineDefinition, pipeline, reconstructable
@pipeline
def foo_pipeline():
...
reconstructable_foo_pipeline = reconstructable(foo_pipeline)
def make_bar_pipeline():
return PipelineDefinition(...)
reconstructable_bar_pipeline = reconstructable(make_bar_pipeline)
"""
from dagster.core.definitions import PipelineDefinition
if not seven.is_function_or_decorator_instance_of(target, PipelineDefinition):
raise DagsterInvariantViolationError(
"Reconstructable target should be a function or definition produced "
"by a decorated function, got {type}.".format(type=type(target)),
)
if seven.is_lambda(target):
raise DagsterInvariantViolationError(
"Reconstructable target can not be a lambda. Use a function or "
"decorated function defined at module scope instead, or use "
"build_reconstructable_pipeline."
)
if seven.qualname_differs(target):
raise DagsterInvariantViolationError(
'Reconstructable target "{target.__name__}" has a different '
'__qualname__ "{target.__qualname__}" indicating it is not '
"defined at module scope. Use a function or decorated function "
"defined at module scope instead, or use build_reconstructable_pipeline.".format(
target=target
)
)
try:
if (
hasattr(target, "__module__")
and hasattr(target, "__name__")
and inspect.getmodule(target).__name__ != "__main__"
):
return ReconstructablePipeline.for_module(target.__module__, target.__name__)
except: # pylint: disable=bare-except
pass
python_file = get_python_file_from_target(target)
if not python_file:
raise DagsterInvariantViolationError(
"reconstructable() can not reconstruct pipelines defined in interactive environments "
"like <stdin>, IPython, or Jupyter notebooks. "
"Use a pipeline defined in a module or file instead, or "
"use build_reconstructable_pipeline."
)
pointer = FileCodePointer(
python_file=python_file, fn_name=target.__name__, working_directory=os.getcwd()
)
return bootstrap_standalone_recon_pipeline(pointer)
@experimental
def build_reconstructable_pipeline(
reconstructor_module_name,
reconstructor_function_name,
reconstructable_args=None,
reconstructable_kwargs=None,
):
"""
Create a :py:class:`dagster.core.definitions.reconstructable.ReconstructablePipeline`.
When your pipeline must cross process boundaries, e.g., for execution on multiple nodes or
in different systems (like ``dagstermill``), Dagster must know how to reconstruct the pipeline
on the other side of the process boundary.
This function allows you to use the strategy of your choice for reconstructing pipelines, so
that you can reconstruct certain kinds of pipelines that are not supported by
:py:func:`~dagster.reconstructable`, such as those defined by lambdas, in nested scopes (e.g.,
dynamically within a method call), or in interactive environments such as the Python REPL or
Jupyter notebooks.
If you need to reconstruct pipelines constructed in these ways, use this function instead of
:py:func:`~dagster.reconstructable`.
Args:
reconstructor_module_name (str): The name of the module containing the function to use to
reconstruct the pipeline.
reconstructor_function_name (str): The name of the function to use to reconstruct the
pipeline.
reconstructable_args (Tuple): Args to the function to use to reconstruct the pipeline.
Values of the tuple must be JSON serializable.
reconstructable_kwargs (Dict[str, Any]): Kwargs to the function to use to reconstruct the
pipeline. Values of the dict must be JSON serializable.
Examples:
.. code-block:: python
# module: mymodule
from dagster import PipelineDefinition, pipeline, build_reconstructable_pipeline
class PipelineFactory:
def make_pipeline(*args, **kwargs):
@pipeline
def _pipeline(...):
...
return _pipeline
def reconstruct_pipeline(*args):
factory = PipelineFactory()
return factory.make_pipeline(*args)
factory = PipelineFactory()
foo_pipeline_args = (...,...)
foo_pipeline_kwargs = {...:...}
foo_pipeline = factory.make_pipeline(*foo_pipeline_args, **foo_pipeline_kwargs)
reconstructable_foo_pipeline = build_reconstructable_pipeline(
'mymodule',
'reconstruct_pipeline',
foo_pipeline_args,
foo_pipeline_kwargs,
)
"""
check.str_param(reconstructor_module_name, "reconstructor_module_name")
check.str_param(reconstructor_function_name, "reconstructor_function_name")
reconstructable_args = list(check.opt_tuple_param(reconstructable_args, "reconstructable_args"))
reconstructable_kwargs = list(
(
[key, value]
for key, value in check.opt_dict_param(
reconstructable_kwargs, "reconstructable_kwargs", key_type=str
).items()
)
)
reconstructor_pointer = ModuleCodePointer(
reconstructor_module_name, reconstructor_function_name
)
pointer = CustomPointer(reconstructor_pointer, reconstructable_args, reconstructable_kwargs)
pipeline_def = pipeline_def_from_pointer(pointer)
return ReconstructablePipeline(
repository=ReconstructableRepository(pointer), # creates ephemeral repo
pipeline_name=pipeline_def.name,
)
def bootstrap_standalone_recon_pipeline(pointer):
    # This actually bootstraps the pipeline for the sole purpose of getting
    # the pipeline name. If we changed ReconstructablePipeline to load the
    # pipeline on demand in order to get the name, we could avoid this.
pipeline_def = pipeline_def_from_pointer(pointer)
return ReconstructablePipeline(
repository=ReconstructableRepository(pointer), # creates ephemeral repo
pipeline_name=pipeline_def.name,
)
def _check_is_loadable(definition):
from .pipeline import PipelineDefinition
from .repository import RepositoryDefinition
from .graph import GraphDefinition
if not isinstance(definition, (PipelineDefinition, RepositoryDefinition, GraphDefinition)):
raise DagsterInvariantViolationError(
(
"Loadable attributes must be either a PipelineDefinition, GraphDefinition, or a "
"RepositoryDefinition. Got {definition}."
).format(definition=repr(definition))
)
return definition
def load_def_in_module(module_name, attribute):
return def_from_pointer(CodePointer.from_module(module_name, attribute))
def load_def_in_package(package_name, attribute):
return def_from_pointer(CodePointer.from_python_package(package_name, attribute))
def load_def_in_python_file(python_file, attribute, working_directory):
return def_from_pointer(CodePointer.from_python_file(python_file, attribute, working_directory))
def def_from_pointer(pointer):
target = pointer.load_target()
from .pipeline import PipelineDefinition
from .repository import RepositoryDefinition
from .graph import GraphDefinition
if isinstance(
target, (PipelineDefinition, RepositoryDefinition, GraphDefinition)
) or not callable(target):
return _check_is_loadable(target)
# if its a function invoke it - otherwise we are pointing to a
# artifact in module scope, likely decorator output
if seven.get_args(target):
raise DagsterInvariantViolationError(
"Error invoking function at {target} with no arguments. "
"Reconstructable target must be callable with no arguments".format(
target=pointer.describe()
)
)
return _check_is_loadable(target())
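# A minimal illustrative sketch (not part of the original module) of how the
# loaders above resolve definitions; the module and attribute names here are
# hypothetical:
#
#     defn = load_def_in_module("my_package.repo", "my_pipeline")
#     # ...is equivalent to resolving the pointer by hand:
#     defn = def_from_pointer(CodePointer.from_module("my_package.repo", "my_pipeline"))
#
# If the attribute is a zero-argument function rather than a definition,
# def_from_pointer() invokes it and validates that the result is a
# PipelineDefinition, GraphDefinition, or RepositoryDefinition.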
def pipeline_def_from_pointer(pointer):
from .pipeline import PipelineDefinition
target = def_from_pointer(pointer)
if isinstance(target, PipelineDefinition):
return target
raise DagsterInvariantViolationError(
"CodePointer ({str}) must resolve to a PipelineDefinition. "
"Received a {type}".format(str=pointer.describe(), type=type(target))
)
def repository_def_from_target_def(target):
from .pipeline import PipelineDefinition
from .graph import GraphDefinition
from .repository import CachingRepositoryData, RepositoryDefinition
# special case - we can wrap a single pipeline in a repository
if isinstance(target, (PipelineDefinition, GraphDefinition)):
# consider including pipeline name in generated repo name
return RepositoryDefinition(
name=get_ephemeral_repository_name(target.name),
repository_data=CachingRepositoryData.from_list([target]),
)
elif isinstance(target, RepositoryDefinition):
return target
else:
return None
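# Illustrative sketch (not part of the original module): a single pipeline or
# graph is wrapped into an ephemeral repository, anything unrecognized yields
# None; the pipeline name below is hypothetical:
#
#     @pipeline
#     def my_pipeline():
#         ...
#
#     repo = repository_def_from_target_def(my_pipeline)    # ephemeral repository
#     assert repository_def_from_target_def(object()) is None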
def repository_def_from_pointer(pointer):
target = def_from_pointer(pointer)
repo_def = repository_def_from_target_def(target)
if not repo_def:
raise DagsterInvariantViolationError(
"CodePointer ({str}) must resolve to a "
"RepositoryDefinition or a PipelineDefinition. "
"Received a {type}".format(str=pointer.describe(), type=type(target))
)
return repo_def
|
py
|
1a59b304df4f92cedf49533e5dcc3924100dd008
|
# -*- coding: utf-8 -*-
""" S3 Pivot Table Reports Method
@copyright: 2011-2015 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{Python 2.6}} <http://www.python.org>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import re
import sys
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from itertools import product
from gluon import current
from gluon.storage import Storage
from gluon.html import *
from gluon.languages import regex_translate
from gluon.sqlhtml import OptionsWidget
from gluon.validators import IS_IN_SET, IS_EMPTY_OR
from s3query import FS
from s3rest import S3Method
from s3utils import s3_flatlist, s3_has_foreign_key, s3_unicode, S3MarkupStripper, s3_represent_value
from s3xml import S3XMLFormat
from s3validators import IS_NUMBER
layer_pattern = re.compile(r"([a-zA-Z]+)\((.*)\)\Z")
# Compact JSON encoding
DEFAULT = lambda: None
SEPARATORS = (",", ":")
FACT = re.compile(r"([a-zA-Z]+)\(([a-zA-Z0-9_.$:\,~]+)\),*(.*)\Z")
SELECTOR = re.compile(r"^[a-zA-Z0-9_.$:\~]+\Z")
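# Illustrative examples (added for clarity, not executed) of the fact
# expression grammar matched by the patterns above; the selectors are
# hypothetical:
#
#     FACT.match("count(person_id)").groups()
#     # => ("count", "person_id", "")
#     FACT.match("sum(hours),count(id)").groups()
#     # => ("sum", "hours", "count(id)")  - the trailing expression is parsed recursively
#     SELECTOR.match("training.hours")
#     # => matches a bare selector, i.e. aggregation with the default method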
# =============================================================================
class S3Report(S3Method):
""" RESTful method for pivot table reports """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Page-render entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
if r.http == "GET":
if r.representation == "geojson":
output = self.geojson(r, **attr)
else:
output = self.report(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def report(self, r, **attr):
"""
Pivot table report page
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
output = {}
resource = self.resource
get_config = resource.get_config
show_filter_form = False
if r.representation in ("html", "iframe"):
filter_widgets = get_config("filter_widgets", None)
if filter_widgets and not self.hide_filter:
# Apply filter defaults (before rendering the data!)
from s3filter import S3FilterForm
show_filter_form = True
S3FilterForm.apply_filter_defaults(r, resource)
widget_id = "pivottable"
# @todo: make configurable:
maxrows = 20
maxcols = 20
# Extract the relevant GET vars
report_vars = ("rows", "cols", "fact", "totals")
get_vars = dict((k, v) for k, v in r.get_vars.iteritems()
if k in report_vars)
# Fall back to report options defaults
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
        if not any(k in get_vars for k in ("rows", "cols", "fact")):
get_vars = defaults
get_vars["chart"] = r.get_vars.get("chart",
defaults.get("chart", None))
get_vars["table"] = r.get_vars.get("table",
defaults.get("table", None))
# Generate the pivot table
if get_vars:
rows = get_vars.get("rows", None)
cols = get_vars.get("cols", None)
layer = get_vars.get("fact", "id")
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
current.log.error(sys.exc_info()[1])
facts = None
if not facts or not any([rows, cols]):
pivottable = None
else:
prefix = resource.prefix_selector
get_vars["rows"] = prefix(rows) if rows else None
get_vars["cols"] = prefix(cols) if cols else None
get_vars["fact"] = ",".join("%s(%s)" % (fact.method, fact.selector) for fact in facts)
pivottable = S3PivotTable(resource, rows, cols, facts)
else:
pivottable = None
# Render as JSON-serializable dict
if pivottable is not None:
pivotdata = pivottable.json(maxrows=maxrows, maxcols=maxcols)
else:
pivotdata = None
if r.representation in ("html", "iframe"):
tablename = resource.tablename
# Filter widgets
if show_filter_form:
advanced = False
for widget in filter_widgets:
if "hidden" in widget.opts and widget.opts.hidden:
advanced = resource.get_config("report_advanced", True)
break
filter_formstyle = get_config("filter_formstyle", None)
filter_form = S3FilterForm(filter_widgets,
formstyle=filter_formstyle,
advanced=advanced,
submit=False,
_class="filter-form",
_id="%s-filter-form" % widget_id)
fresource = current.s3db.resource(tablename)
alias = resource.alias if r.component else None
filter_widgets = filter_form.fields(fresource,
r.get_vars,
alias=alias)
else:
# Render as empty string to avoid the exception in the view
filter_widgets = None
# Generate the report form
ajax_vars = Storage(r.get_vars)
ajax_vars.update(get_vars)
filter_url = r.url(method="",
representation="",
vars=ajax_vars.fromkeys((k for k in ajax_vars
if k not in report_vars)))
ajaxurl = attr.get("ajaxurl", r.url(method="report",
representation="json",
vars=ajax_vars))
output = S3ReportForm(resource).html(pivotdata,
get_vars = get_vars,
filter_widgets = filter_widgets,
ajaxurl = ajaxurl,
filter_url = filter_url,
widget_id = widget_id)
output["title"] = self.crud_string(tablename, "title_report")
output["report_type"] = "pivottable"
# Detect and store theme-specific inner layout
self._view(r, "pivottable.html")
# View
current.response.view = self._view(r, "report.html")
elif r.representation == "json":
output = json.dumps(pivotdata, separators=SEPARATORS)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def geojson(self, r, **attr):
"""
Render the pivot table data as a dict ready to be exported as
GeoJSON for display on a Map.
@param r: the S3Request instance
@param attr: controller attributes for the request
"""
resource = self.resource
response = current.response
s3 = response.s3
# Set response headers
response.headers["Content-Type"] = s3.content_type.get("geojson",
"application/json")
if not resource.count():
# No Data
return json.dumps({})
# Extract the relevant GET vars
get_vars = r.get_vars
layer_id = r.get_vars.get("layer", None)
level = get_vars.get("level", "L0")
# Fall back to report options defaults
get_config = resource.get_config
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
# The rows dimension
context = get_config("context")
if context and "location" in context:
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() as a guide if-desired
rows = "(location)$%s" % level
else:
# Fallback to location_id
rows = "location_id$%s" % level
# Fallback we can add if-required
#rows = "site_id$location_id$%s" % level
# Filter out null values
resource.add_filter(FS(rows) != None)
# Set XSLT stylesheet
stylesheet = os.path.join(r.folder, r.XSLT_PATH, "geojson", "export.xsl")
# Do we have any data at this level of aggregation?
fallback_to_points = True # @ToDo: deployment_setting?
output = None
if fallback_to_points:
if resource.count() == 0:
# Show Points
resource.clear_query()
# Apply URL filters (especially BBOX)
resource.build_query(filter=s3.filter, vars=get_vars)
# Extract the Location Data
xmlformat = S3XMLFormat(stylesheet)
include, exclude = xmlformat.get_fields(resource.tablename)
resource.load(fields=include,
skip=exclude,
start=0,
limit=None,
orderby=None,
virtual=False,
cacheable=True)
gis = current.gis
attr_fields = []
style = gis.get_style(layer_id=layer_id,
aggregate=False)
popup_format = style.popup_format
if popup_format:
if "T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
style.popup_format = popup_format
# Extract the attr_fields
parts = popup_format.split("{")
# Skip the first part
parts = parts[1:]
for part in parts:
attribute = part.split("}")[0]
attr_fields.append(attribute)
attr_fields = ",".join(attr_fields)
location_data = gis.get_location_data(resource,
attr_fields=attr_fields)
# Export as GeoJSON
current.xml.show_ids = True
output = resource.export_xml(fields=include,
mcomponents=None,
references=[],
stylesheet=stylesheet,
as_json=True,
location_data=location_data,
map_data=dict(style=style),
)
# Transformation error?
if not output:
r.error(400, "XSLT Transformation Error: %s " % current.xml.error)
else:
while resource.count() == 0:
# Try a lower level of aggregation
level = int(level[1:])
if level == 0:
# Nothing we can display
return json.dumps({})
resource.clear_query()
# Apply URL filters (especially BBOX)
resource.build_query(filter=s3.filter, vars=get_vars)
level = "L%s" % (level - 1)
if context and "location" in context:
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() as a guide if-desired
rows = "(location)$%s" % level
else:
# Fallback to location_id
rows = "location_id$%s" % level
# Fallback we can add if-required
#rows = "site_id$location_id$%s" % level
resource.add_filter(FS(rows) != None)
if not output:
# Build the Pivot Table
cols = None
layer = get_vars.get("fact", defaults.get("fact", "count(id)"))
facts = S3PivotTableFact.parse(layer)[:1]
pivottable = S3PivotTable(resource, rows, cols, facts)
# Extract the Location Data
#attr_fields = []
style = current.gis.get_style(layer_id=layer_id,
aggregate=True)
popup_format = style.popup_format
if popup_format:
if"T(" in popup_format:
# i18n
T = current.T
items = regex_translate.findall(popup_format)
for item in items:
titem = str(T(item[1:-1]))
popup_format = popup_format.replace("T(%s)" % item,
titem)
style.popup_format = popup_format
# Extract the attr_fields
# No need as defaulted inside S3PivotTable.geojson()
#parts = popup_format.split("{")
## Skip the first part
#parts = parts[1:]
#for part in parts:
# attribute = part.split("}")[0]
# attr_fields.append(attribute)
#attr_fields = ",".join(attr_fields)
ids, location_data = pivottable.geojson(fact=facts[0], level=level)
# Export as GeoJSON
current.xml.show_ids = True
gresource = current.s3db.resource("gis_location", id=ids)
output = gresource.export_xml(fields=[],
mcomponents=None,
references=[],
stylesheet=stylesheet,
as_json=True,
location_data=location_data,
# Tell the client that we are
# displaying aggregated data and
# the level it is aggregated at
map_data=dict(level=int(level[1:]),
style=style),
)
# Transformation error?
if not output:
r.error(400, "XSLT Transformation Error: %s " % current.xml.error)
return output
# -------------------------------------------------------------------------
def widget(self, r, method=None, widget_id=None, visible=True, **attr):
"""
Pivot table report widget
@param r: the S3Request
@param method: the widget method
@param widget_id: the widget ID
@param visible: whether the widget is initially visible
@param attr: controller attributes
"""
output = {}
resource = self.resource
get_config = resource.get_config
# @todo: make configurable:
maxrows = 20
maxcols = 20
# Extract the relevant GET vars
report_vars = ("rows", "cols", "fact", "totals")
get_vars = dict((k, v) for k, v in r.get_vars.iteritems()
if k in report_vars)
# Fall back to report options defaults
report_options = get_config("report_options", {})
defaults = report_options.get("defaults", {})
        if not any(k in get_vars for k in ("rows", "cols", "fact")):
get_vars = defaults
get_vars["chart"] = r.get_vars.get("chart",
defaults.get("chart", None))
get_vars["table"] = r.get_vars.get("table",
defaults.get("table", None))
# Generate the pivot table
if get_vars:
rows = get_vars.get("rows", None)
cols = get_vars.get("cols", None)
layer = get_vars.get("fact", "id")
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
current.log.error(sys.exc_info()[1])
facts = None
if not facts or not any([rows, cols]):
pivottable = None
else:
prefix = resource.prefix_selector
get_vars["rows"] = prefix(rows) if rows else None
get_vars["cols"] = prefix(cols) if cols else None
get_vars["fact"] = ",".join("%s(%s)" % (fact.method, fact.selector) for fact in facts)
if visible:
pivottable = S3PivotTable(resource, rows, cols, facts)
else:
pivottable = None
else:
pivottable = None
# Render as JSON-serializable dict
if pivottable is not None:
pivotdata = pivottable.json(maxrows=maxrows, maxcols=maxcols)
else:
pivotdata = None
if r.representation in ("html", "iframe"):
# Generate the report form
ajax_vars = Storage(r.get_vars)
ajax_vars.update(get_vars)
filter_form = attr.get("filter_form", None)
filter_tab = attr.get("filter_tab", None)
filter_url = r.url(method="",
representation="",
vars=ajax_vars.fromkeys((k for k in ajax_vars
if k not in report_vars)),
)
ajaxurl = attr.get("ajaxurl", r.url(method="report",
representation="json",
vars=ajax_vars))
output = S3ReportForm(resource).html(pivotdata,
get_vars = get_vars,
filter_widgets = None,
ajaxurl = ajaxurl,
filter_url = filter_url,
filter_form = filter_form,
filter_tab = filter_tab,
widget_id = widget_id)
# Detect and store theme-specific inner layout
view = self._view(r, "pivottable.html")
# Render inner layout (outer page layout is set by S3Summary)
output["title"] = None
output = XML(current.response.render(view, output))
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# =============================================================================
class S3ReportForm(object):
""" Helper class to render a report form """
def __init__(self, resource):
self.resource = resource
# -------------------------------------------------------------------------
def html(self,
pivotdata,
filter_widgets=None,
get_vars=None,
ajaxurl=None,
filter_url=None,
filter_form=None,
filter_tab=None,
widget_id=None):
"""
Render the form for the report
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element base ID for the widgets
"""
T = current.T
appname = current.request.application
# Report options
report_options = self.report_options(get_vars = get_vars,
widget_id = widget_id)
# Pivot data
hidden = {"pivotdata": json.dumps(pivotdata, separators=SEPARATORS)}
empty = T("No report specified.")
hide = T("Hide Table")
show = T("Show Table")
throbber = "/%s/static/img/indicator.gif" % appname
# Filter options
if filter_widgets is not None:
filter_options = self._fieldset(T("Filter Options"),
filter_widgets,
_id="%s-filters" % widget_id,
_class="filter-form")
else:
filter_options = ""
# Report form submit element
resource = self.resource
submit = resource.get_config("report_submit", True)
if submit:
_class = "pt-submit"
if submit is True:
label = T("Update Report")
elif isinstance(submit, (list, tuple)):
label = submit[0]
_class = "%s %s" % (submit[1], _class)
else:
label = submit
submit = TAG[""](
INPUT(_type="button",
_value=label,
_class=_class))
else:
submit = ""
# Form
form = FORM(filter_options,
report_options,
submit,
hidden = hidden,
_class = "pt-form",
_id = "%s-pt-form" % widget_id,
)
# View variables
output = {"form": form,
"throbber": throbber,
"hide": hide,
"show": show,
"empty": empty,
"widget_id": widget_id,
}
# Script options
settings = current.deployment_settings
opts = {
#"renderFilter": True,
#"collapseFilter": False,
#"renderOptions": True,
"collapseOptions": settings.get_ui_hide_report_options(),
"renderTable": True,
"collapseTable": False,
"showTotals": self.show_totals,
"ajaxURL": ajaxurl,
"renderChart": True,
"collapseChart": True,
"defaultChart": None,
"exploreChart": True,
"filterURL": filter_url,
"filterTab": filter_tab,
"filterForm": filter_form,
"autoSubmit": settings.get_ui_report_auto_submit(),
"thousandSeparator": settings.get_L10n_thousands_separator(),
"thousandGrouping": settings.get_L10n_thousands_grouping(),
"textAll": str(T("All")),
}
chart_opt = get_vars["chart"]
if chart_opt is not None:
if str(chart_opt).lower() in ("0", "off", "false"):
opts["renderChart"] = False
elif ":" in chart_opt:
opts["collapseChart"] = False
ctype, caxis = chart_opt.split(":", 1)
opts["defaultChart"] = {"type": ctype, "axis": caxis}
table_opt = get_vars["table"]
if table_opt is not None:
table_opt = str(table_opt).lower()
if table_opt in ("0", "off", "false"):
opts["renderTable"] = False
elif table_opt == "collapse":
opts["collapseTable"] = True
# Scripts
s3 = current.response.s3
scripts = s3.scripts
if s3.debug:
# @todo: support CDN
script = "/%s/static/scripts/d3/d3.js" % appname
if script not in scripts:
scripts.append(script)
script = "/%s/static/scripts/d3/nv.d3.js" % appname
if script not in scripts:
scripts.append(script)
script = "/%s/static/scripts/S3/s3.ui.pivottable.js" % appname
if script not in scripts:
scripts.append(script)
else:
script = "/%s/static/scripts/S3/s3.pivotTables.min.js" % appname
if script not in scripts:
scripts.append(script)
script = '''$('#%(widget_id)s').pivottable(%(opts)s)''' % \
dict(widget_id = widget_id,
opts = json.dumps(opts,
separators=SEPARATORS),
)
s3.jquery_ready.append(script)
return output
# -------------------------------------------------------------------------
def report_options(self, get_vars=None, widget_id="pivottable"):
"""
Render the widgets for the report options form
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element base ID for the widgets
"""
T = current.T
SHOW_TOTALS = T("Show totals")
FACT = T("Report of")
ROWS = T("Grouped by")
COLS = T("and")
resource = self.resource
get_config = resource.get_config
options = get_config("report_options")
# Specific formstyle?
settings = current.deployment_settings
formstyle = settings.get_ui_report_formstyle()
# Fall back to inline-variant of current formstyle
if formstyle is None:
formstyle = settings.get_ui_inline_formstyle()
# Helper for labels
label = lambda s, **attr: LABEL("%s:" % s, **attr)
formfields = []
# Layer selector
layer_id = "%s-fact" % widget_id
layer_widget = self.layer_options(options=options,
get_vars=get_vars,
widget_id=layer_id)
formfields.append((layer_id + "-row",
label(FACT, _for=layer_id),
layer_widget,
"",
))
# Rows/Columns selectors
axis_options = self.axis_options
rows_id = "%s-rows" % widget_id
cols_id = "%s-cols" % widget_id
rows_options = axis_options("rows",
options=options,
get_vars=get_vars,
widget_id=rows_id)
cols_options = axis_options("cols",
options=options,
get_vars=get_vars,
widget_id=cols_id)
axis_widget = DIV(rows_options,
label(COLS, _for=cols_id),
cols_options,
_class="pt-axis-options",
)
formfields.append(("%s-axis-row" % widget_id,
label(ROWS, _for=rows_id),
axis_widget,
"",
))
# Show Totals switch
show_totals = True
if get_vars and "totals" in get_vars and \
str(get_vars["totals"]).lower() in ("0", "false", "off"):
show_totals = False
self.show_totals = show_totals
show_totals_id = "%s-totals" % widget_id
totals_widget = INPUT(_type="checkbox",
_id=show_totals_id,
_name="totals",
_class="pt-totals",
value=show_totals
)
formfields.append(("%s-show-totals-row" % widget_id,
label(SHOW_TOTALS, _for=show_totals_id),
totals_widget,
"",
))
try:
widgets = formstyle(FIELDSET(), formfields)
except:
# Old style (should be avoided)
widgets = TAG[""]([formstyle(*formfield) for formfield in formfields])
# Render fieldset
fieldset = self._fieldset(T("Report Options"),
widgets,
_id="%s-options" % widget_id)
return fieldset
# -------------------------------------------------------------------------
def axis_options(self, axis,
options=None,
get_vars=None,
widget_id=None):
"""
Construct an OptionsWidget for rows or cols axis
@param axis: "rows" or "cols"
@param options: the report options
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element ID for the widget
"""
resource = self.resource
prefix = resource.prefix_selector
# Get all selectors
if options and axis in options:
fields = options[axis]
else:
fields = resource.get_config("list_fields")
if not fields:
fields = [f.name for f in resource.readable_fields()]
# Resolve the selectors
pkey = str(resource._id)
resolve_selector = resource.resolve_selector
rfields = []
append = rfields.append
for f in fields:
if isinstance(f, (tuple, list)):
label, selector = f[:2]
else:
label, selector = None, f
rfield = resolve_selector(selector)
if rfield.colname == pkey:
continue
if label:
rfield.label = label
append(rfield)
# Get current value
if get_vars and axis in get_vars:
value = get_vars[axis]
else:
value = ""
if value:
value = prefix(value)
# Dummy field
opts = [(prefix(rfield.selector), rfield.label) for rfield in rfields]
dummy_field = Storage(name=axis, requires=IS_IN_SET(opts))
# Construct widget
return OptionsWidget.widget(dummy_field,
value,
_id=widget_id,
_name=axis,
_class="pt-%s" % axis)
# -------------------------------------------------------------------------
def layer_options(self,
options=None,
get_vars=None,
widget_id=None):
"""
Construct an OptionsWidget for the fact layer
@param options: the report options
            @param get_vars: the GET vars of the request (as dict)
@param widget_id: the HTML element ID for the widget
"""
resource = self.resource
all_methods = S3PivotTableFact.METHODS
# Get all layers
layers = None
methods = None
if options:
if "methods" in options:
methods = options["methods"]
if "fact" in options:
layers = options["fact"]
if not layers:
layers = resource.get_config("list_fields")
if not layers:
layers = [f.name for f in resource.readable_fields()]
if not methods:
methods = all_methods
# Resolve layer options
T = current.T
RECORDS = T("Records")
mname = S3PivotTableFact._get_method_label
def layer_label(rfield, method):
""" Helper to construct a layer label """
mlabel = mname(method)
flabel = rfield.label if rfield.label != "Id" else RECORDS
# @ToDo: Exclude this string from admin/translate exports
return T("%s (%s)") % (flabel, mlabel)
prefix = resource.prefix_selector
layer_opts = []
for option in layers:
if isinstance(option, tuple):
title, layer = option
else:
title, layer = None, option
try:
facts = S3PivotTableFact.parse(layer)
except SyntaxError:
continue
if len(facts) > 1:
# Multi-fact layer
labels = []
expressions = []
for fact in facts:
if not title:
rfield = resource.resolve_selector(fact.selector)
labels.append(fact.get_label(rfield, layers))
expressions.append("%s(%s)" % (fact.method, fact.selector))
if not title:
title = " / ".join(labels)
layer_opts.append((",".join(expressions), title))
continue
else:
fact = facts[0]
label = fact.label or title
if fact.default_method:
s, m = fact.selector, None
else:
s, m = fact.selector, fact.method
# Resolve the selector
selector = prefix(s)
rfield = resource.resolve_selector(selector)
if not rfield.field and not rfield.virtual:
continue
if m is None and label:
rfield.label = label
if m is None:
# Only field given -> auto-detect aggregation methods
is_amount = None
ftype = rfield.ftype
if ftype == "integer":
is_amount = True
requires = rfield.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
for r in requires:
if isinstance(r, IS_IN_SET) or \
isinstance(r, IS_EMPTY_OR) and \
isinstance(r.other, IS_IN_SET):
is_amount = False
elif ftype == "double":
is_amount = True
elif ftype[:9] == "reference" or \
ftype[:5] == "list:" or \
ftype in ("id", "string", "text"):
is_amount = False
if ftype in ("datetime", "date", "time"):
mopts = ["min", "max", "list"]
elif is_amount is None:
mopts = ["sum", "min", "max", "avg", "count", "list"]
elif is_amount:
mopts = ["sum", "min", "max", "avg"]
else:
mopts = ["count", "list"]
for method in mopts:
if method in methods:
label = layer_label(rfield, method)
layer_opts.append(("%s(%s)" % (method, selector), label))
else:
# Explicit method specified
if label is None:
label = layer_label(rfield, m)
layer_opts.append(("%s(%s)" % (m, selector), label))
# Get current value
if get_vars and "fact" in get_vars:
layer = get_vars["fact"]
else:
layer = ""
if layer:
match = layer_pattern.match(layer)
if match is None:
layer = ""
else:
selector, method = match.group(2), match.group(1)
selector = prefix(selector)
layer = "%s(%s)" % (method, selector)
if len(layer_opts) == 1:
# Field is read-only if there is only 1 option
default = layer_opts[0]
widget = TAG[""](default[1],
INPUT(_type="hidden",
_id=widget_id,
_name=widget_id,
_value=default[0],
_class="pt-fact-single-option"))
else:
# Render Selector
dummy_field = Storage(name="fact",
requires=IS_IN_SET(layer_opts))
widget = OptionsWidget.widget(dummy_field,
layer,
_id=widget_id,
_name="fact",
_class="pt-fact")
return widget
# -------------------------------------------------------------------------
@staticmethod
def _fieldset(title, widgets, **attr):
"""
Helper method to wrap widgets in a FIELDSET container with
show/hide option
@param title: the title for the field set
@param widgets: the widgets
@param attr: HTML attributes for the field set
"""
T = current.T
SHOW = T("Show")
HIDE = T("Hide")
return FIELDSET(LEGEND(title,
BUTTON(SHOW,
_type="button",
_class="toggle-text",
),
BUTTON(HIDE,
_type="button",
_class="toggle-text",
)
),
widgets,
**attr)
# =============================================================================
class S3PivotTableFact(object):
""" Class representing a fact layer """
#: Supported aggregation methods
METHODS = {"list": "List",
"count": "Count",
"min": "Minimum",
"max": "Maximum",
"sum": "Total",
"avg": "Average",
#"std": "Standard Deviation"
}
def __init__(self, method, selector, label=None, default_method=True):
"""
Constructor
@param method: the aggregation method
@param selector: the field selector
@param label: the fact label
@param default_method: using default method (used by parser)
"""
if method is None:
method = "count"
default_method = True
if method not in self.METHODS:
raise SyntaxError("Unsupported aggregation function: %s" % method)
self.method = method
self.selector = selector
self._layer = None
self.label = label
self.resource = None
self.rfield = None
self.column = selector
self.default_method = default_method
# -------------------------------------------------------------------------
@property
def layer(self):
layer = self._layer
if not layer:
layer = self._layer = (self.selector, self.method)
return layer
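    # Note (added for clarity): the layer key used throughout the pivot table
    # data structures is simply the (selector, method) tuple, e.g.
    #     S3PivotTableFact("count", "person_id").layer == ("person_id", "count")
    # where "person_id" is a hypothetical selector.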
# -------------------------------------------------------------------------
def compute(self, values, method=DEFAULT, totals=False):
"""
Aggregate a list of values.
            @param values: iterable of values
            @param method: the aggregation method (defaults to self.method)
            @param totals: whether this call computes totals (in which case
                           the "list" method falls back to "count")
"""
if values is None:
return None
if method is DEFAULT:
method = self.method
if totals and method == "list":
method = "count"
if method is None or method == "list":
return values if values else None
values = [v for v in values if v != None]
if method == "count":
return len(values)
elif method == "min":
try:
return min(values)
except (TypeError, ValueError):
return None
elif method == "max":
try:
return max(values)
except (TypeError, ValueError):
return None
elif method == "sum":
try:
return sum(values)
except (TypeError, ValueError):
return None
elif method == "avg":
try:
if len(values):
return sum(values) / float(len(values))
else:
return 0.0
except (TypeError, ValueError):
return None
#elif method == "std":
#import numpy
#if not values:
#return 0.0
#try:
#return numpy.std(values)
#except (TypeError, ValueError):
#return None
return None
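    # Illustrative sketch (added for clarity, not executed), using a
    # hypothetical "hours" selector:
    #
    #     fact = S3PivotTableFact("avg", "hours")
    #     fact.compute([2, 4, None, 6])           # None is skipped => 4.0
    #     fact.compute([2, 4, 6], method="sum")   # => 12
    #     S3PivotTableFact("list", "hours").compute([2, 4], totals=True)
    #     # => 2 ("list" falls back to "count" when computing totals)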
# -------------------------------------------------------------------------
def aggregate_totals(self, totals):
"""
Aggregate totals for this fact (hyper-aggregation)
@param totals: iterable of totals
"""
if self.method in ("list", "count"):
total = self.compute(totals, method="sum")
else:
total = self.compute(totals)
return total
# -------------------------------------------------------------------------
@classmethod
def parse(cls, fact):
"""
Parse fact expression
@param fact: the fact expression
"""
if isinstance(fact, tuple):
label, fact = fact
else:
label = None
if isinstance(fact, list):
facts = []
for f in fact:
facts.extend(cls.parse(f))
if not facts:
raise SyntaxError("Invalid fact expression: %s" % fact)
return facts
# Parse the fact
other = None
default_method = False
if not fact:
method, parameters = "count", "id"
else:
match = FACT.match(fact)
if match:
method, parameters, other = match.groups()
if other:
other = cls.parse((label, other) if label else other)
elif SELECTOR.match(fact):
method, parameters, other = "count", fact, None
default_method = True
else:
raise SyntaxError("Invalid fact expression: %s" % fact)
# Validate method
if method not in cls.METHODS:
raise SyntaxError("Unsupported aggregation method: %s" % method)
# Extract parameters
parameters = parameters.split(",")
selector = parameters[0]
facts = [cls(method,
selector,
label=label,
default_method=default_method,
),
]
if other:
facts.extend(other)
return facts
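    # Illustrative sketch (added for clarity, not executed) of the parser
    # output, with hypothetical selectors:
    #
    #     S3PivotTableFact.parse("count(person_id)")
    #     # => [fact] with method "count", selector "person_id"
    #     S3PivotTableFact.parse(("Hours", "sum(hours),count(id)"))
    #     # => two facts, sum(hours) and count(id), both labelled "Hours"
    #     S3PivotTableFact.parse("training.hours")
    #     # => [fact] with the default method "count" and default_method=True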
# -------------------------------------------------------------------------
@classmethod
def _get_method_label(cls, code):
"""
Get a label for a method
@param code: the method code
@return: the label (lazyT), or None for unsupported methods
"""
methods = cls.METHODS
if code is None:
code = "list"
if code in methods:
return current.T(methods[code])
else:
return None
# -------------------------------------------------------------------------
@staticmethod
def _get_field_label(rfield, fact_options=None):
"""
Get the label for a field
@param rfield: the S3ResourceField
@param fact_options: the corresponding subset of the report
options ("fact", "rows" or "cols")
"""
label = None
if not rfield:
return
resource = rfield.resource
fields = list(fact_options) if fact_options else []
list_fields = resource.get_config("list_fields")
if list_fields:
fields.extend(list_fields)
prefix = resource.prefix_selector
# Search through the field labels in report options
selector = prefix(rfield.selector)
for f in fields:
if type(f) is tuple and \
isinstance(f[1], basestring) and \
prefix(f[1]) == selector:
label = f[0]
break
if not label and rfield:
if rfield.ftype == "id":
label = current.T("Records")
else:
label = rfield.label
return label if label else ""
# -------------------------------------------------------------------------
def get_label(self, rfield, fact_options=None):
"""
Get a label for this fact
@param rfield: the S3ResourceField
@param fact_options: the "fact" list of the report options
"""
label = self.label
if label:
# Already set
return label
if fact_options:
# Lookup the label from the fact options
prefix = rfield.resource.prefix_selector
for fact_option in fact_options:
facts = self.parse(fact_option)
for fact in facts:
if fact.method == self.method and \
prefix(fact.selector) == prefix(self.selector):
label = fact.label
break
if label:
break
if not label:
# Construct a label from the field label and the method name
field_label = self._get_field_label(rfield, fact_options)
method_label = self._get_method_label(self.method)
label = "%s (%s)" % (field_label, method_label)
self.label = label
return label
# =============================================================================
class S3PivotTable(object):
""" Class representing a pivot table of a resource """
def __init__(self, resource, rows, cols, facts, strict=True):
"""
Constructor - extracts all unique records, generates a
pivot table from them with the given dimensions and
computes the aggregated values for each cell.
@param resource: the S3Resource
@param rows: field selector for the rows dimension
@param cols: field selector for the columns dimension
@param facts: list of S3PivotTableFacts to compute
@param strict: filter out dimension values which don't match
the resource filter
"""
# Initialize ----------------------------------------------------------
#
if not rows and not cols:
raise SyntaxError("No rows or columns specified for pivot table")
self.resource = resource
self.lfields = None
self.dfields = None
self.rfields = None
self.rows = rows
self.cols = cols
self.facts = facts
# API variables -------------------------------------------------------
#
self.records = None
""" All records in the pivot table as a Storage like:
{
<record_id>: <Row>
}
"""
self.empty = False
""" Empty-flag (True if no records could be found) """
self.numrows = None
""" The number of rows in the pivot table """
self.numcols = None
""" The number of columns in the pivot table """
self.cell = None
""" Array of pivot table cells in [rows[columns]]-order, each
cell is a Storage like:
{
records: <list_of_record_ids>,
(<fact>, <method>): <aggregated_value>, ...per layer
}
"""
self.row = None
""" List of row headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.col = None
""" List of column headers, each header is a Storage like:
{
value: <dimension value>,
records: <list_of_record_ids>,
(<fact>, <method>): <total value>, ...per layer
}
"""
self.totals = Storage()
""" The grand total values for each layer, as a Storage like:
{
                 (<fact>, <method>): <total value>, ...per layer
}
"""
self.values = {}
# Get the fields ------------------------------------------------------
#
tablename = resource.tablename
# The "report_fields" table setting defines which additional
# fields shall be included in the report base layer. This is
# useful to provide easy access to the record data behind a
# pivot table cell.
fields = current.s3db.get_config(tablename, "report_fields", [])
self._get_fields(fields=fields)
rows = self.rows
cols = self.cols
# Retrieve the records ------------------------------------------------
#
data = resource.select(self.rfields.keys(), limit=None)
drows = data["rows"]
if drows:
key = str(resource.table._id)
records = Storage([(i[key], i) for i in drows])
# Generate the data frame -----------------------------------------
#
gfields = self.gfields
pkey_colname = gfields[self.pkey]
rows_colname = gfields[rows]
cols_colname = gfields[cols]
if strict:
rfields = self.rfields
axes = (rfield
for rfield in (rfields[rows], rfields[cols])
if rfield != None)
axisfilter = resource.axisfilter(axes)
else:
axisfilter = None
dataframe = []
extend = dataframe.extend
#insert = dataframe.append
expand = self._expand
for _id in records:
row = records[_id]
item = {key: _id}
if rows_colname:
item[rows_colname] = row[rows_colname]
if cols_colname:
item[cols_colname] = row[cols_colname]
extend(expand(item, axisfilter=axisfilter))
self.records = records
# Group the records -----------------------------------------------
#
matrix, rnames, cnames = self._pivot(dataframe,
pkey_colname,
rows_colname,
cols_colname)
# Initialize columns and rows -------------------------------------
#
if cols:
self.col = [Storage({"value": v}) for v in cnames]
self.numcols = len(self.col)
else:
self.col = [Storage({"value": None})]
self.numcols = 1
if rows:
self.row = [Storage({"value": v}) for v in rnames]
self.numrows = len(self.row)
else:
self.row = [Storage({"value": None})]
self.numrows = 1
# Add the layers --------------------------------------------------
#
add_layer = self._add_layer
for fact in self.facts:
add_layer(matrix, fact)
else:
# No items to report on -------------------------------------------
#
self.empty = True
# -------------------------------------------------------------------------
# API methods
# -------------------------------------------------------------------------
def __len__(self):
""" Total number of records in the report """
items = self.records
if items is None:
return 0
else:
return len(self.records)
# -------------------------------------------------------------------------
def geojson(self,
fact=None,
level="L0"):
"""
Render the pivot table data as a dict ready to be exported as
GeoJSON for display on a Map.
Called by S3Report.geojson()
            @param fact: the S3PivotTableFact to display, layer e.g. ("id", "count")
                   - we only support methods "count" & "sum"
                   - @ToDo: Support density: 'per sqkm' and 'per population'
@param level: the aggregation level (defaults to Country)
"""
if fact is None:
fact = self.facts[0]
layer = fact.layer
# The rows dimension
# @ToDo: We can add sanity-checking using resource.parse_bbox_query() if-desired
context = self.resource.get_config("context")
if context and "location" in context:
rows_dim = "(location)$%s" % level
else:
# Fallback to location_id
rows_dim = "location_id$%s" % level
# Fallback we can add if-required
#rows_dim = "site_id$location_id$%s" % level
# The data
attributes = {}
geojsons = {}
if self.empty:
location_ids = []
else:
numeric = lambda x: isinstance(x, (int, long, float))
row_repr = lambda v: s3_unicode(v)
ids = {}
irows = self.row
rows = []
# Group and sort the rows
is_numeric = None
for i in xrange(self.numrows):
irow = irows[i]
total = irow[layer]
if is_numeric is None:
is_numeric = numeric(total)
if not is_numeric:
total = len(irow.records)
header = Storage(value = irow.value,
text = irow.text if "text" in irow
else row_repr(irow.value))
rows.append((i, total, header))
self._sortdim(rows, self.rfields[rows_dim])
# Aggregate the grouped values
db = current.db
gtable = current.s3db.gis_location
query = (gtable.level == level) & (gtable.deleted == False)
for rindex, rtotal, rtitle in rows:
rval = rtitle.value
if rval:
# @ToDo: Handle duplicate names ;)
if rval in ids:
_id = ids[rval]
else:
q = query & (gtable.name == rval)
row = db(q).select(gtable.id,
gtable.parent,
limitby=(0, 1)
).first()
try:
_id = row.id
# Cache
ids[rval] = _id
except:
continue
attribute = dict(name=s3_unicode(rval),
value=rtotal)
attributes[_id] = attribute
location_ids = [ids[r] for r in ids]
query = (gtable.id.belongs(location_ids))
geojsons = current.gis.get_locations(gtable,
query,
join=False,
geojson=True)
# Prepare for export via xml.gis_encode() and geojson/export.xsl
location_data = {}
geojsons = dict(gis_location = geojsons)
location_data["geojsons"] = geojsons
attributes = dict(gis_location = attributes)
location_data["attributes"] = attributes
return location_ids, location_data
# -------------------------------------------------------------------------
def json(self, maxrows=None, maxcols=None):
"""
Render the pivot table data as JSON-serializable dict
            @param maxrows: maximum number of rows (None for all)
            @param maxcols: maximum number of columns (None for all)
            Note: if the primary fact uses the "min" method, the least
                  rather than the top rows/columns are retained
{
labels: {
layer:
rows:
cols:
total:
},
method: <aggregation method>,
cells: [rows[cols]],
rows: [rows[index, value, label, total]],
cols: [cols[index, value, label, total]],
total: <grand total>,
filter: [rows selector, cols selector]
}
"""
rfields = self.rfields
resource = self.resource
T = current.T
OTHER = "__other__"
rows_dim = self.rows
cols_dim = self.cols
# The output data
orows = []
rappend = orows.append
ocols = []
cappend = ocols.append
ocells = []
lookups = {}
facts = self.facts
if not self.empty:
# Representation methods for row and column keys
row_repr = self._represent_method(rows_dim)
col_repr = self._represent_method(cols_dim)
# Label for the "Others" row/columns
others = s3_unicode(T("Others"))
# Get the layers (fact.selector, fact.method),
# => used as keys to access the pivot data
layers = [fact.layer for fact in facts]
least = facts[0].method == "min"
# Group and sort the rows (grouping = determine "others")
irows = self.row
rows = []
rtail = (None, None)
for i in xrange(self.numrows):
irow = irows[i]
totals = [irow[layer] for layer in layers]
sort_total = totals[0]
header = {"value": irow.value,
"text": irow.text if "text" in irow
else row_repr(irow.value),
}
rows.append((i, sort_total, totals, header))
if maxrows is not None:
rtail = self._tail(rows, maxrows, least=least, facts=facts)
self._sortdim(rows, rfields[rows_dim])
if rtail[1] is not None:
values = [irows[i]["value"] for i in rtail[0]]
rows.append((OTHER,
rtail[1],
rtail[2],
{"value": values, "text":others},
))
# Group and sort the cols (grouping = determine "others")
icols = self.col
cols = []
ctail = (None, None)
for i in xrange(self.numcols):
icol = icols[i]
totals = [icol[layer] for layer in layers]
sort_total = totals[0]
header = {"value": icol.value,
"text": icol.text if "text" in icol
else col_repr(icol.value),
}
cols.append((i, sort_total, totals, header))
if maxcols is not None:
ctail = self._tail(cols, maxcols, least=least, facts=facts)
self._sortdim(cols, rfields[cols_dim])
if ctail[1] is not None:
values = [icols[i]["value"] for i in ctail[0]]
cols.append((OTHER,
ctail[1],
ctail[2],
{"value": values, "text": others},
))
rothers = rtail[0] or set()
cothers = ctail[0] or set()
# Group and sort the cells accordingly
# @todo: break up into subfunctions
icell = self.cell
cells = {}
for i in xrange(self.numrows):
irow = icell[i]
ridx = (i, OTHER) if rothers and i in rothers else (i,)
for j in xrange(self.numcols):
cell = irow[j]
cidx = (j, OTHER) if cothers and j in cothers else (j,)
cell_records = cell["records"]
for layer_index, layer in enumerate(layers):
# Get cell items for the layer
# => items can be a single numeric value, or a list
items = cell[layer]
# Get cell value for the layer
if isinstance(items, list):
value = len(items)
else:
value = items
for ri in ridx:
if ri not in cells:
orow = cells[ri] = {}
else:
orow = cells[ri]
for ci in cidx:
if ci not in orow:
# Create a new output cell
ocell = orow[ci] = {"values": [],
"items": [],
"records": [],
}
else:
ocell = orow[ci]
if layer_index == 0:
# Extend the list of records
ocell["records"].extend(cell_records)
value_array = ocell["values"]
items_array = ocell["items"]
if len(value_array) <= layer_index:
value_array.append(value)
items_array.append(items)
else:
ovalue = value_array[layer_index]
oitems = items_array[layer_index]
if isinstance(ovalue, list):
ovalue.append(value)
oitems.append(items)
else:
value_array[layer_index] = [ovalue, value]
items_array[layer_index] = [oitems, items]
# Get field representation methods
represents = self._represents(layers)
# Aggregate the grouped values
value_maps = {}
add_columns = True # do this only once
for rindex, rtotal, rtotals, rtitle in rows:
orow = []
# Row value for filter construction
rval = rtitle["value"]
if rindex == OTHER and isinstance(rval, list):
rval = ",".join(s3_unicode(v) for v in rval)
elif rval is not None:
rval = s3_unicode(rval)
# The output row summary
rappend((rindex,
rindex in rothers,
rtotals,
rval,
rtitle["text"],
))
for cindex, ctotal, ctotals, ctitle in cols:
# Get the corresponding cell
cell = cells[rindex][cindex]
value_array = cell["values"]
items_array = cell["items"]
# Initialize the output cell
# @todo: deflate JSON keys
ocell = {"items": [], "values": [], "keys": []}
for layer_index, fact in enumerate(facts):
selector, method = fact.layer
if selector not in lookups:
lookup = lookups[selector] = {}
else:
lookup = lookups[selector]
if selector not in value_maps:
value_map = value_maps[selector] = {}
else:
value_map = value_maps[selector]
# Add the cell value
value = value_array[layer_index]
if type(value) is list:
# "Others" cell with multiple totals
value = fact.aggregate_totals(value)
ocell["values"].append(value)
has_fk, _repr = represents[selector]
rfield = self.rfields[selector]
items = items_array[layer_index]
okeys = None
# Build a lookup table for field values if counting
if method in ("count", "list"):
keys = []
for record_id in cell["records"]:
record = self.records[record_id]
try:
fvalue = record[rfield.colname]
except AttributeError:
continue
if fvalue is None:
continue
if type(fvalue) is not list:
fvalue = [fvalue]
for v in fvalue:
if v is None:
continue
if has_fk:
if v not in keys:
keys.append(v)
if v not in lookup:
lookup[v] = _repr(v)
else:
if v not in value_map:
next_id = len(value_map)
value_map[v] = next_id
keys.append(next_id)
lookup[next_id] = _repr(v)
else:
prev_id = value_map[v]
if prev_id not in keys:
keys.append(prev_id)
# Sort the keys by their representations
keys.sort(key=lambda i: lookup[i])
if method == "list":
items = [lookup[key] for key in keys if key in lookup]
else:
okeys = keys
ocell["items"].append(items)
ocell["keys"].append(okeys)
orow.append(ocell)
if add_columns:
# Column value for filter construction
cval = ctitle["value"]
if cindex == OTHER and isinstance(cval, list):
cval = ",".join(s3_unicode(v) for v in cval)
elif cval is not None:
cval = s3_unicode(cval)
# The output column summary
cappend((cindex,
cindex in cothers,
ctotals,
cval,
ctitle["text"],
))
add_columns = False
ocells.append(orow)
# Lookup labels
report_options = resource.get_config("report_options", {})
if report_options:
fact_options = report_options.get("fact")
else:
fact_options = ()
# @todo: lookup report title before constructing from fact labels
fact_data = []
fact_labels = []
for fact in facts:
rfield = rfields[fact.selector]
fact_label = str(fact.get_label(rfield, fact_options))
fact_data.append((fact.selector, fact.method, fact_label))
fact_labels.append(fact_label)
get_label = S3PivotTableFact._get_field_label
if rows_dim:
rows_label = str(get_label(rfields[rows_dim], report_options.get("rows")))
else:
rows_label = ""
if cols_dim:
cols_label = str(get_label(rfields[cols_dim], report_options.get("cols")))
else:
cols_label = ""
labels = {"total": str(T("Total")),
"none": str(current.messages["NONE"]),
"per": str(T("per")),
"breakdown": str(T("Breakdown")),
# @todo: use report title:
"layer": " / ".join(fact_labels),
"rows": rows_label,
"cols": cols_label,
}
# Compile the output dict
output = {"rows": orows,
"cols": ocols,
"facts": fact_data,
"cells": ocells,
"lookups": lookups,
"total": self._totals(self.totals, [fact]),
"nodata": None if not self.empty else str(T("No data available")),
"labels": labels,
}
# Add axis selectors for filter-URL construction
prefix = resource.prefix_selector
output["filter"] = (prefix(rows_dim) if rows_dim else None,
prefix(cols_dim) if cols_dim else None,
)
return output
# -------------------------------------------------------------------------
def _represents(self, layers):
"""
Get the representation functions per fact field
@param layers: the list of layers, tuples (selector, method)
"""
rfields = self.rfields
represents = {}
values = self.values
for selector, method in layers:
if selector in represents:
continue
# Get the field
rfield = rfields[selector]
f = rfield.field
# Utilize bulk-representation for field values
if method in ("list", "count") and \
f is not None and \
hasattr(f.represent, "bulk"):
all_values = values[(selector, method)]
if all_values:
f.represent.bulk(list(s3_flatlist(all_values)))
# Get the representation method
has_fk = f is not None and s3_has_foreign_key(f)
if has_fk:
represent = lambda v, f=f: s3_unicode(f.represent(v))
else:
m = self._represent_method(selector)
represent = lambda v, m=m: s3_unicode(m(v))
represents[selector] = (has_fk, represent)
return represents
# -------------------------------------------------------------------------
@staticmethod
def _sortdim(items, rfield, index=3):
"""
Sort a dimension (sorts items in-place)
@param items: the items as list of tuples
(index, sort-total, totals, header)
@param rfield: the dimension (S3ResourceField)
@param index: alternative index of the value/text dict
within each item
"""
if not rfield:
return
ftype = rfield.ftype
sortby = "value"
if ftype == "integer":
requires = rfield.requires
if isinstance(requires, (tuple, list)):
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if isinstance(requires, IS_IN_SET):
sortby = "text"
elif ftype[:9] == "reference":
sortby = "text"
items.sort(key=lambda item: item[index][sortby])
return
# -------------------------------------------------------------------------
@classmethod
def _tail(cls, items, length=10, least=False, facts=None):
"""
Find the top/least <length> items (by total)
@param items: the items as list of tuples
(index, sort-total, totals, header)
@param length: the maximum number of items
@param least: find least rather than top
@param facts: the facts to aggregate the tail totals
"""
try:
if len(items) > length:
l = list(items)
l.sort(lambda x, y: int(y[1]-x[1]))
if least:
l.reverse()
keys = [item[0] for item in l[length-1:]]
totals = []
for i, fact in enumerate(facts):
subtotals = [item[2][i] for item in l[length-1:]]
totals.append(fact.aggregate_totals(subtotals))
return (keys, totals[0], totals)
except (TypeError, ValueError):
pass
return (None, None)
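    # Illustrative sketch (added for clarity, not executed): with length=2 the
    # top row is kept and the remaining rows are collapsed into an "Others"
    # tail, so that at most 2 rows are rendered:
    #
    #     items = [(0, 5, [5], {}), (1, 9, [9], {}), (2, 1, [1], {}), (3, 3, [3], {})]
    #     facts = [S3PivotTableFact("count", "id")]
    #     S3PivotTable._tail(items, length=2, facts=facts)
    #     # => ([0, 3, 2], 9, [9])  - indices to collapse and their aggregated total(s)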
# -------------------------------------------------------------------------
@staticmethod
def _totals(values, facts, append=None):
"""
Get the totals of a row/column/report
@param values: the values dictionary
@param facts: the facts
@param append: callback to collect the totals for JSON data
(currently only collects the first layer)
"""
totals = []
number_represent = IS_NUMBER.represent
for fact in facts:
value = values[fact.layer]
#if fact.method == "list":
#value = value and len(value) or 0
if not len(totals) and append is not None:
append(value)
totals.append(s3_unicode(number_represent(value)))
totals = " / ".join(totals)
return totals
# -------------------------------------------------------------------------
# Internal methods
# -------------------------------------------------------------------------
@staticmethod
def _pivot(items, pkey_colname, rows_colname, cols_colname):
"""
2-dimensional pivoting of a list of unique items
@param items: list of unique items as dicts
@param pkey_colname: column name of the primary key
@param rows_colname: column name of the row dimension
@param cols_colname: column name of the column dimension
@return: tuple of (cell matrix, row headers, column headers),
where cell matrix is a 2-dimensional array [rows[columns]]
and row headers and column headers each are lists (in the
same order as the cell matrix)
"""
rvalues = Storage()
cvalues = Storage()
cells = Storage()
# All unique rows values
rindex = 0
cindex = 0
for item in items:
rvalue = item[rows_colname] if rows_colname else None
cvalue = item[cols_colname] if cols_colname else None
if rvalue not in rvalues:
r = rvalues[rvalue] = rindex
rindex += 1
else:
r = rvalues[rvalue]
if cvalue not in cvalues:
c = cvalues[cvalue] = cindex
cindex += 1
else:
c = cvalues[cvalue]
if (r, c) not in cells:
cells[(r, c)] = [item[pkey_colname]]
else:
cells[(r, c)].append(item[pkey_colname])
matrix = []
for r in xrange(len(rvalues)):
row = []
for c in xrange(len(cvalues)):
row.append(cells[(r, c)])
matrix.append(row)
rnames = [None] * len(rvalues)
for k, v in rvalues.items():
rnames[v] = k
cnames = [None] * len(cvalues)
for k, v in cvalues.items():
cnames[v] = k
return matrix, rnames, cnames
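    # Illustrative sketch (added for clarity, not executed), with hypothetical
    # column names:
    #
    #     items = [{"pk": 1, "org": "A", "status": "open"},
    #              {"pk": 2, "org": "A", "status": "closed"},
    #              {"pk": 3, "org": "B", "status": "open"},
    #              {"pk": 4, "org": "B", "status": "closed"}]
    #     matrix, rnames, cnames = S3PivotTable._pivot(items, "pk", "org", "status")
    #     # rnames == ["A", "B"], cnames == ["open", "closed"]
    #     # matrix == [[[1], [2]], [[3], [4]]]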
# -------------------------------------------------------------------------
def _add_layer(self, matrix, fact):
"""
Compute an aggregation layer, updates:
- self.cell: the aggregated values per cell
- self.row: the totals per row
- self.col: the totals per column
- self.totals: the overall totals per layer
@param matrix: the cell matrix
            @param fact: the S3PivotTableFact to compute the layer for
"""
rows = self.row
cols = self.col
records = self.records
extract = self._extract
resource = self.resource
RECORDS = "records"
VALUES = "values"
table = resource.table
pkey = table._id.name
layer = fact.layer
numcols = len(self.col)
numrows = len(self.row)
# Initialize cells
if self.cell is None:
self.cell = [[Storage()
for i in xrange(numcols)]
for j in xrange(numrows)]
cells = self.cell
all_values = []
for r in xrange(numrows):
# Initialize row header
row = rows[r]
row[RECORDS] = []
row[VALUES] = []
row_records = row[RECORDS]
row_values = row[VALUES]
for c in xrange(numcols):
# Initialize column header
col = cols[c]
if RECORDS not in col:
col[RECORDS] = []
col_records = col[RECORDS]
if VALUES not in col:
col[VALUES] = []
col_values = col[VALUES]
# Get the records
cell = cells[r][c]
if RECORDS in cell and cell[RECORDS] is not None:
ids = cell[RECORDS]
else:
data = matrix[r][c]
if data:
remove = data.remove
while None in data:
remove(None)
ids = data
else:
ids = []
cell[RECORDS] = ids
row_records.extend(ids)
col_records.extend(ids)
# Get the values
if fact.selector is None:
fact.selector = pkey
values = ids
row_values = row_records
col_values = col_records
all_values = records.keys()
else:
values = []
append = values.append
for i in ids:
value = extract(records[i], fact.selector)
if value is None:
continue
append(value)
values = list(s3_flatlist(values))
if fact.method in ("list", "count"):
values = list(set(values))
row_values.extend(values)
col_values.extend(values)
all_values.extend(values)
# Aggregate values
value = fact.compute(values)
cell[layer] = value
# Compute row total
row[layer] = fact.compute(row_values, totals=True)
del row[VALUES]
# Compute column total
for c in xrange(numcols):
col = cols[c]
col[layer] = fact.compute(col[VALUES], totals=True)
del col[VALUES]
# Compute overall total
self.totals[layer] = fact.compute(all_values, totals=True)
self.values[layer] = all_values
return
# -------------------------------------------------------------------------
def _get_fields(self, fields=None):
"""
Determine the fields needed to generate the report
@param fields: fields to include in the report (all fields)
"""
resource = self.resource
table = resource.table
# Lambda to prefix all field selectors
alias = resource.alias
def prefix(s):
if isinstance(s, (tuple, list)):
return prefix(s[-1])
if "." not in s.split("$", 1)[0]:
return "%s.%s" % (alias, s)
elif s[:2] == "~.":
return "%s.%s" % (alias, s[2:])
else:
return s
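# Illustrative behaviour of prefix() (assuming alias == "org_organisation"):
#   prefix("name")        -> "org_organisation.name"   (unprefixed selector)
#   prefix("~.name")      -> "org_organisation.name"   (~ replaced by alias)
#   prefix("office.name") -> "office.name"             (already prefixed)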
self.pkey = pkey = prefix(table._id.name)
self.rows = rows = self.rows and prefix(self.rows) or None
self.cols = cols = self.cols and prefix(self.cols) or None
if not fields:
fields = ()
# dfields (data-fields): fields to generate the layers
dfields = [prefix(s) for s in fields]
if rows and rows not in dfields:
dfields.append(rows)
if cols and cols not in dfields:
dfields.append(cols)
if pkey not in dfields:
dfields.append(pkey)
for fact in self.facts:
selector = fact.selector = prefix(fact.selector)
if selector not in dfields:
dfields.append(selector)
self.dfields = dfields
# rfields (resource-fields): dfields resolved into a ResourceFields map
rfields = resource.resolve_selectors(dfields)[0]
rfields = Storage([(f.selector.replace("~", alias), f) for f in rfields])
self.rfields = rfields
# gfields (grouping-fields): fields to group the records by
self.gfields = {pkey: rfields[pkey].colname,
rows: rfields[rows].colname
if rows and rows in rfields else None,
cols: rfields[cols].colname
if cols and cols in rfields else None,
}
return
# -------------------------------------------------------------------------
def _represent_method(self, field):
"""
Get the representation method for a field in the report
@param field: the field selector
"""
rfields = self.rfields
default = lambda value: None
if field and field in rfields:
rfield = rfields[field]
if rfield.field:
def repr_method(value):
return s3_represent_value(rfield.field, value,
strip_markup=True)
elif rfield.virtual:
stripper = S3MarkupStripper()
def repr_method(val):
if val is None:
return "-"
text = s3_unicode(val)
if "<" in text:
stripper.feed(text)
return stripper.stripped() # = totally naked ;)
else:
return text
else:
repr_method = default
else:
repr_method = default
return repr_method
# -------------------------------------------------------------------------
def _extract(self, row, field):
"""
Extract a field value from a DAL row
@param row: the row
@param field: the fieldname (list_fields syntax)
"""
rfields = self.rfields
if field not in rfields:
raise KeyError("Invalid field name: %s" % field)
rfield = rfields[field]
try:
return rfield.extract(row)
except AttributeError:
return None
# -------------------------------------------------------------------------
def _expand(self, row, axisfilter=None):
"""
Expand a data frame row into a list of rows for list:type values
@param row: the row
@param axisfilter: dict of filtered field values by column names
"""
pairs = []
append = pairs.append
for colname in self.gfields.values():
if not colname:
continue
value = row[colname]
if type(value) is list:
if not value:
value = [None]
if axisfilter and colname in axisfilter:
p = [(colname, v) for v in value
if v in axisfilter[colname]]
if not p:
raise RuntimeError("record does not match query")
else:
append(p)
else:
append([(colname, v) for v in value])
else:
append([(colname, value)])
result = [dict(i) for i in product(*pairs)]
return result
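# Illustrative sketch (hypothetical column names): a grouping column holding a
# list:type value is fanned out into one row per member via product(), e.g.
#   row = {"org.id": 5, "org.sector": ["Health", "WASH"], "org.country": "TD"}
# expands (without an axisfilter) into
#   [{"org.id": 5, "org.sector": "Health", "org.country": "TD"},
#    {"org.id": 5, "org.sector": "WASH", "org.country": "TD"}]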
# END =========================================================================
|
py
|
1a59b308cbe8b4e13ba8d7fd11b34da780616b7a
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/OperationDefinition
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import operationdefinition
def impl_operationdefinition_1(inst):
assert inst.code == "data-requirements"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.description == (
"The data-requirements operation aggregates and returns the "
"parameters and data requirements for the measure and all its"
" dependencies as a single module definition"
)
assert inst.id == "Measure-data-requirements"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Data Requirements"
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 1
assert inst.parameter[0].name == "periodStart"
assert inst.parameter[0].type == "date"
assert inst.parameter[0].use == "in"
assert inst.parameter[1].documentation == (
"The end of the measurement period. The period will end at "
"the end of the period implied by the supplied timestamp. "
"E.g. a value of 2014 would set the period end to be "
"2014-12-31T23:59:59 inclusive"
)
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 1
assert inst.parameter[1].name == "periodEnd"
assert inst.parameter[1].type == "date"
assert inst.parameter[1].use == "in"
assert inst.parameter[2].documentation == (
"The result of the requirements gathering is a module-"
"definition Library that describes the aggregate parameters, "
"data requirements, and dependencies of the measure"
)
assert inst.parameter[2].max == "1"
assert inst.parameter[2].min == 1
assert inst.parameter[2].name == "return"
assert inst.parameter[2].type == "Library"
assert inst.parameter[2].use == "out"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "Measure"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is False
assert inst.url == (
"http://hl7.org/fhir/OperationDefinition/Measure-data-" "requirements"
)
def test_operationdefinition_1(base_settings):
"""No. 1 tests collection for OperationDefinition.
Test File: operation-measure-data-requirements.json
"""
filename = (
base_settings["unittest_data_dir"] / "operation-measure-data-requirements.json"
)
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_1(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_1(inst2)
def impl_operationdefinition_2(inst):
assert inst.code == "translate"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.id == "ConceptMap-translate"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Concept Translation"
assert inst.parameter[0].documentation == (
"The code that is to be translated. If a code is provided, a "
"system must be provided"
)
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 0
assert inst.parameter[0].name == "code"
assert inst.parameter[0].type == "code"
assert inst.parameter[0].use == "in"
assert (
inst.parameter[1].documentation
== "The system for the code that is to be translated"
)
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 0
assert inst.parameter[1].name == "system"
assert inst.parameter[1].type == "uri"
assert inst.parameter[1].use == "in"
assert inst.parameter[2].documentation == (
"The version of the system, if one was provided in the source" " data"
)
assert inst.parameter[2].max == "1"
assert inst.parameter[2].min == 0
assert inst.parameter[2].name == "version"
assert inst.parameter[2].type == "string"
assert inst.parameter[2].use == "in"
assert inst.parameter[3].max == "1"
assert inst.parameter[3].min == 0
assert inst.parameter[3].name == "source"
assert inst.parameter[3].type == "uri"
assert inst.parameter[3].use == "in"
assert inst.parameter[4].documentation == "A coding to translate"
assert inst.parameter[4].max == "1"
assert inst.parameter[4].min == 0
assert inst.parameter[4].name == "coding"
assert inst.parameter[4].type == "Coding"
assert inst.parameter[4].use == "in"
assert inst.parameter[5].documentation == (
"A full codeableConcept to validate. The server can translate"
" any of the coding values (e.g. existing translations) as it"
" chooses"
)
assert inst.parameter[5].max == "1"
assert inst.parameter[5].min == 0
assert inst.parameter[5].name == "codeableConcept"
assert inst.parameter[5].type == "CodeableConcept"
assert inst.parameter[5].use == "in"
assert inst.parameter[6].max == "1"
assert inst.parameter[6].min == 0
assert inst.parameter[6].name == "target"
assert inst.parameter[6].type == "uri"
assert inst.parameter[6].use == "in"
assert inst.parameter[7].max == "1"
assert inst.parameter[7].min == 0
assert inst.parameter[7].name == "targetsystem"
assert inst.parameter[7].type == "uri"
assert inst.parameter[7].use == "in"
assert (
inst.parameter[8].documentation
== "Another element that may help produce the correct mapping"
)
assert inst.parameter[8].max == "*"
assert inst.parameter[8].min == 0
assert inst.parameter[8].name == "dependency"
assert inst.parameter[8].part[0].documentation == "The element for this dependency"
assert inst.parameter[8].part[0].max == "1"
assert inst.parameter[8].part[0].min == 0
assert inst.parameter[8].part[0].name == "element"
assert inst.parameter[8].part[0].type == "uri"
assert inst.parameter[8].part[0].use == "in"
assert inst.parameter[8].part[1].documentation == "The value for this dependency"
assert inst.parameter[8].part[1].max == "1"
assert inst.parameter[8].part[1].min == 0
assert inst.parameter[8].part[1].name == "concept"
assert inst.parameter[8].part[1].type == "CodeableConcept"
assert inst.parameter[8].part[1].use == "in"
assert inst.parameter[8].use == "in"
assert inst.parameter[9].documentation == (
"if this is true, then the operation should return all the "
"codes that might be mapped to this code. This parameter "
"reverses the meaning of the source and target parameters"
)
assert inst.parameter[9].max == "1"
assert inst.parameter[9].min == 0
assert inst.parameter[9].name == "reverse"
assert inst.parameter[9].type == "boolean"
assert inst.parameter[9].use == "in"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "ConceptMap"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is True
assert inst.url == "http://hl7.org/fhir/OperationDefinition/ConceptMap-translate"
def test_operationdefinition_2(base_settings):
"""No. 2 tests collection for OperationDefinition.
Test File: operation-conceptmap-translate.json
"""
filename = (
base_settings["unittest_data_dir"] / "operation-conceptmap-translate.json"
)
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_2(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_2(inst2)
def impl_operationdefinition_3(inst):
assert inst.code == "expand"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.id == "ValueSet-expand"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Value Set Expansion"
assert inst.parameter[0].documentation == (
"A canonical url for a value set. The server must know the "
"value set (e.g. it is defined explicitly in the server's "
"value sets, or it is defined implicitly by some code system "
"known to the server"
)
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 0
assert inst.parameter[0].name == "url"
assert inst.parameter[0].type == "uri"
assert inst.parameter[0].use == "in"
assert inst.parameter[1].documentation == (
"The value set is provided directly as part of the request. "
"Servers may choose not to accept value sets in this fashion"
)
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 0
assert inst.parameter[1].name == "valueSet"
assert inst.parameter[1].type == "ValueSet"
assert inst.parameter[1].use == "in"
assert inst.parameter[2].max == "1"
assert inst.parameter[2].min == 0
assert inst.parameter[2].name == "context"
assert inst.parameter[2].type == "uri"
assert inst.parameter[2].use == "in"
assert inst.parameter[3].max == "1"
assert inst.parameter[3].min == 0
assert inst.parameter[3].name == "filter"
assert inst.parameter[3].type == "string"
assert inst.parameter[3].use == "in"
assert inst.parameter[4].max == "1"
assert inst.parameter[4].min == 0
assert inst.parameter[4].name == "profile"
assert inst.parameter[4].type == "uri"
assert inst.parameter[4].use == "in"
assert inst.parameter[5].max == "1"
assert inst.parameter[5].min == 0
assert inst.parameter[5].name == "date"
assert inst.parameter[5].type == "dateTime"
assert inst.parameter[5].use == "in"
assert inst.parameter[6].documentation == (
"Paging support - where to start if a subset is desired "
"(default = 0). Offset is number of records (not number of "
"pages)"
)
assert inst.parameter[6].max == "1"
assert inst.parameter[6].min == 0
assert inst.parameter[6].name == "offset"
assert inst.parameter[6].type == "integer"
assert inst.parameter[6].use == "in"
assert inst.parameter[7].max == "1"
assert inst.parameter[7].min == 0
assert inst.parameter[7].name == "count"
assert inst.parameter[7].type == "integer"
assert inst.parameter[7].use == "in"
assert inst.parameter[8].documentation == (
"Controls whether concept designations are to be included or "
"excluded in value set expansions. Overrides the value in the"
" expansion profile if there is one"
)
assert inst.parameter[8].max == "1"
assert inst.parameter[8].min == 0
assert inst.parameter[8].name == "includeDesignations"
assert inst.parameter[8].type == "boolean"
assert inst.parameter[8].use == "in"
assert inst.parameter[9].documentation == (
"Controls whether the value set definition is included or "
"excluded in value set expansions. Overrides the value in the"
" expansion profile if there is one"
)
assert inst.parameter[9].max == "1"
assert inst.parameter[9].min == 0
assert inst.parameter[9].name == "includeDefinition"
assert inst.parameter[9].type == "boolean"
assert inst.parameter[9].use == "in"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "ValueSet"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is True
assert inst.url == "http://hl7.org/fhir/OperationDefinition/ValueSet-expand"
def test_operationdefinition_3(base_settings):
"""No. 3 tests collection for OperationDefinition.
Test File: operation-valueset-expand.json
"""
filename = base_settings["unittest_data_dir"] / "operation-valueset-expand.json"
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_3(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_3(inst2)
def impl_operationdefinition_4(inst):
assert inst.code == "populate"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.id == "Questionnaire-populate"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Populate Questionnaire"
assert inst.parameter[0].documentation == (
"A logical questionnaire identifier (i.e. "
"''Questionnaire.identifier''). The server must know the "
"questionnaire or be able to retrieve it from other known "
"repositories."
)
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 0
assert inst.parameter[0].name == "identifier"
assert inst.parameter[0].type == "uri"
assert inst.parameter[0].use == "in"
assert inst.parameter[1].documentation == (
"The [Questionnaire](questionnaire.html) is provided directly"
" as part of the request. Servers may choose not to accept "
"questionnaires in this fashion"
)
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 0
assert inst.parameter[1].name == "questionnaire"
assert inst.parameter[1].type == "Questionnaire"
assert inst.parameter[1].use == "in"
assert inst.parameter[2].max == "1"
assert inst.parameter[2].min == 0
assert inst.parameter[2].name == "questionnaireRef"
assert (
inst.parameter[2].profile.reference
== "http://hl7.org/fhir/StructureDefinition/Questionnaire"
)
assert inst.parameter[2].type == "Reference"
assert inst.parameter[2].use == "in"
assert inst.parameter[3].max == "1"
assert inst.parameter[3].min == 1
assert inst.parameter[3].name == "subject"
assert inst.parameter[3].type == "Reference"
assert inst.parameter[3].use == "in"
assert inst.parameter[4].max == "*"
assert inst.parameter[4].min == 0
assert inst.parameter[4].name == "content"
assert inst.parameter[4].type == "Reference"
assert inst.parameter[4].use == "in"
assert inst.parameter[5].documentation == (
"If specified and set to 'true' (and the server is capable), "
"the server should use what resources and other knowledge it "
"has about the referenced subject when pre-populating answers"
" to questions."
)
assert inst.parameter[5].max == "1"
assert inst.parameter[5].min == 0
assert inst.parameter[5].name == "local"
assert inst.parameter[5].type == "boolean"
assert inst.parameter[5].use == "in"
assert inst.parameter[6].documentation == (
"The partially (or fully)-populated set of answers for the "
"specified Questionnaire"
)
assert inst.parameter[6].max == "1"
assert inst.parameter[6].min == 1
assert inst.parameter[6].name == "questionnaire"
assert inst.parameter[6].type == "QuestionnaireResponse"
assert inst.parameter[6].use == "out"
assert inst.parameter[7].max == "1"
assert inst.parameter[7].min == 0
assert inst.parameter[7].name == "issues"
assert inst.parameter[7].type == "OperationOutcome"
assert inst.parameter[7].use == "out"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "Questionnaire"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is True
assert inst.url == (
"http://hl7.org/fhir/OperationDefinition/Questionnaire-" "populate"
)
def test_operationdefinition_4(base_settings):
"""No. 4 tests collection for OperationDefinition.
Test File: operation-questionnaire-populate.json
"""
filename = (
base_settings["unittest_data_dir"] / "operation-questionnaire-populate.json"
)
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_4(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_4(inst2)
def impl_operationdefinition_5(inst):
assert inst.code == "meta-add"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.id == "Resource-meta-add"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Add profiles, tags, and security labels to a resource"
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 1
assert inst.parameter[0].name == "meta"
assert inst.parameter[0].type == "Meta"
assert inst.parameter[0].use == "in"
assert inst.parameter[1].documentation == "Resulting meta for the resource"
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 1
assert inst.parameter[1].name == "return"
assert inst.parameter[1].type == "Meta"
assert inst.parameter[1].use == "out"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "Resource"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is False
assert inst.url == "http://hl7.org/fhir/OperationDefinition/Resource-meta-add"
def test_operationdefinition_5(base_settings):
"""No. 5 tests collection for OperationDefinition.
Test File: operation-resource-meta-add.json
"""
filename = base_settings["unittest_data_dir"] / "operation-resource-meta-add.json"
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_5(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_5(inst2)
def impl_operationdefinition_6(inst):
assert inst.code == "everything"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.id == "Encounter-everything"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Fetch Encounter Record"
assert inst.parameter[0].documentation == 'The bundle type is "searchset"'
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 1
assert inst.parameter[0].name == "return"
assert inst.parameter[0].type == "Bundle"
assert inst.parameter[0].use == "out"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "Encounter"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is False
assert inst.url == "http://hl7.org/fhir/OperationDefinition/Encounter-everything"
def test_operationdefinition_6(base_settings):
"""No. 6 tests collection for OperationDefinition.
Test File: operation-encounter-everything.json
"""
filename = (
base_settings["unittest_data_dir"] / "operation-encounter-everything.json"
)
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_6(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_6(inst2)
def impl_operationdefinition_7(inst):
assert inst.code == "evaluate"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.description == (
"The evaluate operation requests clinical decision support "
"guidance based on a specific decision support module"
)
assert inst.id == "ServiceDefinition-evaluate"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Evaluate"
assert (
inst.parameter[0].documentation
== "An optional client-provided identifier to track the request."
)
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 0
assert inst.parameter[0].name == "requestId"
assert inst.parameter[0].type == "id"
assert inst.parameter[0].use == "in"
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 0
assert inst.parameter[1].name == "evaluateAtDateTime"
assert inst.parameter[1].type == "dateTime"
assert inst.parameter[1].use == "in"
assert inst.parameter[2].documentation == (
"The input parameters for a request, if any. These parameters"
" are defined by the module that is the target of the "
"evaluation, and typically supply patient-independent "
"information to the module."
)
assert inst.parameter[2].max == "1"
assert inst.parameter[2].min == 0
assert inst.parameter[2].name == "inputParameters"
assert inst.parameter[2].type == "Parameters"
assert inst.parameter[2].use == "in"
assert inst.parameter[3].documentation == (
"The input data for the request. These data are defined by "
"the data requirements of the module and typically provide "
"patient-dependent information."
)
assert inst.parameter[3].max == "*"
assert inst.parameter[3].min == 0
assert inst.parameter[3].name == "inputData"
assert inst.parameter[3].type == "Any"
assert inst.parameter[3].use == "in"
assert inst.parameter[4].documentation == "The patient in context, if any."
assert inst.parameter[4].max == "1"
assert inst.parameter[4].min == 0
assert inst.parameter[4].name == "patient"
assert (
inst.parameter[4].profile.reference
== "http://hl7.org/fhir/StructureDefinition/Patient"
)
assert inst.parameter[4].type == "Reference"
assert inst.parameter[4].use == "in"
assert inst.parameter[5].documentation == "The encounter in context, if any."
assert inst.parameter[5].max == "1"
assert inst.parameter[5].min == 0
assert inst.parameter[5].name == "encounter"
assert (
inst.parameter[5].profile.reference
== "http://hl7.org/fhir/StructureDefinition/Encounter"
)
assert inst.parameter[5].type == "Reference"
assert inst.parameter[5].use == "in"
assert inst.parameter[6].documentation == "The organization initiating the request."
assert inst.parameter[6].max == "1"
assert inst.parameter[6].min == 0
assert inst.parameter[6].name == "initiatingOrganization"
assert (
inst.parameter[6].profile.reference
== "http://hl7.org/fhir/StructureDefinition/Organization"
)
assert inst.parameter[6].type == "Reference"
assert inst.parameter[6].use == "in"
assert inst.parameter[7].documentation == "The person initiating the request."
assert inst.parameter[7].max == "1"
assert inst.parameter[7].min == 0
assert inst.parameter[7].name == "initiatingPerson"
assert inst.parameter[7].type == "Reference"
assert inst.parameter[7].use == "in"
assert inst.parameter[8].documentation == (
"The type of user initiating the request, e.g. patient, "
"healthcare provider, or specific type of healthcare provider"
" (physician, nurse, etc.)."
)
assert inst.parameter[8].max == "1"
assert inst.parameter[8].min == 0
assert inst.parameter[8].name == "userType"
assert inst.parameter[8].type == "CodeableConcept"
assert inst.parameter[8].use == "in"
assert (
inst.parameter[9].documentation
== "Preferred language of the person using the system."
)
assert inst.parameter[9].max == "1"
assert inst.parameter[9].min == 0
assert inst.parameter[9].name == "userLanguage"
assert inst.parameter[9].type == "CodeableConcept"
assert inst.parameter[9].use == "in"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "ServiceDefinition"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is False
assert inst.url == (
"http://hl7.org/fhir/OperationDefinition/ServiceDefinition-" "evaluate"
)
def test_operationdefinition_7(base_settings):
"""No. 7 tests collection for OperationDefinition.
Test File: operation-servicedefinition-evaluate.json
"""
filename = (
base_settings["unittest_data_dir"] / "operation-servicedefinition-evaluate.json"
)
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_7(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_7(inst2)
def impl_operationdefinition_8(inst):
assert inst.code == "meta"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.id == "Resource-meta"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Access a list of profiles, tags, and security labels"
assert inst.parameter[0].documentation == "The meta returned by the operation"
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 1
assert inst.parameter[0].name == "return"
assert inst.parameter[0].type == "Meta"
assert inst.parameter[0].use == "out"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "Resource"
assert inst.status == "draft"
assert inst.system is True
assert inst.text.status == "generated"
assert inst.type is True
assert inst.url == "http://hl7.org/fhir/OperationDefinition/Resource-meta"
def test_operationdefinition_8(base_settings):
"""No. 8 tests collection for OperationDefinition.
Test File: operation-resource-meta.json
"""
filename = base_settings["unittest_data_dir"] / "operation-resource-meta.json"
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_8(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_8(inst2)
def impl_operationdefinition_9(inst):
assert inst.code == "data-requirements"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.description == (
"The data-requirements operation aggregates and returns the "
"parameters and data requirements for the service module and "
"all its dependencies as a single module definition library."
)
assert inst.id == "ServiceDefinition-data-requirements"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Data Requirements"
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 0
assert inst.parameter[0].name == "evaluateAtDateTime"
assert inst.parameter[0].type == "dateTime"
assert inst.parameter[0].use == "in"
assert inst.parameter[1].documentation == (
"The result of the requirements gathering is a module-"
"definition Library that describes the aggregate parameters, "
"data requirements, and dependencies of the service."
)
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 1
assert inst.parameter[1].name == "return"
assert inst.parameter[1].type == "Library"
assert inst.parameter[1].use == "out"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "ServiceDefinition"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is False
assert inst.url == (
"http://hl7.org/fhir/OperationDefinition/ServiceDefinition-" "data-requirements"
)
def test_operationdefinition_9(base_settings):
"""No. 9 tests collection for OperationDefinition.
Test File: operation-servicedefinition-data-requirements.json
"""
filename = (
base_settings["unittest_data_dir"]
/ "operation-servicedefinition-data-requirements.json"
)
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_9(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_9(inst2)
def impl_operationdefinition_10(inst):
assert inst.code == "evaluate-measure"
assert inst.contact[0].telecom[0].system == "url"
assert inst.contact[0].telecom[0].value == "http://hl7.org/fhir"
assert inst.contact[0].telecom[1].system == "email"
assert inst.contact[0].telecom[1].value == "[email protected]"
assert inst.date == fhirtypes.DateTime.validate("2017-04-19T07:44:43+10:00")
assert inst.description == (
"The evaluate-measure operation is used to invoke an eMeasure"
" and obtain the results"
)
assert inst.id == "Measure-evaluate-measure"
assert inst.instance is True
assert inst.kind == "operation"
assert inst.name == "Evaluate Measure"
assert inst.parameter[0].max == "1"
assert inst.parameter[0].min == 1
assert inst.parameter[0].name == "periodStart"
assert inst.parameter[0].type == "date"
assert inst.parameter[0].use == "in"
assert inst.parameter[1].documentation == (
"The end of the measurement period. The period will end at "
"the end of the period implied by the supplied timestamp. "
"E.g. a value of 2014 would set the period end to be "
"2014-12-31T23:59:59 inclusive"
)
assert inst.parameter[1].max == "1"
assert inst.parameter[1].min == 1
assert inst.parameter[1].name == "periodEnd"
assert inst.parameter[1].type == "date"
assert inst.parameter[1].use == "in"
assert inst.parameter[2].documentation == (
"The measure to evaluate. This parameter is only required "
"when the operation is invoked on the resource type, it is "
"not used when invoking the operation on a Measure instance"
)
assert inst.parameter[2].max == "1"
assert inst.parameter[2].min == 0
assert inst.parameter[2].name == "measure"
assert (
inst.parameter[2].profile.reference
== "http://hl7.org/fhir/StructureDefinition/Measure"
)
assert inst.parameter[2].type == "Reference"
assert inst.parameter[2].use == "in"
assert inst.parameter[3].documentation == (
"The type of measure report, patient, patient-list, or "
"population. If not specified, a default value of patient "
"will be used if the patient parameter is supplied, "
"otherwise, population will be used"
)
assert inst.parameter[3].max == "1"
assert inst.parameter[3].min == 0
assert inst.parameter[3].name == "reportType"
assert inst.parameter[3].type == "code"
assert inst.parameter[3].use == "in"
assert inst.parameter[4].documentation == (
"Patient to evaluate against. If not specified, the measure "
"will be evaluated for all patients that meet the "
"requirements of the measure. If specified, only the "
"referenced patient will be evaluated"
)
assert inst.parameter[4].max == "1"
assert inst.parameter[4].min == 0
assert inst.parameter[4].name == "patient"
assert (
inst.parameter[4].profile.reference
== "http://hl7.org/fhir/StructureDefinition/Patient"
)
assert inst.parameter[4].type == "Reference"
assert inst.parameter[4].use == "in"
assert inst.parameter[5].documentation == (
"Practitioner to evaluate. If specified, the measure will be "
"evaluated only for patients whose primary practitioner is "
"the identified practitioner"
)
assert inst.parameter[5].max == "1"
assert inst.parameter[5].min == 0
assert inst.parameter[5].name == "practitioner"
assert (
inst.parameter[5].profile.reference
== "http://hl7.org/fhir/StructureDefinition/Practitioner"
)
assert inst.parameter[5].type == "Reference"
assert inst.parameter[5].use == "in"
assert inst.parameter[6].max == "1"
assert inst.parameter[6].min == 0
assert inst.parameter[6].name == "lastReceivedOn"
assert inst.parameter[6].type == "dateTime"
assert inst.parameter[6].use == "in"
assert inst.parameter[7].documentation == (
"The results of the measure calculation. See the "
"MeasureReport resource for a complete description of the "
"output of this operation"
)
assert inst.parameter[7].max == "1"
assert inst.parameter[7].min == 1
assert inst.parameter[7].name == "return"
assert inst.parameter[7].type == "MeasureReport"
assert inst.parameter[7].use == "out"
assert inst.publisher == "HL7 (FHIR Project)"
assert inst.resource[0] == "Measure"
assert inst.status == "draft"
assert inst.system is False
assert inst.text.status == "generated"
assert inst.type is True
assert inst.url == (
"http://hl7.org/fhir/OperationDefinition/Measure-evaluate-" "measure"
)
def test_operationdefinition_10(base_settings):
"""No. 10 tests collection for OperationDefinition.
Test File: operation-measure-evaluate-measure.json
"""
filename = (
base_settings["unittest_data_dir"] / "operation-measure-evaluate-measure.json"
)
inst = operationdefinition.OperationDefinition.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "OperationDefinition" == inst.resource_type
impl_operationdefinition_10(inst)
# Test the reverse: generate data from the instance and create it again.
data = inst.dict()
assert "OperationDefinition" == data["resourceType"]
inst2 = operationdefinition.OperationDefinition(**data)
impl_operationdefinition_10(inst2)
|
py
|
1a59b32697417576b52cf2a925f63750e70e3900
|
import argparse
import sys
import pandas as pd
from PyQt5.QtWidgets import QApplication
from src.gui.main_window import MainWindow
from src.utils import load_json, load_configurations
pd.options.mode.chained_assignment = None # default='warn'
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="configurations/config.json", help="Path to configuration.")
args = parser.parse_args()
print(f"Reading configuration from {args.config}")
config = load_json(args.config)
app = QApplication(sys.argv)
mw = MainWindow(config)
mw.show()
app.exec()
if __name__ == "__main__":
main()
|
py
|
1a59b4140a5ecd520fcaf9de46d1b4db6ebb3318
|
#!/usr/bin/python3
# coding=utf-8
# Copyright 2019 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constants
"""
DEFAULT_REPORT_FILE = "{project_name}_{testing_type}_{scan_type}_{build_id}_report.html"
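# Illustrative self-check (assumed values, not part of the Carrier constants):
# the placeholders are filled in with str.format at report time.
if __name__ == "__main__":
    example = DEFAULT_REPORT_FILE.format(project_name="demo",
                                         testing_type="dast",
                                         scan_type="zap",
                                         build_id="42")
    assert example == "demo_dast_zap_42_report.html"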
|
py
|
1a59b4adfd5d81ec2319531cae8f240481cf2ef6
|
from eventsourcing.example.application import (
close_example_application,
get_example_application,
init_example_application,
)
from eventsourcing.infrastructure.sqlalchemy.manager import SQLAlchemyRecordManager
from eventsourcing.infrastructure.sqlalchemy.records import IntegerSequencedNoIDRecord
from eventsourcing.tests.datastore_tests.test_sqlalchemy import (
SQLAlchemyDatastoreTestCase,
)
class TestExampleApplicationSingleInstanceFunctions(SQLAlchemyDatastoreTestCase):
def setUp(self):
super(TestExampleApplicationSingleInstanceFunctions, self).setUp()
# Setup the database.
self.datastore.setup_connection()
self.datastore.setup_tables()
def tearDown(self):
# Teardown single instance.
close_example_application()
# Teardown the database.
self.datastore.drop_tables()
self.datastore.close_connection()
super(TestExampleApplicationSingleInstanceFunctions, self).tearDown()
def test(self):
self.datastore.setup_connection()
self.datastore.setup_tables()
record_manager = SQLAlchemyRecordManager(
record_class=IntegerSequencedNoIDRecord, session=self.datastore.session
)
# Can't get the single instance before it has been constructed.
with self.assertRaises(AssertionError):
get_example_application()
# Construct single instance.
init_example_application(entity_record_manager=record_manager)
# Can't construct single instance twice.
with self.assertRaises(AssertionError):
init_example_application(entity_record_manager=record_manager)
# Get the single instance.
app1 = get_example_application()
app2 = get_example_application()
self.assertEqual(id(app1), id(app2))
# Close single instance.
close_example_application()
# Can't get the single instance before it has been constructed.
with self.assertRaises(AssertionError):
get_example_application()
# Construct single instance.
init_example_application(entity_record_manager=record_manager)
# Can't construct single instance twice.
with self.assertRaises(AssertionError):
init_example_application(entity_record_manager=record_manager)
# Get the single instance.
app1 = get_example_application()
app2 = get_example_application()
self.assertEqual(id(app1), id(app2))
|
py
|
1a59b58d892eca47b028e6229f77efbeaddc44a3
|
#!/usr/bin/python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
module: win_vmware_desktop_netmgmt
short_description: Implement the VMNet Management part of the API
version_added: "2.4"
description:
- "Manage VMware Workstation Pro VMNets"
options:
vmnet:
description:
- This is the target VMNet to interact with
required: false
action: infos || delete || create || update_pf || update_mti
description:
- This is the action we want to do.
required: true
setting: portforward || mactoip
description:
- Choose what infos you want to list, portforwarding or mac-to-ip, empty = listing all vmnets
required: no, only useful with action = infos
type: custom || bridged || nat || hostonly
description:
- This is the type of virtual network you want to create
required: only for create
vmnet:
description:
- Choose your VMnet
required: only when type = custom
protocol: TCP || UDP
description:
- Your targeted protocol
required: only for update_pf & delete
port: 1337
description:
- Your targeted port
required: only for update_pf & delete
guest_ip_address: "192.168.188.13"
description:
- Your targeted IP
required: true, only with update_pf
guest_port: "1111"
description:
- Your targeted port
required: only for update_pf
guest_description: "itworks!"
description:
- PF description
required: false, only useful for update_pf
mac_address: "00:0C:29:87:4B:89"
description:
- Your targeted mac address
required: only for update_mti
ip_address: "192.168.188.13"
description:
- Your targeted IP address
required: false, if you don't have a target IP it will delete the MTI
username: "api-username"
description:
- Your workstation API username
required: true
password: "api-password"
description:
- Your workstation API password
required: true
api_url: "http://127.0.0.1"
description:
- Your workstation API URL
required: false
default: "http://127.0.0.1"
api_port: "8697"
description:
- Your workstation API PORT
required: false
default: "8697"
author:
- Adam Magnier (@qsypoq)
'''
EXAMPLES = r'''
### Get infos of all the configured vmnets
- name: "Get all vmnet infos"
win_vmware_desktop_netmgmt:
action: infos
username: "api-username"
password: "api-password"
### Return all Mac-to-IP settings from vmnet8
- name: "Return MTI of vmnet8"
win_vmware_desktop_netmgmt:
action: infos
vmnet: "vmnet8"
setting: "mactoip"
username: "api-username"
password: "api-password"
### Return all the forwarded ports settings from vmnet13
- name: "Return vmnet13 portforward"
win_vmware_desktop_netmgmt:
action: infos
vmnet: "vmnet13"
setting "portforward"
username: "api-username"
password: "api-password"
### Create a new vmnet as vmnet13, as host only
- name: "Create vmnet13"
win_vmware_desktop_netmgmt:
vmnet: "vmnet13"
type: "hostonly"
action: create
username: "api-username"
password: "api-password"
### Delete the forwarded 1337 tcp port from vmnet8
- name: "Delete portforwarding"
win_vmware_desktop_netmgmt:
vmnet: "vmnet8"
protocol: "TCP"
port: "1337"
action: delete
username: "api-username"
password: "api-password"
### Update the forwarded 1337 tcp port from vmnet8 to 172.13.13.13:1111 with "itworks!" as description
- name: "update forwarded port"
win_vmware_desktop_netmgmt:
vmnet: "vmnet8"
protocol: "TCP"
port: "1337"
guest_ip_address: "172.13.13.13"
guest_port: "1111"
guest_description: "itworks!"
action: update_pf
username: "api-username"
password: "api-password"
### Update the MAC 00:12:29:34:4B:56 to be assigned as 192.168.188.13 on vmnet8
- name: "Update Mac to IP"
win_vmware_desktop_netmgmt:
vmnet: "vmnet8"
mac_address: "00:12:29:34:4B:56"
ip_address: "192.168.188.13"
action: update_mti
username: "api-username"
password: "api-password"
'''
RETURN = r'''
### Return all Mac-to-IP settings from vmnet8
{
"mactoips": [
{
"ip": "172.60.60.60",
"mac": "00:0c:34:3e:54:52",
"vmnet": "vmnet8"
},
{
"ip": "192.168.43.43",
"mac": "00:0c:40:87:36:17",
"vmnet": "vmnet8"
}
]
}
### Update Mac to IP
{
"Code": 0,
"Message": "The operation was successful"
}
### Create a new vmnet as vmnet13, as host only
{
"num": 1,
"vmnets": [
{
"dhcp": "true",
"mask": "255.255.255.0",
"name": "vmnet13",
"subnet": "192.168.244.0",
"type": "hostOnly"
}
]
}
'''
|
py
|
1a59b5b8f2555b2ab09aaa8dcefdbd0c8f96bf1c
|
input_data = open("day9.input").read().split("\n")
adj_m = [(0, 1), (1, 0), (0, -1), (-1, 0)]
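# Part 1: a point is a low point if it is strictly lower than every orthogonal
# neighbour; its risk level is height + 1. Each low point also seeds a basin
# used in part 2 below.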
sol = 0
basins = []
for ix, line in enumerate(input_data):
for iy, col in enumerate(line):
adj = []
for a in adj_m:
realx = ix+a[0]
realy = iy+a[1]
if realx >= 0 and realy >= 0 and realx < len(input_data) and realy < len(line):
adj.append(int(input_data[realx][realy]))
if int(col) < min(adj):
sol += int(col) + 1
basins.append([(ix, iy)])
print(sol)
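# Part 2: grow each basin by repeated flood fill -- keep adding adjacent points
# whose height is not 9 until no new points appear, then multiply the sizes of
# the three largest basins.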
for bx, basin in enumerate(basins):
while True:
check = []
for point in basins[bx]:
for (x, y) in adj_m:
realx = point[0]+x
realy = point[1]+y
if realx >= 0 and realy >= 0 and realx < len(input_data) and realy < len(line):
if input_data[realx][realy] != "9":
check.append((realx, realy))
check = list(filter(lambda x: x not in basins[bx], set(check)))
if len(check) == 0:
break
basins[bx] += check
basins.sort(key=lambda x: len(x), reverse=True)
print("Aufgabe2")
print(len(basins[0]) * len(basins[1]) * len(basins[2]))
|
py
|
1a59b62abb0868ce2e8d79cd3efd8c1ff8f28cef
|
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from common.models import TimestampModel
User = get_user_model()
def upload_to(_, filename):
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return f'covers/{filename}'  # path relative to MEDIA_ROOT
class Category(TimestampModel):
name = models.CharField(max_length=255)
description = models.TextField()
class Meta:
verbose_name_plural = 'categories'
ordering = ('name',)
def __str__(self):
return self.name
class Author(TimestampModel):
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Book(TimestampModel):
title = models.CharField(max_length=255)
description = models.TextField()
categories = models.ManyToManyField(Category)
authors = models.ManyToManyField(Author)
read_by = models.ManyToManyField(User)
cover = models.ImageField(upload_to=upload_to)
class Meta:
ordering = ('title',)
default_related_name = 'books'
class Review(TimestampModel):
rating = models.PositiveSmallIntegerField()
review = models.TextField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
book = models.ForeignKey(Book, on_delete=models.CASCADE)
class Meta:
ordering = ('-created_at',)
default_related_name = 'reviews'
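# Usage sketch (illustrative, not part of the models): with default_related_name
# set, the reverse accessors look like
#   book.reviews.all()                              # via Review.book
#   user.books.all()                                # via Book.read_by
#   Book.objects.filter(categories__name="Fiction") # hypothetical category name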
|
py
|
1a59b6f3b59fcf6ac60a3ad82527d0576a6bd337
|
import uos
import network
from flashbdev import bdev
def wifi():
import ubinascii
ap_if = network.WLAN(network.AP_IF)
essid = b"MicroPython-%s" % ubinascii.hexlify(ap_if.config("mac")[-3:])
ap_if.config(essid=essid, authmode=network.AUTH_WPA_WPA2_PSK, password=b"micropythoN")
def check_bootsec():
buf = bytearray(bdev.SEC_SIZE)
bdev.readblocks(0, buf)
empty = True
for b in buf:
if b != 0xff:
empty = False
break
if empty:
return True
fs_corrupted()
def fs_corrupted():
import time
while 1:
print("""\
The FAT filesystem starting at sector %d with size %d sectors appears to
be corrupted. If you had important data there, you may want to make a flash
snapshot to try to recover it. Otherwise, perform factory reprogramming
of MicroPython firmware (completely erase flash, followed by firmware
programming).
""" % (bdev.START_SEC, bdev.blocks))
time.sleep(3)
def setup():
check_bootsec()
print("Performing initial setup")
# wifi()
uos.VfsFat.mkfs(bdev)
vfs = uos.VfsFat(bdev)
uos.mount(vfs, '/')
with open("boot.py", "w") as f:
f.write("""\
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import uos, machine
uos.dupterm(machine.UART(0, 115200), 1)
import gc
#import webrepl
#webrepl.start()
gc.collect()
import prometheus.pnetwork
gc.collect()
prometheus.pnetwork.init_network()
gc.collect()
""")
return vfs
|
py
|
1a59b720efe9b67369e62178a99a771fbb6d5b80
|
import os
import json
from COCO_Eval_Utils import coco_eval,coco_eval_specific
from Utils import model_construction,init_optimizer,set_lr,clip_gradient,get_transform,get_sample_image_info,visualize_att,RewardCriterion,get_self_critical_reward
import torch.nn as nn
import torch
from torch.nn.utils.rnn import pack_padded_sequence
import tqdm
import numpy as np
from cider.pyciderevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from cider.pyciderevalcap.ciderD.ciderD import CiderD
class Engine(object):
def __init__(self,model_settings_json,dataset_name,caption_vocab,data_dir=None,device='cpu'):
self.model,self.settings = model_construction(model_settings_json=model_settings_json,caption_vocab=caption_vocab,device=device)
self.device = device
self.data_dir = data_dir
self.dataset_name = dataset_name
self.caption_vocab = caption_vocab
self.tag = 'Model_' + self.settings['model_type'] + '_Dataset_' + dataset_name
self.model.to(self.device)
def load_pretrained_model(self,scst_model=False):
scst_not_found = False
if scst_model:
pretrained_scst_model_path = './CheckPoints/%s/' % self.tag + 'Captioner_scst_cp.pth'
if os.path.exists(pretrained_scst_model_path):
self.model.load_state_dict(torch.load(pretrained_scst_model_path))
print('load pretrained scst weights complete.')
else:
print('pretrained scst weights not found, try to load pretrained xe weights.')
scst_not_found = True
if not(scst_model) or scst_not_found:
pretrained_model_path = './CheckPoints/%s/' % self.tag + 'Captioner_cp.pth'
if os.path.exists(pretrained_model_path):
self.model.load_state_dict(torch.load(pretrained_model_path))
print('load pretrained xe weights complete.')
else:print('model checkpoint not found, training from scratch.')
def load_score_record(self,scst=False):
best_cider = 0.0
scst_score_record_path = './CheckPoints/%s/Captioner_scst_cp_score.json' % (self.tag)
score_record_path = './CheckPoints/%s/Captioner_cp_score.json' % (self.tag)
if scst and os.path.exists(scst_score_record_path):
scst_score_record = json.load(open(scst_score_record_path, 'r'))
best_cider = scst_score_record['cider']
if not scst and os.path.exists(score_record_path):
score_record = json.load(open(score_record_path,'r'))
best_cider = score_record['cider']
if best_cider != 0.0:print('best cider record: %.3f, model checkpoints below the score record will not be saved.' % best_cider)
else: print('best cider record not found.')
return best_cider
def get_model_params(self):
cnn_extractor_params = list(filter(lambda p: p.requires_grad, self.model.encoder.feature_extractor.parameters()))
captioner_params = list(self.model.decoder.parameters())
return cnn_extractor_params,captioner_params
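# Note: the expected shapes of lr_opts / ss_opts below are inferred from the
# keys read in training(); the numbers are illustrative assumptions only:
#   lr_opts ~ {"learning_rate": 4e-4, "cnn_FT_learning_rate": 1e-4,
#              "lr_dec_start_epoch": 20, "lr_dec_every": 5, "lr_dec_rate": 0.8}
#   ss_opts ~ {"ss_start_epoch": 20, "ss_inc_every": 5,
#              "ss_inc_prob": 0.05, "ss_max_prob": 0.25}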
#------------------------------XELoss training---------------------------------#
def training(self, num_epochs, train_dataloader, eval_dataloader, eval_caption_path,
optimizer_type, lr_opts, ss_opts, use_preset_settings, eval_beam_size=-1,
load_pretrained_model=False, overwrite_guarantee=True, cnn_FT_start=False, tqdm_visible=True):
os.makedirs('./CheckPoints/%s' % self.tag, exist_ok=True)
if load_pretrained_model:self.load_pretrained_model(scst_model=False)
else:print('training from scratch')
if overwrite_guarantee:best_cider_record = self.load_score_record(scst=False)
else:best_cider_record = 0.0
if hasattr(self.model,'cnn_fine_tune'):
self.model.cnn_fine_tune(cnn_FT_start)
cnn_extractor_params,captioner_params = self.get_model_params()
#------------Load preset training settings if exists--------------#
optim_type = optimizer_type
lr = lr_opts['learning_rate']
cnn_FT_lr = lr_opts['cnn_FT_learning_rate']
if use_preset_settings:
if self.settings.__contains__('optimizer'):
optim_type = self.settings['optimizer']
print('training under preset optimizer_type:%s' % optim_type)
if self.settings.__contains__('lr'):
lr = self.settings['lr']
print('training under preset learning_rate:%.6f' % lr)
if self.settings.__contains__('cnn_FT_lr'):
cnn_FT_lr = self.settings['cnn_FT_lr']
print('training under preset cnn_FT_learning_rate:%.6f' % cnn_FT_lr)
#-----------------------------------------------------------------#
cnn_extractor_optimizer = init_optimizer(optimizer_type=optim_type,params=cnn_extractor_params,learning_rate=cnn_FT_lr)
captioner_optimizer = init_optimizer(optimizer_type=optim_type,params=captioner_params,learning_rate=lr)
criterion = nn.CrossEntropyLoss().to(self.device)
cider_scores = []
best_cider = 0.0
best_epoch = 0
best_cider_woFT = 0.0
best_epoch_woFT = 0
for epoch in range(1, num_epochs + 1):
print('----------------------Start training for Epoch %d, CNN_fine_tune:%s---------------------' % (epoch, cnn_FT_start))
if epoch > lr_opts['lr_dec_start_epoch'] and lr_opts['lr_dec_start_epoch'] >= 0:
frac = (epoch - lr_opts['lr_dec_start_epoch']) // lr_opts['lr_dec_every']
decay_factor = lr_opts['lr_dec_rate'] ** frac
current_lr = lr * decay_factor
else:
current_lr = lr
if cnn_extractor_optimizer is not None:set_lr(cnn_extractor_optimizer,min(cnn_FT_lr,current_lr))
set_lr(captioner_optimizer, current_lr) # set the decayed rate
if epoch > ss_opts['ss_start_epoch'] and ss_opts['ss_start_epoch'] >= 0:
frac = (epoch - ss_opts['ss_start_epoch']) // ss_opts['ss_inc_every']
ss_prob = min(ss_opts['ss_inc_prob'] * frac, ss_opts['ss_max_prob'])
self.model.ss_prob = ss_prob
else:ss_prob = 0.0
print('| current_lr: %.6f cnn_FT_lr: %.6f current_scheduled_sampling_prob: %.2f |'
% (current_lr,cnn_FT_lr,ss_prob))
print('------------------------------------------------------------------------------------------')
self.training_epoch(dataloader=train_dataloader, optimizers=[cnn_extractor_optimizer,captioner_optimizer], criterion=criterion, tqdm_visible=tqdm_visible)
print('--------------Start evaluating for Epoch %d-----------------' % epoch)
results = self.eval_captions_json_generation(
dataloader=eval_dataloader,
eval_beam_size=eval_beam_size,
tqdm_visible=tqdm_visible
)
cider = coco_eval(results=results, eval_caption_path=eval_caption_path)
cider_scores.append(cider)
if cider > best_cider:
if cider > best_cider_record:
torch.save(self.model.state_dict(), './CheckPoints/%s/Captioner_cp.pth' % (self.tag))
score_record = {'cider':cider}
json.dump(score_record,open('./CheckPoints/%s/Captioner_cp_score.json' % (self.tag),'w'))
best_cider = cider
best_epoch = epoch
if len(cider_scores) >= 5:
last_5 = cider_scores[-4:]
last_5_max = max(last_5)
last_5_min = min(last_5)
if last_5_max != best_cider or abs(last_5_max - last_5_min) <= 0.01:
if not hasattr(self.model,'cnn_fine_tune') or cnn_FT_start:
print('No improvement with CIDEr in the last 5 epochs...Early stopping triggered.')
break
else:
print('No improvement with CIDEr in the last 5 epochs...CNN fine-tune triggered.')
best_cider_woFT = best_cider
best_epoch_woFT = best_epoch
cnn_FT_start = True
self.model.cnn_fine_tune(flag=cnn_FT_start)
self.load_pretrained_model(scst_model=False)
print('load pretrained model from previous best epoch:%d' % best_epoch_woFT)
cnn_extractor_params,_ = self.get_model_params()
cnn_extractor_optimizer = init_optimizer(optimizer_type=optim_type,params=cnn_extractor_params,learning_rate=cnn_FT_lr)
cider_scores = []
if hasattr(self.model,'cnn_fine_tune'):
print('Model of best epoch #:%d with CIDEr score %.3f w/o cnn fine-tune' % (best_epoch_woFT,best_cider_woFT))
print('Model of best epoch #:%d with CIDEr score %.3f w/ cnn fine-tune' % (best_epoch,best_cider))
else:
print('Model of best epoch #:%d with CIDEr score %.3f' % (best_epoch,best_cider))
def training_epoch(self, dataloader, optimizers, criterion, tqdm_visible=True):
self.model.train()
if tqdm_visible:
monitor = tqdm.tqdm(dataloader, desc='Training Process')
else:
monitor = dataloader
for batch_i, (_, imgs, captions, lengths) in enumerate(monitor):
imgs = imgs.to(self.device)
captions = captions.to(self.device)
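            # Targets are the captions shifted left by one token (the start token is dropped), so each sequence length is reduced by one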
lengths = [cap_len - 1 for cap_len in lengths]
targets = pack_padded_sequence(input=captions[:, 1:], lengths=lengths, batch_first=True)
self.model.zero_grad()
predictions = self.model(imgs, captions, lengths)
loss = criterion(predictions[0], targets[0])
loss_npy = loss.cpu().detach().numpy()
if tqdm_visible:
monitor.set_postfix(Loss=np.round(loss_npy, decimals=4))
loss.backward()
for optimizer in optimizers:
if optimizer is not None:
clip_gradient(optimizer, grad_clip=0.1)
optimizer.step()
#-------------------------------SCST training-----------------------------------------#
def SCSTtraining(self, num_epochs, train_dataloader, eval_dataloader, eval_caption_path,
optimizer_type, scst_lr, scst_cnn_FT_lr, use_preset_settings, eval_beam_size=-1,
load_pretrained_scst_model=False, overwrite_guarantee=True, cnn_FT_start=True, tqdm_visible=True):
print('SCST training needs the model pretrained.')
self.load_pretrained_model(scst_model=load_pretrained_scst_model)
        if overwrite_guarantee:
            best_scst_cider_record = self.load_score_record(scst=True)
        else:
            best_scst_cider_record = 0.0
if hasattr(self.model,'cnn_fine_tune'):
self.model.cnn_fine_tune(cnn_FT_start)
cnn_extractor_params,captioner_params = self.get_model_params()
#------------Load preset training settings if exists--------------#
optim_type = optimizer_type
lr = scst_lr
cnn_FT_lr = scst_cnn_FT_lr
if use_preset_settings:
if self.settings.__contains__('optimizer'):
optim_type = self.settings['optimizer']
print('training under preset optimizer_type:%s' % optim_type)
if self.settings.__contains__('scst_lr'):
lr = self.settings['scst_lr']
print('training under preset scst learning_rate:%.6f' % lr)
if self.settings.__contains__('scst_cnn_FT_lr'):
cnn_FT_lr = self.settings['scst_cnn_FT_lr']
print('training under preset scst cnn_FT_learning_rate:%.6f' % cnn_FT_lr)
#-----------------------------------------------------------------#
cnn_extractor_optimizer = init_optimizer(optimizer_type=optim_type,params=cnn_extractor_params,learning_rate=cnn_FT_lr)
captioner_optimizer = init_optimizer(optimizer_type=optim_type,params=captioner_params,learning_rate=lr)
criterion = RewardCriterion().to(self.device)
best_cider = 0.0
best_epoch = 0
for epoch in range(1,num_epochs + 1):
print('--------------Start training for Epoch %d, Training_Stage:SCST--------------' % (epoch))
print('| lr: %.6f cnn_FT_lr: %.6f |'
% (lr, cnn_FT_lr))
print('---------------------------------------------------------------------------')
self.SCST_training_epoch(dataloader=train_dataloader,optimizers=[cnn_extractor_optimizer,captioner_optimizer],criterion=criterion,tqdm_visible=tqdm_visible)
print('--------------Start evaluating for Epoch %d-----------------' % epoch)
results = self.eval_captions_json_generation(dataloader=eval_dataloader,eval_beam_size=eval_beam_size,tqdm_visible=tqdm_visible)
cider = coco_eval(results=results,eval_caption_path=eval_caption_path)
if cider > best_cider:
if cider > best_scst_cider_record: #avoid score decreasing
torch.save(self.model.state_dict(), './CheckPoints/%s/Captioner_scst_cp.pth' % (self.tag))
score_record = {'cider':cider}
json.dump(score_record,open('./CheckPoints/%s/Captioner_scst_cp_score.json' % (self.tag),'w'))
best_cider = cider
best_epoch = epoch
print('Model of best epoch #:%d with CIDEr score %.3f in stage:SCST'
% (best_epoch,best_cider))
def SCST_training_epoch(self,dataloader,optimizers,criterion,tqdm_visible=True):
self.model.train()
        if tqdm_visible:
            monitor = tqdm.tqdm(dataloader, desc='Training Process')
        else:
            monitor = dataloader
for batch_i,(imgids,imgs,img_gts) in enumerate(monitor):
imgs = imgs.to(self.device)
self.model.zero_grad()
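            # Self-critical sequence training: greedy decoding provides the reward baseline for the sampled captions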
self.model.eval()
with torch.no_grad():
greedy_res = self.model.sampler(imgs,max_len=20)
self.model.train()
seq_gen,seqLogprobs = self.model.sampler_rl(imgs,max_len=20) #(bsize,max_len)
rewards = get_self_critical_reward(gen_result=seq_gen,greedy_res=greedy_res,ground_truth=img_gts,
img_ids=imgids,caption_vocab = self.caption_vocab,dataset_name=self.dataset_name)
loss = criterion(seqLogprobs,seq_gen,rewards.to(self.device))
loss_npy = loss.cpu().detach().numpy()
if tqdm_visible:
monitor.set_postfix(Loss=np.round(loss_npy,decimals=4))
loss.backward()
for optimizer in optimizers:
if optimizer is not None:
clip_gradient(optimizer,grad_clip=0.25)
optimizer.step()
def eval_captions_json_generation(self,dataloader,eval_beam_size=-1,tqdm_visible=True):
self.model.eval()
result = []
print('Generating captions json for evaluation. Beam Search: %s' % (eval_beam_size!=-1))
        if tqdm_visible:
            monitor = tqdm.tqdm(dataloader, desc='Generating Process')
        else:
            monitor = dataloader
for batch_i, (image_ids, images) in enumerate(monitor):
images = images.to(self.device)
with torch.no_grad():
if eval_beam_size!=-1:
generated_captions = self.model.beam_search_sampler(images=images, beam_size=eval_beam_size)
else:
generated_captions = self.model.sampler(images=images, max_len=20)
captions = generated_captions.cpu().detach().numpy()
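            # Convert sampled word ids back into tokens, stopping at <end> and skipping the <sta> start token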
for image_idx in range(captions.shape[0]):
sampled_ids = captions[image_idx]
sampled_caption = []
for word_id in sampled_ids:
word = self.caption_vocab.ix2word[word_id]
if word == '<end>':
break
elif word != '<sta>':
sampled_caption.append(word)
sentence = ' '.join(sampled_caption)
tmp = {'image_id': int(image_ids[image_idx]), 'caption': sentence}
result.append(tmp)
return result
def eval(self,dataset,split,eval_scst,eval_dataloader,eval_caption_path,eval_beam_size=-1,output_statics=False,tqdm_visible=True):
self.load_pretrained_model(scst_model=eval_scst)
print('--------------Start evaluating for Dataset %s on %s split-----------------' % (dataset,split))
results = self.eval_captions_json_generation(dataloader=eval_dataloader, eval_beam_size=eval_beam_size,tqdm_visible=tqdm_visible)
        if output_statics:
            coco_eval_specific(results=results, eval_caption_path=eval_caption_path)
        else:
            coco_eval(results=results, eval_caption_path=eval_caption_path)
def test(self,use_scst_model,img_root,img_filename,eval_beam_size=-1):
self.load_pretrained_model(use_scst_model)
self.model.eval()
img_copy,gts = get_sample_image_info(img_root=img_root,img_filename=img_filename)
img = get_transform()(img_copy).unsqueeze(0)
img = img.to(self.device)
caption,additional = self.model.eval_test_image(image=img,caption_vocab=self.caption_vocab,max_len=20,eval_beam_size=eval_beam_size)
sentence = ' '.join(caption)
print('Generated caption:')
print(sentence)
if len(gts)>0:
img_id = list(gts.keys())[0]
res = [{'image_id':img_id,'caption':sentence}]
tokenizer = PTBTokenizer(_source='gts')
_gts = tokenizer.tokenize(gts)
tokenizer = PTBTokenizer(_source='res')
_res = tokenizer.tokenize(res)
ciderD_scorer = CiderD(df='COCO14-val')
ciderD_score,_ = ciderD_scorer.compute_score(gts=_gts,res=_res)
print('CIDEr-D :%.3f' % (ciderD_score))
self.show_additional_rlt(additional,img_copy,caption)
def show_additional_rlt(self,additional,image,caption):
pass
|
py
|
1a59b798332d91a71710c50722fdf3ecf2b93e50
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
from test_framework.estxconfig import COINBASE_MATURITY
def assert_approx(v, vexp, vspan=0.00001):
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
self.rpc_timewait = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Mine some coins
self.nodes[0].generate(10+COINBASE_MATURITY)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.01)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.01)
# Empty out node2's wallet
self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
for i in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
tx.vout = [tx.vout[0]] * 2000
funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
self.nodes[0].sendrawtransaction(signed_tx['hex'])
self.nodes[0].generate(1)
self.sync_all()
# Check that we can create a transaction that only requires ~100 of our
# utxos, without pulling in all outputs and creating a transaction that
# is way too big.
assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
    WalletGroupTest().main()
|
py
|
1a59b80e1d7bbfb67fa0188f2ca6d0ed932f1e09
|
# coding:utf-8
from django.conf import settings
from django.test.signals import setting_changed
try:
from django.utils.module_loading import import_string
except ImportError:
from django.utils.module_loading import import_by_path as import_string
from active_users.keys import AbstractActiveUserEntry
PREFIX = 'ACTIVE_USERS'
DEFAULTS = {
'KEY_EXPIRE': 20,
'KEY_CLASS': 'active_users.keys.ActiveUserEntry',
'EXCLUDE_URL_PATTERNS': []
}
class ActiveUsersSettings(object):
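    # Resolve each ACTIVE_USERS_<KEY> value from Django settings, falling back to DEFAULTS (KEY_CLASS is imported from its dotted path)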
def __init__(self):
for key, default in DEFAULTS.items():
value = getattr(settings, '{0}_{1}'.format(PREFIX, key), default)
self.set_setting(key, value)
assert issubclass(self.KEY_CLASS, AbstractActiveUserEntry)
def set_setting(self, key, value):
setattr(
self, key, import_string(value) if key == 'KEY_CLASS' else value)
active_users_settings = ActiveUsersSettings()
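# Keep the cached settings in sync when Django's setting_changed signal fires (e.g. override_settings in tests)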
def reload_settings(*args, **kwargs):
if kwargs['setting'].startswith(PREFIX):
key = kwargs['setting'].replace(PREFIX + '_', '')
if key in DEFAULTS:
active_users_settings.set_setting(
key, kwargs['value'] or DEFAULTS[key])
setting_changed.connect(reload_settings)
|
py
|
1a59b8521fabb0c71851cbca0cc9a183f944deae
|
"""
Django settings for dan6364_1_1 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dan6364_1_1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dan6364_1_1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
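# Use S3-backed media storage only when all AWS credentials and bucket settings are provided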
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
|
py
|
1a59ba7e56036198d6d925b421a5d57b89668525
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import subprocess
from collections import namedtuple
from datetime import datetime, timezone
from threading import Thread
from time import sleep
import requests
from flask import Flask, jsonify, request
app = Flask(__name__)
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
Node = namedtuple('Node', ['endpoint', 'data'])
# A simple manager which tracks all event subscriptions
class Manager:
def __init__(self):
self._events = {}
self._nr_sent_events = 0
def subscribe(self, id_, endpoint, event_name, data):
data = data or {}
logger.info(f'[subscribe] id: "{id_}", endpoint:"{endpoint}", '
f'name: "{event_name}", data: %s', data)
if event_name not in self._events:
self._events[event_name] = {}
# Check whether the id is new
if id_ in self._events[event_name]:
return False
        self._events[event_name][id_] = Node(endpoint, data)
        return True
def unsubscribe(self, id_, event_name):
logger.info(f'[unsubscribe] id: "{id_}", name: "{event_name}"')
if event_name not in self._events:
return False
# Check whether the id exists
if id_ not in self._events[event_name]:
return False
        del self._events[event_name][id_]
        return True
def publish(self, event_name, data):
logger.info(f'[publish] name: "{event_name}", data: %s', data)
if event_name not in self._events:
return False
for node in self._events[event_name].values():
# filter for user (optional)
if 'user' in node.data and 'user' in data:
if node.data['user'] == data['user']:
self._send_event(node, event_name, data)
else:
self._send_event(node, event_name, data)
return True
def _send_event(self, node, event_name, data):
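        # POST the event to the subscriber's endpoint as a CloudEvents-style JSON payload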
local_time = datetime.now(timezone.utc).astimezone()
requests.post(node.endpoint, json={
'eventType': event_name,
'type': 'com.microservices.python.template',
'specversion': '0.2',
'source': '/my-source',
'id': f'PYTHON-TEMPLATE-{self._nr_sent_events}',
'time': local_time.isoformat(),
'datacontenttype': 'application/json',
'data': data,
})
self._nr_sent_events = self._nr_sent_events + 1
manager = Manager()
@app.route('/events', methods=['POST'])
def subscribe():
    return jsonify({'success': manager.subscribe(
id_=request.json['id'],
endpoint=request.json['endpoint'],
event_name=request.json['event'],
data=request.json.get('data', {}),
)})
@app.route('/events', methods=['DELETE'])
def unsubscribe():
    return jsonify({'success': manager.unsubscribe(
id_=request.json['id'],
event_name=request.json['event'],
)})
@app.route('/publish', methods=['POST'])
def publish():
data = request.json.get('data', {})
if 'user' in request.json:
data['user'] = request.json['user']
    return jsonify({'success': manager.publish(
event_name=request.json['event'],
data=data,
)})
@app.route('/health', methods=['GET'])
def health():
return 'OK'
# Return errors as JSON objects
def app_error(e):
return jsonify({'message': str(e)}), 400
# Calls a callback every period with args
def set_interval(period, callback, **args):
def wrapper():
while True:
sleep(period)
callback(**args)
Thread(target=wrapper).start()
def heartbeat(user):
manager.publish('heartbeat', {
'user': user,
'time': str(datetime.now()),
})
if __name__ == '__main__':
app.register_error_handler(Exception, app_error)
set_interval(3, heartbeat, user='max')
set_interval(5, heartbeat, user='moritz')
app.run(host='0.0.0.0', port=8080)
|
py
|
1a59bc1f19f77161df1aee2449aae56838642581
|
#!/usr/bin/python
##
# Name: Vote Filters (vote_filters.py)
# Purpose: Contains the filters for vote_counter
# Date: January 5, 2011
#
# Developer: Christopher Woodall <chris.j.woodall at gmail.com>
# Copyright: Apache License 2.0
##
filters = [('Beatiful', 'Beautiful'), ('Deehunter', 'Deerhunter'),
('Monae, Janelle', 'Janelle Monae')] # Filters to make calculations more accurate
def filterAlbumString(title, filters=[]):
''' filterAlbumString: Filters the title of an album with standard filters
so that common typos are corrected and they can be compared more
appropriately.
title
filters: A list of tuples in the format (old,new) for replacement
string.replace(old,new)
'''
# Add in custom filters
for filter in filters:
title = title.replace(filter[0], filter[1])
# for this situation only
title = title.replace('Beatiful', 'Beautiful')
title = title.replace('Deehunter', 'Deerhunter')
title = title.replace('Monae, Janelle', 'Janelle Monae' )
# General cleanup
title = title.replace('\xe2\x80\x93', '-')
title = title.replace('?', '')
title = title.replace('\t', '')
title = title.replace('The', '')
title = title.replace(' ', ' ')
title = title.title()
title = title.split(':')[0]
title = title.rstrip()
title = title.lstrip()
return title
if __name__ == '__main__':
for filter in filters:
print filter
|
py
|
1a59bce16370d462ef69ff10027201334a87f6a0
|
from contextlib import suppress
from datetime import datetime
from typing import Optional
from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_state import RunningState
from models_library.users import UserID
from pydantic import BaseModel, PositiveInt, validator
from simcore_postgres_database.models.comp_pipeline import StateType
from ...utils.db import DB_TO_RUNNING_STATE
class CompRunsAtDB(BaseModel):
run_id: PositiveInt
project_uuid: ProjectID
user_id: UserID
cluster_id: Optional[ClusterID]
iteration: PositiveInt
result: RunningState
created: datetime
modified: datetime
started: Optional[datetime]
ended: Optional[datetime]
@validator("result", pre=True)
@classmethod
def convert_result_from_state_type_enum_if_needed(cls, v):
if isinstance(v, str):
# try to convert to a StateType, if it fails the validations will continue
# and pydantic will try to convert it to a RunninState later on
with suppress(ValueError):
v = StateType(v)
if isinstance(v, StateType):
return RunningState(DB_TO_RUNNING_STATE[StateType(v)])
return v
class Config:
orm_mode = True
schema_extra = {
"examples": [
# DB model
{
"run_id": 432,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
"cluster_id": 0,
"iteration": 42,
"result": "NOT_STARTED",
"created": "2021-03-01 13:07:34.19161",
"modified": "2021-03-01 13:07:34.19161",
},
{
"run_id": 43243,
"project_uuid": "65fee9d2-e030-452c-a29c-45d288577ca5",
"user_id": 132,
"cluster_id": 123,
"iteration": 12,
"result": "SUCCESS",
"created": "2021-03-01 13:07:34.19161",
"modified": "2021-03-01 13:07:34.19161",
"started": "2021-03-01 8:07:34.19161",
"ended": "2021-03-01 13:07:34.10",
},
]
}
|
py
|
1a59bd07f0a67ce8e4c9ea623ee415eeac4488c5
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests deprecation warnings in a few special cases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class DeprecationTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunction(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.initializers.tables_initializer()
self.assertEqual(0, mock_warning.call_count)
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"tables_initializer")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.initializers.tables_initializer")
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClass(self, mock_warning):
value = np.array([1, 2, 3])
row_splits = np.array([1])
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(0, mock_warning.call_count)
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"ragged.RaggedTensorValue")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.ragged.RaggedTensorValue")
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunctionEndpoint(self, mock_warning):
array = tf.IndexedSlices(
tf.compat.v1.convert_to_tensor(np.array([1, 2])),
tf.compat.v1.convert_to_tensor(np.array([0, 2])))
mask_indices = tf.compat.v1.convert_to_tensor(np.array([2]))
self.assertEqual(0, mock_warning.call_count)
tf.sparse.mask(array, mask_indices)
self.assertEqual(0, mock_warning.call_count)
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"sparse_mask")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
"sparse.mask")
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClassEndpoint(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.io.VarLenFeature(tf.dtypes.int32)
self.assertEqual(0, mock_warning.call_count)
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"VarLenFeature")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"io.VarLenFeature")
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
if __name__ == "__main__":
test.main()
|
py
|
1a59bd50d757db44e254a9ff7c8d3b74d5b62aff
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Classes that implement ASN.1 data structures.
"""
from asn1.asn1 import *
from asn1.ber import *
from volatile import *
from base_classes import BasePacket
#####################
#### ASN1 Fields ####
#####################
class ASN1F_badsequence(Exception):
pass
class ASN1F_element:
pass
class ASN1F_optionnal(ASN1F_element):
def __init__(self, field):
self._field=field
def __getattr__(self, attr):
return getattr(self._field,attr)
def dissect(self,pkt,s):
try:
return self._field.dissect(pkt,s)
except ASN1F_badsequence:
self._field.set_val(pkt,None)
return s
except BER_Decoding_Error:
self._field.set_val(pkt,None)
return s
def build(self, pkt):
if self._field.is_empty(pkt):
return ""
return self._field.build(pkt)
class ASN1F_field(ASN1F_element):
holds_packets=0
islist=0
ASN1_tag = ASN1_Class_UNIVERSAL.ANY
context=ASN1_Class_UNIVERSAL
def __init__(self, name, default, context=None):
if context is not None:
self.context = context
self.name = name
self.default = default
def i2repr(self, pkt, x):
return repr(x)
def i2h(self, pkt, x):
return x
def any2i(self, pkt, x):
return x
def m2i(self, pkt, x):
return self.ASN1_tag.get_codec(pkt.ASN1_codec).safedec(x, context=self.context)
def i2m(self, pkt, x):
if x is None:
x = 0
if isinstance(x, ASN1_Object):
if ( self.ASN1_tag == ASN1_Class_UNIVERSAL.ANY
or x.tag == ASN1_Class_UNIVERSAL.RAW
or x.tag == ASN1_Class_UNIVERSAL.ERROR
or self.ASN1_tag == x.tag ):
return x.enc(pkt.ASN1_codec)
else:
raise ASN1_Error("Encoding Error: got %r instead of an %r for field [%s]" % (x, self.ASN1_tag, self.name))
return self.ASN1_tag.get_codec(pkt.ASN1_codec).enc(x)
def do_copy(self, x):
if hasattr(x, "copy"):
return x.copy()
if type(x) is list:
x = x[:]
for i in xrange(len(x)):
if isinstance(x[i], BasePacket):
x[i] = x[i].copy()
return x
def build(self, pkt):
return self.i2m(pkt, getattr(pkt, self.name))
def set_val(self, pkt, val):
setattr(pkt, self.name, val)
def is_empty(self, pkt):
return getattr(pkt,self.name) is None
def dissect(self, pkt, s):
v,s = self.m2i(pkt, s)
self.set_val(pkt, v)
return s
def get_fields_list(self):
return [self]
def __hash__(self):
return hash(self.name)
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other
def __repr__(self):
return self.name
def randval(self):
return RandInt()
class ASN1F_INTEGER(ASN1F_field):
ASN1_tag= ASN1_Class_UNIVERSAL.INTEGER
def randval(self):
return RandNum(-2**64, 2**64-1)
class ASN1F_BOOLEAN(ASN1F_field):
ASN1_tag= ASN1_Class_UNIVERSAL.BOOLEAN
def randval(self):
return RandChoice(True,False)
class ASN1F_NULL(ASN1F_INTEGER):
ASN1_tag= ASN1_Class_UNIVERSAL.NULL
class ASN1F_SEP(ASN1F_NULL):
ASN1_tag= ASN1_Class_UNIVERSAL.SEP
class ASN1F_enum_INTEGER(ASN1F_INTEGER):
def __init__(self, name, default, enum):
ASN1F_INTEGER.__init__(self, name, default)
i2s = self.i2s = {}
s2i = self.s2i = {}
if type(enum) is list:
keys = xrange(len(enum))
else:
keys = enum.keys()
if filter(lambda x: type(x) is str, keys):
i2s,s2i = s2i,i2s
for k in keys:
i2s[k] = enum[k]
s2i[enum[k]] = k
def any2i_one(self, pkt, x):
if type(x) is str:
x = self.s2i[x]
return x
def i2repr_one(self, pkt, x):
return self.i2s.get(x, repr(x))
def any2i(self, pkt, x):
if type(x) is list:
return map(lambda z,pkt=pkt:self.any2i_one(pkt,z), x)
else:
return self.any2i_one(pkt,x)
def i2repr(self, pkt, x):
if type(x) is list:
return map(lambda z,pkt=pkt:self.i2repr_one(pkt,z), x)
else:
return self.i2repr_one(pkt,x)
class ASN1F_ENUMERATED(ASN1F_enum_INTEGER):
ASN1_tag = ASN1_Class_UNIVERSAL.ENUMERATED
class ASN1F_STRING(ASN1F_field):
ASN1_tag = ASN1_Class_UNIVERSAL.STRING
def randval(self):
return RandString(RandNum(0, 1000))
class ASN1F_PRINTABLE_STRING(ASN1F_STRING):
ASN1_tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
class ASN1F_BIT_STRING(ASN1F_STRING):
ASN1_tag = ASN1_Class_UNIVERSAL.BIT_STRING
class ASN1F_IPADDRESS(ASN1F_STRING):
ASN1_tag = ASN1_Class_UNIVERSAL.IPADDRESS
class ASN1F_TIME_TICKS(ASN1F_INTEGER):
ASN1_tag = ASN1_Class_UNIVERSAL.TIME_TICKS
class ASN1F_UTC_TIME(ASN1F_STRING):
ASN1_tag = ASN1_Class_UNIVERSAL.UTC_TIME
class ASN1F_GENERALIZED_TIME(ASN1F_STRING):
ASN1_tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
class ASN1F_OID(ASN1F_field):
ASN1_tag = ASN1_Class_UNIVERSAL.OID
def randval(self):
return RandOID()
class ASN1F_SEQUENCE(ASN1F_field):
ASN1_tag = ASN1_Class_UNIVERSAL.SEQUENCE
def __init__(self, *seq, **kargs):
if "ASN1_tag" in kargs:
self.ASN1_tag = kargs["ASN1_tag"]
self.seq = seq
def __repr__(self):
return "<%s%r>" % (self.__class__.__name__,self.seq,)
def set_val(self, pkt, val):
for f in self.seq:
f.set_val(pkt,val)
def is_empty(self, pkt):
for f in self.seq:
if not f.is_empty(pkt):
return False
return True
def get_fields_list(self):
return reduce(lambda x,y: x+y.get_fields_list(), self.seq, [])
def build(self, pkt):
s = reduce(lambda x,y: x+y.build(pkt), self.seq, "")
return self.i2m(pkt, s)
def dissect(self, pkt, s):
codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
try:
i,s,remain = codec.check_type_check_len(s)
for obj in self.seq:
s = obj.dissect(pkt,s)
if s:
warning("Too many bytes to decode sequence: [%r]" % s) # XXX not reversible!
return remain
except ASN1_Error,e:
raise ASN1F_badsequence(e)
class ASN1F_SET(ASN1F_SEQUENCE):
ASN1_tag = ASN1_Class_UNIVERSAL.SET
class ASN1F_SEQUENCE_OF(ASN1F_SEQUENCE):
holds_packets = 1
islist = 1
def __init__(self, name, default, asn1pkt, ASN1_tag=0x30):
self.asn1pkt = asn1pkt
self.tag = chr(ASN1_tag)
self.name = name
self.default = default
def i2repr(self, pkt, i):
if i is None:
return []
return i
def get_fields_list(self):
return [self]
def set_val(self, pkt, val):
ASN1F_field.set_val(self, pkt, val)
def is_empty(self, pkt):
return ASN1F_field.is_empty(self, pkt)
def build(self, pkt):
val = getattr(pkt, self.name)
if isinstance(val, ASN1_Object) and val.tag == ASN1_Class_UNIVERSAL.RAW:
s = val
elif val is None:
s = ""
else:
s = "".join(map(str, val ))
return self.i2m(pkt, s)
def dissect(self, pkt, s):
codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
i,s1,remain = codec.check_type_check_len(s)
lst = []
while s1:
try:
p = self.asn1pkt(s1)
except ASN1F_badsequence,e:
lst.append(conf.raw_layer(s1))
break
lst.append(p)
if conf.raw_layer in p:
s1 = p[conf.raw_layer].load
del(p[conf.raw_layer].underlayer.payload)
else:
break
self.set_val(pkt, lst)
return remain
def randval(self):
return fuzz(self.asn1pkt())
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__,self.name)
class ASN1F_PACKET(ASN1F_field):
holds_packets = 1
def __init__(self, name, default, cls):
ASN1F_field.__init__(self, name, default)
self.cls = cls
def i2m(self, pkt, x):
if x is None:
x = ""
return str(x)
def extract_packet(self, cls, x):
try:
c = cls(x)
except ASN1F_badsequence:
c = conf.raw_layer(x)
cpad = c.getlayer(conf.padding_layer)
x = ""
if cpad is not None:
x = cpad.load
del(cpad.underlayer.payload)
return c,x
def m2i(self, pkt, x):
return self.extract_packet(self.cls, x)
class ASN1F_CHOICE(ASN1F_PACKET):
ASN1_tag = ASN1_Class_UNIVERSAL.NONE
def __init__(self, name, default, *args):
self.name=name
self.choice = {}
for p in args:
self.choice[p.ASN1_root.ASN1_tag] = p
# self.context=context
self.default=default
def m2i(self, pkt, x):
if len(x) == 0:
return conf.raw_layer(),""
raise ASN1_Error("ASN1F_CHOICE: got empty string")
if ord(x[0]) not in self.choice:
return conf.raw_layer(x),"" # XXX return RawASN1 packet ? Raise error
raise ASN1_Error("Decoding Error: choice [%i] not found in %r" % (ord(x[0]), self.choice.keys()))
z = ASN1F_PACKET.extract_packet(self, self.choice[ord(x[0])], x)
return z
def randval(self):
return RandChoice(*map(lambda x:fuzz(x()), self.choice.values()))
# This import must come in last to avoid problems with cyclic dependencies
import packet
|
py
|
1a59bd87c526a8a5cc251498c88706e21d782c7e
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from google.appengine.ext import ndb
from common.base_handler import BaseHandler, Permission
def _FormatDatetime(dt):
if not dt:
return None # pragma: no cover
else:
return dt.strftime('%Y-%m-%d %H:%M:%S UTC')
class Culprit(BaseHandler):
PERMISSION_LEVEL = Permission.ANYONE
def HandleGet(self):
"""Lists the build cycles in which the culprit caused failures."""
key = self.request.get('key', '')
culprit = ndb.Key(urlsafe=key).get()
if not culprit: # pragma: no cover
return self.CreateError('Culprit not found', 404)
def ConvertBuildInfoToADict(build_info):
return {
'master_name': build_info[0],
'builder_name': build_info[1],
'build_number': build_info[2],
}
data = {
'project_name': culprit.project_name,
'revision': culprit.revision,
'commit_position': culprit.commit_position,
'cr_notified': culprit.cr_notified,
'cr_notification_time': _FormatDatetime(culprit.cr_notification_time),
'builds': map(ConvertBuildInfoToADict, culprit.builds),
'key': key,
}
return {'template': 'waterfall/culprit.html', 'data': data}
|
py
|
1a59bd94b602f809b81d8ad38fbff8348fe32581
|
#!/usr/bin/python -tt
# Expense Calculator
class Expense_Calculator(object):
def Expenses(self, Age, Retirement_Age, Inflation, Current_Expenses):
self.Future_Expenses={}
for x in range(Age,Retirement_Age+1):
if x==Age:
self.Future_Expenses[Age]=Current_Expenses
else:
self.Future_Expenses[x]=self.Future_Expenses[x-1]*(1+Inflation/100)
return self.Future_Expenses
# Modify Expenses
def Modify_Expense(self, Future_Expenses, Age, Value):
self.Future_Expenses[Age]=Value
return self.Future_Expenses
# Calculate Balance available for given corpus
def Balance(self, Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses):
self.Current_Balance={}
for x in range(Age,Retirement_Age+1):
if x==Age:
self.Current_Balance[Age]=Corpus
else:
self.Monthly_Expenses=Expenses[x-1]/12
self.Monthly_rate=Deposit_Rate/1200
self.Current_Balance[x]=(((1 + self.Monthly_rate)**12 * (self.Monthly_rate*self.Current_Balance[x-1] - self.Monthly_Expenses) + self.Monthly_Expenses)/self.Monthly_rate)
return self.Current_Balance
# Calculate Final Balance available at the end
def Final_Balance(self, Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses):
self.End_Balance=self.Balance(Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)
return self.End_Balance[Retirement_Age]
# Calculate minimum Balance to keep handy
def Minimum_Balance(self, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses):
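        # Bisection: bracket the required corpus between Min and Max, then halve the interval until the final balance is within epsilon of zero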
self.Initial_Corpus=Expenses[Retirement_Age]
epsilon=0.001
self.End_Balance=self.Final_Balance(self.Initial_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)
if self.End_Balance>0:
            Max=self.Initial_Corpus
            Min=self.Initial_Corpus/2
            while self.Final_Balance(Min, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)>0:
                Min=Min/2
elif self.End_Balance<0:
Min=self.Initial_Corpus
Max=self.Initial_Corpus*2
while self.Final_Balance(Max, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)<0:
Max=Max*2
self.Minimum_Corpus=(Min+Max)/2
while abs(self.Final_Balance(self.Minimum_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses))>=epsilon:
if self.Final_Balance(self.Minimum_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)>0:
Max=self.Minimum_Corpus
elif self.Final_Balance(self.Minimum_Corpus, Age, Retirement_Age, Deposit_Rate, Inflation_rate, Expenses)<0:
Min=self.Minimum_Corpus
self.Minimum_Corpus=(Min+Max)/2
return self.Minimum_Corpus
# Age=int(input("Enter your Age : "))
# Retirement_Age=int(input("Enter your Retirement Age : "))
# Inflation_rate=int(input("Enter the Inflation rate : "))
# Deposit_rate=int(input("Enter the Deposit rate : "))
# Corpus=int(input("Enter the Corpus : "))
# Annual_Expenses=int(input("Enter current Annual Expenses : "))
# Future_Expenses=Expenses(Age, Retirement_Age, Inflation_rate, Annual_Expenses)
# for key in Future_Expenses:
# print(f'Age->{key} Expenses->{Future_Expenses[key]}')
# Annual_Balance=Balance(Corpus, Age, Retirement_Age, Deposit_rate, Inflation_rate, Future_Expenses)
# for key in Annual_Balance:
# print(f'Age->{key} Balance->{Annual_Balance[key]}')
# Min_Corpus=Minimum_Balance(Age, Retirement_Age, Deposit_rate, Inflation_rate, Future_Expenses)
# print(f'Minimum Corpus required is {Min_Corpus}')
#if __name__ == '__main__':
# main()
|
py
|
1a59bdcaa47e7c2c99698a109add397726bfc8a1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
from django.conf import settings
from django.template import loader
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.encoding import smart_str
from puppeteer_pdf.utils import (_options_to_args, make_absolute_paths, render_pdf_from_template,
render_to_temporary_file, RenderedFile, puppeteer_to_pdf)
from puppeteer_pdf.views import PDFResponse, PDFTemplateView, PDFTemplateResponse
class UnicodeContentPDFTemplateView(PDFTemplateView):
"""
PDFTemplateView with the addition of unicode content in his context.
Used in unicode content view testing.
"""
def get_context_data(self, **kwargs):
Base = super(UnicodeContentPDFTemplateView, self)
context = Base.get_context_data(**kwargs)
context['title'] = u'♥'
return context
class TestUtils(TestCase):
def setUp(self):
# Clear standard error
self._stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
self.factory = RequestFactory()
def tearDown(self):
sys.stderr = self._stderr
def test_options_to_args(self):
self.assertEqual(_options_to_args(), [])
self.assertEqual(_options_to_args(heart=u'♥', displayHeaderFooter=True,
path='file-path'),
['--displayHeaderFooter',
'--heart', u'♥',
'--path', 'file-path'])
self.assertEqual(_options_to_args(heart=u'♥', landscape=True,
path='file-path'),
['--heart', u'♥',
'--landscape',
'--path', 'file-path'])
self.assertEqual(_options_to_args(heart=u'♥', landscape=False,
path='file-path'),
['--heart', u'♥',
'--path', 'file-path'])
def test_puppeteer_to_pdf(self):
"""Should run puppeteer to generate a PDF"""
title = 'A test template.'
template = loader.get_template('sample.html')
temp_file = render_to_temporary_file(template, context={'title': title})
try:
# Single page
pdf_output = puppeteer_to_pdf(input=temp_file.name)
self.assertTrue(pdf_output.startswith(b'%PDF'), pdf_output)
# Unicode
pdf_output = puppeteer_to_pdf(input=temp_file.name, title=u'♥')
self.assertTrue(pdf_output.startswith(b'%PDF'), pdf_output)
finally:
temp_file.close()
def test_puppeteer_to_pdf_with_unicode_content(self):
"""A puppeteer_to_pdf call should render unicode content properly"""
title = u'♥'
template = loader.get_template('unicode.html')
temp_file = render_to_temporary_file(template, context={'title': title})
try:
pdf_output = puppeteer_to_pdf(input=temp_file.name)
self.assertTrue(pdf_output.startswith(b'%PDF'), pdf_output)
finally:
temp_file.close()
def test_render_to_temporary_file(self):
"""Should render a template to a temporary file."""
title = 'A test template.'
template = loader.get_template('sample.html')
temp_file = render_to_temporary_file(template, context={'title': title})
temp_file.seek(0)
saved_content = smart_str(temp_file.read())
self.assertTrue(title in saved_content)
temp_file.close()
def _render_file(self, template, context):
"""Helper method for testing rendered file deleted/persists tests."""
render = RenderedFile(template=template, context=context)
render.temporary_file.seek(0)
saved_content = smart_str(render.temporary_file.read())
return (saved_content, render.filename)
def test_rendered_file_deleted_on_production(self):
"""If PUPPETEER_PDF_DEBUG=False, delete rendered file on object close."""
title = 'A test template.'
template = loader.get_template('sample.html')
debug = getattr(settings, 'PUPPETEER_PDF_DEBUG', settings.DEBUG)
saved_content, filename = self._render_file(template=template,
context={'title': title})
# First verify temp file was rendered correctly.
self.assertTrue(title in saved_content)
# Then check if file is deleted when debug=False.
self.assertFalse(debug)
self.assertFalse(os.path.isfile(filename))
def test_rendered_file_persists_on_debug(self):
"""If PUPPETEER_PDF_DEBUG=True, the rendered file should persist."""
title = 'A test template.'
template = loader.get_template('sample.html')
with self.settings(PUPPETEER_PDF_DEBUG=True):
debug = getattr(settings, 'PUPPETEER_PDF_DEBUG', settings.DEBUG)
saved_content, filename = self._render_file(template=template,
context={'title': title})
# First verify temp file was rendered correctly.
self.assertTrue(title in saved_content)
# Then check if file persists when debug=True.
self.assertTrue(debug)
self.assertTrue(os.path.isfile(filename))
def test_render_with_null_request(self):
"""If request=None, the file should render properly."""
title = 'A test template.'
loader.get_template('sample.html')
pdf_content = render_pdf_from_template('sample.html',
header_template=None,
footer_template=None,
context={'title': title})
self.assertTrue(pdf_content.startswith(b'%PDF-'))
self.assertTrue(pdf_content.endswith(b'%%EOF'))
class TestViews(TestCase):
template = 'sample.html'
footer_template = 'footer.html'
pdf_filename = 'output.pdf'
attached_fileheader = 'attachment; filename="{0}"'
inline_fileheader = 'inline; filename="{0}"'
def test_pdf_response(self):
"""Should generate correct HttpResponse object and content type."""
# 404
response = PDFResponse(content='', status=404)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content, b'')
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertFalse(response.has_header('Content-Disposition'))
content = b'%PDF-1.4\n%%EOF'
# Without filename
response = PDFResponse(content=content)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, content)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertFalse(response.has_header('Content-Disposition'))
# With filename
response = PDFResponse(content=content, filename="nospace.pdf")
self.assertEqual(response['Content-Disposition'],
'attachment; filename="nospace.pdf"')
response = PDFResponse(content=content, filename="one space.pdf")
self.assertEqual(response['Content-Disposition'],
'attachment; filename="one space.pdf"')
response = PDFResponse(content=content, filename="4'5\".pdf")
self.assertEqual(response['Content-Disposition'],
'attachment; filename="4\'5.pdf"')
response = PDFResponse(content=content, filename=u"♥.pdf")
try:
import unidecode
except ImportError:
filename = '?.pdf'
else:
filename = '.pdf'
self.assertEqual(response['Content-Disposition'],
'attachment; filename="{0}"'.format(filename))
# Content as a direct output
response = PDFResponse(content=content, filename="nospace.pdf",
show_content_in_browser=True)
self.assertEqual(response['Content-Disposition'],
'inline; filename="nospace.pdf"')
response = PDFResponse(content=content, filename="one space.pdf",
show_content_in_browser=True)
self.assertEqual(response['Content-Disposition'],
'inline; filename="one space.pdf"')
response = PDFResponse(content=content, filename="4'5\".pdf",
show_content_in_browser=True)
self.assertEqual(response['Content-Disposition'],
'inline; filename="4\'5.pdf"')
response = PDFResponse(content=content, filename=u"♥.pdf",
show_content_in_browser=True)
try:
import unidecode
except ImportError:
filename = '?.pdf'
else:
filename = '.pdf'
self.assertEqual(response['Content-Disposition'],
'inline; filename="{0}"'.format(filename))
# Content-Type
response = PDFResponse(content=content,
content_type='application/x-pdf')
self.assertEqual(response['Content-Type'], 'application/x-pdf')
def test_pdf_template_response(self, show_content=False):
"""Test PDFTemplateResponse."""
context = {'title': 'Heading'}
request = RequestFactory().get('/')
response = PDFTemplateResponse(request=request,
template=self.template,
context=context,
show_content_in_browser=show_content)
self.assertEqual(response._request, request)
self.assertEqual(response.template_name, self.template)
self.assertEqual(response.context_data, context)
self.assertEqual(response.filename, None)
self.assertEqual(response.header_template, None)
self.assertEqual(response.footer_template, None)
self.assertEqual(response.cmd_options, {})
self.assertFalse(response.has_header('Content-Disposition'))
# Render to temporary file
template = loader.get_template(self.template)
tempfile = render_to_temporary_file(template, context=context)
tempfile.seek(0)
html_content = smart_str(tempfile.read())
self.assertTrue(html_content.startswith('<html>'))
self.assertTrue('<h1>{title}</h1>'.format(**context)
in html_content)
pdf_content = response.rendered_content
self.assertTrue(pdf_content.startswith(b'%PDF-'))
self.assertTrue(pdf_content.endswith(b'%%EOF'))
# Footer
cmd_options = {}
response = PDFTemplateResponse(request=request,
template=self.template,
context=context,
filename=self.pdf_filename,
show_content_in_browser=show_content,
footer_template=self.footer_template,
cmd_options=cmd_options)
self.assertEqual(response.filename, self.pdf_filename)
self.assertEqual(response.header_template, None)
self.assertEqual(response.footer_template, self.footer_template)
self.assertEqual(response.cmd_options, cmd_options)
self.assertTrue(response.has_header('Content-Disposition'))
footer_template = loader.get_template(self.footer_template)
tempfile = render_to_temporary_file(footer_template, context=context,
request=request)
tempfile.seek(0)
footer_content = smart_str(tempfile.read())
footer_content = make_absolute_paths(footer_content)
media_url = 'file://{0}/'.format(settings.MEDIA_ROOT)
self.assertTrue(media_url in footer_content, True)
static_url = 'file://{0}/'.format(settings.STATIC_ROOT)
self.assertTrue(static_url in footer_content, True)
def test_pdf_template_response_to_browser(self):
self.test_pdf_template_response(show_content=True)
def test_pdf_template_view(self, show_content=False):
"""Test PDFTemplateView."""
view = PDFTemplateView.as_view(filename=self.pdf_filename,
show_content_in_browser=show_content,
template_name=self.template,
footer_template=self.footer_template)
# As PDF
request = RequestFactory().get('/')
response = view(request)
self.assertEqual(response.status_code, 200)
response.render()
fileheader = self.attached_fileheader
if show_content:
fileheader = self.inline_fileheader
self.assertEqual(response['Content-Disposition'],
fileheader.format(self.pdf_filename))
self.assertTrue(response.content.startswith(b'%PDF-'))
self.assertTrue(response.content.endswith(b'%%EOF'))
# As HTML
request = RequestFactory().get('/?as=html')
response = view(request)
self.assertEqual(response.status_code, 200)
response.render()
self.assertFalse(response.has_header('Content-Disposition'))
self.assertTrue(response.content.startswith(b'<html>'))
# POST
request = RequestFactory().post('/')
response = view(request)
self.assertEqual(response.status_code, 405)
def test_pdf_template_view_to_browser(self):
self.test_pdf_template_view(show_content=True)
def test_pdf_template_view_unicode(self, show_content=False):
"""Test PDFTemplateView with unicode content."""
view = UnicodeContentPDFTemplateView.as_view(
filename=self.pdf_filename,
show_content_in_browser=show_content,
template_name=self.template
)
# As PDF
request = RequestFactory().get('/')
response = view(request)
self.assertEqual(response.status_code, 200)
response.render()
fileheader = self.attached_fileheader
if show_content:
fileheader = self.inline_fileheader
self.assertEqual(response['Content-Disposition'],
fileheader.format(self.pdf_filename))
# not sure how we can test this as the contents is all encoded...
# best we can do for the moment is check it's a pdf and it worked.
# self.assertTrue('☃' in response.content)
self.assertTrue(response.content.startswith(b'%PDF-'))
self.assertTrue(response.content.endswith(b'%%EOF'))
def test_pdf_template_view_unicode_to_browser(self):
self.test_pdf_template_view_unicode(show_content=True)
def test_get_cmd_options(self):
# Default cmd_options
view = PDFTemplateView()
self.assertEqual(view.cmd_options, PDFTemplateView.cmd_options)
self.assertEqual(PDFTemplateView.cmd_options, {})
# Instantiate with new cmd_options
cmd_options = {'orientation': 'landscape'}
view = PDFTemplateView(cmd_options=cmd_options)
self.assertEqual(view.cmd_options, cmd_options)
self.assertEqual(PDFTemplateView.cmd_options, {})
# Update local instance of cmd_options
view = PDFTemplateView()
view.cmd_options.update(cmd_options)
self.assertEqual(view.cmd_options, cmd_options)
self.assertEqual(PDFTemplateView.cmd_options, {})
|
py
|
1a59bece5c2491a930f11179915cb831d6c5e504
|
import os,sys
sys.path.append('../')
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping
from keras.layers.advanced_activations import ReLU, PReLU
from keras.layers.core import Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential, load_model
from keras.optimizers import SGD, Adam
from keras.utils import np_utils
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, f1_score
from src.model import Model
from src.util import Util
from scipy.sparse import issparse
# Suppress TensorFlow warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class ModelMLP(Model):
def __init__(self, run_fold_name, **params):
super().__init__(run_fold_name, params)
def train(self, tr_x, tr_y, va_x=None, va_y=None):
validation = va_x is not None
        # Hyperparameters
nb_classes = 5
input_dropout = self.params['input_dropout']
hidden_layers = int(self.params['hidden_layers'])
hidden_units = int(self.params['hidden_units'])
hidden_activation = self.params['hidden_activation']
hidden_dropout = self.params['hidden_dropout']
batch_norm = self.params['batch_norm']
optimizer_type = self.params['optimizer']['type']
optimizer_lr = self.params['optimizer']['lr']
batch_size = int(self.params['batch_size'])
nb_epoch = int(self.params['nb_epoch'])
        # Standardization
if issparse(tr_x):
scaler = StandardScaler(with_mean=False)
else:
scaler = StandardScaler()
scaler.fit(tr_x)
tr_x = scaler.transform(tr_x)
tr_y = np_utils.to_categorical(tr_y, num_classes=nb_classes)
if validation:
va_x = scaler.transform(va_x)
va_y = np_utils.to_categorical(va_y, num_classes=nb_classes)
self.scaler = scaler
        # Define the Sequential model
self.model = Sequential()
# input dropout
self.model.add(Dropout(input_dropout, input_shape=(tr_x.shape[1],)))
        # Hidden layers
for i in range(hidden_layers):
self.model.add(Dense(hidden_units))
if batch_norm == 'before_act':
self.model.add(BatchNormalization())
if hidden_activation == 'prelu':
self.model.add(PReLU())
elif hidden_activation == 'relu':
self.model.add(ReLU())
else:
raise NotImplementedError
self.model.add(Dropout(hidden_dropout))
        # Output layer
self.model.add(Dense(nb_classes, activation='softmax'))
        # Optimizer
if optimizer_type == 'sgd':
optimizer = SGD(lr=optimizer_lr, decay=1e-6, momentum=0.9, nesterov=True)
elif optimizer_type == 'adam':
optimizer = Adam(lr=optimizer_lr, beta_1=0.9, beta_2=0.999, decay=0.)
else:
raise NotImplementedError
        # Set the loss function and evaluation metrics
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        # Run training
if validation:
            # Number of epochs without improvement in the validation score before stopping
patience = 12
early_stopping = EarlyStopping(monitor='val_loss', patience=patience,
verbose=2, restore_best_weights=True)
history = self.model.fit(tr_x, tr_y, epochs=nb_epoch, batch_size=batch_size, verbose=2,
validation_data=(va_x, va_y), callbacks=[early_stopping])
else:
            history = self.model.fit(tr_x, tr_y, epochs=nb_epoch, batch_size=batch_size, verbose=2)
def predict(self, te_x):
        te_x = self.scaler.transform(te_x)
y_pred = self.model.predict(te_x)
return y_pred
def score(self, te_x, te_y):
y_pred = self.predict(te_x)
#print(classification_report(te_y, y_pred))
return f1_score(np.identity(5)[te_y], np.identity(5)[np.argmax(y_pred, axis=1)], average='samples')
def save_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.h5')
scaler_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}-scaler.pkl')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
self.model.save(model_path)
Util.dump(self.scaler, scaler_path)
def load_model(self, feature):
model_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}.h5')
scaler_path = os.path.join(f'../model/model/{feature}', f'{self.run_fold_name}-scaler.pkl')
self.model = load_model(model_path)
self.scaler = Util.load(scaler_path)
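# A minimal usage sketch for ModelMLP, assuming the src.model.Model base class stores `params`
# as self.params and that Keras/scikit-learn are installed; the data and hyperparameter values
# below are arbitrary placeholders.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(200, 20)                    # 200 samples, 20 dense features
    y = rng.randint(0, 5, size=200)          # 5 classes, matching nb_classes in train()
    params = {
        'input_dropout': 0.1,
        'hidden_layers': 2,
        'hidden_units': 64,
        'hidden_activation': 'relu',
        'hidden_dropout': 0.2,
        'batch_norm': 'before_act',
        'optimizer': {'type': 'adam', 'lr': 1e-3},
        'batch_size': 32,
        'nb_epoch': 5,
    }
    model = ModelMLP('mlp-fold0', **params)
    model.train(X[:160], y[:160], X[160:], y[160:])   # train with a held-out validation split
    print('sample-averaged F1:', model.score(X[160:], y[160:]))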
|
py
|
1a59bf22d8981cc9bf6bb1a988f4252bb3e16b3f
|
from Simulador import Simulador
import math
import pandas as pd
d = pd.read_pickle('C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/simulacoes_chance_30%.pkl')
print(d.head())
nome_simulacao = "simulacoes_chance_100%"
n_simulacoes = 1000
tamanho_matriz = 100
chance_infeccao = 1
chance_infeccao_sintomatico = 0.2
chance_morte = 0.02
atualizacoes_cura = 10
inserir_infectados_aleatorios = False
import numpy as np
import random
dados_simulacoes = pd.DataFrame(dtype=int)
for i in range(n_simulacoes):
sim = Simulador(
tamanho_matriz,
chance_infeccao,
chance_infeccao_sintomatico,
chance_morte,
atualizacoes_cura,
inserir_infectados_aleatorios)
sim.executar_simulacao()
dados_simulacoes = dados_simulacoes.append(sim.dict_resumo, ignore_index = True)
dados_simulacoes = dados_simulacoes[["pop_inicial",
"tipo1_inicial",
"tipo2_inicial",
"n/2_100%_infectados",
"tipo1_n/2",
"tipo2_n/2",
"curados_n/2",
"mortos_n/2",
"n/2+1_100%_infectados",
"tipo1_n/2+1",
"tipo2_n/2+1",
"curados_n/2+1",
"mortos_n/2+1",
"n_atualizacoes_100%_infectados",
"tipo1_n",
"tipo2_n",
"curados_n",
"mortos_n",
"numero_total_atualizacoes",
"sadios_final",
"curados_final",
"mortos_final"]].astype(int)
dados_simulacoes.to_pickle('C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/'+ nome_simulacao + '.pkl')
nome_simulacao = "simulacoes_chance_30%"
chance_infeccao = 0.3
dados_simulacoes = pd.DataFrame(dtype=int)
for i in range(n_simulacoes):
sim = Simulador(
tamanho_matriz,
chance_infeccao,
chance_infeccao_sintomatico,
chance_morte,
atualizacoes_cura,
inserir_infectados_aleatorios)
sim.executar_simulacao()
dados_simulacoes = dados_simulacoes.append(sim.dict_resumo, ignore_index = True)
dados_simulacoes = dados_simulacoes[["pop_inicial",
"tipo1_inicial",
"tipo2_inicial",
"n/2_100%_infectados",
"tipo1_n/2",
"tipo2_n/2",
"curados_n/2",
"mortos_n/2",
"n/2+1_100%_infectados",
"tipo1_n/2+1",
"tipo2_n/2+1",
"curados_n/2+1",
"mortos_n/2+1",
"n_atualizacoes_100%_infectados",
"tipo1_n",
"tipo2_n",
"curados_n",
"mortos_n",
"numero_total_atualizacoes",
"sadios_final",
"curados_final",
"mortos_final"]].astype(int)
dados_simulacoes.to_pickle('C:/Users/Eduar/Documents/GitHub/Trabalho_final_estatistica_cd/dados/'+ nome_simulacao + '.pkl')
print(dados_simulacoes)
|
py
|
1a59bf3cd432661e8bb72ff7d8cb8dc293eecdb5
|
from typing import List, Dict, Any, Optional
import logging
from pytrec_eval import RelevanceEvaluator
from haystack import MultiLabel, Label
from farm.evaluation.squad_evaluation import compute_f1 as calculate_f1_str
from farm.evaluation.squad_evaluation import compute_exact as calculate_em_str
logger = logging.getLogger(__name__)
SUM_PREFIX = 'sum_'
class EvalRewriter:
"""
TODO
This is a pipeline node that should be placed after a node that returns a `query` and `original_query`, e.g.
Rewriter, in order to assess its performance. Performance metrics are stored in this class and updated as each
sample passes through it. To view the results of the evaluation, call EvalRewriter.print().
"""
def __init__(self):
self.outgoing_edges = 1
self.init_counts()
def init_counts(self):
self.f1_micro = 0
self.query_count = 0
def run(self, **kwargs):
query = kwargs.get('query', None)
original_query = kwargs.get('original_query', None)
self.query_count += 1
if original_query is None or query is None:
raise KeyError(f'The previous component should provide both the `query` and `original_query`, but args '
f'given are: {kwargs}')
return {**kwargs}, "output_1"
class EvalTREC:
"""
This is a pipeline node that should be placed after a node that returns a List of Document, e.g., Retriever or
Ranker, in order to assess its performance. Performance metrics are stored in this class and updated as each
sample passes through it. To view the results of the evaluation, call EvalTREC.print().
"""
def __init__(self,
debug: bool = False,
top_k_eval_documents: int = 10,
metrics: set = None,
name="EvalTREC",
):
"""
        :param metrics:
            Which metrics to use. Please consult the trec_eval documentation
            (https://github.com/usnistgov/trec_eval) for the available metrics.
:param debug:
When True, the results for each sample and its evaluation will be stored in self.log
:param top_k_eval_documents:
calculate eval metrics for top k results
"""
self.metrics = metrics if metrics else {'recall', 'ndcg', 'map', 'map_cut', 'recip_rank', 'ndcg_cut.1,3'}
self.outgoing_edges = 1
self.init_counts()
self.debug = debug
self.log: List = []
self.top_k_eval_documents = top_k_eval_documents
self.name = name
self.too_few_docs_warning = False
self.top_k_used = 0
def init_counts(self):
self.correct_retrieval_count = 0
self.query_count = 0
self.has_answer_count = 0
self.has_answer_correct = 0
self.has_answer_recall = 0
self.no_answer_count = 0
self.recall = 0.0
self.mean_reciprocal_rank = 0.0
self.has_answer_mean_reciprocal_rank = 0.0
self.reciprocal_rank_sum = 0.0
self.has_answer_reciprocal_rank_sum = 0.0
# For mean average precision
self.mean_average_precision = 0.0
self.average_precision_sum = 0.0
# Reset sum parameters
self.pytrec_eval_sums = {}
def run(self, documents, labels: dict, top_k_eval_documents: Optional[int] = None, **kwargs):
"""Run this node on one sample and its labels"""
self.query_count += 1
if not top_k_eval_documents:
top_k_eval_documents = self.top_k_eval_documents
if not self.top_k_used:
self.top_k_used = top_k_eval_documents
elif self.top_k_used != top_k_eval_documents:
logger.warning(f"EvalDocuments was last run with top_k_eval_documents={self.top_k_used} but is "
f"being run again with top_k_eval_documents={self.top_k_eval_documents}. "
f"The evaluation counter is being reset from this point so that the evaluation "
f"metrics are interpretable.")
self.init_counts()
if len(documents) < top_k_eval_documents and not self.too_few_docs_warning:
logger.warning(f"EvalDocuments is being provided less candidate documents than top_k_eval_documents "
f"(currently set to {top_k_eval_documents}).")
self.too_few_docs_warning = True
qrels = kwargs.get('qrels', None)
qrels = {k: int(rank) for k, rank in qrels.items()}
# The RelevanceEvaluator wants a dictionary with query id keys. What the ID is, is irrelevant. It is just
# used to retrieve the results.
query_id = 'q1'
evaluator = RelevanceEvaluator({query_id: qrels}, self.metrics)
# The run should have the format {query_id: {doc_id: rank_score}}
run = {query_id: {d.id: d.score for d in documents}}
pytrec_results = evaluator.evaluate(run)[query_id]
retrieved_reciprocal_rank = pytrec_results['recip_rank']
# TODO MAP computed by pytrec_eval differs from Haystack's self.average_precision_retrieved...
average_precision = pytrec_results['map']
for k, score in pytrec_results.items():
sum_key = f"{SUM_PREFIX}{k}"
if sum_key not in self.pytrec_eval_sums:
self.pytrec_eval_sums[sum_key] = 0
self.pytrec_eval_sums[sum_key] += score
self.reciprocal_rank_sum += retrieved_reciprocal_rank
self.average_precision_sum += average_precision
correct_retrieval = True if retrieved_reciprocal_rank > 0 else False
self.has_answer_count += 1
self.has_answer_correct += int(correct_retrieval)
self.has_answer_reciprocal_rank_sum += retrieved_reciprocal_rank
self.has_answer_recall = self.has_answer_correct / self.has_answer_count
self.has_answer_mean_reciprocal_rank = self.has_answer_reciprocal_rank_sum / self.has_answer_count
self.correct_retrieval_count += correct_retrieval
self.recall = self.correct_retrieval_count / self.query_count
self.mean_reciprocal_rank = self.reciprocal_rank_sum / self.query_count
self.mean_average_precision = self.average_precision_sum / self.query_count
self.top_k_used = top_k_eval_documents
return_dict = {"documents": documents,
"labels": labels,
"correct_retrieval": correct_retrieval,
"retrieved_reciprocal_rank": retrieved_reciprocal_rank,
"average_precision": average_precision,
"pytrec_eval_results": pytrec_results,
**kwargs}
if self.debug:
self.log.append(return_dict)
return return_dict, "output_1"
def print(self):
"""Print the evaluation results"""
print(self.name)
print("-----------------")
for key, sum_score in self.pytrec_eval_sums.items():
print(f"{key.replace(SUM_PREFIX, '')}: {(sum_score / self.query_count):.4f}")
class EvalDocuments:
"""
This is a pipeline node that should be placed after a node that returns a List of Document, e.g., Retriever or
Ranker, in order to assess its performance. Performance metrics are stored in this class and updated as each
sample passes through it. To view the results of the evaluation, call EvalDocuments.print(). Note that results
    from this Node may differ from those obtained by calling Retriever.eval(), since that is a closed domain evaluation. Have
a look at our evaluation tutorial for more info about open vs closed domain eval (
https://haystack.deepset.ai/docs/latest/tutorial5md).
"""
def __init__(self,
debug: bool=False,
open_domain: bool=True,
top_k_eval_documents: int = 10,
name="EvalDocuments",
):
"""
:param open_domain: When True, a document is considered correctly retrieved so long as the answer string can be found within it.
When False, correct retrieval is evaluated based on document_id.
:param debug: When True, a record of each sample and its evaluation will be stored in EvalDocuments.log
        :param top_k_eval_documents: calculate eval metrics for top k results, e.g., recall@k
"""
self.outgoing_edges = 1
self.init_counts()
self.no_answer_warning = False
self.debug = debug
self.log: List = []
self.open_domain = open_domain
self.top_k_eval_documents = top_k_eval_documents
self.name = name
self.too_few_docs_warning = False
self.top_k_used = 0
def init_counts(self):
self.correct_retrieval_count = 0
self.query_count = 0
self.has_answer_count = 0
self.has_answer_correct = 0
self.has_answer_recall = 0
self.no_answer_count = 0
self.recall = 0.0
self.mean_reciprocal_rank = 0.0
self.has_answer_mean_reciprocal_rank = 0.0
self.reciprocal_rank_sum = 0.0
self.has_answer_reciprocal_rank_sum = 0.0
# For mean average precision
self.mean_average_precision = 0.0
self.average_precision_sum = 0.0
def run(self, documents, labels: dict, top_k_eval_documents: Optional[int]=None, **kwargs):
"""Run this node on one sample and its labels"""
self.query_count += 1
if not top_k_eval_documents:
top_k_eval_documents = self.top_k_eval_documents
if not self.top_k_used:
self.top_k_used = top_k_eval_documents
elif self.top_k_used != top_k_eval_documents:
logger.warning(f"EvalDocuments was last run with top_k_eval_documents={self.top_k_used} but is "
f"being run again with top_k_eval_documents={self.top_k_eval_documents}. "
f"The evaluation counter is being reset from this point so that the evaluation "
f"metrics are interpretable.")
self.init_counts()
if len(documents) < top_k_eval_documents and not self.too_few_docs_warning:
logger.warning(f"EvalDocuments is being provided less candidate documents than top_k_eval_documents "
f"(currently set to {top_k_eval_documents}).")
self.too_few_docs_warning = True
# TODO retriever_labels is currently a Multilabel object but should eventually be a RetrieverLabel object
retriever_labels = get_label(labels, kwargs["node_id"])
# Haystack native way: If there are answer span annotations in the labels
if retriever_labels.no_answer: # If this sample is impossible to answer and expects a no_answer response
self.no_answer_count += 1
correct_retrieval = 1
retrieved_reciprocal_rank = 1
self.reciprocal_rank_sum += 1
average_precision = 1
self.average_precision_sum += average_precision
if not self.no_answer_warning:
self.no_answer_warning = True
logger.warning("There seem to be empty string labels in the dataset suggesting that there "
"are samples with is_impossible=True. "
"Retrieval of these samples is always treated as correct.")
else:
self.has_answer_count += 1
retrieved_reciprocal_rank = self.reciprocal_rank_retrieved(retriever_labels, documents,
top_k_eval_documents)
self.reciprocal_rank_sum += retrieved_reciprocal_rank
correct_retrieval = True if retrieved_reciprocal_rank > 0 else False
self.has_answer_correct += int(correct_retrieval)
self.has_answer_reciprocal_rank_sum += retrieved_reciprocal_rank
self.has_answer_recall = self.has_answer_correct / self.has_answer_count
self.has_answer_mean_reciprocal_rank = self.has_answer_reciprocal_rank_sum / self.has_answer_count
# For computing MAP
average_precision = self.average_precision_retrieved(retriever_labels, documents, top_k_eval_documents)
self.average_precision_sum += average_precision
self.correct_retrieval_count += correct_retrieval
self.recall = self.correct_retrieval_count / self.query_count
self.mean_reciprocal_rank = self.reciprocal_rank_sum / self.query_count
self.mean_average_precision = self.average_precision_sum / self.query_count
self.top_k_used = top_k_eval_documents
return_dict = {"documents": documents,
"labels": labels,
"correct_retrieval": correct_retrieval,
"retrieved_reciprocal_rank": retrieved_reciprocal_rank,
"average_precision": average_precision,
**kwargs}
if self.debug:
self.log.append(return_dict)
return return_dict, "output_1"
def is_correctly_retrieved(self, retriever_labels, predictions):
return self.reciprocal_rank_retrieved(retriever_labels, predictions) > 0
def reciprocal_rank_retrieved(self, retriever_labels, predictions, top_k_eval_documents):
if self.open_domain:
for label in retriever_labels.multiple_answers:
for rank, p in enumerate(predictions[:top_k_eval_documents]):
if label.lower() in p.text.lower():
return 1/(rank+1)
return False
else:
prediction_ids = [p.id for p in predictions[:top_k_eval_documents]]
label_ids = retriever_labels.multiple_document_ids
for rank, p in enumerate(prediction_ids):
if p in label_ids:
return 1/(rank+1)
return 0
def average_precision_retrieved(self, retriever_labels, predictions, top_k_eval_documents):
prediction_ids = [p.id for p in predictions[:top_k_eval_documents]]
label_ids = set(retriever_labels.multiple_document_ids)
correct = 0
total = 0
for rank, p in enumerate(prediction_ids):
if p in label_ids:
correct += 1
total += correct / (rank + 1)
return total / correct if correct > 0 else 0
def print(self):
"""Print the evaluation results"""
print(self.name)
print("-----------------")
if self.no_answer_count:
print(
f"has_answer recall@{self.top_k_used}: {self.has_answer_recall:.4f} ({self.has_answer_correct}/{self.has_answer_count})")
print(
f"no_answer recall@{self.top_k_used}: 1.00 ({self.no_answer_count}/{self.no_answer_count}) (no_answer samples are always treated as correctly retrieved)")
print(
f"has_answer mean_reciprocal_rank@{self.top_k_used}: {self.has_answer_mean_reciprocal_rank:.4f}")
print(
f"no_answer mean_reciprocal_rank@{self.top_k_used}: 1.0000 (no_answer samples are always treated as correctly retrieved at rank 1)")
print(f"recall@{self.top_k_used}: {self.recall:.4f} ({self.correct_retrieval_count} / {self.query_count})")
print(f"mean_reciprocal_rank@{self.top_k_used}: {self.mean_reciprocal_rank:.4f}")
print(f"mean_average_precision@{self.top_k_used}: {self.mean_average_precision:.4f}")
class EvalAnswers:
"""
This is a pipeline node that should be placed after a Reader in order to assess the performance of the Reader
individually or to assess the extractive QA performance of the whole pipeline. Performance metrics are stored in
this class and updated as each sample passes through it. To view the results of the evaluation, call EvalAnswers.print().
    Note that results from this Node may differ from those obtained by calling Reader.eval()
since that is a closed domain evaluation. Have a look at our evaluation tutorial for more info about
open vs closed domain eval (https://haystack.deepset.ai/docs/latest/tutorial5md).
"""
def __init__(self, skip_incorrect_retrieval: bool=True, open_domain: bool=True, debug: bool=False):
"""
:param skip_incorrect_retrieval: When set to True, this eval will ignore the cases where the retriever returned no correct documents
:param open_domain: When True, extracted answers are evaluated purely on string similarity rather than the position of the extracted answer
:param debug: When True, a record of each sample and its evaluation will be stored in EvalAnswers.log
"""
self.outgoing_edges = 1
self.init_counts()
self.log: List = []
self.debug = debug
self.skip_incorrect_retrieval = skip_incorrect_retrieval
self.open_domain = open_domain
def init_counts(self):
self.query_count = 0
self.correct_retrieval_count = 0
self.no_answer_count = 0
self.has_answer_count = 0
self.top_1_no_answer_count = 0
self.top_1_em_count = 0
self.top_k_em_count = 0
self.top_1_f1_sum = 0
self.top_k_f1_sum = 0
self.top_1_no_answer = 0
self.top_1_em = 0.0
self.top_k_em = 0.0
self.top_1_f1 = 0.0
self.top_k_f1 = 0.0
def run(self, labels, answers, **kwargs):
"""Run this node on one sample and its labels"""
self.query_count += 1
predictions = answers
skip = self.skip_incorrect_retrieval and not kwargs.get("correct_retrieval")
if predictions and not skip:
self.correct_retrieval_count += 1
multi_labels = get_label(labels, kwargs["node_id"])
# If this sample is impossible to answer and expects a no_answer response
if multi_labels.no_answer:
self.no_answer_count += 1
if predictions[0]["answer"] is None:
self.top_1_no_answer_count += 1
if self.debug:
self.log.append({"predictions": predictions,
"gold_labels": multi_labels,
"top_1_no_answer": int(predictions[0] == ""),
})
self.update_no_answer_metrics()
# If there are answer span annotations in the labels
else:
self.has_answer_count += 1
predictions = [p for p in predictions if p["answer"]]
top_1_em, top_1_f1, top_k_em, top_k_f1 = self.evaluate_extraction(multi_labels, predictions)
if self.debug:
self.log.append({"predictions": predictions,
"gold_labels": multi_labels,
"top_k_f1": top_k_f1,
"top_k_em": top_k_em
})
self.top_1_em_count += top_1_em
self.top_1_f1_sum += top_1_f1
self.top_k_em_count += top_k_em
self.top_k_f1_sum += top_k_f1
self.update_has_answer_metrics()
return {**kwargs}, "output_1"
def evaluate_extraction(self, gold_labels, predictions):
if self.open_domain:
gold_labels_list = gold_labels.multiple_answers
predictions_str = [p["answer"] for p in predictions]
top_1_em = calculate_em_str_multi(gold_labels_list, predictions_str[0])
top_1_f1 = calculate_f1_str_multi(gold_labels_list, predictions_str[0])
top_k_em = max([calculate_em_str_multi(gold_labels_list, p) for p in predictions_str])
top_k_f1 = max([calculate_f1_str_multi(gold_labels_list, p) for p in predictions_str])
else:
logger.error("Closed Domain Reader Evaluation not yet implemented")
return 0,0,0,0
return top_1_em, top_1_f1, top_k_em, top_k_f1
def update_has_answer_metrics(self):
self.top_1_em = self.top_1_em_count / self.has_answer_count
self.top_k_em = self.top_k_em_count / self.has_answer_count
self.top_1_f1 = self.top_1_f1_sum / self.has_answer_count
self.top_k_f1 = self.top_k_f1_sum / self.has_answer_count
def update_no_answer_metrics(self):
self.top_1_no_answer = self.top_1_no_answer_count / self.no_answer_count
def print(self, mode):
"""Print the evaluation results"""
if mode == "reader":
print("Reader")
print("-----------------")
# print(f"answer in retrieved docs: {correct_retrieval}")
print(f"has answer queries: {self.has_answer_count}")
print(f"top 1 EM: {self.top_1_em:.4f}")
print(f"top k EM: {self.top_k_em:.4f}")
print(f"top 1 F1: {self.top_1_f1:.4f}")
print(f"top k F1: {self.top_k_f1:.4f}")
if self.no_answer_count:
print()
print(f"no_answer queries: {self.no_answer_count}")
print(f"top 1 no_answer accuracy: {self.top_1_no_answer:.4f}")
elif mode == "pipeline":
print("Pipeline")
print("-----------------")
pipeline_top_1_em = (self.top_1_em_count + self.top_1_no_answer_count) / self.query_count
pipeline_top_k_em = (self.top_k_em_count + self.no_answer_count) / self.query_count
pipeline_top_1_f1 = (self.top_1_f1_sum + self.top_1_no_answer_count) / self.query_count
pipeline_top_k_f1 = (self.top_k_f1_sum + self.no_answer_count) / self.query_count
print(f"queries: {self.query_count}")
print(f"top 1 EM: {pipeline_top_1_em:.4f}")
print(f"top k EM: {pipeline_top_k_em:.4f}")
print(f"top 1 F1: {pipeline_top_1_f1:.4f}")
print(f"top k F1: {pipeline_top_k_f1:.4f}")
if self.no_answer_count:
print(
"(top k results are likely inflated since the Reader always returns a no_answer prediction in its top k)"
)
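# A minimal usage sketch for EvalAnswers, assuming a pipeline whose Reader node feeds this
# evaluator with `labels`, `answers` and `correct_retrieval`; names are illustrative only:
#
#   eval_answers = EvalAnswers(skip_incorrect_retrieval=True)
#   pipeline.add_node(component=eval_answers, name="EvalAnswers", inputs=["Reader"])
#   ...  # run the pipeline over the evaluation samples
#   eval_answers.print(mode="reader")    # Reader-only metrics
#   eval_answers.print(mode="pipeline")  # end-to-end extractive QA metrics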
def get_label(labels, node_id):
if type(labels) in [Label, MultiLabel]:
ret = labels
# If labels is a dict, then fetch the value using node_id (e.g. "EvalRetriever") as the key
else:
ret = labels[node_id]
return ret
def calculate_em_str_multi(gold_labels, prediction):
for gold_label in gold_labels:
result = calculate_em_str(gold_label, prediction)
if result == 1.0:
return 1.0
return 0.0
def calculate_f1_str_multi(gold_labels, prediction):
results = []
for gold_label in gold_labels:
result = calculate_f1_str(gold_label, prediction)
results.append(result)
return max(results)
def calculate_reader_metrics(metric_counts: Dict[str, float], correct_retrievals: int):
number_of_has_answer = correct_retrievals - metric_counts["number_of_no_answer"]
metrics = {
"reader_top1_accuracy" : metric_counts["correct_readings_top1"] / correct_retrievals,
"reader_top1_accuracy_has_answer" : metric_counts["correct_readings_top1_has_answer"] / number_of_has_answer,
"reader_topk_accuracy" : metric_counts["correct_readings_topk"] / correct_retrievals,
"reader_topk_accuracy_has_answer" : metric_counts["correct_readings_topk_has_answer"] / number_of_has_answer,
"reader_top1_em" : metric_counts["exact_matches_top1"] / correct_retrievals,
"reader_top1_em_has_answer" : metric_counts["exact_matches_top1_has_answer"] / number_of_has_answer,
"reader_topk_em" : metric_counts["exact_matches_topk"] / correct_retrievals,
"reader_topk_em_has_answer" : metric_counts["exact_matches_topk_has_answer"] / number_of_has_answer,
"reader_top1_f1" : metric_counts["summed_f1_top1"] / correct_retrievals,
"reader_top1_f1_has_answer" : metric_counts["summed_f1_top1_has_answer"] / number_of_has_answer,
"reader_topk_f1" : metric_counts["summed_f1_topk"] / correct_retrievals,
"reader_topk_f1_has_answer" : metric_counts["summed_f1_topk_has_answer"] / number_of_has_answer,
}
if metric_counts["number_of_no_answer"]:
metrics["reader_top1_no_answer_accuracy"] = metric_counts["correct_no_answers_top1"] / metric_counts[
"number_of_no_answer"]
metrics["reader_topk_no_answer_accuracy"] = metric_counts["correct_no_answers_topk"] / metric_counts[
"number_of_no_answer"]
else:
metrics["reader_top1_no_answer_accuracy"] = None # type: ignore
metrics["reader_topk_no_answer_accuracy"] = None # type: ignore
return metrics
def calculate_average_precision_and_reciprocal_rank(questions_with_docs: List[dict]):
questions_with_correct_doc = []
summed_avg_precision_retriever = 0.0
summed_reciprocal_rank_retriever = 0.0
for question in questions_with_docs:
number_relevant_docs = len(set(question["question"].multiple_document_ids))
found_relevant_doc = False
relevant_docs_found = 0
current_avg_precision = 0.0
for doc_idx, doc in enumerate(question["docs"]):
# check if correct doc among retrieved docs
if doc.id in question["question"].multiple_document_ids:
if not found_relevant_doc:
summed_reciprocal_rank_retriever += 1 / (doc_idx + 1)
relevant_docs_found += 1
found_relevant_doc = True
current_avg_precision += relevant_docs_found / (doc_idx + 1)
if relevant_docs_found == number_relevant_docs:
break
if found_relevant_doc:
all_relevant_docs = len(set(question["question"].multiple_document_ids))
summed_avg_precision_retriever += current_avg_precision / all_relevant_docs
if found_relevant_doc:
questions_with_correct_doc.append({
"question": question["question"],
"docs": question["docs"]
})
return questions_with_correct_doc, summed_avg_precision_retriever, summed_reciprocal_rank_retriever
def eval_counts_reader(question: MultiLabel, predicted_answers: Dict[str, Any], metric_counts: Dict[str, float]):
# Calculates evaluation metrics for one question and adds results to counter.
# check if question is answerable
if not question.no_answer:
found_answer = False
found_em = False
best_f1 = 0
for answer_idx, answer in enumerate(predicted_answers["answers"]):
if answer["document_id"] in question.multiple_document_ids:
gold_spans = [{"offset_start": question.multiple_offset_start_in_docs[i],
"offset_end": question.multiple_offset_start_in_docs[i] + len(question.multiple_answers[i]),
"doc_id": question.multiple_document_ids[i]} for i in range(len(question.multiple_answers))] # type: ignore
predicted_span = {"offset_start": answer["offset_start_in_doc"],
"offset_end": answer["offset_end_in_doc"],
"doc_id": answer["document_id"]}
best_f1_in_gold_spans = 0
for gold_span in gold_spans:
if gold_span["doc_id"] == predicted_span["doc_id"]:
# check if overlap between gold answer and predicted answer
if not found_answer:
metric_counts, found_answer = _count_overlap(gold_span, predicted_span, metric_counts, answer_idx) # type: ignore
# check for exact match
if not found_em:
metric_counts, found_em = _count_exact_match(gold_span, predicted_span, metric_counts, answer_idx) # type: ignore
# calculate f1
current_f1 = _calculate_f1(gold_span, predicted_span) # type: ignore
if current_f1 > best_f1_in_gold_spans:
best_f1_in_gold_spans = current_f1
# top-1 f1
if answer_idx == 0:
metric_counts["summed_f1_top1"] += best_f1_in_gold_spans
metric_counts["summed_f1_top1_has_answer"] += best_f1_in_gold_spans
if best_f1_in_gold_spans > best_f1:
best_f1 = best_f1_in_gold_spans
if found_em:
break
# top-k answers: use best f1-score
metric_counts["summed_f1_topk"] += best_f1
metric_counts["summed_f1_topk_has_answer"] += best_f1
# question not answerable
else:
metric_counts["number_of_no_answer"] += 1
metric_counts = _count_no_answer(predicted_answers["answers"], metric_counts)
return metric_counts
def eval_counts_reader_batch(pred: Dict[str, Any], metric_counts: Dict[str, float]):
# Calculates evaluation metrics for one question and adds results to counter.
# check if question is answerable
if not pred["label"].no_answer:
found_answer = False
found_em = False
best_f1 = 0
for answer_idx, answer in enumerate(pred["answers"]):
# check if correct document:
if answer["document_id"] in pred["label"].multiple_document_ids:
gold_spans = [{"offset_start": pred["label"].multiple_offset_start_in_docs[i],
"offset_end": pred["label"].multiple_offset_start_in_docs[i] + len(pred["label"].multiple_answers[i]),
"doc_id": pred["label"].multiple_document_ids[i]}
for i in range(len(pred["label"].multiple_answers))] # type: ignore
predicted_span = {"offset_start": answer["offset_start_in_doc"],
"offset_end": answer["offset_end_in_doc"],
"doc_id": answer["document_id"]}
best_f1_in_gold_spans = 0
for gold_span in gold_spans:
if gold_span["doc_id"] == predicted_span["doc_id"]:
# check if overlap between gold answer and predicted answer
if not found_answer:
metric_counts, found_answer = _count_overlap(
gold_span, predicted_span, metric_counts, answer_idx
)
# check for exact match
if not found_em:
metric_counts, found_em = _count_exact_match(
gold_span, predicted_span, metric_counts, answer_idx
)
# calculate f1
current_f1 = _calculate_f1(gold_span, predicted_span)
if current_f1 > best_f1_in_gold_spans:
best_f1_in_gold_spans = current_f1
# top-1 f1
if answer_idx == 0:
metric_counts["summed_f1_top1"] += best_f1_in_gold_spans
metric_counts["summed_f1_top1_has_answer"] += best_f1_in_gold_spans
if best_f1_in_gold_spans > best_f1:
best_f1 = best_f1_in_gold_spans
if found_em:
break
# top-k answers: use best f1-score
metric_counts["summed_f1_topk"] += best_f1
metric_counts["summed_f1_topk_has_answer"] += best_f1
# question not answerable
else:
metric_counts["number_of_no_answer"] += 1
metric_counts = _count_no_answer(pred["answers"], metric_counts)
return metric_counts
def _count_overlap(
gold_span: Dict[str, Any],
predicted_span: Dict[str, Any],
metric_counts: Dict[str, float],
answer_idx: int
):
# Checks if overlap between prediction and real answer.
found_answer = False
if (gold_span["offset_start"] <= predicted_span["offset_end"]) and \
(predicted_span["offset_start"] <= gold_span["offset_end"]):
# top-1 answer
if answer_idx == 0:
metric_counts["correct_readings_top1"] += 1
metric_counts["correct_readings_top1_has_answer"] += 1
# top-k answers
metric_counts["correct_readings_topk"] += 1
metric_counts["correct_readings_topk_has_answer"] += 1
found_answer = True
return metric_counts, found_answer
def _count_exact_match(
gold_span: Dict[str, Any],
predicted_span: Dict[str, Any],
metric_counts: Dict[str, float],
answer_idx: int
):
# Check if exact match between prediction and real answer.
# As evaluation needs to be framework independent, we cannot use the farm.evaluation.metrics.py functions.
found_em = False
if (gold_span["offset_start"] == predicted_span["offset_start"]) and \
(gold_span["offset_end"] == predicted_span["offset_end"]):
if metric_counts:
# top-1 answer
if answer_idx == 0:
metric_counts["exact_matches_top1"] += 1
metric_counts["exact_matches_top1_has_answer"] += 1
# top-k answers
metric_counts["exact_matches_topk"] += 1
metric_counts["exact_matches_topk_has_answer"] += 1
found_em = True
return metric_counts, found_em
def _calculate_f1(gold_span: Dict[str, Any], predicted_span: Dict[str, Any]):
# Calculates F1-Score for prediction based on real answer using character offsets.
# As evaluation needs to be framework independent, we cannot use the farm.evaluation.metrics.py functions.
pred_indices = list(range(predicted_span["offset_start"], predicted_span["offset_end"]))
gold_indices = list(range(gold_span["offset_start"], gold_span["offset_end"]))
n_overlap = len([x for x in pred_indices if x in gold_indices])
if pred_indices and gold_indices and n_overlap:
precision = n_overlap / len(pred_indices)
recall = n_overlap / len(gold_indices)
f1 = (2 * precision * recall) / (precision + recall)
return f1
else:
return 0
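# Worked example for _calculate_f1: a gold span covering offsets [10, 20) and a predicted span
# covering [15, 25) overlap in 5 characters, so precision = 5/10 = 0.5, recall = 5/10 = 0.5 and
# F1 = 2 * 0.5 * 0.5 / (0.5 + 0.5) = 0.5.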
def _count_no_answer(answers: List[dict], metric_counts: Dict[str, float]):
# Checks if one of the answers is 'no answer'.
for answer_idx, answer in enumerate(answers):
# check if 'no answer'
if answer["answer"] is None:
# top-1 answer
if answer_idx == 0:
metric_counts["correct_no_answers_top1"] += 1
metric_counts["correct_readings_top1"] += 1
metric_counts["exact_matches_top1"] += 1
metric_counts["summed_f1_top1"] += 1
# top-k answers
metric_counts["correct_no_answers_topk"] += 1
metric_counts["correct_readings_topk"] += 1
metric_counts["exact_matches_topk"] += 1
metric_counts["summed_f1_topk"] += 1
break
return metric_counts
|
py
|
1a59c010099b0c8cde9cdbafa5147927e3d18895
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validates V2 proto messages.
Internally, this module is a bit magical. It keeps a stack of fields currently
being validated per thread. It is used to construct a path to an invalid field
value.
"""
import contextlib
import logging
import re
import threading
from components import cipd
from go.chromium.org.luci.buildbucket.proto import common_pb2
import buildtags
import config
import errors
import model
class Error(Exception):
"""Raised on validation errors."""
PUBSUB_USER_DATA_MAX_LENGTH = 4096
# Maximum size of Build.summary_markdown field. Defined in build.proto.
MAX_BUILD_SUMMARY_MARKDOWN_SIZE = 4000 # 4 KB
# swarming.py and api.py reserve these properties.
RESERVED_PROPERTY_PATHS = [
# Reserved for buildbucket internals.
['buildbucket'],
['$recipe_engine/buildbucket'],
# Deprecated in favor of api.buildbucket.builder.builder,
# https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_modules/buildbucket/api.py
# Prohibited.
['buildername'],
# Deprecated in favor of api.buildbucket.build_input.gitiles_commit,
# https://chromium.googlesource.com/infra/luci/recipes-py/+/master/recipe_modules/buildbucket/api.py
# Prohibited.
['branch'],
['repository'],
# Set to const true.
['$recipe_engine/runtime', 'is_luci'],
# Populated from Build.input.experimental.
['$recipe_engine/runtime', 'is_experimental'],
]
# Statuses with start time required.
START_TIME_REQUIRED_STATUSES = (
common_pb2.STARTED,
common_pb2.SUCCESS,
common_pb2.FAILURE,
)
# Step statuses, listed from best to worst and if applicable. See
# https://chromium.googlesource.com/infra/luci/luci-go/+/dffd1081b775979aa1c5a8046d9a65adead1cee8/buildbucket/proto/step.proto#75
STATUS_PRECEDENCE = (
common_pb2.SUCCESS, # best
common_pb2.FAILURE,
common_pb2.INFRA_FAILURE,
common_pb2.CANCELED, # worst
)
# Character separating parent from children steps.
STEP_SEP = '|'
################################################################################
# Validation of common.proto messages.
# The order of functions must match the order of messages in common.proto.
def validate_gerrit_change(change, require_project=False):
"""Validates common_pb2.GerritChange."""
# project is not required.
_check_truth(change, 'host', 'change', 'patchset')
if require_project and not change.project: # pragma: no branch
# TODO(nodir): escalate to an error.
logging.warning('gerrit_change.project is not specified')
def validate_gitiles_commit(commit, require_ref=True):
"""Validates common_pb2.GitilesCommit."""
_check_truth(commit, 'host', 'project')
if require_ref:
_check_truth(commit, 'ref')
if commit.ref:
if not commit.ref.startswith('refs/'):
_enter_err('ref', 'must start with "refs/"')
else:
if not commit.id:
_err('id or ref is required')
if commit.position:
_enter_err('position', 'requires ref')
if commit.id:
with _enter('id'):
_validate_hex_sha1(commit.id)
def validate_tags(string_pairs, mode):
"""Validates a list of common.StringPair tags.
For mode, see buildtags.validate_tags docstring.
"""
for p in string_pairs:
if ':' in p.key:
_err('tag key "%s" cannot have a colon', p.key)
with _handle_invalid_input_error():
tags = ['%s:%s' % (p.key, p.value) for p in string_pairs]
buildtags.validate_tags(tags, mode)
################################################################################
# Validation of build.proto messages.
# The order of functions must match the order of messages in common.proto.
def validate_builder_id(builder_id, require_bucket=True, require_builder=True):
"""Validates build_pb2.BuilderID."""
assert require_bucket or not require_builder
_check_truth(builder_id, 'project')
if require_bucket:
_check_truth(builder_id, 'bucket')
if require_builder:
_check_truth(builder_id, 'builder')
with _enter('project'), _handle_invalid_input_error():
config.validate_project_id(builder_id.project)
with _enter('bucket'), _handle_invalid_input_error():
if builder_id.bucket:
config.validate_bucket_name(builder_id.bucket)
parts = builder_id.bucket.split('.')
if len(parts) >= 3 and parts[0] == 'luci':
_err(
'invalid usage of v1 bucket format in v2 API; use %r instead',
parts[2]
)
elif builder_id.builder:
_err('required by .builder field')
with _enter('builder'), _handle_invalid_input_error():
if builder_id.builder:
errors.validate_builder_name(builder_id.builder)
################################################################################
# Validation of rpc.proto messages.
# The order of functions must match the order of messages in rpc.proto.
def validate_get_build_request(req):
"""Validates rpc_pb2.GetBuildRequest."""
if req.id:
if req.HasField('builder') or req.build_number:
_err('id is mutually exclusive with builder and build_number')
elif req.HasField('builder') and req.build_number:
validate_builder_id(req.builder)
else:
_err('id or (builder and build_number) are required')
def validate_search_builds_request(req):
"""Validates rpc_pb2.SearchBuildRequest."""
with _enter('predicate'):
validate_build_predicate(req.predicate)
_validate_paged_request(req)
def validate_requested_dimension(dim):
"""Validates common_pb2.RequestedDimension."""
_check_truth(dim, 'key', 'value')
with _enter('key'):
if dim.key == 'caches':
_err('"caches" is invalid; define caches instead')
if dim.key == 'pool':
_err('"pool" is not allowed')
with _enter('expiration'):
with _enter('seconds'):
if dim.expiration.seconds < 0:
_err('must not be negative')
if dim.expiration.seconds % 60 != 0:
_err('must be a multiple of 60')
if dim.expiration.nanos:
_enter_err('nanos', 'must be 0')
def validate_schedule_build_request(req, legacy=False):
if '/' in req.request_id: # pragma: no cover
_enter_err('request_id', 'must not contain /')
if not req.HasField('builder') and not req.template_build_id:
_err('builder or template_build_id is required')
if req.HasField('builder'):
with _enter('builder'):
validate_builder_id(req.builder, require_builder=not legacy)
with _enter('exe'):
_check_falsehood(req.exe, 'cipd_package')
if req.exe.cipd_version:
with _enter('cipd_version'):
_validate_cipd_version(req.exe.cipd_version)
with _enter('properties'):
validate_struct(req.properties)
if not legacy: # pragma: no branch
for path in RESERVED_PROPERTY_PATHS:
if _struct_has_path(req.properties, path):
_err('property path %r is reserved', path)
if req.HasField('gitiles_commit'):
with _enter('gitiles_commit'):
validate_gitiles_commit(req.gitiles_commit)
_check_repeated(
req,
'gerrit_changes',
lambda c: validate_gerrit_change(c, require_project=not legacy),
)
with _enter('tags'):
validate_tags(req.tags, 'new')
_check_repeated(req, 'dimensions', validate_requested_dimension)
key_exp = set()
with _enter('dimensions'):
for d in req.dimensions:
t = (d.key, d.expiration.seconds)
if t in key_exp:
_err(
'key "%s" and expiration %ds are not unique', d.key,
d.expiration.seconds
)
key_exp.add(t)
if req.priority < 0 or req.priority > 255:
_enter_err('priority', 'must be in [0, 255]')
if req.HasField('notify'): # pragma: no branch
with _enter('notify'):
validate_notification_config(req.notify)
def validate_cancel_build_request(req):
_check_truth(req, 'id', 'summary_markdown')
with _enter('summary_markdown'):
validate_build_summary_markdown(req.summary_markdown)
def validate_struct(struct):
for name, value in struct.fields.iteritems():
if not value.WhichOneof('kind'):
_enter_err(name, 'value is not set; for null, initialize null_value')
def validate_notification_config(notify):
_check_truth(notify, 'pubsub_topic')
if len(notify.user_data) > PUBSUB_USER_DATA_MAX_LENGTH:
_enter_err('user_data', 'must be <= %d bytes', PUBSUB_USER_DATA_MAX_LENGTH)
# Set of UpdateBuildRequest field paths updatable via UpdateBuild RPC.
UPDATE_BUILD_FIELD_PATHS = {
'build.status',
'build.status_details',
'build.summary_markdown',
'build.steps',
'build.output',
'build.output.properties',
'build.output.gitiles_commit',
'build.tags',
}
# Set of valid build statuses supported by UpdateBuild RPC.
UPDATE_BUILD_STATUSES = {
common_pb2.STARTED,
# kitchen does not actually use SUCCESS. It relies on swarming pubsub
# handler in Buildbucket because a task may fail after recipe succeeded.
common_pb2.SUCCESS,
common_pb2.FAILURE,
common_pb2.INFRA_FAILURE,
}
def validate_update_build_request(req, make_build_steps_func=None):
"""Validates rpc_pb2.UpdateBuildRequest.
If make_build_steps_func is given, it will be called at the end to validate
the size of the its serialized representation. This allows the callee to save
the BuildStep locally and thus avoid re-doing the work later.
"""
update_paths = set(req.update_mask.paths)
with _enter('update_mask', 'paths'):
unsupported = update_paths - UPDATE_BUILD_FIELD_PATHS
if unsupported:
_err('unsupported path(s) %r', sorted(unsupported))
# Check build values, if present in the mask.
with _enter('build'):
_check_truth(req.build, 'id')
if 'build.status' in update_paths:
if req.build.status not in UPDATE_BUILD_STATUSES:
_enter_err(
'status', 'invalid status %s for UpdateBuild',
common_pb2.Status.Name(req.build.status)
)
if 'build.output.gitiles_commit' in update_paths:
with _enter('output', 'gitiles_commit'):
validate_gitiles_commit(req.build.output.gitiles_commit)
if 'build.summary_markdown' in update_paths:
with _enter('summary_markdown'):
validate_build_summary_markdown(req.build.summary_markdown)
if 'build.output.properties' in update_paths:
with _enter('output', 'properties'):
validate_struct(req.build.output.properties)
if 'build.tags' in update_paths:
with _enter('tags'):
validate_tags(req.build.tags, 'append')
if 'build.steps' in update_paths: # pragma: no branch
with _enter('steps'):
build_steps = (
make_build_steps_func()
if make_build_steps_func else model.BuildSteps.make(req.build)
)
limit = model.BuildSteps.MAX_STEPS_LEN
if len(build_steps.step_container_bytes) > limit:
_err('too big to accept')
validate_steps(req.build.steps)
def validate_build_summary_markdown(summary_markdown):
size = len(summary_markdown)
limit = MAX_BUILD_SUMMARY_MARKDOWN_SIZE
if size > limit:
_err('too big to accept (%d > %d bytes)', size, limit)
def validate_steps(steps):
seen_steps = dict()
for i, s in enumerate(steps):
with _enter('step[%d]' % i):
validate_step(s, seen_steps)
def validate_step(step, steps):
"""Validates build's step, internally and relative to (previous) steps."""
_check_truth(step, 'name')
if step.name in steps:
_enter_err('name', 'duplicate: %r', step.name)
validate_internal_timing_consistency(step)
log_names = set()
_check_repeated(step, 'logs', lambda log: validate_log(log, log_names))
name_path = step.name.split(STEP_SEP)
parent_name = STEP_SEP.join(name_path[:-1])
if parent_name:
if parent_name not in steps:
_err('parent to %r must precede', step.name)
parent = steps[parent_name]
validate_status_consistency(step, parent)
validate_timing_consistency(step, parent)
steps[step.name] = step
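# Example of the STEP_SEP hierarchy enforced above: a step named 'compile|gn' is a child of
# 'compile', so a step named 'compile' must have been validated earlier, and the child's status
# and timing are checked against that parent.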
def validate_internal_timing_consistency(step):
"""Validates internal timing consistency of a step."""
if (step.status not in common_pb2.Status.values() or
step.status == common_pb2.STATUS_UNSPECIFIED):
_err('must have buildbucket.v2.Status that is not STATUS_UNSPECIFIED')
if step.status in START_TIME_REQUIRED_STATUSES and not step.HasField(
'start_time'):
_enter_err(
'start_time', 'required by status %s',
common_pb2.Status.Name(step.status)
)
elif step.status < common_pb2.STARTED and step.HasField('start_time'):
_enter_err(
'start_time', 'invalid for status %s',
common_pb2.Status.Name(step.status)
)
if bool(step.status & common_pb2.ENDED_MASK) ^ step.HasField('end_time'):
_err('must have both or neither end_time and a terminal status')
if (step.HasField('end_time') and
step.start_time.ToDatetime() > step.end_time.ToDatetime()):
_err('start_time after end_time')
def validate_status_consistency(child, parent):
"""Validates inter-step status consistency."""
c, p = child.status, parent.status
c_name, p_name = common_pb2.Status.Name(c), common_pb2.Status.Name(p)
if p == common_pb2.SCHEDULED:
_enter_err('status', 'parent %r must be at least STARTED', parent.name)
if not bool(c & common_pb2.ENDED_MASK) and p != common_pb2.STARTED:
_enter_err(
'status', 'non-terminal (%s) %r must have STARTED parent %r (%s)',
c_name, child.name, parent.name, p_name
)
if (p in STATUS_PRECEDENCE and c in STATUS_PRECEDENCE and
STATUS_PRECEDENCE.index(p) < STATUS_PRECEDENCE.index(c)):
_enter_err(
'status', '%r\'s status %s is worse than parent %r\'s status %s',
child.name, c_name, parent.name, p_name
)
def validate_timing_consistency(child, parent):
"""Validates inter-step timing consistency."""
parent_start = parent.start_time.ToDatetime(
) if parent.HasField('start_time') else None
parent_end = parent.end_time.ToDatetime(
) if parent.HasField('end_time') else None
if child.HasField('start_time'):
child_start = child.start_time.ToDatetime()
with _enter('start_time'):
if parent_start and parent_start > child_start:
_err('cannot precede parent %r\'s start time', parent.name)
if parent_end and parent_end < child_start:
_err('cannot follow parent %r\'s end time', parent.name)
if child.HasField('end_time'):
child_end = child.end_time.ToDatetime()
with _enter('end_time'):
if parent_start and parent_start > child_end:
_err('cannot precede parent %r\'s start time', parent.name)
if parent_end and parent_end < child_end:
_err('cannot follow parent %r\'s end time', parent.name)
def validate_log(log, names):
"""Validates a log within a build step; checks uniqueness against names param.
"""
_check_truth(log, 'name', 'url', 'view_url')
if log.name in names:
_enter_err('name', 'duplicate: %r', log.name)
names.add(log.name)
def validate_build_predicate(predicate):
"""Validates rpc_pb2.BuildPredicate."""
if predicate.HasField('builder'):
with _enter('builder'):
validate_builder_id(
predicate.builder, require_bucket=False, require_builder=False
)
_check_repeated(predicate, 'gerrit_changes', validate_gerrit_change)
if predicate.HasField('output_gitiles_commit'):
with _enter('output_gitiles_commit'):
_validate_predicate_output_gitiles_commit(predicate.output_gitiles_commit)
if predicate.HasField('create_time') and predicate.HasField('build'):
_err('create_time and build are mutually exclusive')
with _enter('tags'):
validate_tags(predicate.tags, 'search')
# List of supported BuildPredicate.output_gitiles_commit field sets.
# It is more restricted than the generic validate_gitiles_commit because the
# field sets by which builds are indexed are more restricted.
SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET = {
tuple(sorted(s)) for s in [
('host', 'project', 'id'),
('host', 'project', 'ref'),
('host', 'project', 'ref', 'position'),
]
}
def _validate_predicate_output_gitiles_commit(commit):
"""Validates BuildsPredicate.output_gitiles_commit.
From rpc_pb2.SearchBuildsRequest.output_gitiles_commit comment:
  One of the following subfield sets must be specified:
- host, project, id
- host, project, ref
- host, project, ref, position
"""
field_set = tuple(sorted(f.name for f, _ in commit.ListFields()))
if field_set not in SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET:
_err(
'unsupported set of fields %r. Supported field sets: %r', field_set,
SUPPORTED_PREDICATE_OUTPUT_GITILES_COMMIT_FIELD_SET
)
validate_gitiles_commit(commit, require_ref=False)
################################################################################
# Internals.
def _validate_cipd_version(version):
if not cipd.is_valid_version(version):
_err('invalid version "%s"', version)
def _struct_has_path(struct, path):
"""Returns True if struct has a value at field path."""
for p in path:
f = struct.fields.get(p)
if f is None:
return False
struct = f.struct_value
return True
def _validate_hex_sha1(sha1):
pattern = r'[a-z0-9]{40}'
if not re.match(pattern, sha1):
_err('does not match r"%s"', pattern)
def _validate_paged_request(req):
"""Validates req.page_size."""
if req.page_size < 0:
    _enter_err('page_size', 'must not be negative')
def _check_truth(msg, *field_names):
"""Validates that the field values are truish."""
assert field_names, 'at least 1 field is required'
for f in field_names:
if not getattr(msg, f):
_enter_err(f, 'required')
def _check_falsehood(msg, *field_names):
"""Validates that the field values are falsish."""
for f in field_names:
if getattr(msg, f):
_enter_err(f, 'disallowed')
def _check_repeated(msg, field_name, validator):
"""Validates each element of a repeated field."""
for i, c in enumerate(getattr(msg, field_name)):
with _enter('%s[%d]' % (field_name, i)):
validator(c)
@contextlib.contextmanager
def _enter(*names):
_field_stack().extend(names)
try:
yield
finally:
_field_stack()[-len(names):] = []
def _err(fmt, *args):
field_path = '.'.join(_field_stack())
raise Error('%s: %s' % (field_path, fmt % args))
@contextlib.contextmanager
def _handle_invalid_input_error():
try:
yield
except errors.InvalidInputError as ex:
_err('%s', ex.message)
def _enter_err(name, fmt, *args):
with _enter(name):
_err(fmt, *args)
def _field_stack():
if not hasattr(_CONTEXT, 'field_stack'): # pragma: no cover
_CONTEXT.field_stack = []
return _CONTEXT.field_stack
# Validation context of the current thread.
_CONTEXT = threading.local()
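# Example of how the helpers above compose when reporting an invalid field (a sketch using only
# the functions defined in this module):
#
#   with _enter('build', 'steps'):
#     _err('too big to accept')  # raises Error('build.steps: too big to accept')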
|
py
|
1a59c06826303b70139427ea2b657a2033b24247
|
import sys
import random
import re
import asyncio
import aiohttp
import discord
from discord.ext import commands
import xml.etree.ElementTree as ET
import loadconfig
class anime(commands.Cog):
    '''Everything anime-related'''
def __init__(self, bot):
self.bot = bot
async def cog_command_error(self, ctx, error):
print('Error in {0.command.qualified_name}: {1}'.format(ctx, error))
def checkRole(self, user, roleRec):
ok = False
for all in list(user.roles):
if all.name == roleRec:
ok = True
return ok
@commands.command()
async def kawaii(self, ctx):
        '''Posts a random kawaii image'''
if loadconfig.__kawaiichannel__:
pins = await self.bot.get_channel(loadconfig.__kawaiichannel__).pins()
rnd = random.choice(pins)
img = rnd.attachments[0].url
emojis = [':blush:', ':flushed:', ':heart_eyes:', ':heart_eyes_cat:', ':heart:']
await ctx.send(f'{random.choice(emojis)} Von: {rnd.author.display_name}: {img}')
else:
await ctx.send('**:no_entry:** Es wurde kein Channel für den Bot eingestellt! Wende dich bitte an den Bot Admin')
@commands.command(pass_context=True, hidden=True)
async def nsfw(self, ctx):
        '''Assigns the role required to access the NSFW channels'''
if ctx.guild.id == loadconfig.__botserverid__:
if loadconfig.__selfassignrole__:
role = discord.utils.get(ctx.guild.roles, name=loadconfig.__selfassignrole__)
if role in ctx.author.roles:
try:
await ctx.author.remove_roles(role)
except:
pass
tmp = await ctx.send(f':x: Rolle **{role}** wurde entfernt')
else:
try:
await ctx.author.add_roles(role)
except:
pass
tmp = await ctx.send(f':white_check_mark: Rolle **{role}** wurde hinzugefügt')
else:
tmp = await ctx.send('**:no_entry:** Es wurde keine Rolle für den Bot eingestellt! Wende dich bitte an den Bot Admin')
else:
            tmp = await ctx.send(f'**:no_entry:** This command doesn\'t work on this server!')
await asyncio.sleep(2 * 60)
await tmp.delete()
await ctx.message.delete()
@commands.command(aliases=['wave', 'hi', 'ohaiyo'])
async def hello(self, ctx):
        '''Nonsense gifs for saying hello'''
gifs = ['https://cdn.discordapp.com/attachments/102817255661772800/219512763607678976/large_1.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219512898563735552/large.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219518948251664384/WgQWD.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219518717426532352/tumblr_lnttzfSUM41qgcvsy.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519191290478592/tumblr_mf76erIF6s1qj96p1o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519729604231168/giphy_3.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519737971867649/63953d32c650703cded875ac601e765778ce90d0_hq.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219519738781368321/17201a4342e901e5f1bc2a03ad487219c0434c22_hq.gif']
msg = f':wave: {random.choice(gifs)}'
await ctx.send(msg)
@commands.command(aliases=['nepu', 'topnep'])
async def nep(self, ctx):
'''Can't stop the Nep'''
neps = ['https://cdn.discordapp.com/attachments/102817255661772800/219530759881359360/community_image_1421846157.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535598187184128/tumblr_nv25gtvX911ubsb68o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535698309545984/tumblr_mpub9tTuZl1rvrw2eo2_r1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535820430770176/dd9f3cc873f3e13fe098429388fc24242a545a21_hq.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535828773371904/tumblr_nl62nrrPar1u0bcbmo1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535828995538944/dUBNqIH.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219535906942615553/b3886374588ec93849e1210449c4561fa699ff0d_hq.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536353841381376/tumblr_nl9wb2qMFD1u3qei8o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536345176080384/tumblr_njhahjh1DB1t0co30o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536356223877120/tumblr_njkq53Roep1t0co30o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536424121139210/tumblr_oalathnmFC1uskgfro1_400.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536451807739904/tumblr_nfg22lqmZ31rjwa86o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219536686529380362/tumblr_o98bm76djb1vv3oz0o1_500.gif',
'https://cdn.discordapp.com/attachments/102817255661772800/219537181440475146/tumblr_mya4mdVhDv1rmk3cyo1_500.gif',
'https://i.imgur.com/4xnJN9x.png',
'https://i.imgur.com/bunWIWD.jpg']
nepnep = ['topnep',
'Can\'t pep the nep',
'Flat is justice',
'nep nep nep nep nep nep nep nep nep nep nep',
'Nepgear > your waifu']
msg = f'{random.choice(nepnep)} {random.choice(neps)}'
await ctx.send(msg)
@commands.command(aliases=['headpat'])
async def pat(self, ctx, member: discord.Member = None):
'''/r/headpats Pat Pat Pat :3
        Example:
-----------
:pat @Der-Eddy#6508
'''
gifs = ['https://gfycat.com/PoisedWindingCaecilian',
'https://cdn.awwni.me/sou1.jpg',
'https://i.imgur.com/Nzxa95W.gifv',
'https://cdn.awwni.me/sk0x.png',
'https://i.imgur.com/N0UIRkk.png',
'https://cdn.awwni.me/r915.jpg',
'https://i.imgur.com/VRViMGf.gifv',
'https://i.imgur.com/73dNfOk.gifv',
'https://i.imgur.com/UXAKjRc.jpg',
'https://i.imgur.com/dzlDuNs.jpg',
'https://i.imgur.com/hPR7SOt.gif',
'https://i.imgur.com/IqGRUu4.gif',
'https://68.media.tumblr.com/f95f14437809dfec8057b2bd525e6b4a/tumblr_omvkl2SzeK1ql0375o1_500.gif',
'https://i.redd.it/0ffv8i3p1vrz.jpg',
'http://i.imgur.com/3dzA6OU.png',
'http://i.imgur.com/vkFKabZ.jpg',
'https://i.imgur.com/Lb4p20s.jpg',
'https://cdn.awwni.me/snot.jpg',
'https://i.imgur.com/5yEOa6u.jpg',
'https://i.redd.it/dc7oebkfsetz.jpg']
if member == ctx.me:
msg = f'Arigato {ctx.author.mention} <:Hiding:322410632517517324> \n{random.choice(gifs)}'
await ctx.send(msg)
elif member is not None:
msg = f'{ctx.author.mention} tätschelt dich {member.mention} :3 \n{random.choice(gifs)}'
await ctx.send(msg)
@commands.command(aliases=['rate', 'waifu'])
async def ratewaifu(self, ctx, *, waifuName: str):
'''Rate my waifu
        Example:
-----------
:ratewaifu Sagiri
'''
waifu = waifuName.lower()
bestWaifus = ['kobeni', 'emilia', 'shinobu', 'karen', 'shouko', 'shoko',
'minori', 'chidori', 'sagiri', 'mashiro', 'last order',
'saki', 'makoto', 'yui', 'nep', 'nepgear', 'taiga']
trashWaifus = ['shino', 'rikka']
        # these lists are highly biased, but who cares ¯\_(ツ)_/¯
if waifu in bestWaifus:
rating = 10
elif waifu in trashWaifus:
rating = 0
else:
rating = hash(waifu) % 10
if waifu == 'emilia':
emoji = '<:Emilia:230684388084416512>'
elif waifu == 'shinobu':
emoji = '<:Shinobu:303302053688770561>'
elif waifu == 'mashiro':
emoji = '<:mashiro:266233568626343936>'
elif waifu == 'sagiri':
emoji = '<:Sagiri:407630432319045634>'
elif waifu == 'nep' or waifu == 'neptunia' or waifu == 'nepgear':
emoji = '<:nep:261230988758220822>'
elif rating < 2:
emoji = ':put_litter_in_its_place:'
elif rating < 5:
emoji = '<:k3llyLUL:341946977266827264>'
elif rating < 7:
emoji = '<:k3llyTHINK:341946932639432704>'
elif rating < 9:
emojis = ['<:faeGasm:298772756412104704>', '<:naroGasm:341200647741243393>']
emoji = random.choice(emojis)
elif rating < 10:
emojis = ['<:kanoLewd:230662559458525185>', '<:fowShy:230662561580843008>', '<:mendoLewd:230662561169801216>']
emoji = random.choice(emojis)
elif rating == 10:
emojis = ['<:okhand:335170448666918923>', '<:nepnep:314906910061101057>', '<:gaku:249970768786489345>', '<:faeWant:313430419661914113>']
emoji = random.choice(emojis)
msg = f'{emoji} Ich bewerte **{waifuName}** als **{rating}/10**'
await ctx.send(msg)
@commands.command(aliases=['anilist'])
async def anime(self, ctx, *, animeName: str):
        '''Searches AniList.co for an anime and returns its basic information
        Example:
        -----------
        :anime Mushishi
        '''
api = 'https://graphql.anilist.co'
query = '''
query ($name: String){
Media(search: $name, type: ANIME) {
id
idMal
description
title {
romaji
english
}
coverImage {
large
}
startDate {
year
month
day
}
endDate {
year
month
day
}
synonyms
format
status
episodes
duration
nextAiringEpisode {
episode
}
averageScore
meanScore
source
genres
tags {
name
}
studios(isMain: true) {
nodes {
name
}
}
siteUrl
}
}
'''
variables = {
'name': animeName
}
async with aiohttp.ClientSession() as session:
async with session.post(api, json={'query': query, 'variables': variables}, headers = self.bot.userAgentHeaders) as r:
if r.status == 200:
json = await r.json()
data = json['data']['Media']
embed = discord.Embed(color=ctx.author.top_role.colour)
embed.set_footer(text='API provided by AniList.co | ID: {}'.format(str(data['id'])))
embed.set_thumbnail(url=data['coverImage']['large'])
                    if data['title']['english'] is None or data['title']['english'] == data['title']['romaji']:
embed.add_field(name='Titel', value=data['title']['romaji'], inline=False)
else:
embed.add_field(name='Titel', value='{} ({})'.format(data['title']['english'], data['title']['romaji']), inline=False)
#embed.add_field(name='Beschreibung', value=data['description'], inline=False)
if data['synonyms'] != []:
embed.add_field(name='Synonyme', value=', '.join(data['synonyms']), inline=True)
embed.add_field(name='Typ', value=data['format'].replace('_', ' ').title().replace('Tv', 'TV'), inline=True)
                    if data['episodes'] and data['episodes'] > 1:  # episodes can be None for a currently airing show
embed.add_field(name='Folgen', value='{} à {} min'.format(data['episodes'], data['duration']), inline=True)
else:
embed.add_field(name='Dauer', value=str(data['duration']) + ' min', inline=True)
embed.add_field(name='Gestartet', value='{}.{}.{}'.format(data['startDate']['day'], data['startDate']['month'], data['startDate']['year']), inline=True)
                    if data['endDate']['day'] is None:
embed.add_field(name='Released Folgen', value=data['nextAiringEpisode']['episode'] - 1, inline=True)
                    elif data['episodes'] and data['episodes'] > 1:
embed.add_field(name='Beendet', value='{}.{}.{}'.format(data['endDate']['day'], data['endDate']['month'], data['endDate']['year']), inline=True)
embed.add_field(name='Status', value=data['status'].replace('_', ' ').title(), inline=True)
try:
embed.add_field(name='Haupt-Studio', value=data['studios']['nodes'][0]['name'], inline=True)
except IndexError:
pass
embed.add_field(name='Ø Score', value=data['averageScore'], inline=True)
embed.add_field(name='Genres', value=', '.join(data['genres']), inline=False)
tags = ''
for tag in data['tags']:
tags += tag['name'] + ', '
embed.add_field(name='Tags', value=tags[:-2], inline=False)
try:
embed.add_field(name='Adaptiert von', value=data['source'].replace('_', ' ').title(), inline=True)
except AttributeError:
pass
embed.add_field(name='AniList Link', value=data['siteUrl'], inline=False)
embed.add_field(name='MyAnimeList Link', value='https://myanimelist.net/anime/' + str(data['idMal']), inline=False)
await ctx.send(embed=embed)
else:
await ctx.send(':x: Konnte keinen passenden Anime finden!')
@commands.command()
async def manga(self, ctx, *, mangaName: str):
        '''Searches AniList.co for a manga and returns its basic information
        Example:
        -----------
        :manga Air Gear
        '''
api = 'https://graphql.anilist.co'
query = '''
query ($name: String){
Media(search: $name, type: MANGA) {
id
idMal
description
title {
romaji
english
}
coverImage {
large
}
startDate {
year
month
day
}
endDate {
year
month
day
}
status
chapters
volumes
averageScore
meanScore
genres
tags {
name
}
siteUrl
}
}
'''
variables = {
'name': mangaName
}
async with aiohttp.ClientSession() as session:
async with session.post(api, json={'query': query, 'variables': variables}, headers = self.bot.userAgentHeaders) as r:
if r.status == 200:
json = await r.json()
data = json['data']['Media']
embed = discord.Embed(color=ctx.author.top_role.colour)
embed.set_footer(text='API provided by AniList.co | ID: {}'.format(str(data['id'])))
embed.set_thumbnail(url=data['coverImage']['large'])
                    if data['title']['english'] is None or data['title']['english'] == data['title']['romaji']:
embed.add_field(name='Titel', value=data['title']['romaji'], inline=False)
else:
embed.add_field(name='Titel', value='{} ({})'.format(data['title']['english'], data['title']['romaji']), inline=False)
#embed.add_field(name='Beschreibung', value=data['description'], inline=False)
                    if data['chapters'] is not None:
# https://github.com/AniList/ApiV2-GraphQL-Docs/issues/47
embed.add_field(name='Kapitel', value=data['chapters'], inline=True)
embed.add_field(name='Bände', value=data['volumes'], inline=True)
embed.add_field(name='Gestartet', value='{}.{}.{}'.format(data['startDate']['day'], data['startDate']['month'], data['startDate']['year']), inline=True)
                    if data['endDate']['day'] is not None:
embed.add_field(name='Beendet', value='{}.{}.{}'.format(data['endDate']['day'], data['endDate']['month'], data['endDate']['year']), inline=True)
embed.add_field(name='Status', value=data['status'].replace('_', ' ').title(), inline=True)
embed.add_field(name='Ø Score', value=data['averageScore'], inline=True)
embed.add_field(name='Genres', value=', '.join(data['genres']), inline=False)
tags = ''
for tag in data['tags']:
tags += tag['name'] + ', '
embed.add_field(name='Tags', value=tags[:-2], inline=False)
embed.add_field(name='AniList Link', value=data['siteUrl'], inline=False)
                    embed.add_field(name='MyAnimeList Link', value='https://myanimelist.net/manga/' + str(data['idMal']), inline=False)
await ctx.send(embed=embed)
else:
await ctx.send(':x: Konnte keinen passenden Manga finden!')
@commands.command(aliases=['sauce', 'iqdb'])
async def saucenao(self, ctx, url: str = None):
        '''Tries to find the source of an anime image
        Example:
        -----------
        :saucenao
        :saucenao https://i.imgur.com/nmnVtgs.jpg
        '''
        if url is None:
async for message in ctx.channel.history(before=ctx.message):
try:
url = message.attachments[0].url
                    break  # use the most recent message that has an attachment
except IndexError:
pass
elif not url.endswith(('.jpg', '.png', '.bmp', '.gif', '.jpeg')):
await ctx.send(':x: Keine korrekte URL angegeben!')
return
tmp = await ctx.send(f'Versuche die Quelle des Bildes <{url}> zu finden ...')
saucenao = f'http://saucenao.com/search.php?db=999&url={url}'
async with aiohttp.ClientSession(headers = self.bot.userAgentHeaders) as cs:
async with cs.get(saucenao) as r:
#Thanks to https://github.com/MistressMamiya/hsauce_bot/blob/master/get_source.py
content = await r.text()
content = content.split('Low similarity results')[0] # Get rid of the low similarity results
artist = re.search(r'<strong>Creator: <\/strong>(.*?)<br', content)
anime = re.search(r'<strong>Material: <\/strong>(.*?)<br', content)
characters = re.search(r'<strong>Characters: <\/strong><br \/>(.*?)<br \/></div>', content)
pixiv = re.search(r'<strong>Pixiv ID: </strong><a href=\"(.*?)\" class', content)
danbooru = re.search(r'<a href=\"https://danbooru\.donmai\.us/post/show/(\d+)\">', content)
gelbooru = re.search(r'<a href=\"https://gelbooru\.com/index\.php\?page=post&s=view&id=(\d+)\">', content)
yandere = re.search(r'<a href=\"https://yande\.re/post/show/(\d+)\">', content)
konachan = re.search(r'<a href=\"http://konachan\.com/post/show/(\d+)\">', content)
sankaku = re.search(r'<a href=\"https://chan\.sankakucomplex\.com/post/show/(\d+)\">', content)
embed = discord.Embed()
embed.set_footer(text='Provided by https://saucenao.com')
embed.set_thumbnail(url=url)
if anime:
embed.add_field(name='Anime', value=anime.group(1), inline=True)
if artist:
embed.add_field(name='Artist', value=artist.group(1), inline=True)
if characters:
embed.add_field(name='Charaktere', value=str(characters.group(1)).replace('<br />', ', '), inline=True)
if pixiv:
embed.add_field(name='Pixiv Link', value=pixiv.group(1), inline=False)
if danbooru:
embed.add_field(name='Danbooru Link', value='https://danbooru.donmai.us/post/show/' + danbooru.group(1), inline=False)
if gelbooru:
embed.add_field(name='Gelbooru Link', value='https://gelbooru.com/index.php?page=post&s=view&id=' + gelbooru.group(1), inline=False)
if yandere:
embed.add_field(name='Yande.re Link', value='https://yande.re/post/show/' + yandere.group(1), inline=False)
if konachan:
embed.add_field(name='Konachan Link', value='http://konachan.com/post/show/' + konachan.group(1), inline=False)
if sankaku:
embed.add_field(name='Sankaku Link', value='https://chan.sankakucomplex.com/post/show/' + sankaku.group(1), inline=False)
if anime or artist or characters or pixiv or danbooru or gelbooru or yandere or konachan or sankaku:
await tmp.edit(content='', embed=embed)
else:
await tmp.edit(content=':x: Konnte nichts finden!')
# @commands.command(pass_context=True, hidden=True)
# async def imgur(self, ctx, amount: int = None):
# '''Lädt eine bestimmte Anzahl der letzten hochgeladenen Bilder im Channel bei Imgur hoch'''
# await ctx.send(':new: Befehl in Arbeit!')
#
# @commands.command(pass_context=True, alias=['ani'], hidden=True)
# async def anisearch(self, ctx, url: str = None):
# '''Gibt Informationen über einen AniSearch.de User zurück'''
# async with aiohttp.get(url) as r:
# if r.status == 200:
# content = await r.text()
# animeRE = r"<td class=\"rtype2\">\w+</td><td>(\d+)</td>"
# watchedAnimes = re.search(content, animeRE)
# await ctx.send(str(watchedAnimes.group(0)))
# else:
# await ctx.send(':x: Konnte den Benutzer nicht finden (falsche URL?)')
def setup(bot):
bot.add_cog(anime(bot))
|
py
|
1a59c2081daa8a4f8b6197bd4b885d118609a3b0
|
_base_ = "finetune-eval-base.py"
# dataset settings
data_source_cfg = dict(
type="ImageNet",
memcached=False,
mclient_path='/no/matter',
# this will be ignored if type != ImageListMultihead
)
data_train_list = "data/flowers/meta/train-1000.txt"
data_train_root = 'data/flowers'
data_val_list = "data/flowers/meta/val.txt"
data_val_root = 'data/flowers'
data_test_list = "data/flowers/meta/test.txt"
data_test_root = 'data/flowers'
dataset_type = "ClassificationDataset"
img_norm_cfg = dict(mean=[0.4355,0.3777,0.2880], std=[0.2970, 0.2459, 0.2705])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
data = dict(
batch_size=64,
workers_per_gpu=6,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_source=dict(
list_file=data_val_list, root=data_val_root, **data_source_cfg),
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_source=dict(
list_file=data_test_list, root=data_test_root, **data_source_cfg),
pipeline=test_pipeline))
custom_hooks = [
dict(
name="val",
type='ValidateHook',
dataset=data['val'],
by_epoch=False,
initial=False,
interval=100,
imgs_per_gpu=32,
workers_per_gpu=6,
eval_param=dict(topk=(1,5))),
dict(
name="test",
type='ValidateHook',
dataset=data['test'],
by_epoch=False,
initial=False,
interval=100,
imgs_per_gpu=32,
workers_per_gpu=6,
eval_param=dict(topk=(1,5))),
]
by_iter = True
# learning policy
lr_config = dict(
by_epoch=False,
policy='step',
step=[833,1667],
gamma=0.1 # multiply LR by this number at each step
)
# momentum and weight decay from VTAB and IDRL
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.,
                 paramwise_options={r'\Ahead.': dict(lr_mult=100)})
# runtime settings
# total iters or total epochs
total_iters=2500
checkpoint_config = dict(interval=10000)
log_config = dict(
interval=1,
by_epoch=False,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook', by_epoch=False)
])
optimizer_config = dict(update_interval=4)
|
py
|
1a59c22312e70ce831e4a2f8dc8fe912ee85e779
|
#!/usr/bin/python
# This file is part of python-registry.
#
# Copyright 2011 Will Ballenthin <[email protected]>
# while at Mandiant <http://www.mandiant.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Find all Registry paths, value names, and values that
# contain the given string.
#
# python findkey.py <registry file> <needle>
#
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
from Registry import Registry
def main():
parser = argparse.ArgumentParser(
description="Search for a string in a Windows Registry hive")
parser.add_argument("registry_hive", type=str,
help="Path to the Windows Registry hive to process")
parser.add_argument("query", type=str,
help="Query for which to search")
parser.add_argument("-i", action="store_true", dest="case_insensitive",
help="Query for which to search")
args = parser.parse_args()
paths = []
value_names = []
values = []
def rec(key, depth, needle):
for value in key.values():
if (args.case_insensitive and needle in value.name().lower()) or needle in value.name():
value_names.append((key.path(), value.name()))
sys.stdout.write("n")
sys.stdout.flush()
try:
if (args.case_insensitive and needle in str(value.value()).lower()) or needle in str(value.value()):
values.append((key.path(), value.name()))
sys.stdout.write("v")
sys.stdout.flush()
except UnicodeEncodeError:
pass
except UnicodeDecodeError:
pass
for subkey in key.subkeys():
            if (args.case_insensitive and needle in subkey.name().lower()) or needle in subkey.name():
paths.append(subkey.path())
sys.stdout.write("p")
sys.stdout.flush()
rec(subkey, depth + 1, needle)
reg = Registry.Registry(args.registry_hive)
needle = args.query
if args.case_insensitive:
needle = needle.lower()
rec(reg.root(), 0, needle)
print("")
print("[Paths]")
for path in paths:
print(" - %s" % (path))
if len(paths) == 0:
print(" (none)")
print("")
print("[Value Names]")
for pair in value_names:
print(" - %s : %s" % (pair[0], pair[1]))
if len(value_names) == 0:
print(" (none)")
print("")
print("[Values]")
for pair in values:
print(" - %s : %s" % (pair[0], pair[1]))
if len(values) == 0:
print(" (none)")
print("")
if __name__ == "__main__":
main()
|
py
|
1a59c398816baf3536248f97437c926d9fb6daaf
|
# -*- coding: utf-8 -*-
from django.urls import path
from . import views
urlpatterns = [
path('', views.AIPicsPageView.as_view(), name="list"),
path('<int:pk>', views.AIPicsDetailView.as_view(), name="detail"),
path('api/set-api-pic-state', views.ApiSetAiPicStateView.as_view(), name='set-api-pic-state'),
path('api/delete-api-pic', views.ApiDeleteAiPicsView.as_view(), name='delete-api-pic'),
path('api/delete-attachment', views.ApiDeleteAttachmentView.as_view(), name='delete-attachment'),
]
|
py
|
1a59c45464046dcd340c166e3ed797f82c9e2269
|
import altair as alt
from altair_transform import extract_data, transform_chart
import numpy as np
import pandas as pd
import pytest
@pytest.fixture
def data():
rand = np.random.RandomState(42)
return pd.DataFrame(
{
"x": rand.randint(0, 100, 12),
"y": rand.randint(0, 100, 12),
"t": pd.date_range("2012-01-15", freq="M", periods=12),
"i": range(12),
"c": list("AAABBBCCCDDD"),
}
)
@pytest.fixture
def chart(data):
return (
alt.Chart(data)
.transform_calculate(xpy="datum.x + datum.y", xmy="datum.x - datum.y")
.mark_point()
.encode(x="xpy:Q", y="xmy:Q")
)
def test_extract_data(data, chart):
out1 = extract_data(chart)
out2 = data.copy()
out2["xpy"] = data.x + data.y
out2["xmy"] = data.x - data.y
assert out1.equals(out2)
def test_transform_chart(data, chart):
original_chart = chart.copy()
data_out = extract_data(chart)
chart_out = transform_chart(chart)
# Original chart not modified
assert original_chart == chart
# Transform applied to output chart
assert chart_out.data.equals(data_out)
assert chart_out.transform is alt.Undefined
assert chart.mark == chart_out.mark
assert chart.encoding == chart_out.encoding
def test_transform_chart_with_aggregate():
data = pd.DataFrame({"x": list("AABBBCCCC")})
chart = alt.Chart(data).mark_bar().encode(x="x:N", y="count():Q")
chart_out = transform_chart(chart)
assert chart_out.data.equals(pd.DataFrame({"x": list("ABC"), "__count": [2, 3, 4]}))
assert chart_out.encoding.to_dict() == {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "__count", "type": "quantitative", "title": "Count of Records"},
}
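# A minimal additional sketch (not from the original test suite): the same
# extract_data helper applied to a filter transform. Only column membership and
# the filter predicate are asserted, since exact index handling is an
# implementation detail of altair_transform.
def test_extract_data_with_filter_sketch(data):
    chart = (
        alt.Chart(data)
        .transform_filter("datum.x > 50")
        .mark_point()
        .encode(x="x:Q", y="y:Q")
    )
    out = extract_data(chart)
    assert (out["x"] > 50).all()
    assert set(out.columns) == set(data.columns)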
|
py
|
1a59c4961db259347ff8e7cb7d37d976410a97d1
|
import itertools
import re
from cytoolz import (
compose,
curry,
)
from eth_utils import (
remove_0x_prefix,
to_dict,
)
from .filesystem import (
is_under_path,
)
from .hexadecimal import (
hexbytes_to_hexstr,
)
from .string import (
normalize_class_name,
)
def is_project_contract(contracts_source_dirs, contract_data):
return any(
is_under_path(source_dir, contract_data['source_path'])
for source_dir
in contracts_source_dirs
)
def is_test_contract(tests_dir, contract_data):
return is_under_path(tests_dir, contract_data['source_path'])
def package_contracts(contract_factories):
_dict = {
'__len__': lambda s: len(contract_factories),
'__iter__': lambda s: iter(contract_factories.items()),
'__contains__': lambda s, k: contract_factories.__contains__(k),
'__getitem__': lambda s, k: contract_factories.__getitem__(k),
'__setitem__': lambda s, k, v: contract_factories.__setitem__(k, v),
'keys': lambda s: contract_factories.keys(),
'values': lambda s: contract_factories.values(),
}
_dict.update(contract_factories)
return type('contracts', (object,), _dict)()
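# Hypothetical illustration (not part of populus): the object returned by
# package_contracts exposes the factories both through the mapping-style dunder
# methods defined above and as plain attributes, because the factory dict is
# merged into the generated class namespace.
def _example_package_contracts():
    factories = {'Greeter': object(), 'Token': object()}
    contracts = package_contracts(factories)
    assert len(contracts) == 2
    assert 'Greeter' in contracts
    assert contracts['Token'] is factories['Token']
    assert contracts.Greeter is factories['Greeter']
    return contracts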
CONTRACT_FACTORY_FIELDS = {
'abi',
'asm',
'ast',
'bytecode',
'bytecode_runtime',
'clone_bin',
'dev_doc',
'interface',
'metadata',
'opcodes',
'src_map',
'src_map_runtime',
'user_doc',
}
def create_contract_factory(web3, contract_name, contract_data):
factory_kwargs = {
key: contract_data[key]
for key
in CONTRACT_FACTORY_FIELDS
if key in contract_data
}
return web3.eth.contract(
contract_name=normalize_class_name(contract_name),
**factory_kwargs
)
def construct_contract_factories(web3, compiled_contracts):
contract_classes = {
contract_name: create_contract_factory(
web3,
contract_name,
contract_data,
)
for contract_name, contract_data
in compiled_contracts.items()
}
return package_contracts(contract_classes)
@to_dict
def compute_direct_dependency_graph(compiled_contracts):
"""
Given a dictionary or mapping of compiled contract data, this returns a *shallow*
    dependency graph of each contract's explicit link dependencies.
"""
for contract_data in compiled_contracts:
yield (
contract_data['name'],
contract_data['direct_dependencies'],
)
def compute_recursive_contract_dependencies(contract_name, dependency_graph):
"""
Recursive computation of the linker dependencies for a specific contract
within a contract dependency graph.
"""
direct_dependencies = dependency_graph.get(contract_name, set())
sub_dependencies = itertools.chain.from_iterable((
compute_recursive_contract_dependencies(dep, dependency_graph)
for dep in direct_dependencies
))
return set(itertools.chain(direct_dependencies, sub_dependencies))
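# Hypothetical illustration (not part of populus): a toy dependency graph
# showing how the recursion above flattens transitive link dependencies.
def _example_recursive_dependencies():
    # 'Sale' links against 'Token', which itself links against 'SafeMath'.
    dependency_graph = {
        'Sale': {'Token'},
        'Token': {'SafeMath'},
        'SafeMath': set(),
    }
    # Returns {'Token', 'SafeMath'}
    return compute_recursive_contract_dependencies('Sale', dependency_graph)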
CONTRACT_NAME_REGEX = '^[_a-zA-Z][_a-zA-Z0-9]*$'
def is_contract_name(value):
return bool(re.match(CONTRACT_NAME_REGEX, value))
EMPTY_BYTECODE_VALUES = {None, "0x"}
SWARM_HASH_PREFIX = "a165627a7a72305820"
SWARM_HASH_SUFFIX = "0029"
EMBEDDED_SWARM_HASH_REGEX = (
SWARM_HASH_PREFIX +
"[0-9a-zA-Z]{64}" +
SWARM_HASH_SUFFIX +
"$"
)
SWARM_HASH_REPLACEMENT = (
SWARM_HASH_PREFIX +
"<" +
"-" * 20 +
"swarm-hash-placeholder" +
"-" * 20 +
">" +
SWARM_HASH_SUFFIX
)
PUSH20_OPCODE = "73"
ADDRESS_OPCODE = "30"
EQ_OPCODE = "14"
EMBEDDED_ADDRESS_REGEX = (
'^' +
PUSH20_OPCODE +
"[0-9a-f]{40}" +
ADDRESS_OPCODE +
EQ_OPCODE
)
ADDRESS_REPLACEMENT = (
PUSH20_OPCODE +
"<" +
"-" * 9 +
"address-place-holder" +
"-" * 9 +
">" +
ADDRESS_OPCODE +
EQ_OPCODE
)
def compare_bytecode(left, right):
unprefixed_left = remove_0x_prefix(left)
unprefixed_right = remove_0x_prefix(right)
sub = curry(re.sub)
norm_pipeline = compose(
sub(EMBEDDED_SWARM_HASH_REGEX, SWARM_HASH_REPLACEMENT),
sub(EMBEDDED_ADDRESS_REGEX, ADDRESS_REPLACEMENT)
)
norm_left = norm_pipeline(unprefixed_left)
norm_right = norm_pipeline(unprefixed_right)
if len(norm_left) != len(unprefixed_left) or len(norm_right) != len(unprefixed_right):
raise ValueError(
"Invariant. Normalized bytecodes are not the correct lengths:" +
"\n- left (original) :" +
left +
"\n- left (unprefixed):" +
unprefixed_left +
"\n- left (normalized):" +
norm_left +
"\n- right (original) :" +
right +
"\n- right (unprefixed):" +
unprefixed_right +
"\n- right (normalized):" +
norm_right
)
return norm_left == norm_right
def verify_contract_bytecode(web3, expected_bytecode, address):
"""
TODO: write tests for this.
"""
from populus.contracts.exceptions import BytecodeMismatch
expected_bytecode = hexbytes_to_hexstr(expected_bytecode)
# Check that the contract has bytecode
if expected_bytecode in EMPTY_BYTECODE_VALUES:
raise ValueError(
"Contract instances which contain an address cannot have empty "
"runtime bytecode"
)
chain_bytecode = hexbytes_to_hexstr(web3.eth.getCode(address))
if chain_bytecode in EMPTY_BYTECODE_VALUES:
raise BytecodeMismatch(
"No bytecode found at address: {0}".format(address)
)
elif not compare_bytecode(chain_bytecode, expected_bytecode):
raise BytecodeMismatch(
"Bytecode found at {0} does not match compiled bytecode:\n"
" - chain_bytecode: {1}\n"
" - compiled_bytecode: {2}".format(
address,
chain_bytecode,
expected_bytecode,
)
)
|
py
|
1a59c5706550dc75ac00caaaabbe2d9733b0fb3a
|
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Copyright 2019 The OSArchiver Authors. All rights reserved.
"""
OSArchiver's Source class that implement a db backend
"""
import re
import time
import logging
import pymysql
import arrow
from numpy import array_split
from osarchiver.source import Source
from osarchiver.common.db import DbBase
NOT_OS_DB = ['mysql', 'performance_schema', 'information_schema']
class Db(Source, DbBase):
"""
Database backend of OSArchiver's Source
"""
def __init__(self,
databases=None,
tables=None,
delete_data=0,
excluded_databases='',
excluded_tables='',
where='1=1 LIMIT 0',
archive_data=None,
name=None,
destination=None,
**kwargs):
"""
Create a Source instance with relevant configuration parameters given
in arguments
"""
self.databases = databases
self.tables = tables
self.excluded_databases = NOT_OS_DB
self.excluded_databases.extend([
d for d in re.split(r',|;|\n', excluded_databases)
if d not in NOT_OS_DB
])
self.excluded_tables = excluded_tables
self.archive_data = archive_data
self.delete_data = delete_data
self.destination = destination
self._databases_to_archive = []
self._tables_to_archive = {}
self.tables_with_circular_fk = []
        # When selecting data, be sure to use the same date to prevent selecting
        # parent data newer than children data. It is the responsibility of
        # the operator to use the {now} formatting value in the where option of
        # the configuration file. If {now} is omitted, it is possible to
        # get foreign key check errors because of parent data newer than
        # children data.
self.now = arrow.utcnow().format(fmt='YYYY-MM-DD HH:mm:ss')
self.where = where.format(now=self.now)
Source.__init__(self, backend='db', name=name)
DbBase.__init__(self, **kwargs)
def __repr__(self):
return "Source {name} [Backend:{backend} Host:{host} - DB:{db} - "\
"Tables:{tables}]".format(backend=self.backend, db=self.databases,
name=self.name, tables=self.tables,
host=self.host)
def databases_to_archive(self):
"""
        Return a list of databases that are eligible for archiving. If no
        databases are provided, or the * character is used, the method basically
        does a SHOW DATABASES to get the available databases.
        The method excludes the databases that are explicitly excluded.
"""
if self._databases_to_archive:
return self._databases_to_archive
if self.databases is None or self.databases == '*':
self._databases_to_archive = self.get_os_databases()
else:
self._databases_to_archive = [
d for d in re.split(r',|;|\n', self.databases)
]
excluded_databases_regex = \
"^(" + "|".join(self.excluded_databases) + ")$"
self._databases_to_archive = [
d for d in self._databases_to_archive
if not re.match(excluded_databases_regex, d)
]
return self._databases_to_archive
def tables_to_archive(self, database=None):
"""
        For a given database, return the list of tables that are eligible for
        archiving.
        - Retrieve tables if needed (*, or empty)
        - Check that tables have the 'deleted_at' column (deleted_column
          parameter)
        - Exclude tables in excluded_tables
        - Reorder tables depending on foreign key dependencies
"""
if database is None:
logging.warning("Can not call tables_to_archive on None database")
return []
if database in self._tables_to_archive:
return self._tables_to_archive[database]
database_tables = [
v[0] for (i, v) in enumerate(self.get_database_tables(database))
]
logging.info("Tables list of database '%s': %s", database,
database_tables)
        # Step 1: get all the tables we want to archive
        # no table specified or wildcard used means we want all tables
# else we filter against the tables specified
if self.tables is None or self.tables == '*':
self._tables_to_archive[database] = database_tables
else:
self._tables_to_archive[database] = \
[t for t in re.split(r',|;|\n', self.tables)
if t in database_tables]
# Step 2: verify that all tables have the deleted column 'deleted_at'
logging.debug("Verifying that tables have the '%s' column",
self.deleted_column)
tables = []
for table in self._tables_to_archive[database]:
if not self.table_has_deleted_column(table=table,
database=database):
logging.debug(
"Table '%s' has no column named '%s',"
" ignoring it", table, self.deleted_column)
continue
tables.append(table)
# update self._tables_to_archive with the filtered tables
self._tables_to_archive[database] = tables
# Step 3: then exclude to one explicitly given
excluded_tables_regex = "^(" + "|".join(
re.split(r',|;|\n', self.excluded_tables)) + ")$"
logging.debug("Ignoring tables matching '%s'", excluded_tables_regex)
self._tables_to_archive[database] = [
t for t in self._tables_to_archive[database]
if not re.match(excluded_tables_regex, t)
]
        # Step 4: for each table retrieve child tables referencing the parent
        # table and order them children first, parents after
ordered_tables = []
for table in self._tables_to_archive[database]:
children = self.get_linked_tables(database=database, table=table)
for child in children:
                # never do the same thing twice
if child['table_name'] in ordered_tables:
ordered_tables.remove(child['table_name'])
# check if table was already checked for deleted column
if not child['table_name'] in \
self._tables_to_archive[database]:
if not self.table_has_deleted_column(
table=child['table_name'],
database=child['table_schema']):
logging.debug(
"Child table '%s' has not column named "
"'%s', can not handle it", child['table_name'],
self.deleted_column)
continue
ordered_tables.append(child['table_name'])
self._tables_to_archive[database] = ordered_tables
logging.debug(
"Tables ordered depending foreign key dependencies: "
"'%s'", self._tables_to_archive[database])
return self._tables_to_archive[database]
def get_linked_tables(self, database=None, table=None):
"""
For a given database.table return tables that have a foreign key
dependency of the current table
"""
children = self.get_tables_with_fk(database=database, table=table)
logging.debug("Ordered tables to archive from %s.%s: %s", database,
table, children)
children_tables = []
for child in children:
if child['table_schema'] == database and \
child['table_name'] == table:
self.tables_with_circular_fk.append('{db}.{table}'.format(
db=database, table=table))
continue
grandchildren = self.get_linked_tables(database=database,
table=child['table_name'])
for grandchild in grandchildren:
if grandchild in children_tables:
children_tables.remove(grandchild)
children_tables.append(grandchild)
children_tables.append({'table_name': table, 'table_schema': database})
logging.debug("Returned child tables of %s.%s: %s", database, table,
children_tables)
return children_tables
def select(self, limit=None, database=None, table=None):
"""
        Select data from a database.table, applying the given limit or the default one.
        The per-set selection depends on the primary key type (int vs uuid).
        In case of int:
        SELECT * FROM <db>.<table> WHERE <pk> > <last_selected_id> AND ...
        In case of uuid (UUIDs are not naturally ordered, so we sort them):
SELECT * FROM <db>.<table> WHERE <pk> > "<last_selected_id>" AND...
ORDER BY <pk>
"""
offset = 0
last_selected_id = 0
# Use primary key column to improve performance on large
# dataset vs using OFFSET
primary_key = self.get_table_primary_key(database=database,
table=table)
if limit is None:
limit = self.select_limit
sql = "SELECT * FROM `{database}`.`{table}` WHERE {pk} > "\
"'{last_id}' AND {where} LIMIT {limit}"
pk_type_checked = False
while True:
formatted_sql = sql.format(database=database,
table=table,
where=self.where,
limit=limit,
last_id=last_selected_id,
pk=primary_key,
offset=offset)
result = self.db_request(sql=formatted_sql,
cursor_type=pymysql.cursors.DictCursor,
database=database,
table=table,
fetch_method='fetchall')
logging.info("Fetched %s result in %s.%s", len(result), database,
table)
if not result:
break
last_selected_id = result[-1][primary_key]
yield result
offset += len(result)
if pk_type_checked is False:
                # If the primary key is a digit, remove the single quotes around
                # the last_id value for performance purposes
if str(last_selected_id).isdigit():
                    # remove the single quotes around the id
sql = "SELECT * FROM `{database}`.`{table}` WHERE {pk} >"\
" {last_id} AND {where} LIMIT {limit}"
else:
                    # else this is a string, so we force an ORDER BY on that
                    # column to simulate an integer primary key
sql = "SELECT * FROM `{database}`.`{table}` WHERE {pk} >"\
" '{last_id}' AND {where} ORDER BY {pk} LIMIT {limit}"
pk_type_checked = True
def read(self, limit=None):
"""
The read method that has to be implemented (Source abstract class)
"""
databases_to_archive = self.databases_to_archive()
logging.info("Database elected for archiving: %s",
databases_to_archive)
for database in databases_to_archive:
tables_to_archive = self.tables_to_archive(database=database)
logging.info("Tables elected for archiving: %s", tables_to_archive)
for table in tables_to_archive:
logging.info("%s.%s is to archive", database, table)
yield {
'database':
database,
'table':
table,
'data':
self.select(limit=limit, database=database, table=table)
}
def delete_set(self, database=None, table=None, limit=None, data=None):
"""
Delete a set of data using the primary_key of table
"""
if not self.delete_data:
logging.info(
"Ignoring delete step because delete_data is set to"
" %s", self.delete_data)
return
if limit is None:
limit = self.delete_limit
primary_key = self.get_table_primary_key(database=database,
table=table)
# Check if primary key is a digit to prevent casting by MySQL and
# optimize the request, store the value in metadata for caching
pk_is_digit = self.get_metadata(database=database,
table=table,
key='pk_is_digit')
if pk_is_digit is None:
pk_is_digit = str(data[0][primary_key]).isdigit()
self.add_metadata(database=database,
table=table,
key='pk_is_digit',
value=pk_is_digit)
def create_array_chunks(array, chunk_size):
for i in range(0, len(array), chunk_size):
yield array[i:i + chunk_size]
        # For performance purposes, split data into chunks of length=limit
for subdata in list(create_array_chunks(data, limit)):
if pk_is_digit:
ids = ', '.join([str(d[primary_key]) for d in subdata])
else:
ids = '"' + '", "'.join([str(d['id']) for d in subdata]) + '"'
total_deleted_count = 0
# equivalent to a while True but we know why we are looping
while "there are rows to delete":
if total_deleted_count > 0:
logging.debug(
"Waiting %s seconds before deleting next"
"subset of data ", self.delete_loop_delay)
time.sleep(int(self.delete_loop_delay))
sql = "DELETE FROM `{database}`.`{table}` WHERE "\
"`{pk}` IN ({ids}) LIMIT {limit}".format(
database=database,
table=table,
ids=ids,
pk=primary_key,
limit=limit)
foreign_key_check = \
'{db}.{table}'.format(db=database, table=table) \
not in self.tables_with_circular_fk
count = self.db_request(sql=sql,
foreign_key_check=foreign_key_check,
database=database,
table=table)
logging.info("%s rows deleted from %s.%s", count, database,
table)
total_deleted_count += count
if int(count) < int(limit) or \
total_deleted_count == len(subdata):
logging.debug("No more row to delete in this data set")
break
logging.debug("Waiting %s seconds after a deletion",
self.delete_loop_delay)
time.sleep(int(self.delete_loop_delay))
def delete(self, database=None, table=None, limit=None, data=None):
"""
The delete method that has to be implemented (Source abstract class)
"""
try:
self.delete_set(database=database,
table=table,
limit=limit,
data=data)
except pymysql.err.IntegrityError as integrity_error:
            # A foreign key constraint failure usually comes from an error
            # while processing OpenStack tasks. To avoid never deleting the
            # rest of a data set, we re-run the delete on half of the set
            # whenever we catch an integrity error (1451), until the single
            # offending row is isolated.
if integrity_error.args[0] != 1451:
raise integrity_error
# we caught the row causing integrity error
if len(data) == 1:
logging.error("OSArchiver hit a row that will never be deleted"
" unless you fix remaining chlidren data")
logging.error("Parent row that can not be deleted: %s", data)
logging.error("To get children items:")
logging.error(
self.integrity_exception_select_statement(
error=integrity_error.args[1], row=data[0]))
logging.error("Here a POTENTIAL fix, ensure BEFORE that data "
"should be effectively deleted, then run "
"osarchiver again:")
logging.error(
self.integrity_exception_potential_fix(
error=integrity_error.args[1], row=data[0]))
else:
logging.error("Integrity error caught, deleting with "
"dichotomy")
for subdata in array_split(data, 2):
logging.debug(
"Dichotomy delete with a set of %s data "
"length", len(subdata))
self.delete(database=database,
table=table,
data=subdata,
limit=len(subdata))
def clean_exit(self):
"""
Tasks to be executed to exit cleanly:
- Disconnect from the database
"""
logging.info("Closing source DB connection")
self.disconnect()
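# Hypothetical stand-alone sketch (not part of OSArchiver): the dichotomy
# strategy used by Db.delete() above, isolated from the SQL layer. When a batch
# cannot be deleted because of an integrity error, split it in two and retry
# each half until the single offending row is isolated.
def _dichotomy_delete_sketch(rows, try_delete):
    """rows: list of rows; try_delete: callable that raises on failure."""
    try:
        try_delete(rows)
        return []
    except Exception:
        if len(rows) == 1:
            # the row that can never be deleted; report it to the operator
            return list(rows)
        mid = len(rows) // 2
        return (_dichotomy_delete_sketch(rows[:mid], try_delete)
                + _dichotomy_delete_sketch(rows[mid:], try_delete))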
|
py
|
1a59c5fd3c5385a45d644a6f00902ba0379f36b1
|
"""Tests for the GogoGate2 component."""
from datetime import timedelta
from unittest.mock import MagicMock, patch
from ismartgate import GogoGate2Api, ISmartGateApi
from ismartgate.common import (
DoorMode,
DoorStatus,
GogoGate2ActivateResponse,
GogoGate2Door,
GogoGate2InfoResponse,
ISmartGateDoor,
ISmartGateInfoResponse,
Network,
Outputs,
Wifi,
)
from homeassistant.components.gogogate2.const import DEVICE_TYPE_ISMARTGATE, DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_UNIT_OF_MEASUREMENT,
CONF_DEVICE,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_USERNAME,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.util.dt import utcnow
from tests.common import MockConfigEntry, async_fire_time_changed
def _mocked_gogogate_sensor_response(battery_level: int, temperature: float):
return GogoGate2InfoResponse(
user="user1",
gogogatename="gogogatename0",
model="",
apiversion="",
remoteaccessenabled=False,
remoteaccess="abc123.blah.blah",
firmwareversion="",
apicode="",
door1=GogoGate2Door(
door_id=1,
permission=True,
name="Door1",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.OPENED,
sensor=True,
sensorid="ABCD",
camera=False,
events=2,
temperature=temperature,
voltage=battery_level,
),
door2=GogoGate2Door(
door_id=2,
permission=True,
name="Door2",
gate=True,
mode=DoorMode.GARAGE,
status=DoorStatus.UNDEFINED,
sensor=True,
sensorid="WIRE",
camera=False,
events=0,
temperature=temperature,
voltage=battery_level,
),
door3=GogoGate2Door(
door_id=3,
permission=True,
name="Door3",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.UNDEFINED,
sensor=True,
sensorid=None,
camera=False,
events=0,
temperature=temperature,
voltage=battery_level,
),
outputs=Outputs(output1=True, output2=False, output3=True),
network=Network(ip=""),
wifi=Wifi(SSID="", linkquality="", signal=""),
)
def _mocked_ismartgate_sensor_response(battery_level: int, temperature: float):
return ISmartGateInfoResponse(
user="user1",
ismartgatename="ismartgatename0",
model="ismartgatePRO",
apiversion="",
remoteaccessenabled=False,
remoteaccess="abc321.blah.blah",
firmwareversion="555",
pin=123,
lang="en",
newfirmware=False,
door1=ISmartGateDoor(
door_id=1,
permission=True,
name="Door1",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.CLOSED,
sensor=True,
sensorid="ABCD",
camera=False,
events=2,
temperature=temperature,
enabled=True,
apicode="apicode0",
customimage=False,
voltage=battery_level,
),
door2=ISmartGateDoor(
door_id=2,
permission=True,
name="Door2",
gate=True,
mode=DoorMode.GARAGE,
status=DoorStatus.CLOSED,
sensor=True,
sensorid="WIRE",
camera=False,
events=2,
temperature=temperature,
enabled=True,
apicode="apicode0",
customimage=False,
voltage=battery_level,
),
door3=ISmartGateDoor(
door_id=3,
permission=True,
name="Door3",
gate=False,
mode=DoorMode.GARAGE,
status=DoorStatus.UNDEFINED,
sensor=True,
sensorid=None,
camera=False,
events=0,
temperature=temperature,
enabled=True,
apicode="apicode0",
customimage=False,
voltage=battery_level,
),
network=Network(ip=""),
wifi=Wifi(SSID="", linkquality="", signal=""),
)
@patch("homeassistant.components.gogogate2.common.GogoGate2Api")
async def test_sensor_update(gogogate2api_mock, hass: HomeAssistant) -> None:
"""Test data update."""
bat_attributes = {
"device_class": "battery",
"door_id": 1,
"friendly_name": "Door1 battery",
"sensor_id": "ABCD",
"state_class": "measurement",
}
temp_attributes = {
"device_class": "temperature",
"door_id": 1,
"friendly_name": "Door1 temperature",
"sensor_id": "ABCD",
"unit_of_measurement": "°C",
"state_class": "measurement",
}
api = MagicMock(GogoGate2Api)
api.async_activate.return_value = GogoGate2ActivateResponse(result=True)
api.async_info.return_value = _mocked_gogogate_sensor_response(25, 7.0)
gogogate2api_mock.return_value = api
config_entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data={
CONF_IP_ADDRESS: "127.0.0.1",
CONF_USERNAME: "admin",
CONF_PASSWORD: "password",
},
)
config_entry.add_to_hass(hass)
assert hass.states.get("cover.door1") is None
assert hass.states.get("cover.door2") is None
assert hass.states.get("cover.door3") is None
assert hass.states.get("sensor.door1_battery") is None
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door3_battery") is None
assert hass.states.get("sensor.door1_temperature") is None
assert hass.states.get("sensor.door2_temperature") is None
assert hass.states.get("sensor.door3_temperature") is None
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("cover.door1")
assert hass.states.get("cover.door2")
assert hass.states.get("cover.door3")
assert hass.states.get("sensor.door1_battery").state == "25"
assert dict(hass.states.get("sensor.door1_battery").attributes) == bat_attributes
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door1_temperature").state == "7.0"
assert (
dict(hass.states.get("sensor.door1_temperature").attributes) == temp_attributes
)
assert hass.states.get("sensor.door2_temperature") is None
assert hass.states.get("sensor.door3_temperature") is None
api.async_info.return_value = _mocked_gogogate_sensor_response(40, 10.0)
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == "40"
assert hass.states.get("sensor.door1_temperature").state == "10.0"
api.async_info.return_value = _mocked_gogogate_sensor_response(None, None)
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == STATE_UNKNOWN
assert hass.states.get("sensor.door1_temperature").state == STATE_UNKNOWN
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert not hass.states.async_entity_ids(DOMAIN)
@patch("homeassistant.components.gogogate2.common.ISmartGateApi")
async def test_availability(ismartgateapi_mock, hass: HomeAssistant) -> None:
"""Test availability."""
bat_attributes = {
"device_class": "battery",
"door_id": 1,
"friendly_name": "Door1 battery",
"sensor_id": "ABCD",
"state_class": "measurement",
}
temp_attributes = {
"device_class": "temperature",
"door_id": 1,
"friendly_name": "Door1 temperature",
"sensor_id": "ABCD",
"unit_of_measurement": "°C",
"state_class": "measurement",
}
sensor_response = _mocked_ismartgate_sensor_response(35, -4.0)
api = MagicMock(ISmartGateApi)
api.async_info.return_value = sensor_response
ismartgateapi_mock.return_value = api
config_entry = MockConfigEntry(
domain=DOMAIN,
source=SOURCE_USER,
data={
CONF_DEVICE: DEVICE_TYPE_ISMARTGATE,
CONF_IP_ADDRESS: "127.0.0.1",
CONF_USERNAME: "admin",
CONF_PASSWORD: "password",
},
)
config_entry.add_to_hass(hass)
assert hass.states.get("cover.door1") is None
assert hass.states.get("cover.door2") is None
assert hass.states.get("cover.door3") is None
assert hass.states.get("sensor.door1_battery") is None
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door3_battery") is None
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("cover.door1")
assert hass.states.get("cover.door2")
assert hass.states.get("cover.door3")
assert hass.states.get("sensor.door1_battery").state == "35"
assert hass.states.get("sensor.door2_battery") is None
assert hass.states.get("sensor.door3_battery") is None
assert hass.states.get("sensor.door1_temperature").state == "-4.0"
assert hass.states.get("sensor.door2_temperature") is None
assert hass.states.get("sensor.door3_temperature") is None
assert (
hass.states.get("sensor.door1_battery").attributes[ATTR_DEVICE_CLASS]
== DEVICE_CLASS_BATTERY
)
assert (
hass.states.get("sensor.door1_temperature").attributes[ATTR_DEVICE_CLASS]
== DEVICE_CLASS_TEMPERATURE
)
assert (
hass.states.get("sensor.door1_temperature").attributes[ATTR_UNIT_OF_MEASUREMENT]
== "°C"
)
api.async_info.side_effect = Exception("Error")
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == STATE_UNAVAILABLE
assert hass.states.get("sensor.door1_temperature").state == STATE_UNAVAILABLE
api.async_info.side_effect = None
api.async_info.return_value = sensor_response
async_fire_time_changed(hass, utcnow() + timedelta(hours=2))
await hass.async_block_till_done()
assert hass.states.get("sensor.door1_battery").state == "35"
assert dict(hass.states.get("sensor.door1_battery").attributes) == bat_attributes
assert (
dict(hass.states.get("sensor.door1_temperature").attributes) == temp_attributes
)
|
py
|
1a59c643057c3d00b7cb70471815aa91d3eb6706
|
# Original source: https://github.com/pytorch/examples/blob/master/fast_neural_style/neural_style/neural_style.py
import argparse
import os
import sys
import re
from PIL import Image
import torch
from torchvision import transforms
def load_image(filename, size=None, scale=None):
img = Image.open(filename)
if size is not None:
img = img.resize((size, size), Image.ANTIALIAS)
elif scale is not None:
img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
return img
def save_image(filename, data):
img = data.clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype("uint8")
img = Image.fromarray(img)
img.save(filename)
class TransformerNet(torch.nn.Module):
def __init__(self):
super(TransformerNet, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample)
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
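# Hypothetical shape check (not part of the original script): with upsample=2
# and kernel_size=3, UpsampleConvLayer doubles the spatial resolution while the
# reflection padding keeps the convolution from shrinking it again.
def _example_upsample_conv_shapes():
    layer = UpsampleConvLayer(8, 4, kernel_size=3, stride=1, upsample=2)
    x = torch.randn(1, 8, 16, 16)
    return layer(x).shape  # torch.Size([1, 4, 32, 32])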
def stylize(args):
device = torch.device("cuda" if args.cuda else "cpu")
with torch.no_grad():
style_model = TransformerNet()
state_dict = torch.load(os.path.join(args.model_dir, args.style+".pth"))
# remove saved deprecated running_* keys in InstanceNorm from the checkpoint
for k in list(state_dict.keys()):
if re.search(r'in\d+\.running_(mean|var)$', k):
del state_dict[k]
style_model.load_state_dict(state_dict)
style_model.to(device)
filenames = os.listdir(args.content_dir)
for filename in filenames:
print("Processing {}".format(filename))
full_path = os.path.join(args.content_dir, filename)
content_image = load_image(full_path, scale=args.content_scale)
content_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
content_image = content_transform(content_image)
content_image = content_image.unsqueeze(0).to(device)
output = style_model(content_image).cpu()
output_path = os.path.join(args.output_dir, filename)
save_image(output_path, output[0])
def main():
arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
arg_parser.add_argument("--content-scale", type=float, default=None,
help="factor for scaling down the content image")
arg_parser.add_argument("--model-dir", type=str, required=True,
help="saved model to be used for stylizing the image.")
arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
arg_parser.add_argument("--style", type=str,
help="style name")
arg_parser.add_argument("--content-dir", type=str, required=True,
help="directory holding the images")
arg_parser.add_argument("--output-dir", type=str, required=True,
help="directory holding the output images")
args = arg_parser.parse_args()
if args.cuda and not torch.cuda.is_available():
print("ERROR: cuda is not available, try running on CPU")
sys.exit(1)
os.makedirs(args.output_dir, exist_ok=True)
stylize(args)
if __name__ == "__main__":
main()
|
py
|
1a59c6fae32e5aaa65ce9346b5c73b88fe7489ad
|
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from .forms import Createform
from .models import Post
from django.contrib import messages
@login_required
def editpost(request, id):
obj= get_object_or_404(Post, id=id)
form = Createform(request.POST or None, instance= obj)
context= {'form': form}
if form.is_valid():
obj= form.save(commit= False)
obj.save()
messages.success(request, "You successfully updated the post")
context= {'form': form}
return render(request, 'posts/edit.html', context)
else:
context= {'form': form,
'error': 'The form was not updated successfully. Please enter in a title and content'}
return render(request,'posts/edit.html' , context)
|
py
|
1a59c743cfdc8f31563b1c5cf99642cf0658c9d6
|
import numpy
import pandas
import xarray as xr
import numpy as np
from dolo.numeric.optimize.ncpsolve import ncpsolve
from dolo.numeric.optimize.newton import newton as newton_solver
from dolo.numeric.optimize.newton import SerialDifferentiableFunction
## TODO: extend for mc process
def response(model, dr, varname, T=40, impulse:float=None):
i_exo = model.symbols["exogenous"].index(varname)
if impulse is None:
try:
impulse = numpy.sqrt( model.exogenous.Σ[i_exo, i_exo] ) # works only for IID/AR1
except:
impulse = numpy.sqrt( model.exogenous.σ ) # works only for IID/AR1
e1 = numpy.zeros(len(model.symbols["exogenous"]))
e1[i_exo] = impulse
m_simul = model.exogenous.response(T, e1)
m_simul = m_simul[:,None,:]
sim = simulate(model, dr, N=1, T=T, driving_process=m_simul, stochastic=False)
irf = sim.sel(N=0)
return irf
def find_index(sim, values):
sh = sim.shape
N = sh[0]
T = sh[1]
indices = np.zeros((N,T), dtype=int)
for n in range(N):
for t in range(T):
v = sim[n,t,:]
ind = np.where((values == v[None,:]).all(axis=1))[0][0]
indices[n,t] = ind
return indices
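# Hypothetical sketch (not part of dolo): find_index maps each simulated
# exogenous vector back to the row index of the matching discretized node.
def _example_find_index():
    values = np.array([[0.0, 1.0], [2.0, 3.0]])
    sim = np.array([[[2.0, 3.0], [0.0, 1.0]]])  # shape (N=1, T=2, n_e=2)
    return find_index(sim, values)  # array([[1, 0]])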
from dolo.numeric.grids import CartesianGrid, UnstructuredGrid
from dolo.algos.results import AlgoResult
def simulate(model, dr, process=None, N=1, T=40, s0=None, i0=None, m0=None,
driving_process=None, seed=42, stochastic=True):
'''
Simulate a model using the specified decision rule.
Parameters
----------
model: Model
dr: decision rule
process:
s0: ndarray
initial state where all simulations start
driving_process: ndarray
realization of exogenous driving process (drawn randomly if None)
N: int
number of simulations
T: int
horizon for the simulations
seed: int
used to initialize the random number generator. Use it to replicate
exact same results among simulations
discard: boolean (False)
if True, then all simulations containing at least one non finite value
are discarded
Returns
-------
xarray.DataArray:
returns a ``T x N x n_v`` array where ``n_v``
is the number of variables.
'''
if isinstance(dr, AlgoResult):
dr = dr.dr
calib = model.calibration
parms = numpy.array(calib['parameters'])
if s0 is None:
s0 = calib['states']
n_x = len(model.symbols["controls"])
n_s = len(model.symbols["states"])
s_simul = numpy.zeros((T, N, n_s))
x_simul = numpy.zeros((T, N, n_x))
s_simul[0, :, :] = s0[None, :]
# are we simulating a markov chain or a continuous process ?
if driving_process is not None:
if len(driving_process.shape)==3:
m_simul = driving_process
sim_type = 'continuous'
if m0 is None:
m0 = model.calibration["exogenous"]
x_simul[0,:,:] = dr.eval_ms(m0[None,:], s0[None,:])[0,:]
elif len(driving_process.shape)==2:
i_simul = driving_process
nodes = dr.exo_grid.nodes
m_simul = nodes[i_simul]
# inds = i_simul.ravel()
# m_simul = np.reshape( np.concatenate( [nodes[i,:][None,:] for i in inds.ravel()], axis=0 ), inds.shape + (-1,) )
sim_type = 'discrete'
x_simul[0,:,:] = dr.eval_is(i0, s0[None,:])[0,:]
else:
raise Exception("Incorrect specification of driving values.")
m0 = m_simul[0,:,:]
else:
from dolo.numeric.processes import ContinuousProcess
if process is None:
if hasattr(dr,'dprocess') and hasattr(dr.dprocess, 'simulate'):
process = dr.dprocess
else:
process = model.exogenous
# detect type of simulation
if isinstance(process, ContinuousProcess):
sim_type = 'continuous'
else:
sim_type = 'discrete'
if sim_type =='discrete':
if i0 is None:
i0 = 0
dp = process
m_simul = dp.simulate(N, T, i0=i0, stochastic=stochastic)
i_simul = find_index(m_simul, dp.values)
m0 = dp.node(i0)
x0 = dr.eval_is(i0, s0[None,:])[0,:]
else:
m_simul = process.simulate(N, T, m0=m0, stochastic=stochastic)
sim_type = 'continuous'
if m0 is None:
m0 = model.calibration["exogenous"]
x0 = dr.eval_ms(m0[None,:], s0[None,:])[0,:]
x_simul[0, :, :] = x0[None, :]
f = model.functions['arbitrage']
g = model.functions['transition']
numpy.random.seed(seed)
mp = m0
for i in range(T):
m = m_simul[i,:,:]
s = s_simul[i,:,:]
if sim_type=='discrete':
i_m = i_simul[i,:]
xx = [dr.eval_is(i_m[ii], s[ii,:][None,:])[0,:] for ii in range(s.shape[0])]
x = np.row_stack(xx)
else:
x = dr.eval_ms(m, s)
x_simul[i,:,:] = x
ss = g(mp, s, x, m, parms)
if i < T-1:
s_simul[i + 1, :, :] = ss
mp = m
if 'auxiliary' not in model.functions: # TODO: find a better test than this
l = [s_simul, x_simul]
varnames = model.symbols['states'] + model.symbols['controls']
else:
aux = model.functions['auxiliary']
a_simul = aux(
m_simul.reshape((N * T, -1)),
s_simul.reshape((N * T, -1)),
x_simul.reshape((N * T, -1)), parms)
a_simul = a_simul.reshape(T, N, -1)
l = [m_simul, s_simul, x_simul, a_simul]
varnames = model.symbols['exogenous'] + model.symbols['states'] + model.symbols[
'controls'] + model.symbols['auxiliaries']
simul = numpy.concatenate(l, axis=2)
if sim_type=='discrete':
varnames = ['_i_m'] + varnames
simul = np.concatenate([i_simul[:,:,None], simul], axis=2)
data = xr.DataArray(
simul,
dims=['T','N','V'],
coords={'T': range(T), 'N': range(N), 'V': varnames}
)
return data
def tabulate(model, dr, state, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs):
import numpy
if isinstance(dr, AlgoResult):
dr = dr.dr
states_names = model.symbols['states']
controls_names = model.symbols['controls']
index = states_names.index(str(state))
if bounds is None:
try:
endo_grid = dr.endo_grid
bounds = [endo_grid.min[index], endo_grid.max[index]]
except:
domain = model.domain
bounds = [domain.min[index], domain.max[index]]
if bounds is None:
raise Exception("No bounds provided for simulation or by model.")
values = numpy.linspace(bounds[0], bounds[1], n_steps)
if s0 is None:
s0 = model.calibration['states']
svec = numpy.row_stack([s0]*n_steps)
svec[:,index] = values
try:
dp = dr.dprocess
except:
dp = model.exogenous.discretize()
if (i0 is None) and (m0 is None):
from dolo.numeric.grids import UnstructuredGrid
if isinstance(dp.grid, UnstructuredGrid):
n_ms = dp.n_nodes
[q,r] = divmod(n_ms,2)
i0 = q-1+r
else:
m0 = model.calibration["exogenous"]
if i0 is not None:
m = dp.node(i0)
xvec = dr.eval_is(i0,svec)
elif m0 is not None:
m = m0
xvec = dr.eval_ms(m0,svec)
mm = numpy.row_stack([m]*n_steps)
l = [mm, svec, xvec]
series = model.symbols['exogenous'] + model.symbols['states'] + model.symbols['controls']
if 'auxiliary' in model.functions:
p = model.calibration['parameters']
pp = numpy.row_stack([p]*n_steps)
avec = model.functions['auxiliary'](mm, svec,xvec,pp)
l.append(avec)
series.extend(model.symbols['auxiliaries'])
import pandas
tb = numpy.concatenate(l, axis=1)
df = pandas.DataFrame(tb, columns=series)
return df
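# Usage sketch (hedged): `tabulate` evaluates the decision rule along a single state dimension
# and returns a pandas DataFrame whose columns are the exogenous, state and control names.
# With an illustrative state 'k' and control 'i':
#
#     df = tabulate(model, dr, 'k', n_steps=50)
#     df.plot(x='k', y='i')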
def tabulate_2d(model, dr, states=None, i0=0, s0=None, n=[12,13]):
import numpy
import xarray as xr
if isinstance(dr, AlgoResult):
dr = dr.dr
if s0 is None:
s0 = model.calibration["states"]
if states is None:
states = model.symbols["states"]
assert(len(states)==2)
domain = model.get_domain()
lps = [numpy.linspace(*domain[s], n[i]) for i,s in enumerate(states)]
i_x = model.symbols["states"].index(states[0])
i_y = model.symbols["states"].index(states[1])
vals = []
vstates = []
s = s0.copy()
for xx in lps[0]:
vv = []
s[i_x] = xx
for yy in lps[1]:
s[i_y] = yy
x = dr.eval_is(i0, s)
vv.append(numpy.concatenate([s,x]))
vals.append(vv)
vv = numpy.array(vals)
controls = model.symbols["states"] + model.symbols["controls"]
# tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords=[lps[0], lps[1], 'V'])
tab = xr.DataArray(vv, dims=[states[0], states[1], 'V'], coords={states[0]:lps[0], states[1]:lps[1], 'V':controls})
return tab
def plot3d(tab, varname):
    # plotly is treated here as an optional dependency and imported lazily; if these names
    # are already imported at module level elsewhere, the lazy import is harmless
    import plotly.graph_objs as go
    from plotly.offline import iplot
X = numpy.array( tab[tab.dims[0]] )
Y = numpy.array( tab[tab.dims[1]] )
Z = numpy.array( tab.loc[:,:,varname] )
data = [
go.Surface(
x=X,
y=Y,
z=Z
)
]
layout = go.Layout(
title='Equity',
autosize=False,
width=500,
height=500,
# xaxis=go.XAxis(title=tab.dims[0]),
# yaxis={'title':tab.dims[1]},
# zaxis={'title':varname},
xaxis=dict(
title='x Axis',
nticks=7,
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
margin=dict(
l=65,
r=50,
b=65,
t=90
)
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename='graph_'+varname)
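# Usage sketch (hedged): `plot3d` expects the DataArray produced by `tabulate_2d` above and the
# name of one tabulated variable, e.g. plot3d(tabulate_2d(model, dr), 'i') for an illustrative
# control 'i'; it requires plotly to be installed.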
def plot_decision_rule(model, dr, state, plot_controls=None, bounds=None, n_steps=100, s0=None, i0=None, m0=None, **kwargs):
    if isinstance(dr, AlgoResult):
        dr = dr.dr
    df = tabulate(model, dr, state, bounds=bounds, n_steps=n_steps, s0=s0, i0=i0, m0=m0)
    from matplotlib import pyplot
    # the tabulated values of the chosen state serve as the x-axis
    values = df[str(state)]
    if isinstance(plot_controls, str):
        cn = plot_controls
        pyplot.plot(values, df[cn], **kwargs)
    else:
        for cn in plot_controls:
            pyplot.plot(values, df[cn], label=cn, **kwargs)
        pyplot.legend()
    pyplot.xlabel('state = {} | mstate = {}'.format(state, i0))
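# Usage sketch (hedged), following the signature above; 'k', 'i' and 'c' are illustrative names:
#
#     plot_decision_rule(model, dr, 'k', plot_controls=['i', 'c'])
#     from matplotlib import pyplot; pyplot.show()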
|
py
|
1a59c9524ad9b8f121c00e96855b96ba16a3fb6c
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib
from django.urls import reverse
from django.utils.translation import ugettext as _
from desktop.conf import DEFAULT_USER
from desktop.lib.paths import get_desktop_root, SAFE_CHARACTERS_URI_COMPONENTS, SAFE_CHARACTERS_URI
from notebook.connectors.base import Notebook
def compress_files_in_hdfs(request, file_names, upload_path, archive_name):
_upload_compress_files_script_to_hdfs(request.fs)
files = [{"value": upload_path + '/' + urllib.quote(file_name.encode('utf-8'), SAFE_CHARACTERS_URI)} for file_name in file_names]
files.append({'value': '/user/' + DEFAULT_USER.get() + '/common/compress_files_in_hdfs.sh'})
start_time = json.loads(request.POST.get('start_time', '-1'))
shell_notebook = Notebook(
name=_('HDFS Compression to %(upload_path)s/hue_compressed.zip') % {'upload_path': upload_path},
isManaged=True,
onSuccessUrl='/filebrowser/view=' + urllib.quote(upload_path.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
)
shell_notebook.add_shell_snippet(
shell_command='compress_files_in_hdfs.sh',
arguments=[{'value': '-u=' + upload_path}, {'value': '-f=' + ','.join(file_names)}, {'value': '-n=' + archive_name}],
archives=[],
files=files,
env_var=[{'value': 'HADOOP_USER_NAME=${wf:user()}'}],
last_executed=start_time
)
return shell_notebook.execute(request, batch=True)
def _upload_compress_files_script_to_hdfs(fs):
if not fs.exists('/user/' + DEFAULT_USER.get() + '/common/'):
fs.do_as_user(DEFAULT_USER.get(), fs.mkdir, '/user/' + DEFAULT_USER.get() + '/common/')
fs.do_as_user(DEFAULT_USER.get(), fs.chmod, '/user/' + DEFAULT_USER.get() + '/common/', 0755)
if not fs.do_as_user(DEFAULT_USER.get(), fs.exists, '/user/' + DEFAULT_USER.get() + '/common/compress_files_in_hdfs.sh'):
fs.do_as_user(DEFAULT_USER.get(), fs.copyFromLocal, get_desktop_root() + '/core/src/desktop/lib/tasks/compress_files/compress_in_hdfs.sh',
'/user/' + DEFAULT_USER.get() + '/common/compress_files_in_hdfs.sh')
fs.do_as_user(DEFAULT_USER.get(), fs.chmod, '/user/' + DEFAULT_USER.get() + '/common/', 0755)
|
py
|
1a59c98abe24e21c6157ddf3660b35d8e7a011b0
|
import tensorflow as tf
import numpy as np
import cv2
import matplotlib.pyplot as plt  # used by SIFT_det when visualize=True
# from .base_model import BaseModel
# from .utils import box_nms
def classical_detector_descriptor(im, **config):
im = np.uint8(im)
if config['method'] == 'sift':
sift = cv2.xfeatures2d.SIFT_create(nfeatures=1500)
keypoints, desc = sift.detectAndCompute(im, None)
responses = np.array([k.response for k in keypoints])
keypoints = np.array([k.pt for k in keypoints]).astype(int)
desc = np.array(desc)
detections = np.zeros(im.shape[:2], np.float)
detections[keypoints[:, 1], keypoints[:, 0]] = responses
descriptors = np.zeros((im.shape[0], im.shape[1], 128), np.float)
descriptors[keypoints[:, 1], keypoints[:, 0]] = desc
elif config['method'] == 'orb':
orb = cv2.ORB_create(nfeatures=1500)
keypoints, desc = orb.detectAndCompute(im, None)
responses = np.array([k.response for k in keypoints])
keypoints = np.array([k.pt for k in keypoints]).astype(int)
desc = np.array(desc)
detections = np.zeros(im.shape[:2], np.float)
detections[keypoints[:, 1], keypoints[:, 0]] = responses
descriptors = np.zeros((im.shape[0], im.shape[1], 32), np.float)
descriptors[keypoints[:, 1], keypoints[:, 0]] = desc
    else:
        raise ValueError("Unknown method '{}': expected 'sift' or 'orb'".format(config['method']))
    detections = detections.astype(np.float32)
descriptors = descriptors.astype(np.float32)
return (detections, descriptors)
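# Usage sketch (hedged): `config` only needs a 'method' key, as read above; the image is any
# grayscale uint8-convertible array.
#
#     det_map, desc_map = classical_detector_descriptor(img_gray, method='sift')
#     # det_map: HxW response map, desc_map: HxWx128 dense descriptors (zeros away from keypoints)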
# from models.classical_detector_descriptors import SIFT_det
def SIFT_det(img, img_rgb, visualize=False, nfeatures=2000):
"""
return:
x_all: np [N, 2] (x, y)
des: np [N, 128] (descriptors)
"""
# Initiate SIFT detector
# pip install opencv-python==3.4.2.16, opencv-contrib-python==3.4.2.16
# https://www.pyimagesearch.com/2015/07/16/where-did-sift-and-surf-go-in-opencv-3/
img = np.uint8(img)
# print("img: ", img)
sift = cv2.xfeatures2d.SIFT_create(contrastThreshold=1e-5)
# find the keypoints and descriptors with SIFT
kp, des = sift.detectAndCompute(img, None)
# print("# kps: {}, descriptors: {}".format(len(kp), des.shape))
x_all = np.array([p.pt for p in kp])
if visualize:
plt.figure(figsize=(30, 4))
plt.imshow(img_rgb)
plt.scatter(x_all[:, 0], x_all[:, 1], s=10, marker='o', c='y')
plt.show()
# return x_all, kp, des
return x_all, des
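# Usage sketch (hedged): file paths below are illustrative only.
#
#     img_gray = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
#     img_rgb = cv2.cvtColor(cv2.imread('frame.png'), cv2.COLOR_BGR2RGB)
#     pts, des = SIFT_det(img_gray, img_rgb, visualize=False)   # pts: [N, 2], des: [N, 128]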
'''
class ClassicalDetectorsDescriptors(BaseModel):
input_spec = {
'image': {'shape': [None, None, None, 1], 'type': tf.float32}
}
default_config = {
'method': 'sift', # 'orb'
'threshold': 0.5,
'nms': 4,
'top_k': 300,
}
trainable = False
def _model(self, inputs, mode, **config):
im = inputs['image']
with tf.device('/cpu:0'):
keypoints, descriptors = tf.map_fn(lambda i: tf.py_func(
lambda x: classical_detector_descriptor(x, **config),
[i],
(tf.float32, tf.float32)),
im, [tf.float32, tf.float32])
prob = keypoints
prob_nms = prob
if config['nms']:
prob_nms = tf.map_fn(lambda p: box_nms(p, config['nms'], min_prob=0.,
keep_top_k=config['top_k']), prob)
pred = tf.cast(tf.greater_equal(prob_nms, config['threshold']), tf.int32)
keypoints = {'prob': prob, 'prob_nms': prob_nms, 'pred': pred}
return {**keypoints, 'descriptors': descriptors}
def _loss(self, outputs, inputs, **config):
raise NotImplementedError
def _metrics(self, outputs, inputs, **config):
pred = outputs['pred']
labels = inputs['keypoint_map']
precision = tf.reduce_sum(pred*labels) / tf.reduce_sum(pred)
recall = tf.reduce_sum(pred*labels) / tf.reduce_sum(labels)
return {'precision': precision, 'recall': recall}
'''
|
py
|
1a59c9e490a41b71811a4e159ebbaa2e1d78daf6
|
# Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
from typing import Any, Dict, List, Sequence
import torch
import torch.nn as nn
from solo.losses.vicreg import vicreg_loss_func
from solo.methods.base import BaseMethod
from solo.utils.misc import gather, get_rank
import torch.nn.functional as F
from solo.losses.oursloss import ours_loss_func
from solo.utils.metrics import corrcoef, pearsonr_cor
class VICReg(BaseMethod):
def __init__(
self,
proj_output_dim: int,
proj_hidden_dim: int,
sim_loss_weight: float,
var_loss_weight: float,
cov_loss_weight: float,
lam: float,
tau_decor: float,
our_loss: str,
**kwargs
):
"""Implements VICReg (https://arxiv.org/abs/2105.04906)
Args:
proj_output_dim (int): number of dimensions of the projected features.
proj_hidden_dim (int): number of neurons in the hidden layers of the projector.
sim_loss_weight (float): weight of the invariance term.
var_loss_weight (float): weight of the variance term.
cov_loss_weight (float): weight of the covariance term.
"""
super().__init__(**kwargs)
self.lam = lam
self.tau_decor = tau_decor
self.our_loss = our_loss
self.sim_loss_weight = sim_loss_weight
self.var_loss_weight = var_loss_weight
self.cov_loss_weight = cov_loss_weight
# projector
self.projector = nn.Sequential(
nn.Linear(self.features_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_hidden_dim),
nn.BatchNorm1d(proj_hidden_dim),
nn.ReLU(),
nn.Linear(proj_hidden_dim, proj_output_dim),
)
@staticmethod
def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parent_parser = super(VICReg, VICReg).add_model_specific_args(parent_parser)
parser = parent_parser.add_argument_group("vicreg")
# projector
parser.add_argument("--proj_output_dim", type=int, default=2048)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
# parameters
parser.add_argument("--sim_loss_weight", default=25, type=float)
parser.add_argument("--var_loss_weight", default=25, type=float)
parser.add_argument("--cov_loss_weight", default=1.0, type=float)
# our loss
parser.add_argument("--lam", type=float, default=0.1)
parser.add_argument("--tau_decor", type=float, default=0.1)
parser.add_argument("--our_loss", type=str, default='False')
return parent_parser
@property
def learnable_params(self) -> List[dict]:
"""Adds projector parameters to the parent's learnable parameters.
Returns:
List[dict]: list of learnable parameters.
"""
extra_learnable_params = [{"params": self.projector.parameters()}]
return super().learnable_params + extra_learnable_params
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
"""Performs the forward pass of the backbone and the projector.
Args:
X (torch.Tensor): a batch of images in the tensor format.
Returns:
Dict[str, Any]: a dict containing the outputs of the parent and the projected features.
"""
out = super().forward(X, *args, **kwargs)
z = self.projector(out["feats"])
return {**out, "z": z}
def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
"""Training step for VICReg reusing BaseMethod training step.
Args:
batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
[X] is a list of size num_crops containing batches of images.
batch_idx (int): index of the batch.
Returns:
torch.Tensor: total loss composed of VICReg loss and classification loss.
"""
out = super().training_step(batch, batch_idx)
class_loss = out["loss"]
feats1, feats2 = out["feats"]
z1 = self.projector(feats1)
z2 = self.projector(feats2)
# ------- vicreg loss -------
total_loss = vicreg_loss_func(
z1,
z2,
sim_loss_weight=self.sim_loss_weight,
var_loss_weight=self.var_loss_weight,
cov_loss_weight=self.cov_loss_weight,
)
self.log("train_vicreg_loss", total_loss, on_epoch=True, sync_dist=True)
with torch.no_grad():
z_std = F.normalize(torch.stack((z1,z2)), dim=-1).std(dim=1).mean()
corr_z = (torch.abs(corrcoef(z1, z2).triu(1)) + torch.abs(corrcoef(z1, z2).tril(-1))).mean()
pear_z = pearsonr_cor(z1, z2).mean()
corr_feats = (torch.abs(corrcoef(feats1, feats2).triu(1)) + torch.abs(corrcoef(feats1, feats2).tril(-1)) ).mean()
pear_feats = pearsonr_cor(feats1, feats2).mean()
### new metrics
metrics = {
"Logits/avg_sum_logits_Z": (torch.stack((z1,z2))).sum(-1).mean(),
"Logits/avg_sum_logits_Z_normalized": F.normalize(torch.stack((z1,z2)), dim=-1).sum(-1).mean(),
"Logits/logits_Z_max": (torch.stack((z1,z2))).max(),
"Logits/logits_Z_min": (torch.stack((z1,z2))).min(),
"Logits/var_Z": (torch.stack((z1,z2))).var(-1).mean(),
"Logits/logits_Z_normalized_max": F.normalize(torch.stack((z1,z2)), dim=-1).max(),
"Logits/logits_Z_normalized_min": F.normalize(torch.stack((z1,z2)), dim=-1).min(),
"MeanVector/mean_vector_Z_max": (torch.stack((z1,z2))).mean(1).max(),
"MeanVector/mean_vector_Z_min": (torch.stack((z1,z2))).mean(1).min(),
"MeanVector/mean_vector_Z_normalized_max": F.normalize(torch.stack((z1,z2)), dim=-1).mean(1).max(),
"MeanVector/mean_vector_Z_normalized_min": F.normalize(torch.stack((z1,z2)), dim=-1).mean(1).min(),
"MeanVector/norm_vector_Z": (torch.stack((z1,z2))).mean(1).mean(0).norm(),
"MeanVector/norm_vector_Z_normalized": F.normalize(torch.stack((z1,z2)), dim=-1).mean(1).mean(0).norm(),
"Backbone/var": (torch.stack((feats1,feats2))).var(-1).mean(),
"Backbone/max": (torch.stack((feats1,feats2))).max(),
"train_z_std": z_std,
"Corr/corr_z": corr_z,
"Corr/pear_z": pear_z,
"Corr/corr_feats": corr_feats,
"Corr/pear_feats": pear_feats,
}
self.log_dict(metrics, on_epoch=True, sync_dist=True)
### new metrics
return total_loss + class_loss
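# Usage sketch (hedged): VICReg is normally instantiated by the solo-learn training script from
# parsed command-line arguments; a minimal manual construction could look like the lines below,
# where `base_kwargs` stands for the (hypothetical, backbone-related) keyword arguments required
# by BaseMethod and is not defined here.
#
#     parser = argparse.ArgumentParser()
#     parser = VICReg.add_model_specific_args(parser)
#     args = parser.parse_args([])
#     # model = VICReg(**vars(args), **base_kwargs)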
|
py
|
1a59caa1073dd9020ec8f42cd46936244d58a372
|
"""
Django settings for MES_srv project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yzov6y2jkz7=(1qh_8w*xre1x%z)7*v1%0p8^@&$2f6x$tq!fk'
GCM_APIKEY = 'AIzaSyCbak3660yhOL_pOvHozDbzRT4l0b2SVzg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
)
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'MES_srv.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MES_srv.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
py
|
1a59cb857685796f892cccacd40ae57685d219e6
|
from collections import OrderedDict
import torch
from torch import optim
class LightValidationStepMixin:
"""
Add val_dataloader and validation_step methods for the case
when val_dataloader returns a single dataloader
"""
def val_dataloader(self):
return self._dataloader(train=False)
def validation_step(self, batch, batch_idx, *args, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc)
if self.on_gpu:
val_acc = val_acc.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_val = loss_val.unsqueeze(0)
val_acc = val_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
})
return output
if batch_idx % 2 == 0:
return val_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
'test_dic': {'val_loss_a': loss_val}
})
return output
class LightValidationMixin(LightValidationStepMixin):
"""
Add val_dataloader, validation_step, and validation_end methods for the case
when val_dataloader returns a single dataloader
"""
def validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from validation_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
val_loss_mean = 0
val_acc_mean = 0
for output in outputs:
val_loss = _get_output_metric(output, 'val_loss')
# reduce manually when using dp
if self.trainer.use_dp or self.trainer.use_ddp2:
val_loss = torch.mean(val_loss)
val_loss_mean += val_loss
# reduce manually when using dp
val_acc = _get_output_metric(output, 'val_acc')
if self.trainer.use_dp or self.trainer.use_ddp2:
val_acc = torch.mean(val_acc)
val_acc_mean += val_acc
val_loss_mean /= len(outputs)
val_acc_mean /= len(outputs)
tqdm_dict = {'val_loss': val_loss_mean.item(), 'val_acc': val_acc_mean.item()}
results = {'progress_bar': tqdm_dict, 'log': tqdm_dict}
return results
class LightValidationStepMultipleDataloadersMixin:
"""
Add val_dataloader and validation_step methods for the case
when val_dataloader returns multiple dataloaders
"""
def val_dataloader(self):
return [self._dataloader(train=False), self._dataloader(train=False)]
def validation_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc)
if self.on_gpu:
val_acc = val_acc.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_val = loss_val.unsqueeze(0)
val_acc = val_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
})
return output
if batch_idx % 2 == 0:
return val_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
'test_dic': {'val_loss_a': loss_val}
})
return output
if batch_idx % 5 == 0:
output = OrderedDict({
f'val_loss_{dataloader_idx}': loss_val,
f'val_acc_{dataloader_idx}': val_acc,
})
return output
class LightValidationMultipleDataloadersMixin(LightValidationStepMultipleDataloadersMixin):
"""
Add val_dataloader, validation_step, and validation_end methods for the case
when val_dataloader returns multiple dataloaders
"""
def validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from validation_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
val_loss_mean = 0
val_acc_mean = 0
i = 0
for dl_output in outputs:
for output in dl_output:
val_loss = output['val_loss']
# reduce manually when using dp
if self.trainer.use_dp:
val_loss = torch.mean(val_loss)
val_loss_mean += val_loss
# reduce manually when using dp
val_acc = output['val_acc']
if self.trainer.use_dp:
val_acc = torch.mean(val_acc)
val_acc_mean += val_acc
i += 1
val_loss_mean /= i
val_acc_mean /= i
tqdm_dict = {'val_loss': val_loss_mean.item(), 'val_acc': val_acc_mean.item()}
result = {'progress_bar': tqdm_dict}
return result
class LightTrainDataloader:
"""Simple train dataloader."""
def train_dataloader(self):
return self._dataloader(train=True)
class LightTestDataloader:
"""Simple test dataloader."""
def test_dataloader(self):
return self._dataloader(train=False)
class LightEmptyTestStep:
"""Empty test step."""
def test_step(self, *args, **kwargs):
return dict()
class LightTestStepMixin(LightTestDataloader):
"""Test step mixin."""
def test_step(self, batch, batch_idx, *args, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_test = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
test_acc = torch.tensor(test_acc)
if self.on_gpu:
test_acc = test_acc.cuda(loss_test.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_test = loss_test.unsqueeze(0)
test_acc = test_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
})
return output
if batch_idx % 2 == 0:
return test_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
'test_dic': {'test_loss_a': loss_test}
})
return output
class LightTestMixin(LightTestStepMixin):
"""Ritch test mixin."""
def test_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from test_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
test_loss_mean = 0
test_acc_mean = 0
for output in outputs:
test_loss = _get_output_metric(output, 'test_loss')
# reduce manually when using dp
if self.trainer.use_dp:
test_loss = torch.mean(test_loss)
test_loss_mean += test_loss
# reduce manually when using dp
test_acc = _get_output_metric(output, 'test_acc')
if self.trainer.use_dp:
test_acc = torch.mean(test_acc)
test_acc_mean += test_acc
test_loss_mean /= len(outputs)
test_acc_mean /= len(outputs)
tqdm_dict = {'test_loss': test_loss_mean.item(), 'test_acc': test_acc_mean.item()}
result = {'progress_bar': tqdm_dict}
return result
class LightTestStepMultipleDataloadersMixin:
"""Test step multiple dataloaders mixin."""
def test_dataloader(self):
return [self._dataloader(train=False), self._dataloader(train=False)]
def test_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_test = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
test_acc = torch.tensor(test_acc)
if self.on_gpu:
test_acc = test_acc.cuda(loss_test.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_test = loss_test.unsqueeze(0)
test_acc = test_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
})
return output
if batch_idx % 2 == 0:
return test_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
'test_dic': {'test_loss_a': loss_test}
})
return output
if batch_idx % 5 == 0:
output = OrderedDict({
f'test_loss_{dataloader_idx}': loss_test,
f'test_acc_{dataloader_idx}': test_acc,
})
return output
class LightTestFitSingleTestDataloadersMixin:
"""Test fit single test dataloaders mixin."""
def test_step(self, batch, batch_idx, *args, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_test = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
test_acc = torch.tensor(test_acc)
if self.on_gpu:
test_acc = test_acc.cuda(loss_test.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_test = loss_test.unsqueeze(0)
test_acc = test_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
})
return output
if batch_idx % 2 == 0:
return test_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
'test_dic': {'test_loss_a': loss_test}
})
return output
class LightTestFitMultipleTestDataloadersMixin:
"""Test fit multiple test dataloaders mixin."""
def test_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_test = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
test_acc = torch.tensor(test_acc)
if self.on_gpu:
test_acc = test_acc.cuda(loss_test.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_test = loss_test.unsqueeze(0)
test_acc = test_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
})
return output
if batch_idx % 2 == 0:
return test_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'test_loss': loss_test,
'test_acc': test_acc,
'test_dic': {'test_loss_a': loss_test}
})
return output
if batch_idx % 5 == 0:
output = OrderedDict({
f'test_loss_{dataloader_idx}': loss_test,
f'test_acc_{dataloader_idx}': test_acc,
})
return output
class LightValStepFitSingleDataloaderMixin:
def validation_step(self, batch, batch_idx, *args, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc)
if self.on_gpu:
val_acc = val_acc.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_val = loss_val.unsqueeze(0)
val_acc = val_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
})
return output
if batch_idx % 2 == 0:
return val_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
'test_dic': {'val_loss_a': loss_val}
})
return output
class LightValStepFitMultipleDataloadersMixin:
def validation_step(self, batch, batch_idx, dataloader_idx, **kwargs):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc)
if self.on_gpu:
val_acc = val_acc.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp:
loss_val = loss_val.unsqueeze(0)
val_acc = val_acc.unsqueeze(0)
# alternate possible outputs to test
if batch_idx % 1 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
})
return output
if batch_idx % 2 == 0:
return val_acc
if batch_idx % 3 == 0:
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
'test_dic': {'val_loss_a': loss_val}
})
return output
if batch_idx % 5 == 0:
output = OrderedDict({
f'val_loss_{dataloader_idx}': loss_val,
f'val_acc_{dataloader_idx}': val_acc,
})
return output
class LightTestMultipleDataloadersMixin(LightTestStepMultipleDataloadersMixin):
def test_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from test_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
test_loss_mean = 0
test_acc_mean = 0
i = 0
for dl_output in outputs:
for output in dl_output:
test_loss = output['test_loss']
# reduce manually when using dp
if self.trainer.use_dp:
test_loss = torch.mean(test_loss)
test_loss_mean += test_loss
# reduce manually when using dp
test_acc = output['test_acc']
if self.trainer.use_dp:
test_acc = torch.mean(test_acc)
test_acc_mean += test_acc
i += 1
test_loss_mean /= i
test_acc_mean /= i
tqdm_dict = {'test_loss': test_loss_mean.item(), 'test_acc': test_acc_mean.item()}
result = {'progress_bar': tqdm_dict}
return result
class LightTestOptimizerWithSchedulingMixin:
def configure_optimizers(self):
if self.hparams.optimizer_name == 'lbfgs':
optimizer = optim.LBFGS(self.parameters(), lr=self.hparams.learning_rate)
else:
optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
return [optimizer], [lr_scheduler]
class LightTestMultipleOptimizersWithSchedulingMixin:
def configure_optimizers(self):
if self.hparams.optimizer_name == 'lbfgs':
optimizer1 = optim.LBFGS(self.parameters(), lr=self.hparams.learning_rate)
optimizer2 = optim.LBFGS(self.parameters(), lr=self.hparams.learning_rate)
else:
optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
class LightTestOptimizersWithMixedSchedulingMixin:
def configure_optimizers(self):
if self.hparams.optimizer_name == 'lbfgs':
optimizer1 = optim.LBFGS(self.parameters(), lr=self.hparams.learning_rate)
optimizer2 = optim.LBFGS(self.parameters(), lr=self.hparams.learning_rate)
else:
optimizer1 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
optimizer2 = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 4, gamma=0.1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
return [optimizer1, optimizer2], \
[{'scheduler': lr_scheduler1, 'interval': 'step'}, lr_scheduler2]
def _get_output_metric(output, name):
if isinstance(output, dict):
val = output[name]
else: # if it is 2level deep -> per dataloader and per batch
val = sum(out[name] for out in output) / len(output)
return val
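# Usage sketch (hedged): these mixins are meant to be combined with a LightningModule that
# provides `forward`, `loss`, `_dataloader` and hyperparameters; `TestModelBase`, `trainer`
# and `hparams` below are hypothetical stand-ins for such pieces.
#
#     class DebugModel(LightTrainDataloader, LightValidationMixin, LightTestMixin, TestModelBase):
#         pass
#
#     trainer.fit(DebugModel(hparams))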
|
py
|
1a59cba7428a7564cbaf81e9e85b0a8558d99143
|
import torch
import torchvision
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.autograd import Variable
from ssd import SSD300
from encoder import DataEncoder
from PIL import Image, ImageDraw
# Load model
net = SSD300()
net.load_state_dict(torch.load('model/net.pth'))
net.eval()
# Load test image
img = Image.open('./image/img1.jpg')
img1 = img.resize((300,300))
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
img1 = transform(img1)
# Forward
loc, conf = net(Variable(img1[None,:,:,:], volatile=True))
# Decode
data_encoder = DataEncoder()
boxes, labels, scores = data_encoder.decode(loc.data.squeeze(0), F.softmax(conf.squeeze(0)).data)
draw = ImageDraw.Draw(img)
for box in boxes:
box[::2] *= img.width
box[1::2] *= img.height
draw.rectangle(list(box), outline='red')
img.show()
|
py
|
1a59cc39b7374481c071af3c2a93a9a1ad09e726
|
#!/usr/bin/python3
from simulation import *
from integrators import *
import utils
import schemas
from pyspark.sql.session import SparkSession
import os
"""arguments"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dt", help="delta t for calculating steps",
type=float)
parser.add_argument("target", help="target time to reach in the simulation",
type=float)
parser.add_argument("integrator", help="integrator to use for running the simulation",
choices=['eul1', 'eul2', 'rk4', 'vlf'])
parser.add_argument("input", help="path(s) to input data")
parser.add_argument("--dtout", help="time interval between cluster snapshots",
default=None, type=float)
parser.add_argument("--dtdiag", help="time interval between cdiagnosting output",
default=None, type=float)
parser.add_argument("--saveDiag", help="should diagnostic be saved to disk instead of printed",
nargs="?", const=True, default=False, type=bool)
parser.add_argument("--addT", help="should t be added to cluster snapshots",
nargs="?", const=True, default=False, type=bool)
parser.add_argument("-l", "--limit", help="limit the number of input rows to read",
nargs="?", const=1000, type=int)
parser.add_argument("-o", "--outputDir", help="output path",
default="../output/")
parser.add_argument("-f", help="format to save output in",
choices=['parquet', 'csv'], default="parquet")
parser.add_argument("--comp", help="format to save output in",
type=str, default="none")
parser.add_argument("-G", help="gravitational constant for the simulation",
default=1, type=float)
args = parser.parse_args()
"""/arguments"""
"""adjust spark settings"""
spark = SparkSession.builder.getOrCreate()
spark.conf.set("spark.sql.caseSensitive", "true")
"""load data"""
df_t0 = utils.load_df(args.input,
schema=schemas.clust, part="id", limit=args.limit)
"""setup simulation"""
methods = {
"eul1": IntergratorEuler(args.dt, args.G),
"eul2": IntergratorEuler2(args.dt, args.G),
"rk4": IntegratorRungeKutta4(args.dt, args.G),
"vlf": IntegratorLeapfrog(args.dt, args.G),
}
nameStr = utils.clean_str(spark.conf.get("spark.app.name")) + "-" + spark.conf.get("spark.app.id")
sopts = utils.SaveOptions(os.path.join(args.outputDir, nameStr), fformat=args.f,
compression=args.comp, header="true")
sim = Simulation(df_t0, methods[args.integrator], args.target, sopts,
add_t_snap=args.addT, dt_out=args.dtout, dt_diag=args.dtdiag, saveDiag=args.saveDiag)
"""run"""
sim.run()
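# Example invocation (hedged; the script name, paths and numeric values are illustrative only):
#
#     spark-submit run_simulation.py 0.01 10.0 rk4 ../input/cluster.parquet --dtout 1.0 -o ../output/
#
# i.e. the positional arguments dt, target, integrator and input defined above, followed by any
# of the optional flags.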
|
py
|
1a59cd64f76d5061aa6ab67e80cb63d46cbc9d3d
|
# Configuration file for ipcontroller.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = ''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
#c.BaseIPythonApplication.profile = 'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# BaseParallelApplication(BaseIPythonApplication) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## whether to cleanup old logfiles before starting
#c.BaseParallelApplication.clean_logs = False
## String id to add to runtime files, to prevent name collisions when using
# multiple clusters with a single profile simultaneously.
#
# When set, files will be named like: 'ipcontroller-<cluster_id>-engine.json'
#
# Since this is text inserted into filenames, typical recommendations apply:
# Simple character strings are ideal, and spaces are not recommended (but should
# generally work).
#c.BaseParallelApplication.cluster_id = ''
c.BaseParallelApplication.cluster_id = 'mpi'
## whether to log to a file
#c.BaseParallelApplication.log_to_file = False
## The ZMQ URL of the iplogger to aggregate logging.
#c.BaseParallelApplication.log_url = ''
## Set the working dir for the process.
#c.BaseParallelApplication.work_dir = 'C:\\Users\\adm'
#------------------------------------------------------------------------------
# IPControllerApp(BaseParallelApplication) configuration
#------------------------------------------------------------------------------
## Whether to create profile dir if it doesn't exist.
#c.IPControllerApp.auto_create = True
## JSON filename where client connection info will be stored.
#c.IPControllerApp.client_json_file = 'ipcontroller-client.json'
## JSON filename where engine connection info will be stored.
#c.IPControllerApp.engine_json_file = 'ipcontroller-engine.json'
## ssh url for engines to use when connecting to the Controller processes. It
# should be of the form: [user@]server[:port]. The Controller's listening
# addresses must be accessible from the ssh server
#c.IPControllerApp.engine_ssh_server = ''
## import statements to be run at startup. Necessary in some environments
#c.IPControllerApp.import_statements = []
## The external IP or domain name of the Controller, used for disambiguating
# engine and client connections.
#c.IPControllerApp.location = 'DESKTOP-10711'
## Reload engine state from JSON file
#c.IPControllerApp.restore_engines = False
## Whether to reuse existing json connection files. If False, connection files
# will be removed on a clean exit.
#c.IPControllerApp.reuse_files = False
## ssh url for clients to use when connecting to the Controller processes. It
# should be of the form: [user@]server[:port]. The Controller's listening
# addresses must be accessible from the ssh server
#c.IPControllerApp.ssh_server = ''
## Use threads instead of processes for the schedulers
#c.IPControllerApp.use_threads = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# RegistrationFactory(SessionFactory) configuration
#------------------------------------------------------------------------------
## The Base Configurable for objects that involve registration.
## The IP address for registration. This is generally either '127.0.0.1' for
# loopback only or '*' for all interfaces.
#c.RegistrationFactory.ip = ''
## The port on which the Hub listens for registration.
#c.RegistrationFactory.regport = 0
## The 0MQ transport for communications. This will likely be the default of
# 'tcp', but other values include 'ipc', 'epgm', 'inproc'.
#c.RegistrationFactory.transport = 'tcp'
## The 0MQ url used for registration. This sets transport, ip, and port in one
# variable. For example: url='tcp://127.0.0.1:12345' or url='epgm://*:90210'
#c.RegistrationFactory.url = ''
#------------------------------------------------------------------------------
# HubFactory(RegistrationFactory) configuration
#------------------------------------------------------------------------------
## The Configurable for setting up a Hub.
## IP on which to listen for client connections. [default: loopback]
#c.HubFactory.client_ip = ''
## 0MQ transport for client connections. [default : tcp]
#c.HubFactory.client_transport = 'tcp'
## Client/Engine Port pair for Control queue
#c.HubFactory.control = ()
## The class to use for the DB backend
#
# Options include:
#
# SQLiteDB : SQLite
# MongoDB  : use MongoDB
# DictDB   : in-memory storage (fastest, but be mindful of memory growth of the Hub)
# NoDB     : disable database altogether (default)
#c.HubFactory.db_class = 'DictDB'
## IP on which to listen for engine connections. [default: loopback]
#c.HubFactory.engine_ip = ''
## 0MQ transport for engine connections. [default: tcp]
#c.HubFactory.engine_transport = 'tcp'
## PUB/ROUTER Port pair for Engine heartbeats
#c.HubFactory.hb = ()
## Client/Engine Port pair for IOPub relay
#c.HubFactory.iopub = ()
## Monitor (SUB) port for queue traffic
#c.HubFactory.mon_port = 0
## IP on which to listen for monitor messages. [default: loopback]
#c.HubFactory.monitor_ip = ''
## 0MQ transport for monitor messages. [default : tcp]
#c.HubFactory.monitor_transport = 'tcp'
## Client/Engine Port pair for MUX queue
#c.HubFactory.mux = ()
## PUB port for sending engine status notifications
#c.HubFactory.notifier_port = 0
## Engine registration timeout in seconds [default:
# max(30,10*heartmonitor.period)]
#c.HubFactory.registration_timeout = 0
## Client/Engine Port pair for Task queue
#c.HubFactory.task = ()
#------------------------------------------------------------------------------
# TaskScheduler(SessionFactory) configuration
#------------------------------------------------------------------------------
## Python TaskScheduler object.
#
# This is the simplest object that supports msg_id based DAG dependencies.
# *Only* task msg_ids are checked, not msg_ids of jobs submitted via the MUX
# queue.
## specify the High Water Mark (HWM) for the downstream socket in the Task
# scheduler. This is the maximum number of allowed outstanding tasks on each
# engine.
#
# The default (1) means that only one task can be outstanding on each engine.
# Setting TaskScheduler.hwm=0 means there is no limit, and the engines continue
# to be assigned tasks while they are working, effectively hiding network
# latency behind computation, but can result in an imbalance of work when
# submitting many heterogenous tasks all at once. Any positive value greater
# than one is a compromise between the two.
#c.TaskScheduler.hwm = 1
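## An illustrative override (not part of the generated defaults): allow a small per-engine
# backlog instead of strict one-task-at-a-time scheduling.
#c.TaskScheduler.hwm = 2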
## select the task scheduler scheme [default: Python LRU] Options are: 'pure',
# 'lru', 'plainrandom', 'weighted', 'twobin','leastload'
#c.TaskScheduler.scheme_name = 'leastload'
#------------------------------------------------------------------------------
# HeartMonitor(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A basic HeartMonitor class.
# pingstream: a PUB stream
# pongstream: a ROUTER stream
# period: the period of the heartbeat in milliseconds
## Whether to include every heartbeat in debugging output.
#
# Has to be set explicitly, because there will be *a lot* of output.
#c.HeartMonitor.debug = False
## Allowed consecutive missed pings from controller Hub to engine before
# unregistering.
#c.HeartMonitor.max_heartmonitor_misses = 10
## The frequency at which the Hub pings the engines for heartbeats (in ms)
#c.HeartMonitor.period = 3000
#------------------------------------------------------------------------------
# DictDB(BaseDB) configuration
#------------------------------------------------------------------------------
## Basic in-memory dict-based object for saving Task Records.
#
# This is the first object to present the DB interface for logging tasks out of
# memory.
#
# The interface is based on MongoDB, so adding a MongoDB backend should be
# straightforward.
## The fraction by which the db should culled when one of the limits is exceeded
#
# In general, the db size will spend most of its time with a size in the range:
#
# [limit * (1-cull_fraction), limit]
#
# for each of size_limit and record_limit.
#c.DictDB.cull_fraction = 0.1
## The maximum number of records in the db
#
# When the history exceeds this size, the first record_limit * cull_fraction
# records will be culled.
#c.DictDB.record_limit = 1024
## The maximum total size (in bytes) of the buffers stored in the db
#
# When the db exceeds this size, the oldest records will be culled until the
# total size is under size_limit * (1-cull_fraction). default: 1 GB
#c.DictDB.size_limit = 1073741824
#------------------------------------------------------------------------------
# SQLiteDB(BaseDB) configuration
#------------------------------------------------------------------------------
## SQLite3 TaskRecord backend.
## The filename of the sqlite task database. [default: 'tasks.db']
#c.SQLiteDB.filename = 'tasks.db'
## The directory containing the sqlite task database. The default is to use the
# cluster_dir location.
#c.SQLiteDB.location = ''
## The SQLite Table to use for storing tasks for this session. If unspecified, a
# new table will be created with the Hub's IDENT. Specifying the table will
# result in tasks from previous sessions being available via Clients' db_query
# and get_result methods.
#c.SQLiteDB.table = 'ipython-tasks'
|
py
|
1a59cd7ce197294cb7d3f8fdf88e3ce45a5a625c
|
#!/usr/bin/env python
#
# run phmmer against comma separated list of Uniprot IDs.
# produce csv of pairwise match alignment.
#
#
#
#
#
import argparse
import os
import sys
import logging
import traceback
import pandas as pd
gitpath=os.path.expanduser("~/git/cshlwork")
sys.path.append(gitpath)
from protlib import uniprot
from protlib import phmmer
def indexbypacc(lod):
logging.debug(f"indexing uniprot list of dicts len: {len(lod)}")
upbypacc = {}
for p in lod:
pacc = p['proteinacc']
#if pacc == "A0A0J9YTW6":
# logging.debug("Indexing later missing pacc! A0A0R4J0X7")
seq = p['sequence']
upbypacc[pacc] = p
logging.debug(f"produced indexed dict len: {len(upbypacc)}")
return upbypacc
def parse_pairfile(filename):
f = open(filename)
lines = f.readlines()
dupelist = []
lnum = 0
knum = 0
for line in lines:
(p1, p2) = line.split(',')
p1 = p1.strip()
p2 = p2.strip()
if p2 != "NA":
dupelist.append( (p1, p2) )
else:
knum += 1
#logging.debug("skipping NA target. ")
lnum += 1
logging.debug(f" processed {lnum} lines. skipped {knum} NAs. produced {len(dupelist)} items in dupelist[0] = {dupelist[0]}")
#logging.debug(f"dupelist: {dupelist}")
return dupelist
def add_altcodes(upbypacc, infile):
'''
upbypacc { <pacc> : { 'proteinacc' : <pacc>,
'sequence' : <seq> }
,
,
,
}
altcodes:
cat <uniprot>.dat | grep "^AC" > <altcodes>.txt
AC Q9CQV8; O70455; Q3TY33; Q3UAN6;
AC P35213;
AC P62259; P29360; P42655; Q63631;
'''
logging.debug(f"len upbypacc before: {len(upbypacc)}")
nadded = 0
nmissing = 0
try:
f = open(infile)
lines = f.readlines()
for line in lines:
# remove leading AC
fields = line.split()[1:]
#logging.debug(f"fields: {fields}")
if len(fields) > 1:
#logging.debug("more than one field.")
ecode = fields[0].replace(';','')
try:
entry = upbypacc[ecode]
for alt in fields[1:]:
alt = alt.replace(';','')
upbypacc[alt] = entry
#logging.debug(f"added alt {alt} for entry code {ecode}")
nadded += 1
except KeyError:
#logging.warn(f"entry {ecode} not found in upbypacc.")
nmissing += 1
except IOError:
logging.error(f"could not read file {infile}")
traceback.print_exc(file=sys.stdout)
finally:
f.close()
logging.debug(f"len ubypacc after: {len(upbypacc)} {nadded} alts added. {nmissing} missing.")
def parse_filebase(filepath):
'''
gives back filepath minus the last dot extension, or the
    same filepath if there is no extension.
'''
return os.path.splitext(filepath)[0]
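# Note (hedged): run_phmmer below refers to helpers such as get_default_config,
# parse_uniprot_fasta, write_sequences, execute_phmmer and get_phmmer_df without a module
# prefix; they presumably come from protlib.phmmer / protlib.uniprot, which the __main__
# block below uses through module-qualified names instead.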
def run_phmmer(pairlist, uniprot_fasta, uniprot_altcodes, pairtfa, targettfa):
config = get_default_config()
up = parse_uniprot_fasta(uniprot_fasta)
logging.debug(f"up len: {len(up)}")
upbypacc = indexbypacc(up)
add_altcodes(upbypacc, uniprot_altcodes)
logging.debug(f"upbypacc len: {len(upbypacc)}")
write_sequences( pairlist, upbypacc, pairtfa, targettfa )
outfile, exclude_list, cidgidmap = execute_phmmer(config, pairtfa, version='current')
logging.info(f"wrote phmmer output to {outfile}")
df = get_phmmer_df(config, pairtfa)
logging.debug(f"df: {df}")
return df
def get_match(query, target, df):
logging.debug(f"query={query} target={target}")
qdf = df[df['query'] == query]
row = qdf[qdf['target'] == target]
if len(row) > 1 :
logging.warning(f'multiple matches for query={query} target={target} ')
return None
elif len(row) == 1:
r = row.iloc[0]
eval = r['eval']
score =r['score']
bias = r['bias']
return (eval, score, bias)
else:
logging.warning(f'no matches for query={query} target={target} ')
return None
def make_evaltable(pdf, pairlist, evalfile ):
#config = get_default_config()
#pdf = pd.read_csv(phmmerdf, index_col=0)
pdf.drop_duplicates(inplace=True,ignore_index=True)
#dupelist = parse_dupepairs()
lod = []
for tup in pairlist:
(p1, p2) = tup
logging.debug(f"looking for {p1} -> {p2}")
rv = get_match(p1, p2, pdf)
if rv is not None:
(eval, score, bias ) = rv
lod.append( { 'query' : p1,
'target' : p2,
'eval' : eval,
'score' : score,
'bias' : bias,
}
)
logging.debug(f"dupelist length: {len(pairlist)}")
logging.debug(f"matchlist length: {len(lod)}")
edf = pd.DataFrame(lod)
edf.to_csv(evalfile)
logging.debug(f"wrote match df to {evalfile}")
def split_pairlist(pairlist):
qlist = []
tlist = []
for (q, t) in pairlist:
qlist.append(q)
tlist.append(t)
return (qlist, tlist)
if __name__=='__main__':
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(filename)s:%(lineno)d %(name)s.%(funcName)s(): %(message)s'
logging.basicConfig(format=FORMAT)
logging.getLogger().setLevel(logging.WARNING)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug',
action="store_true",
dest='debug',
help='debug logging')
parser.add_argument('-v', '--verbose',
action="store_true",
dest='verbose',
help='verbose logging')
parser.add_argument('pairfile',
metavar='pairfile',
type=str,
help='')
parser.add_argument('uniprotdat',
metavar='uniprotdat',
default=os.path.expanduser('~/data/uniprot/uniprot_all_vertebrates.dat'),
nargs="?",
type=str,
help='A uniprot .dat database with sequences for all queries.')
# alt codes now handled natively by uniprot.py
# any tfa files created will use whatever accession is in list.
#parser.add_argument('uniprotalt',
# metavar='uniprotalt',
# default=os.path.expanduser("~/project/hamsini2/uniprot_all_rodents_altcodes.txt"),
# type=str,
# help='')
args= parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
fbase = parse_filebase(args.pairfile)
querytfa = f"{fbase}_query.tfa"
targettfa = f"{fbase}_targets.tfa"
phdf = f"{fbase}_phdf.csv"
evalfile = f"{fbase}_scores.csv"
logging.debug(f"fbase={fbase} querytfa={querytfa} targettffa={targettfa} phdf={phdf}")
logging.debug(f"uniprotdat={args.uniprotdat}")
pairlist = parse_pairfile(args.pairfile)
(querylist, targetlist) = split_pairlist(pairlist)
logging.debug(f"qlist[:2] = {querylist[:2]} tlist[:2] = {targetlist[:2]} ")
logging.info(f"Getting uniprot from {args.uniprotdat}...")
uc = uniprot.get_default_config()
upbypacc = uniprot.parse_uniprot_dat(uc, args.uniprotdat)
logging.info(f"Creating tfa files: query={querytfa} db={targettfa}")
uniprot.write_tfa_fromlist(querylist, upbypacc, querytfa)
uniprot.write_tfa_fromlist(targetlist, upbypacc, targettfa)
pc = phmmer.get_default_config()
logging.info(f"Running phmmer query={querytfa} db={targettfa}")
pdf = phmmer.get_phmmer_df(pc, querytfa, targettfa)
pdf.to_csv(phdf)
logging.debug(f"Wrote phmmer DF to {phdf}")
make_evaltable(pdf, pairlist, evalfile )
|
py
|
1a59ce1e0dff4e46bf1192b50fa0f6172b1633ff
|
import enum
class ItemProperty(enum.IntEnum):
QUALITY = 0
FLAW = 1
class Item(enum.IntEnum):
MELEE = 0
RANGED = 1
AMMO = 2
ARMOR = 3
CONTAINER = 4
OTHER = 5
class ItemAvailability(enum.IntEnum):
COMMON = 0
SCARCE = 1
RARE = 2
EXOTIC = 3
class MeleeReach(enum.IntEnum):
PERSONAL = 0
VERY_SHORT = 1
SHORT = 2
AVERAGE = 3
LONG = 4
VERY_LONG = 5
MASSIVE = 6
class MeleeGroup(enum.IntEnum):
BASIC = 0
CAVALRY = 1
FENCING = 2
BRAWLING = 3
FLAIL = 4
PARRY = 5
POLEARM = 6
TWO_HANDED = 7
class RangedGroup(enum.IntEnum):
BLACKPOWDER = 0
BOW = 1
CROSSBOW = 2
ENGINEERING = 3
ENTANGLING = 4
EXPLOSIVES = 5
SLING = 6
THROWING = 7
class AmmunitionGroup(enum.IntEnum):
BLACK_POWDER_AND_ENGINEERING = 0
BOW = 1
CROSSBOW = 2
SLING = 3
class ArmorLocation(enum.IntEnum):
ARMS = 0
BODY = 1
LEGS = 2
HEAD = 3
class ArmorGroup(enum.IntEnum):
SOFT_LEATHER = 0
BOILED_LEATHER = 1
MAIL = 2
PLATE = 3
class Attribute(enum.IntEnum):
NONE = 0
WS = 1
BS = 2
S = 3
T = 4
II = 5
AG = 6
DEX = 7
INT = 8
WP = 9
FEL = 10
VARIOUS = 11
class Skill(enum.IntEnum):
BASIC = 0
ADVANCED = 1
MIXED = 2
class StatusTier(enum.IntEnum):
BRASS = 0
SILVER = 1
GOLD = 2
class StatusStanding(enum.IntEnum):
ZERO = 0
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 6
SEVEN = 7
class CareerCass(enum.IntEnum):
ACADEMIC = 0
BURGHERS = 1
COURTIER = 2
PEASANT = 3
RANGER = 4
RIVERFOLK = 5
ROGUE = 6
WARRIOR = 7
class Species(enum.IntEnum):
HUMAN = 0
HALFLING = 1
DWARF = 2
HIGH_ELF = 3
WOOD_ELF = 4
class Mutation(enum.IntEnum):
PHYSICAL = 0
MENTAL = 1
|
py
|
1a59ceb676e4fcba67ecd635baa8ff84295344fa
|
from copy import copy
from mysql.connector import MySQLConnection, Error
from python_mysql_dbconfig import read_db_config
import sys
import csv
import boto3
import json
import socket
def query_with_fetchone(query2run,secret,region):
try:
# Grab MySQL connection and database settings. We are using AWS Secrets Manager,
# but you could use another service like Hashicorp Vault
# We cannot use Apache Airflow to store these as this script runs stand alone
secret_name = secret
region_name = region
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
info=json.loads(get_secret_value_response['SecretString'])
pw=info['password']
un=info['username']
hs=info['host']
db=info['database']
# Output to the log so we can see and confirm WHERE we are running and WHAT
# we are connecting to
print("Connecting to ",str(hs)," database ", str(db), " as user ", str(un))
print("Database host IP is :", socket.gethostbyname(hs))
print("Source IP is ", socket.gethostname())
conn = MySQLConnection(user=un, password=pw, host=hs, database=db)
cursor = conn.cursor()
query=query2run
print("Query is", str(query))
cursor.execute(query)
records = cursor.fetchall()
# write results to a temp CSV file, closing the file handle when done
with open("temp.csv", "w", newline="") as csvfile:
csv.writer(csvfile).writerows(records)
print("Records exported:")
for row in records:
print(row[0],",",row[1],",",row[2],",",row[3],",",row[4],",",row[5], ",",row[6],",",row[7] )
except Error as e:
print(e)
sys.exit(1)
finally:
cursor.close()
conn.close()
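# The secret fetched above is expected to be a JSON document with (at least)
# the keys looked up in query_with_fetchone(); values here are illustrative
# placeholders only:
#
# {"username": "...", "password": "...", "host": "...", "database": "..."}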
def upload_to_s3(s3bucket,s3folder,region):
# We will upload the temp (temp.csv) file and copy it based on the input params of the script (bucket and dir/file)
try:
s3 = boto3.client('s3', region_name=region)
s3.upload_file('temp.csv',s3bucket,s3folder)
except FileNotFoundError:
print("The file was not found")
return False
except Exception as e:
print(e)
sys.exit(1)
if __name__ == '__main__':
# make sure all five positional arguments were supplied
if len(sys.argv) < 6:
raise SystemExit(f"Usage: {sys.argv[0]} <s3 bucket> <s3 file> <query> <secret> <region>")
# The script needs the following arguments to run
# 1. Target S3 bucket where the output of the SQL script will be copied
# 2. Target S3 folder/filename
# 3. The query to execute
# 4. The parameter store (we use AWS Secrets) which holds the values on where to find the MySQL database
# 5. The AWS region
s3bucket=sys.argv[1]
s3folder=sys.argv[2]
query2run=sys.argv[3]
secret=sys.argv[4]
region=sys.argv[5]
query_with_fetchone(query2run,secret,region)
upload_to_s3(s3bucket,s3folder,region)
# demo command to test this from the cli
# for Cloud based MySQL
# python app/read-data-q.py ricsue-airflow-hybrid period1/temp.csv "select * from customers WHERE location = 'Poland' AND (date BETWEEN '2022-01-01 14:15:55' AND '2022-09-29 10:15:55')" rds-airflow-hybrid eu-west-2
# for local/remote based MySQL
# python app/read-data-q.py ricsue-airflow-hybrid period1/temp2.csv "select * from customers WHERE location = 'China' AND (date BETWEEN '2022-01-01 14:15:55' AND '2022-09-29 10:15:55')" localmysql-airflow-hybrid eu-west-2
# other queries you can try, for example
# "select * from customers WHERE location = '{country}' AND (date BETWEEN '{start}' AND '{end}')".format(country=country,start=start,end=end)
|
py
|
1a59d071e8635c4210290e9828a48645cf012981
|
from ._indicadores import Indicadores
from ._pib import PIB
from ._denue import DENUE
from ._marco_geoestadistico import MarcoGeoestadistico
from ._ruteo import Ruteo
|
py
|
1a59d254673a6ae3c4eb6f2fa89e7217fef50b9b
|
from datetime import timedelta
import exif_editor as ee
def test_add_mo_to_str():
offset = timedelta(days=30, hours=12)
assert ee.offset_datetime(b'2020:08:01 10:01:00', offset) == b'2020:08:31 22:01:00'
assert ee.offset_datetime(b'2020:11:01 10:01:00', offset) == b'2020:12:01 22:01:00'
assert ee.offset_datetime(b'2020:12:02 14:01:00', offset) == b'2021:01:02 02:01:00'
|
py
|
1a59d2e8fa9256ec169f8d084992a6e3a91907fc
|
import time
from typing import Optional, Union, List, Dict, Tuple
import uuid
import aiohttp
from blob import Context
from config import Config
from helpers import userHelper
from lib import logger
from objects.constants import Privileges, Countries
from objects.constants.BanchoRanks import BanchoRanks
from objects.constants.GameModes import GameModes
from objects.constants.IdleStatuses import Action
from objects.constants.KurikkuPrivileges import KurikkuPrivileges
from objects.constants.Modificators import Mods
from objects.constants.PresenceFilter import PresenceFilter
from packets.Builder.index import PacketBuilder
from objects.Channel import Channel
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from objects.TypedDicts import TypedStats
from objects.BanchoObjects import Message
from objects.Multiplayer import Match
# I'd like to use a construction like <class>.__dict__.update(...) here,
# but classes that define __slots__ have no __dict__, sadly ;-;
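# For example, with __slots__ defined, `self.__dict__.update(kwargs)` would
# raise AttributeError, hence the explicit per-field update() methods below.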
class StatsMode:
__slots__ = ("game_mode", "total_score", "ranked_score", "pp",
"accuracy", "total_plays", "playtime", "max_combo", "leaderboard_rank")
def __init__(self):
self.total_score: int = 0
self.ranked_score: int = 0
self.pp: int = 0
self.accuracy: float = 0.00
self.total_plays: int = 0
self.playtime: int = 0
self.leaderboard_rank: int = 0
def update(self, **kwargs: 'TypedStats'):
self.total_score = kwargs.get('total_score', 0)
self.ranked_score = kwargs.get('ranked_score', 0)
self.pp = kwargs.get('pp', 0)
self.accuracy = kwargs.get('accuracy', 0)
self.total_plays = kwargs.get('total_plays', 0)
self.playtime = kwargs.get('playtime', 0)
self.leaderboard_rank = kwargs.get('leaderboard_rank', 0)
class Status:
__slots__ = (
'action', 'action_text', 'map_md5',
'mods', 'mode', 'map_id'
)
def __init__(self):
self.action: Action = Action.Idle
self.action_text: str = ''
self.map_md5: str = ''
self.mode: GameModes = GameModes.STD
self.mods: Mods = Mods.NoMod
self.map_id: int = 0
def update(self, **kwargs):
self.action = Action(kwargs.get('action', 0))
self.action_text = kwargs.get('action_text', '')
self.map_md5 = kwargs.get('map_md5', '')
self.mode = GameModes(kwargs.get('mode', 0))
self.mods = Mods(kwargs.get('mods', 0))
self.map_id = kwargs.get('map_id', 0)
class Player:
def __init__(self, user_id: Union[int], user_name: Union[str],
privileges: Union[int], utc_offset: Optional[int] = 0,
pm_private: bool = False, silence_end: int = 0, is_tourneymode: bool = False,
is_bot: bool = False, ip: str = ''):
self.token: str = self.generate_token()
self.id: int = user_id
self.name: str = user_name
self.ip: str = ip
self.privileges: int = privileges
self.selected_game_mode: GameModes = GameModes.STD
self.stats: Dict[GameModes, StatsMode] = {mode: StatsMode() for mode in GameModes} # setup dictionary with stats
self.pr_status: Status = Status()
self.spectators: List[Player] = []
self.spectating: Optional[Player] = None
self.country: Tuple[int, str] = (0, 'XX')
self.location: Tuple[float, float] = (0.0, 0.0)
self.timezone: int = 24 + utc_offset
self.timezone_offset: int = utc_offset
self.pm_private: bool = pm_private # as I understand it, this means only friends can message this user
self.friends: Union[List[int]] = []
self.away_msg: Optional[str] = None
self.silence_end: int = silence_end
self.presence_filter: PresenceFilter = PresenceFilter(1)
self.bot_np: Optional[dict] = None # TODO: Beatmap
self._match: Optional['Match'] = None
self.queue: bytearray = bytearray() # main thing
self.login_time: int = int(time.time())
self.last_packet_unix: int = int(time.time())
self.is_tourneymode: bool = is_tourneymode
self.id_tourney: int = -1
self.is_in_lobby: bool = False
self.is_bot: bool = is_bot
self.tillerino: List[Union[int, Mods]] = [0, Mods(0), -1.0] # 1 - map id, 2 - current_mods, 3 - acc <- legacy code
self.user_chat_log: List['Message'] = []
@property
def match(self):
return self._match
@property
def get_formatted_chatlog(self):
return "\n".join(
f"{time.strftime('%H:%M', time.localtime(message.when))} - {self.name}@{message.to}: {message.body[:50]}"
for message in self.user_chat_log
)
@property
def silenced(self) -> bool:
return self.silence_end > 0
@property
def safe_name(self) -> str:
return self.name.lower().strip().replace(" ", "_")
@property
def irc_name(self) -> str:
return self.name.replace(" ", "_")
@property
def is_restricted(self) -> bool:
# return (self.privileges & Privileges.USER_NORMAL) and not (self.privileges & Privileges.USER_PUBLIC)
return (self.privileges & KurikkuPrivileges.Normal) != KurikkuPrivileges.Normal
@property
def bancho_privs(self) -> BanchoRanks:
privs = BanchoRanks(0)
if (self.privileges & KurikkuPrivileges.Normal.value) == KurikkuPrivileges.Normal.value:
privs |= (BanchoRanks.PLAYER | BanchoRanks.SUPPORTER)
if (self.privileges & KurikkuPrivileges.Bat.value) == KurikkuPrivileges.Bat.value:
privs |= BanchoRanks.BAT
if (self.privileges & KurikkuPrivileges.ChatMod.value) == KurikkuPrivileges.ChatMod.value or \
(self.privileges & KurikkuPrivileges.ReplayModerator.value) == KurikkuPrivileges.ReplayModerator.value:
privs |= BanchoRanks.MOD
if (self.privileges & KurikkuPrivileges.CM.value) == KurikkuPrivileges.CM.value:
privs |= BanchoRanks.ADMIN
if (self.privileges & KurikkuPrivileges.Owner.value) == KurikkuPrivileges.Owner.value:
privs |= BanchoRanks.PEPPY
return privs
@property
def is_admin(self) -> bool:
if (self.privileges & KurikkuPrivileges.Developer) == KurikkuPrivileges.Developer or \
(self.privileges & KurikkuPrivileges.ChatMod) == KurikkuPrivileges.ChatMod or \
(self.privileges & KurikkuPrivileges.CM) == KurikkuPrivileges.CM:
return True
return False
@property
def current_stats(self) -> StatsMode:
return self.stats[self.selected_game_mode]
@classmethod
def generate_token(cls) -> str:
return str(uuid.uuid4())
async def parse_friends(self) -> bool:
async for friend in Context.mysql.iterall(
# why does a user2 with id = -1 exist in my db?
'select user2 from users_relationships where user1 = %s and user2 > 0',
[self.id]
):
self.friends.append(friend['user2'])
return True
async def parse_country(self, ip: str) -> bool:
if self.privileges & Privileges.USER_DONOR:
# remember: donors have a locked location
donor_location: str = (await Context.mysql.fetch(
'select country from users_stats where id = %s',
[self.id]
))['country'].upper()
self.country = (Countries.get_country_id(donor_location), donor_location)
else:
if Context.geoip_db:
# You have local geoip2 database, nice!
try:
data = Context.geoip_db.city(ip)
except Exception:
logger.elog(f"[Player/{self.name}] Can't parse location for {ip}")
return False
self.country = (Countries.get_country_id(data.country.iso_code), data.country.iso_code)
self.location = (data.location.latitude, data.location.longitude)
return True
data = None
async with aiohttp.ClientSession() as sess:
async with sess.get(Config.config['geoloc_ip'] + ip) as resp:
try:
data = await resp.json()
finally:
pass
if not data:
logger.elog(f"[Player/{self.name}] Can't parse geoloc")
return False
self.country = (Countries.get_country_id(data['country']), data['country'])
loc = data['loc'].split(",")
self.location = (float(loc[0]), float(loc[1]))
return True
async def update_stats(self, selected_mode: GameModes = None) -> bool:
for mode in GameModes if not selected_mode else [selected_mode]:
res = await Context.mysql.fetch(
'select total_score_{0} as total_score, ranked_score_{0} as ranked_score, '
'pp_{0} as pp, playcount_{0} as total_plays, avg_accuracy_{0} as accuracy, playtime_{0} as playtime '
'from users_stats where id = %s'.format(GameModes.resolve_to_str(mode)),
[self.id]
)
if not res:
logger.elog(f"[Player/{self.name}] Can't parse stats for {GameModes.resolve_to_str(mode)}")
return False
position = await Context.redis.zrevrank(
f"ripple:leaderboard:{GameModes.resolve_to_str(mode)}",
str(self.id)
)
res['leaderboard_rank'] = int(position) + 1 if position is not None else 0
self.stats[mode].update(**res)
async def logout(self) -> None:
if not self.is_tourneymode:
await Context.redis.set("ripple:online_users", len(Context.players.get_all_tokens(True)))
if self.ip:
await userHelper.deleteBanchoSession(self.id, self.ip)
# logic
# leave multiplayer
if self.match:
await self.match.leave_player(self)
# leave specatating
if self.spectating:
await self.spectating.remove_spectator(self)
# leave channels
for (_, chan) in Context.channels.items():
if self.id in chan.users:
await chan.leave_channel(self)
if not self.is_tourneymode:
for p in Context.players.get_all_tokens():
p.enqueue(await PacketBuilder.Logout(self.id))
Context.players.delete_token(self)
return
async def kick(self, message: str = "You have been kicked from the server. Please login again.",
reason: str = "kick") -> bool:
if self.is_bot:
return False
logger.wlog(f"[Player/{self.name}] has been disconnected. {reason}")
if message:
self.enqueue(await PacketBuilder.Notification(message))
self.enqueue(await PacketBuilder.UserID(-1)) # login failed
await self.logout()
return True
# legacy code
async def silence(self, seconds: int = None, reason: str = "", author: int = 999) -> bool:
if seconds is None:
# Get silence expire from db if needed
seconds = max(0, await userHelper.getSilenceEnd(self.id) - int(time.time()))
else:
# Silence in db and token
await userHelper.silence(self.id, seconds, reason, author)
# Silence token
self.silence_end = int(time.time()) + seconds
# Send silence packet to user
self.enqueue(await PacketBuilder.SilenceEnd(seconds))
# Send silenced packet to everyone else
user_silenced = await PacketBuilder.UserSilenced(self.id)
for user in Context.players.get_all_tokens():
user.enqueue(user_silenced)
return True
async def send_message(self, message: 'Message') -> bool:
message.body = f'{message.body[:2045]}...' if message.body[2048:] else message.body
chan: str = message.to
if chan.startswith("#"):
# this is channel object
if chan.startswith("#multi"):
if self.is_tourneymode:
if self.id_tourney > 0:
chan = f"#multi_{self.id_tourney}"
else:
return False
else:
chan = f"#multi_{self.match.id}"
elif chan.startswith("#spec"):
if self.spectating:
chan = f"#spec_{self.spectating.id}"
else:
chan = f"#spec_{self.id}"
channel: 'Channel' = Context.channels.get(chan, None)
if not channel:
logger.klog(f"[{self.name}] Tried to send message in unknown channel. Ignoring it...")
return False
self.user_chat_log.append(message)
logger.klog(
f"{self.name}({self.id}) -> {channel.server_name}: {bytes(message.body, 'latin_1').decode()}"
)
await channel.send_message(self.id, message)
return True
# DM
receiver = Context.players.get_token(name=message.to.lower().strip().replace(" ", "_"))
if not receiver:
logger.klog(f"[{self.name}] Tried to offline user. Ignoring it...")
return False
if receiver.pm_private and self.id not in receiver.friends:
self.enqueue(await PacketBuilder.PMBlocked(message.to))
logger.klog(f"[{self.name}] Tried message {message.to} which has private PM.")
return False
if self.pm_private and receiver.id not in self.friends:
self.pm_private = False
logger.klog(f"[{self.name}] which has private pm sended message to non-friend user. PM unlocked")
if receiver.silenced:
self.enqueue(await PacketBuilder.TargetSilenced(message.to))
logger.klog(f'[{self.name}] Tried to message {message.to}, but they are silenced.')
return False
self.user_chat_log.append(message)
logger.klog(
f"#DM {self.name}({self.id}) -> {message.to}({receiver.id}): {bytes(message.body, 'latin_1').decode()}"
)
receiver.enqueue(
await PacketBuilder.BuildMessage(self.id, message)
)
return True
async def add_spectator(self, new_spec: 'Player') -> bool:
spec_chan_name = f"#spec_{self.id}"
if not Context.channels.get(spec_chan_name):
# in this case, we need to create channel for our spectator in temp mode
spec = Channel(
server_name=spec_chan_name,
description=f"Spectator channel for {self.name}",
public_read=True,
public_write=True,
temp_channel=True
)
Context.channels[spec_chan_name] = spec
await spec.join_channel(self)
c: 'Channel' = Context.channels.get(spec_chan_name)
if not await c.join_channel(new_spec):
logger.elog(f"{self.name} failed to join in {spec_chan_name} spectator channel!")
return False
fellow_packet = await PacketBuilder.FellowSpectatorJoined(new_spec.id)
for spectator in self.spectators:
spectator.enqueue(fellow_packet)
new_spec.enqueue(await PacketBuilder.FellowSpectatorJoined(spectator.id))
self.spectators.append(new_spec)
new_spec.spectating = self
self.enqueue(await PacketBuilder.SpectatorJoined(new_spec.id))
logger.slog(f"{new_spec.name} started to spectating {self.name}!")
return True
async def remove_spectator(self, old_spec: 'Player') -> bool:
spec_chan_name = f"#spec_{self.id}"
self.spectators.remove(old_spec) # attempt to remove old player from array
old_spec.spectating = None
spec_chan: Channel = Context.channels.get(spec_chan_name)
await spec_chan.leave_channel(old_spec) # remove our spectator from channel
fellow_packet = await PacketBuilder.FellowSpectatorLeft(old_spec.id)
if not self.spectators:
await spec_chan.leave_channel(self)
else:
for spectator in self.spectators:
spectator.enqueue(fellow_packet)
self.enqueue(await PacketBuilder.SpectatorLeft(old_spec.id))
logger.slog(f"{old_spec.name} has stopped spectating for {self.name}")
return True
async def remove_hidden_spectator(self, old_spec: 'Player') -> bool:
self.spectators.remove(old_spec) # attempt to remove old player from array
old_spec.spectating = None
self.enqueue(await PacketBuilder.SpectatorLeft(old_spec.id))
logger.slog(f"{old_spec.name} has stopped hidden spectating for {self.name}")
return True
async def say_bancho_restarting(self, delay: int = 20) -> bool:
self.enqueue(
await PacketBuilder.BanchoRestarting(delay * 1000)
)
return True
def enqueue(self, b: bytes) -> None:
self.queue += b
def dequeue(self) -> Optional[bytes]:
if self.queue:
data = bytes(self.queue)
self.queue.clear()
return data
return b''
|
py
|
1a59d37706bac9d5c21267220f73a074dff86022
|
from eth_utils import (
is_bytes,
)
from ssz.sedes import (
Serializable,
infer_sedes,
sedes_by_name,
)
from ssz.sedes.base import (
BaseSedes,
)
def encode(value, sedes=None, cache=True):
"""
Encode object in SSZ format.
`sedes` needs to be explicitly mentioned for encode/decode
of integers(as of now).
`sedes` parameter could be given as a string or as the
actual sedes object itself.
If `value` has an attribute :attr:`_cached_ssz` (as, notably,
:class:`ssz.sedes.Serializable`) and its value is not `None`, this value is
returned bypassing serialization and encoding, unless `sedes` is given (as
the cache is assumed to refer to the standard serialization which can be
replaced by specifying `sedes`).
If `value` is a :class:`ssz.sedes.Serializable` and `cache` is true, the result of
the encoding will be stored in :attr:`_cached_ssz` if it is empty.
"""
if isinstance(value, Serializable):
cached_ssz = value._cached_ssz
if sedes is None and cached_ssz is not None:
return cached_ssz
else:
really_cache = cache and sedes is None
else:
really_cache = False
if sedes is not None:
if sedes in sedes_by_name:
# Get the actual sedes object from string representation
sedes_obj = sedes_by_name[sedes]
else:
sedes_obj = sedes
if not isinstance(sedes_obj, BaseSedes):
raise TypeError("Invalid sedes object")
else:
sedes_obj = infer_sedes(value)
serialized_obj = sedes_obj.serialize(value)
if really_cache:
value._cached_ssz = serialized_obj
return serialized_obj
def decode(ssz, sedes):
"""
Decode a SSZ encoded object.
"""
if not is_bytes(ssz):
raise TypeError(f"Can only decode SSZ bytes, got type {type(ssz).__name__}")
value = sedes.deserialize(ssz)
return value
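# Minimal usage sketch (assumes the package's standard `uint32` basic sedes
# is available, as in typical py-ssz releases; it is not defined in this module):
#
# from ssz.sedes import uint32
# encoded = encode(4, sedes=uint32)   # 4 bytes, little-endian
# assert decode(encoded, sedes=uint32) == 4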
|
py
|
1a59d45977dada99bc24ae3baee3ccd00e6c8426
|
# An OpenTraced server for a Python service that implements the store interface.
from __future__ import print_function
import time
import argparse
from collections import defaultdict
from six import iteritems
import grpc
from concurrent import futures
from jaeger_client import Config
from grpc_opentracing import open_tracing_server_interceptor, \
SpanDecorator
from grpc_opentracing.grpcext import intercept_server
import store_pb2
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Store(store_pb2.StoreServicer):
def __init__(self):
self._inventory = defaultdict(int)
def AddItem(self, request, context):
self._inventory[request.name] += 1
return store_pb2.Empty()
def AddItems(self, request_iter, context):
for request in request_iter:
self._inventory[request.name] += 1
return store_pb2.Empty()
def RemoveItem(self, request, context):
new_quantity = self._inventory[request.name] - 1
if new_quantity < 0:
return store_pb2.RemoveItemResponse(was_successful=False)
self._inventory[request.name] = new_quantity
return store_pb2.RemoveItemResponse(was_successful=True)
def RemoveItems(self, request_iter, context):
response = store_pb2.RemoveItemResponse(was_successful=True)
for request in request_iter:
response = self.RemoveItem(request, context)
if not response.was_successful:
break
return response
def ListInventory(self, request, context):
for name, count in iteritems(self._inventory):
if not count:
continue
else:
yield store_pb2.QuantityResponse(name=name, count=count)
def QueryQuantity(self, request, context):
count = self._inventory[request.name]
return store_pb2.QuantityResponse(name=request.name, count=count)
def QueryQuantities(self, request_iter, context):
for request in request_iter:
count = self._inventory[request.name]
yield store_pb2.QuantityResponse(name=request.name, count=count)
class StoreSpanDecorator(SpanDecorator):
def __call__(self, span, rpc_info):
span.set_tag('grpc.method', rpc_info.full_method)
span.set_tag('grpc.headers', str(rpc_info.metadata))
span.set_tag('grpc.deadline', str(rpc_info.timeout))
def serve():
parser = argparse.ArgumentParser()
parser.add_argument(
'--log_payloads',
action='store_true',
help='log request/response objects to open-tracing spans')
parser.add_argument(
'--include_grpc_tags',
action='store_true',
help='set gRPC-specific tags on spans')
args = parser.parse_args()
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
},
service_name='store-server')
tracer = config.initialize_tracer()
span_decorator = None
if args.include_grpc_tags:
span_decorator = StoreSpanDecorator()
tracer_interceptor = open_tracing_server_interceptor(
tracer, log_payloads=args.log_payloads, span_decorator=span_decorator)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
server = intercept_server(server, tracer_interceptor)
store_pb2.add_StoreServicer_to_server(Store(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
time.sleep(2)
tracer.close()
time.sleep(2)
if __name__ == '__main__':
serve()
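# Example invocation (the module filename is illustrative, not part of the source):
#
#   python store_server.py --log_payloads --include_grpc_tags
#
# Both flags are defined in serve() above: --log_payloads attaches
# request/response objects to spans, --include_grpc_tags adds gRPC metadata tags.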
|
py
|
1a59d4ed6d7a1472eb465c48b440eb4e50a01b83
|
#!/usr/bin/env python
from pvaccess import Channel
from pvaccess import PvBoolean
from pvaccess import PvByte
from pvaccess import PvUByte
from pvaccess import PvShort
from pvaccess import PvUShort
from pvaccess import PvInt
from pvaccess import PvUInt
from pvaccess import PvLong
from pvaccess import PvULong
from pvaccess import PvFloat
from pvaccess import PvDouble
from pvaccess import PvString
from testUtility import TestUtility
class TestChannelPutGet:
#
# Boolean PutGet
#
def testPutGet_PvBoolean(self):
value = TestUtility.getRandomBoolean()
c = TestUtility.getBooleanChannel()
value2 = c.putGet(PvBoolean(value)).getPyObject()
assert(value == value2)
# put() must be done using strings 'true'/'false'
def testPutGet_Boolean(self):
value = TestUtility.getRandomBooleanString()
c = TestUtility.getBooleanChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertBooleanEquality(value,value2)
def testPutGetBoolean_Boolean(self):
value = TestUtility.getRandomBoolean()
c = TestUtility.getBooleanChannel()
value2 = c.putGetBoolean(value).getPyObject()
assert(value == value2)
#
# Byte PutGet
#
# python chars are unsigned
def testPutGet_PvByte(self):
value = chr(TestUtility.getRandomUByte())
c = TestUtility.getByteChannel()
value2 = c.putGet(PvByte(value)).getPyObject()
assert(value == value2)
# put(byte) must be done using integers
# we need to compare result in python chars, which are unsigned
def testPutGet_Byte(self):
value = TestUtility.getRandomByte()
c = TestUtility.getByteChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertCharEquality(value,value2)
def testPutGetByte_Byte(self):
value = chr(TestUtility.getRandomUByte())
c = TestUtility.getByteChannel()
value2 = c.putGetByte(value).getPyObject()
assert(value == value2)
#
# UByte PutGet
#
def testPutGet_PvUByte(self):
value = TestUtility.getRandomUByte()
c = TestUtility.getUByteChannel()
value2 = c.putGet(PvUByte(value)).getPyObject()
assert(value == value2)
def testPutGet_UByte(self):
value = TestUtility.getRandomUByte()
c = TestUtility.getUByteChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetUByte_UByte(self):
value = TestUtility.getRandomUByte()
c = TestUtility.getUByteChannel()
value2 = c.putGetUByte(value).getPyObject()
assert(value == value2)
#
# Short PutGet
#
def testPutGet_PvShort(self):
value = TestUtility.getRandomShort()
c = TestUtility.getShortChannel()
value2 = c.putGet(PvShort(value)).getPyObject()
assert(value == value2)
def testPutGet_Short(self):
value = TestUtility.getRandomShort()
c = TestUtility.getShortChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetShort_Short(self):
value = TestUtility.getRandomShort()
c = TestUtility.getShortChannel()
value2 = c.putGetShort(value).getPyObject()
assert(value == value2)
#
# UShort PutGet
#
def testPutGet_PvUShort(self):
value = TestUtility.getRandomUShort()
c = TestUtility.getUShortChannel()
value2 = c.putGet(PvUShort(value)).getPyObject()
assert(value == value2)
def testPutGet_UShort(self):
value = TestUtility.getRandomUShort()
c = TestUtility.getUShortChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetUShort_UShort(self):
value = TestUtility.getRandomUShort()
c = TestUtility.getUShortChannel()
value2 = c.putGetUShort(value).getPyObject()
assert(value == value2)
#
# Int PutGet
#
def testPutGet_PvInt(self):
value = TestUtility.getRandomInt()
c = TestUtility.getIntChannel()
value2 = c.putGet(PvInt(value)).getPyObject()
assert(value == value2)
def testPutGet_Int(self):
value = TestUtility.getRandomInt()
c = TestUtility.getIntChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetInt_Int(self):
value = TestUtility.getRandomInt()
c = TestUtility.getIntChannel()
value2 = c.putGetInt(value).getPyObject()
assert(value == value2)
#
# UInt PutGet
#
def testPutGet_PvUInt(self):
value = TestUtility.getRandomUInt()
c = TestUtility.getUIntChannel()
value2 = c.putGet(PvUInt(value)).getPyObject()
assert(value == value2)
def testPutGet_UInt(self):
value = TestUtility.getRandomUInt()
c = TestUtility.getUIntChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetUInt_UInt(self):
value = TestUtility.getRandomUInt()
c = TestUtility.getUIntChannel()
value2 = c.putGetUInt(value).getPyObject()
assert(value == value2)
#
# Long PutGet
#
def testPutGet_PvLong(self):
value = TestUtility.getRandomLong()
c = TestUtility.getLongChannel()
value2 = c.putGet(PvLong(value)).getPyObject()
assert(value == value2)
def testPutGet_Long(self):
value = TestUtility.getRandomLong()
c = TestUtility.getLongChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetLong_Long(self):
value = TestUtility.getRandomLong()
c = TestUtility.getLongChannel()
value2 = c.putGetLong(value).getPyObject()
assert(value == value2)
#
# ULong PutGet
#
def testPutGet_PvULong(self):
value = TestUtility.getRandomULong()
c = TestUtility.getULongChannel()
value2 = c.putGet(PvULong(value)).getPyObject()
assert(value == value2)
def testPutGet_ULong(self):
value = TestUtility.getRandomULong()
c = TestUtility.getULongChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetULong_ULong(self):
value = TestUtility.getRandomULong()
c = TestUtility.getULongChannel()
value2 = c.putGetULong(value).getPyObject()
assert(value == value2)
#
# Float PutGet
#
def testPutGet_PvFloat(self):
value = TestUtility.getRandomFloat()
c = TestUtility.getFloatChannel()
value2 = c.putGet(PvFloat(value)).getPyObject()
TestUtility.assertFloatEquality(value, value2)
def testPutGet_Float(self):
value = TestUtility.getRandomFloat()
c = TestUtility.getFloatChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertFloatEquality(value, value2)
def testPutGetFloat_Float(self):
value = TestUtility.getRandomFloat()
c = TestUtility.getFloatChannel()
value2 = c.putGetFloat(value).getPyObject()
TestUtility.assertFloatEquality(value, value2)
#
# Double PutGet
#
def testPutGet_PvDouble(self):
value = TestUtility.getRandomDouble()
c = TestUtility.getDoubleChannel()
value2 = c.putGet(PvDouble(value)).getPyObject()
TestUtility.assertDoubleEquality(value, value2)
def testPutGet_Double(self):
value = TestUtility.getRandomDouble()
c = TestUtility.getDoubleChannel()
value2 = c.putGet(value).getPyObject()
TestUtility.assertDoubleEquality(value, value2)
def testPutGetDouble_Double(self):
value = TestUtility.getRandomDouble()
c = TestUtility.getDoubleChannel()
value2 = c.putGetDouble(value).getPyObject()
TestUtility.assertDoubleEquality(value, value2)
#
# String PutGet
#
def testPutGet_PvString(self):
value = TestUtility.getRandomString()
c = TestUtility.getStringChannel()
value2 = c.putGet(PvString(value)).getPyObject()
assert(value == value2)
def testPutGet_String(self):
value = TestUtility.getRandomString()
c = TestUtility.getStringChannel()
value2 = c.putGet(value).getPyObject()
assert(value == value2)
def testPutGetString_String(self):
value = TestUtility.getRandomString()
c = TestUtility.getStringChannel()
value2 = c.putGetString(value).getPyObject()
assert(value == value2)
|
py
|
1a59d516453f1b391d336f09ea5da443ca227481
|
"""Django settings for workbench project."""
import json
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DJFS = {'type': 'osfs',
'directory_root': 'workbench/static/djpyfs',
'url_root': '/static/djpyfs'}
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'workbench', 'templates'),
os.path.join(BASE_DIR, 'sample_xblocks', 'basic', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
if 'WORKBENCH_DATABASES' in os.environ:
DATABASES = json.loads(os.environ['WORKBENCH_DATABASES'])
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'var/workbench.db'
}
}
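# Illustrative WORKBENCH_DATABASES value (same shape as the default above):
# export WORKBENCH_DATABASES='{"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "var/workbench.db"}}'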
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake'
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '5ftdd9(@p)tg&bqv$(^d!63psz9+g+_i5om_e%!32%po2_+%l7'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'workbench.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'workbench.wsgi.application'
TEMPLATE_DIRS = (
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djpyfs',
'workbench',
'django_nose',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# Only use django-debug-toolbar if it has been installed.
# Installing django-debug-toolbar before running syncdb may cause a
# DatabaseError when trying to run syncdb.
try:
import debug_toolbar # pylint: disable=unused-import, import-error
INSTALLED_APPS += ('debug_toolbar',)
except ImportError:
pass
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'logfile': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'var/workbench.log',
'maxBytes': 50000,
'backupCount': 2,
}
},
'loggers': {
'django.request': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': True,
},
'django': {
'level': 'DEBUG',
'handlers': ['logfile'],
}
}
}
WORKBENCH = {
'reset_state_on_restart': (
os.environ.get('WORKBENCH_RESET_STATE_ON_RESTART', "false").lower() == "true"
),
'services': {
'fs': 'xblock.reference.plugins.FSService'
}
}
|
py
|
1a59d5cb1d5ad6814012dd7ce3356434a9cfe962
|
import os
import re
import logging
from airbrake.notifier import Airbrake
from .secrets import config
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class AirbrakeNotifier:
MAX_NOTIFICATIONS = 50
airbrake_notifier = Airbrake(project_id=config['airbrake_project_id'], api_key=config['airbrake_api_key'])
blacklisted_log_group_names = set(config['airbrake_blacklisted_log_group_names'])
blacklisted_log_message_strings_regex = re.compile('|'.join(config["airbrake_blacklisted_log_message_strings"]))
whitelisted_log_message_terms_regex_string = "|".join(config['airbrake_whitelisted_log_message_terms'])
whitelisted_log_message_terms_regexp = re.compile(whitelisted_log_message_terms_regex_string, re.IGNORECASE)
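# The imported `config` dict must therefore provide: airbrake_project_id,
# airbrake_api_key, airbrake_blacklisted_log_group_names (iterable),
# airbrake_blacklisted_log_message_strings (regex fragments) and
# airbrake_whitelisted_log_message_terms (regex fragments).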
def __init__(self):
self._report = dict()
self._total_errors = 0
self._airbrake_rate_limited = False
def report(self):
results = []
for log_group, subcounts in self._report.items():
for message_type, count in subcounts.items():
results += [(log_group, message_type, count)]
return results
def notify_on_stream(self, log_event_stream):
for log_event in log_event_stream:
self.notify(log_event)
yield log_event
def notify(self, log_event):
message = log_event['@message']
log_group = log_event['@log_group']
log_stream = log_event['@log_stream']
error_str = None
if AirbrakeNotifier._is_message_appropriate_for_airbrake(message, log_group) and \
not AirbrakeNotifier._contains_blacklisted_string(message):
error_str = "'{0} {1} '@log_stream': {2}".format(log_group, message, log_stream)
try:
if not self._airbrake_rate_limited and self._total_errors < AirbrakeNotifier.MAX_NOTIFICATIONS:
AirbrakeNotifier.airbrake_notifier.notify(error_str)
except Exception as e:
message = str(e)
if message.startswith('420 Client Error'):
self._airbrake_rate_limited = True
else:
logger.error("Airbrake notification failed! {}".format(message))
self._observe(log_group, error_str)
def _observe(self, log_group, error_str):
if log_group not in self._report:
self._report[log_group] = {
'errors': 0,
'total': 0
}
if error_str:
self._report[log_group]['errors'] += 1
self._total_errors += 1
self._report[log_group]['total'] += 1
@staticmethod
def _is_message_appropriate_for_airbrake(message, log_group):
if log_group not in AirbrakeNotifier.blacklisted_log_group_names and \
AirbrakeNotifier.whitelisted_log_message_terms_regexp.search(message):
return True
return False
@staticmethod
def _contains_blacklisted_string(message):
if AirbrakeNotifier.blacklisted_log_message_strings_regex.search(message):
return True
return False
|
py
|
1a59d65f2e025191efdd0748e68d9277299565a3
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for MCMC diagnostic utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow_probability.python.mcmc.diagnostic import _reduce_variance
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class _EffectiveSampleSizeTest(object):
@property
def use_static_shape(self):
raise NotImplementedError(
"Subclass failed to implement `use_static_shape`.")
def _check_versus_expected_effective_sample_size(
self,
x_,
expected_ess,
atol=1e-2,
rtol=1e-2,
filter_threshold=None,
filter_beyond_lag=None,
filter_beyond_positive_pairs=False):
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess = tfp.mcmc.effective_sample_size(
x,
filter_threshold=filter_threshold,
filter_beyond_lag=filter_beyond_lag,
filter_beyond_positive_pairs=filter_beyond_positive_pairs)
if self.use_static_shape:
self.assertAllEqual(x.shape[1:], ess.shape)
ess_ = self.evaluate(ess)
self.assertAllClose(
np.ones_like(ess_) * expected_ess, ess_, atol=atol, rtol=rtol)
def testIidRank1NormalHasFullEssMaxLags10(self):
# With a length 5000 iid normal sequence, and filter_beyond_lag = 10, we
# should have a good estimate of ESS, and it should be close to the full
# sequence length of 5000.
# The choice of filter_beyond_lag = 10 is a short cutoff, reasonable only
# since we know the correlation length should be zero right away.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=10,
filter_threshold=None,
rtol=0.3)
def testIidRank2NormalHasFullEssMaxLags10(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000, 2).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=10,
filter_threshold=None,
rtol=0.3)
def testIidRank1NormalHasFullEssMaxLagThresholdZero(self):
# With a length 5000 iid normal sequence, and filter_threshold = 0,
# we should have a super-duper estimate of ESS, and it should be very close
# to the full sequence length of 5000.
# The choice of filter_threshold = 0 means we cut off as soon as the
# auto-corr is below zero. This should happen very quickly, due to the fact
# that the theoretical auto-corr is [1, 0, 0,...]
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=0.,
rtol=0.1)
def testIidRank2NormalHasFullEssMaxLagThresholdZero(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000, 2).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=0.,
rtol=0.1)
def testIidRank1NormalHasFullEssMaxLagInitialPositive(self):
# See similar test for ThresholdZero for background. This time we use the
# initial_positive sequence criterion. In this case, initial_positive
# sequence might be a little more noisy than the threshold case because it
# will typically not drop the lag-1 auto-correlation.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.25)
def testIidRank2NormalHasFullEssMaxLagInitialPositive(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(5000, 2).astype(np.float32),
expected_ess=5000,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.25)
def testIidRank1NormalHasFullEssMaxLagInitialPositiveOddLength(self):
# See similar test for Rank1Normal for reasoning.
self._check_versus_expected_effective_sample_size(
x_=np.random.randn(4999).astype(np.float32),
expected_ess=4999,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.2)
def testLength10CorrelationHasEssOneTenthTotalLengthUsingMaxLags50(self):
# Create x_, such that
# x_[i] = iid_x_[0], i = 0,...,9
# x_[i] = iid_x_[1], i = 10,..., 19,
# and so on.
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
self._check_versus_expected_effective_sample_size(
x_=x_,
expected_ess=50000 // 10,
filter_beyond_lag=50,
filter_threshold=None,
rtol=0.2)
def testLength10CorrelationHasEssOneTenthTotalLengthUsingMaxLagsThresholdZero(
self):
# Create x_, such that
# x_[i] = iid_x_[0], i = 0,...,9
# x_[i] = iid_x_[1], i = 10,..., 19,
# and so on.
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
self._check_versus_expected_effective_sample_size(
x_=x_,
expected_ess=50000 // 10,
filter_beyond_lag=None,
filter_threshold=0.,
rtol=0.1)
def testLength10CorrelationHasEssOneTenthTotalLengthUsingMaxLagsInitialPos(
self):
# Create x_, such that
# x_[i] = iid_x_[0], i = 0,...,9
# x_[i] = iid_x_[1], i = 10,..., 19,
# and so on.
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
self._check_versus_expected_effective_sample_size(
x_=x_,
expected_ess=50000 // 10,
filter_beyond_lag=None,
filter_threshold=None,
filter_beyond_positive_pairs=True,
rtol=0.15)
def testListArgs(self):
# x_ has correlation length 10 ==> ESS = N / 10
# y_ has correlation length 1 ==> ESS = N
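# (Using the standard formula ESS = N / (1 + 2 * sum_k autocorr_k): repeating
# each iid draw 10 times gives autocorr_k ~= (10 - k) / 10 for k < 10, so the
# denominator is ~10 and ESS ~= N / 10.)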
iid_x_ = np.random.randn(5000, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((5000, 10)).astype(np.float32)).reshape((50000,))
y_ = np.random.randn(50000).astype(np.float32)
states = [x_, x_, y_, y_]
filter_threshold = [0., None, 0., None]
filter_beyond_lag = [None, 5, None, 5]
# See other tests for reasoning on tolerance.
ess = tfp.mcmc.effective_sample_size(
states,
filter_threshold=filter_threshold,
filter_beyond_lag=filter_beyond_lag)
ess_ = self.evaluate(ess)
self.assertAllEqual(4, len(ess_))
self.assertAllClose(50000 // 10, ess_[0], rtol=0.3)
self.assertAllClose(50000 // 10, ess_[1], rtol=0.3)
self.assertAllClose(50000, ess_[2], rtol=0.1)
self.assertAllClose(50000, ess_[3], rtol=0.1)
def testMaxLagsThresholdLessThanNeg1SameAsNone(self):
# Setting both means we filter out items R_k from the auto-correlation
# sequence if k > filter_beyond_lag OR k >= j where R_j < filter_threshold.
# x_ has correlation length 10.
iid_x_ = np.random.randn(500, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((500, 10)).astype(np.float32)).reshape((5000,))
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess_none_none = tfp.mcmc.effective_sample_size(
x, filter_threshold=None, filter_beyond_lag=None)
ess_none_200 = tfp.mcmc.effective_sample_size(
x, filter_threshold=None, filter_beyond_lag=200)
ess_neg2_200 = tfp.mcmc.effective_sample_size(
x, filter_threshold=-2., filter_beyond_lag=200)
ess_neg2_none = tfp.mcmc.effective_sample_size(
x, filter_threshold=-2., filter_beyond_lag=None)
[ess_none_none_, ess_none_200_, ess_neg2_200_,
ess_neg2_none_] = self.evaluate(
[ess_none_none, ess_none_200, ess_neg2_200, ess_neg2_none])
# filter_threshold=-2 <==> filter_threshold=None.
self.assertAllClose(ess_none_none_, ess_neg2_none_)
self.assertAllClose(ess_none_200_, ess_neg2_200_)
def testMaxLagsArgsAddInAnOrManner(self):
# Setting both means we filter out items R_k from the auto-correlation
# sequence if k > filter_beyond_lag OR k >= j where R_j < filter_threshold.
# x_ has correlation length 10.
iid_x_ = np.random.randn(500, 1).astype(np.float32)
x_ = (iid_x_ * np.ones((500, 10)).astype(np.float32)).reshape((5000,))
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess_1_9 = tfp.mcmc.effective_sample_size(
x, filter_threshold=1., filter_beyond_lag=9)
ess_1_none = tfp.mcmc.effective_sample_size(
x, filter_threshold=1., filter_beyond_lag=None)
ess_none_9 = tfp.mcmc.effective_sample_size(
x, filter_threshold=None, filter_beyond_lag=9)
ess_1_9_, ess_1_none_, ess_none_9_ = self.evaluate(
[ess_1_9, ess_1_none, ess_none_9])
# Since R_k = 1 for k < 10, and R_k < 1 for k >= 10,
# filter_threshold = 1 <==> filter_beyond_lag = 9.
self.assertAllClose(ess_1_9_, ess_1_none_)
self.assertAllClose(ess_1_9_, ess_none_9_)
def testInitialPositiveAndLag(self):
# We will use the max_lags argument to verify that initial_positive sequence
# argument does what it should.
# This sequence begins to have non-positive pairwise sums at lag 38
x_ = np.linspace(-1., 1., 100).astype(np.float32)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess_true_37 = tfp.mcmc.effective_sample_size(
x,
filter_beyond_positive_pairs=True,
filter_threshold=None,
filter_beyond_lag=37)
ess_true_none = tfp.mcmc.effective_sample_size(
x,
filter_beyond_positive_pairs=True,
filter_threshold=None,
filter_beyond_lag=None)
ess_false_37 = tfp.mcmc.effective_sample_size(
x,
filter_beyond_positive_pairs=False,
filter_threshold=None,
filter_beyond_lag=37)
ess_true_37_, ess_true_none_, ess_false_37_ = self.evaluate(
[ess_true_37, ess_true_none, ess_false_37])
self.assertAllClose(ess_true_37_, ess_true_none_)
self.assertAllClose(ess_true_37_, ess_false_37_)
def testInitialPositiveSuperEfficient(self):
# Initial positive sequence will correctly estimate the ESS of
# super-efficient MCMC chains.
# This sequence has strong anti-autocorrelation, so will get ESS larger than
# its length.
x_ = ((np.arange(0, 100) % 2).astype(np.float32) -
0.5) * np.exp(-np.linspace(0., 10., 100))
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
ess = tfp.mcmc.effective_sample_size(
x, filter_beyond_positive_pairs=True)
ess_ = self.evaluate(ess)
self.assertGreater(ess_, 100.)
@test_util.run_all_in_graph_and_eager_modes
class EffectiveSampleSizeStaticTest(tfp_test_util.TestCase,
_EffectiveSampleSizeTest):
@property
def use_static_shape(self):
return True
@test_util.run_all_in_graph_and_eager_modes
class EffectiveSampleSizeDynamicTest(tfp_test_util.TestCase,
_EffectiveSampleSizeTest):
@property
def use_static_shape(self):
return False
@test_util.run_all_in_graph_and_eager_modes
class _PotentialScaleReductionTest(object):
@property
def use_static_shape(self):
raise NotImplementedError(
"Subclass failed to implement `use_static_shape`.")
def testListOfStatesWhereFirstPassesSecondFails(self):
"""Simple test showing API with two states. Read first!."""
n_samples = 1000
# state_0 is two scalar chains taken from iid Normal(0, 1). Will pass.
state_0 = np.random.randn(n_samples, 2)
# state_1 is three 4-variate chains taken from Normal(0, 1) that have been
# shifted. Since every chain is shifted, they are not the same, and the
# test should fail.
offset = np.array([1., -1., 2.]).reshape(3, 1)
state_1 = np.random.randn(n_samples, 3, 4) + offset
rhat = tfp.mcmc.potential_scale_reduction(
chains_states=[state_0, state_1], independent_chain_ndims=1)
self.assertIsInstance(rhat, list)
rhat_0_, rhat_1_ = self.evaluate(rhat)
# r_hat_0 should be close to 1, meaning test is passed.
self.assertAllEqual((), rhat_0_.shape)
self.assertAllClose(1., rhat_0_, rtol=0.02)
# r_hat_1 should be greater than 1.2, meaning test has failed.
self.assertAllEqual((4,), rhat_1_.shape)
self.assertAllEqual(np.ones_like(rhat_1_).astype(bool), rhat_1_ > 1.2)
def check_results(self,
state_,
independent_chain_shape,
should_pass,
split_chains=False):
sample_ndims = 1
independent_chain_ndims = len(independent_chain_shape)
state = tf1.placeholder_with_default(
state_, shape=state_.shape if self.use_static_shape else None)
rhat = tfp.mcmc.potential_scale_reduction(
state,
independent_chain_ndims=independent_chain_ndims,
split_chains=split_chains)
if self.use_static_shape:
self.assertAllEqual(
state_.shape[sample_ndims + independent_chain_ndims:], rhat.shape)
rhat_ = self.evaluate(rhat)
if should_pass:
self.assertAllClose(np.ones_like(rhat_), rhat_, atol=0, rtol=0.02)
else:
self.assertAllEqual(np.ones_like(rhat_).astype(bool), rhat_ > 1.2)
def iid_normal_chains_should_pass_wrapper(self,
sample_shape,
independent_chain_shape,
other_shape,
split_chains=False,
dtype=np.float32):
"""Check results with iid normal chains."""
state_shape = sample_shape + independent_chain_shape + other_shape
state_ = np.random.randn(*state_shape).astype(dtype)
# The "other" dimensions do not have to be identical, just independent, so
# force them to not be identical.
if other_shape:
state_ *= np.random.rand(*other_shape).astype(dtype)
self.check_results(
state_,
independent_chain_shape,
should_pass=True,
split_chains=split_chains)
def testPassingIIDNdimsAreIndependentOneOtherZero(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000], independent_chain_shape=[4], other_shape=[])
def testPassingIIDNdimsAreIndependentOneOtherOne(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000], independent_chain_shape=[3], other_shape=[7])
def testPassingIIDNdimsAreIndependentOneOtherOneSplitChainsEvenNSamples(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000],
independent_chain_shape=[3],
other_shape=[7],
split_chains=True)
def testPassingIIDNdimsAreIndependentOneOtherOneSplitChainsOddNSamples(self):
# For odd number of samples we must remove last sample.
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10001],
independent_chain_shape=[3],
other_shape=[7],
split_chains=True)
def testPassingIIDNdimsAreIndependentOneOtherTwo(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000], independent_chain_shape=[2], other_shape=[5, 7])
def testPassingIIDNdimsAreIndependentTwoOtherTwo64Bit(self):
self.iid_normal_chains_should_pass_wrapper(
sample_shape=[10000],
independent_chain_shape=[2, 3],
other_shape=[5, 7],
dtype=np.float64)
def offset_normal_chains_should_fail_wrapper(
self, sample_shape, independent_chain_shape, other_shape):
"""Check results with normal chains that are offset from each other."""
state_shape = sample_shape + independent_chain_shape + other_shape
state_ = np.random.randn(*state_shape)
# Add a significant offset to the different (formerly iid) chains.
offset = np.linspace(
0, 2, num=np.prod(independent_chain_shape)).reshape([1] * len(
sample_shape) + independent_chain_shape + [1] * len(other_shape))
state_ += offset
self.check_results(state_, independent_chain_shape, should_pass=False)
def testFailingOffsetNdimsAreSampleOneIndependentOneOtherOne(self):
self.offset_normal_chains_should_fail_wrapper(
sample_shape=[10000], independent_chain_shape=[2], other_shape=[5])
def testLinearTrendPassesIfNoSplitChains(self):
# A problem with non-split Rhat is that it does not catch linear trends.
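    # Every chain shares the same trend, so the between-chain variance stays
    # comparable to the within-chain variance and R-hat remains near 1 even
    # though the chains are not stationary.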
n_samples = 1000
n_chains = 10
state_ = (
np.random.randn(n_samples, n_chains) +
np.linspace(0, 1, n_samples).reshape(n_samples, 1))
self.check_results(
state_,
independent_chain_shape=[n_chains],
should_pass=True,
split_chains=False)
def testLinearTrendFailsIfSplitChains(self):
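    # Splitting each chain in half gives the two halves different means, which
    # inflates the between-chain variance and pushes R-hat above 1.2.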
n_samples = 10000
n_chains = 10
state_ = (
np.random.randn(n_samples, n_chains) +
np.linspace(0, 10, n_samples).reshape(n_samples, 1))
self.check_results(
state_,
independent_chain_shape=[n_chains],
should_pass=False,
split_chains=True)
def testNotEnoughSamplesNoSplitChainsFailsIfValidateArgs(self):
input_ = np.random.rand(1, 10)
x = tf1.placeholder_with_default(
input_, shape=input_.shape if self.use_static_shape else None)
with self.assertRaisesError("Must provide at least 2 samples"):
self.evaluate(
tfp.mcmc.potential_scale_reduction(
# Require at least 2 samples...have only 1
x,
independent_chain_ndims=1,
validate_args=True))
def testNotEnoughSamplesWithSplitChainsFailsIfValidateArgs(self):
input_ = np.random.rand(3, 10)
x = tf1.placeholder_with_default(
input_, shape=input_.shape if self.use_static_shape else None)
with self.assertRaisesError("Must provide at least 4 samples"):
self.evaluate(
tfp.mcmc.potential_scale_reduction(
# Require at least 4 samples...have only 3
x,
independent_chain_ndims=1,
split_chains=True,
validate_args=True))
@test_util.run_all_in_graph_and_eager_modes
class PotentialScaleReductionStaticTest(tfp_test_util.TestCase,
_PotentialScaleReductionTest):
@property
def use_static_shape(self):
return True
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testIndependentNdimsLessThanOneRaises(self):
with self.assertRaisesRegexp(ValueError, "independent_chain_ndims"):
tfp.mcmc.potential_scale_reduction(
np.random.rand(2, 3, 4), independent_chain_ndims=0)
@test_util.run_all_in_graph_and_eager_modes
class PotentialScaleReductionDynamicTest(tfp_test_util.TestCase,
_PotentialScaleReductionTest):
@property
def use_static_shape(self):
return False
def assertRaisesError(self, msg):
if tf.executing_eagerly():
return self.assertRaisesRegexp(Exception, msg)
return self.assertRaisesOpError(msg)
@test_util.run_all_in_graph_and_eager_modes
class _ReduceVarianceTest(object):
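  """Mixin of tests checking `_reduce_variance` against `np.var`."""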
@property
def use_static_shape(self):
raise NotImplementedError(
"Subclass failed to implement `use_static_shape`.")
def check_versus_numpy(self, x_, axis, biased, keepdims):
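    """Compare `_reduce_variance` to `np.var` with the corresponding ddof."""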
x_ = np.asarray(x_)
x = tf1.placeholder_with_default(
x_, shape=x_.shape if self.use_static_shape else None)
var = _reduce_variance(
x, axis=axis, biased=biased, keepdims=keepdims)
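    # `biased=True` corresponds to the population variance (ddof=0);
    # `biased=False` applies Bessel's correction (ddof=1), matching np.var below.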
np_var = np.var(x_, axis=axis, ddof=0 if biased else 1, keepdims=keepdims)
if self.use_static_shape:
self.assertAllEqual(np_var.shape, var.shape)
var_ = self.evaluate(var)
# We will mask below, which changes shape, so check shape explicitly here.
self.assertAllEqual(np_var.shape, var_.shape)
    # NaN arises from division by zero when the reduced sample size equals ddof.
nan_mask = np.isnan(np_var)
if nan_mask.any():
self.assertTrue(np.isnan(var_[nan_mask]).all())
self.assertAllClose(np_var[~nan_mask], var_[~nan_mask], atol=0, rtol=0.02)
def testScalarBiasedTrue(self):
self.check_versus_numpy(x_=-1.234, axis=None, biased=True, keepdims=False)
def testScalarBiasedFalse(self):
    # A scalar is a single sample, so the unbiased estimator divides by zero
    # (sample size equals ddof) and the result should be NaN.
self.check_versus_numpy(x_=-1.234, axis=None, biased=False, keepdims=False)
  def testShape2x3x4AxisNoneBiasedTrueKeepdimsFalse(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4), axis=None, biased=True, keepdims=False)
  def testShape2x3x4Axis1BiasedTrueKeepdimsTrue(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4), axis=1, biased=True, keepdims=True)
  def testShape2x3x4x5Axis1BiasedTrueKeepdimsTrue(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4, 5), axis=1, biased=True, keepdims=True)
  def testShape2x3x4x5Axis1BiasedFalseKeepdimsFalse(self):
self.check_versus_numpy(
x_=np.random.randn(2, 3, 4, 5), axis=1, biased=False, keepdims=False)
@test_util.run_all_in_graph_and_eager_modes
class ReduceVarianceTestStaticShape(tfp_test_util.TestCase,
_ReduceVarianceTest):
@property
def use_static_shape(self):
return True
@test_util.run_all_in_graph_and_eager_modes
class ReduceVarianceTestDynamicShape(tfp_test_util.TestCase,
_ReduceVarianceTest):
@property
def use_static_shape(self):
return False
if __name__ == "__main__":
tf.test.main()