Columns:
  ext      string (9 distinct values)
  sha      string (length 40)
  content  string (length 3 to 1.04M)
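Each row below pairs a file extension (`ext`), a 40-character hex digest (`sha`), and the raw file text (`content`). A minimal sketch for iterating such rows, assuming they are stored as JSON Lines with those three fields and that `sha` is the Git blob SHA-1 of `content` (neither assumption is stated by the source; the `data.jsonl` filename is hypothetical):

```python
import hashlib
import json


def git_blob_sha1(content: str) -> str:
    # Git hashes a blob as sha1(b"blob <size>\0" + data); this is an assumption
    # about how the sha column was produced, not something the source states.
    data = content.encode("utf-8")
    header = f"blob {len(data)}\0".encode("utf-8")
    return hashlib.sha1(header + data).hexdigest()


def iter_rows(path: str):
    # Assumed layout: one JSON object per line with "ext", "sha", "content" keys.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            yield row["ext"], row["sha"], row["content"]


if __name__ == "__main__":
    for ext, sha, content in iter_rows("data.jsonl"):  # hypothetical filename
        print(ext, sha, len(content), sha == git_blob_sha1(content))
```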
py
1a5a6b2f767b06ccc9ebbc4b873e464e90636cf6
""" ASGI config for apiteste project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiteste.settings') application = get_asgi_application()
py
1a5a6b90c4a024f8ee80d83bae9e320685459a71
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Create the base directory"""
from pathlib import Path
import sys

Path(sys.argv[2]).mkdir()
py
1a5a6b920bc9d19cf94f19b4a48d1e132f605cb7
from os import path
from setuptools import setup, find_packages
import sys
import versioneer

# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
if sys.version_info < (3, 6):
    error = """
niio does not support Python {0}.{2}.
Python 3.6 and above is required. Check your Python version like so:

python3 --version

This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:

pip install --upgrade pip
""".format(3, 6)
    sys.exit(error)

here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
    readme = readme_file.read()

with open(path.join(here, 'requirements.txt')) as requirements_file:
    # Parse requirements.txt, ignoring any commented-out lines.
    requirements = [line for line in requirements_file.read().splitlines()
                    if not line.startswith('#')]

setup(
    name='niio',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Package for quickly loading common neuroimaging data.",
    long_description=readme,
    author="Kristian Eschenburg",
    author_email='[email protected]',
    url='https://github.com/kristianeschenburg/niio',
    packages=find_packages(exclude=['docs', 'tests']),
    entry_points={
        'console_scripts': [
            # 'some.module:some_function',
        ],
    },
    include_package_data=True,
    package_data={
        'niio': [
            # When adding files here, remember to update MANIFEST.in as well,
            # or else they will not be included in the distribution on PyPI!
            # 'path/to/data_file',
        ]
    },
    install_requires=requirements,
    license="BSD (3-clause)",
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
    ],
)
py
1a5a6cf9f1a3da7f415c99a350d389687c44498b
import taichi as ti


@ti.test(exclude=[ti.cc])
def test_sort():
    def test_sort_for_dtype(dtype, N):
        keys = ti.field(dtype, N)
        values = ti.field(dtype, N)

        @ti.kernel
        def fill():
            for i in keys:
                keys[i] = ti.random() * N
                values[i] = keys[i]

        fill()
        ti._kernels.parallel_sort(keys, values)

        keys_host = keys.to_numpy()
        values_host = values.to_numpy()

        for i in range(N):
            if i < N - 1:
                assert keys_host[i] <= keys_host[i + 1]
            assert keys_host[i] == values_host[i]

    test_sort_for_dtype(ti.i32, 1)
    test_sort_for_dtype(ti.i32, 256)
    test_sort_for_dtype(ti.i32, 100001)
    test_sort_for_dtype(ti.f32, 1)
    test_sort_for_dtype(ti.f32, 256)
    test_sort_for_dtype(ti.f32, 100001)
py
1a5a6d503bc32a7806a8e262b1a8f471ffc8e278
#!/usr/bin/python
# -*- coding: utf-8 -*-

import time
import boto3
from redis.sentinel import Sentinel

ALERT_EMAILS = ['[email protected]']
REDIS_SENTINEL_LIST = [("47.94.197.140", 6379)]  # [("192.168.0.62", 26379), ("192.168.0.63", 26379), ("192.168.0.64", 26379)]


def send_email(to_address, subject, content):
    ses = boto3.client('ses')
    r = ses.send_email(Source='"viabtc-exchange-alert" <[email protected]>',
                       Destination={'ToAddresses': [to_address]},
                       Message={'Subject': {'Data': subject, 'Charset': 'utf-8'},
                                'Body': {'Text': {'Data': content, 'Charset': 'utf-8'}}})


def main():
    last_send = None
    sentinel = Sentinel(REDIS_SENTINEL_LIST)
    redis_master = sentinel.master_for("mymaster", socket_timeout=120)
    while True:
        r = redis_master.blpop('alert:message', 60)
        if not r:
            continue
        current_timestamp = int(time.time())
        if last_send and current_timestamp - last_send < 60:
            continue
        last_send = current_timestamp
        message = r[1]
        for email in ALERT_EMAILS:
            send_email(email, "viabtc server error", message)


if __name__ == '__main__':
    main()
py
1a5a6e106acbb434f2ecebd4f64b78cfb1716841
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.auth.decorators import login_required
from django.urls import include, path
from django.views.generic import TemplateView

urlpatterns = [
    path(
        "",
        login_required(TemplateView.as_view(template_name="homepage.html")),
        name="homepage",
    ),
    path(
        "accounts/login/",
        auth_views.LoginView.as_view(template_name="login.html"),
        name="login",
    ),
    path(
        "accounts/logout/",
        auth_views.LogoutView.as_view(next_page=settings.LOGIN_URL),
        name="logout",
    ),
    path("admin/", admin.site.urls),
    path("problems/", include("problems.urls")),
    path("quizzes/", include("quizzes.urls")),
    path("students/", include("students.urls")),
]
py
1a5a6e5e5f714dd5c87ea3c90b8986eb3de78ad1
# -*- coding: utf-8 -*- from .socketservice import get_instance from .alarm_service import AlarmService import binascii import time from app.models.device import Device as DeviceModel from app.models.line import Line from app.models.monitor import Monitor from datetime import datetime from app.libs.utils import dynamic_decimal from app.libs import command from app.libs.utils import command_encode from app.libs.error_code import DeviceException class Device: energy_sign = ["0B04", "0C04", "0D04", "OE04"] energy_line = { "0B04": 0, "0C04": 1, "0D04": 2, "0E04": 3 } upload_sign = ["CD11", "CD12", "CD21", "CD22", "CD31", "CD32", "CD41", "CD42"] upload_type = { "1": "ua", "2": "energy" } upload_line = { "1": 0, "2": 1, "3": 2, "4": 3 } di = { "1": "off", "0": "on" } do = { "1": "on", "0": "off" } def __init__(self): self.device_id = None self.cloud_id = None self.version = None self.sign = None def parse_data(self, data, socket): print(datetime.now().strftime('%H:%M:%S'), data) if str.startswith(data, "AA55"): self.parse_device_data(data, socket) elif not self.check_register(socket): Device.get_basic_info(socket) elif str.startswith(data, "6403") or str.startswith(data, "DD016403"): self.parse_modbus_data(data, socket) elif self.check_energy_data(data): self.parse_energy_data(data, socket) elif self.check_upload_data(data): self.parse_upload_data(data, socket) else: print("其他数据", data) @staticmethod def get_basic_info(socket): """获取基础信息,初始化""" print("获取cloud_id") socket.request.sendall(binascii.a2b_hex("AA550006E00000910177")) time.sleep(0.5) print("获取信号强度") socket.request.sendall(binascii.a2b_hex("AA550004E0230107")) time.sleep(0.5) print("获取线路状态") socket.request.sendall(binascii.a2b_hex("6403001000084C3C")) @staticmethod def get_cloud_id(socket): """获取云ID""" socket.request.sendall(command_encode(command.BASIC["cloud_id"])) @staticmethod def get_version(socket): """获取版本""" socket.request.sendall(command_encode(command.BASIC["version"])) @staticmethod def get_sign_strength(socket): """获取信号强度""" socket.request.sendall(command_encode(command.BASIC["sign"])) @staticmethod def operate(line, operate, socket): """操作电路通断""" operate = operate.upper() if line in getattr(command, operate).keys(): socket.request.sendall(command_encode(getattr(command, operate)[line])) @staticmethod def get_box_detail(socket): for e in command.ENERGY.values(): socket.request.sendall(command_encode(e)) time.sleep(0.5) for u in command.UA.values(): socket.request.sendall(command_encode(u)) time.sleep(0.5) @staticmethod def send_command(socket, command): socket.request.sendall(command_encode(command)) def parse_device_data(self, data, socket): if not self.check_devicec_data(data): return if str.startswith(data, "aa550010e000000a0091".upper()): cloud_id = self.parse_cloud_id(data) self.register(cloud_id, socket) if "EE01" in data: version = self.parse_version(data) self.version = version print("version", version) if "E023" in data: sign = self.parse_sign(data) socket.sign = sign self.sign = sign print("sign", sign) def parse_modbus_data(self, data, socket): if str.startswith(data, "DD01"): data = data[4:] if not self.check_modbus_data(data): return data = data[6:-4] status_map = { 1: data[3:4], 2: data[7:8], 3: data[11:12], 4: data[15:18] } do_map = { 1: data[19:20], 2: data[23:24], 3: data[27:28], 4: data[31:32] } lines = Line.objects.filter(device_id=socket.device_id).all() for line in lines: if line.line in status_map.keys(): status = self.di[status_map[line.line]] do = self.do[do_map[line.line]] value = { "device_id": 
socket.device_id, "line_id": line.id, "line": line.line, "type": "status", "value": { "status": status, "do": do } } Device.switch_alarm(line, status) Monitor.save_data(value) def parse_cloud_id(self, data): cloud_id = data[20:-4] return cloud_id def parse_version(self, data): sign_index = data.index("EE01") version = data[sign_index + 4:-4] return version def parse_sign(self, data): sign_index = data.index("E023") sign = data[sign_index + 4:-4] return int(sign, 16) def parse_line_status(self, data): pass def parse_energy_data(self, data, socket): sign = data[0:4] line = Line.objects.filter(line=self.energy_line[sign], device_id=socket.device_id).first() value = { "device_id": socket.device_id, "line": line.line, "line_id": line.id } if len(data) < 20: data = data[6:-4] energy = int(data, 16) // 100 value["type"] = "energy" value["value"] = energy else: data = data[6:-4] voltage_a = int(data[0:4], 16) // 10 voltage_b = int(data[4:8], 16) // 10 voltage_c = int(data[8:12], 16) // 10 value["type"] = "voltage" value["value"] = { "a": voltage_a, "b": voltage_b, "c": voltage_c } Monitor.save_data(value) electricity_a = dynamic_decimal((int(data[12:16], 16) / 100)) electricity_b = dynamic_decimal((int(data[16:20], 16) / 100)) electricity_c = dynamic_decimal((int(data[20:24], 16) / 100)) value["type"] = "electricity" value["value"] = { "a": electricity_a, "b": electricity_b, "c": electricity_c } Monitor.save_data(value) print("这是解析电能数据", socket.line_status) def parse_upload_data(self, data, socket): print("开始解析upload") print(socket.device_id) line = Line.objects.filter(line=self.upload_line[data[2]], device_id=socket.device_id).first() type = self.upload_type[data[3]] value = { "device_id": socket.device_id, "line": line.line, "line_id": line.id } if type == "energy": energy = (int(data[10:18], 16) // 100) value["type"] = "energy" value["value"] = energy Monitor.save_data(value) if type == "ua": electricity_a = dynamic_decimal((int(data[22:26], 16)) / 100) electricity_b = dynamic_decimal((int(data[26:30], 16)) / 100) electricity_c = dynamic_decimal((int(data[30:34], 16)) / 100) value["type"] = "electricity" value["value"] = { "a": electricity_a, "b": electricity_b, "c": electricity_c } Monitor.save_data(value) voltage_a = int(data[10:14], 16) // 10 voltage_b = int(data[14:18], 16) // 10 voltage_c = int(data[18:22], 16) // 10 value["type"] = "voltage" value["value"] = { "a": voltage_a, "b": voltage_b, "c": voltage_c } Monitor.save_data(value) Device.current_alarm(line, electricity_a, electricity_b, electricity_c) socket.timestamp = int(round(time.time() * 1000)) @staticmethod def status(socket): """获取线路状态""" print("获取线路状态") socket.request.sendall(command_encode(command.BASIC["line_status"])) def check_devicec_data(self, data): if len(data) < 8: return False length = int(data[4:8], 16) * 2 if len(data[8:]) == length: return True else: return False def check_modbus_data(self, data): length = int(data[4:6], 16) * 2 print("length", length) print("data", len(data[6:-4])) if len(data[6:-4]) == length: return True else: return False def check_upload_data(self, data): if data[0:4] in self.upload_sign: return True else: return False def check_register(self, socket): if socket.cloud_id is None: return False else: return True def check_energy_data(self, data): if data[0:4] in self.energy_sign: if self.check_modbus_data(data): return True else: return False else: return False def register(self, cloud_id, socket): socket.cloud_id = cloud_id self.cloud_id = cloud_id device = 
DeviceModel.objects.filter(cloud_id=cloud_id).first() if device: socket.device_id = device["device_id"] get_instance().add_client(cloud_id, socket) @staticmethod def update_device(socket): for i in range(0, 3): Device.status(socket) time.sleep(0.5) @staticmethod def search_device_socket(device_id): device = DeviceModel.objects.filter(device_id=device_id).first_or_404() cloud_id = device["cloud_id"] clients = get_instance().clients if cloud_id in clients.keys(): return clients[cloud_id] else: raise DeviceException() @staticmethod def operate_device_plan(device_id, line_id, operate): device = DeviceModel.objects.filter(device_id=device_id).first() if device: cloud_id = device["cloud_id"] clients = get_instance().clients if cloud_id in clients: line = Line.objects.filter(id=line_id).first() socket = clients[cloud_id] socket.request.sendall(command_encode(getattr(command, operate)[line.line])) @staticmethod def current_alarm(line, la, lb, lc): limit = line.limit total = la + lb + lc if (limit * (line.standard / 100)) < total < (limit * 1.1): alarm = AlarmService(device_id=line.device_id, alarm_type="high_current", line_id=line.id, a=la, b=lb, c=lc, limit=line.limit) alarm.gen_alarm() if total > (limit * 1.1): alarm = AlarmService(device_id=line.device_id, alarm_type="overload", line_id=line.id, a=la, b=lb, c=lc) alarm.gen_alarm() @staticmethod def offline_alarm(socket): device_id = socket.device_id alarm = AlarmService(device_id=device_id, alarm_type="offline", line_id=None) alarm.gen_alarm() @staticmethod def trip_alarm(line): alarm = AlarmService(device_id=line.device_id, alarm_type="trip", line_id=line.id) alarm.gen_alarm() @staticmethod def switch_alarm(line, status): monitor = Monitor.objects.filter(device_id=line.device_id, line_id=line.id, type="status").first() if monitor: if monitor["value"]["status"] != status: if status == "on": alarm = AlarmService(device_id=line.device_id, alarm_type="switch", line_id=line.id, type="on") alarm.gen_alarm() else: alarm = AlarmService(device_id=line.device_id, alarm_type="switch", line_id=line.id, type="off") alarm.gen_alarm() @staticmethod def operate_job(line, operate, *args): device_id = line.device_id device = DeviceModel.objects.filter(device_id=device_id).first() cloud_id = device.cloud_id clients = get_instance().clients if cloud_id in clients.keys(): socket = clients[cloud_id] for i in range(0, 3): Device.operate(line.line, operate, socket) time.sleep(0.5)
py
1a5a70059ebfd6cdeee42852858f3e88d8edb484
""" module to methods to main """ import sys import logging from .migrate_isis import migrate_isis_parser from .migrate_articlemeta import migrate_articlemeta_parser from .tools import tools_parser logger = logging.getLogger(__name__) def main_migrate_articlemeta(): """ method main to script setup.py """ try: sys.exit(migrate_articlemeta_parser(sys.argv[1:])) except KeyboardInterrupt: # É convencionado no shell que o programa finalizado pelo signal de # código N deve retornar o código N + 128. sys.exit(130) except Exception as exc: logger.exception( "erro durante a execução da função " "'migrate_articlemeta_parser' com os args %s", sys.argv[1:], ) sys.exit("Um erro inexperado ocorreu: %s" % exc) def main_migrate_isis(): sys.exit(migrate_isis_parser(sys.argv[1:])) def tools(): sys.exit(tools_parser(sys.argv[1:])) if __name__ == "__main__": sys.exit(migrate_articlemeta_parser(sys.argv[1:]))
py
1a5a7028ef34eba11b4c41b781c71ebf6eac195f
from enum import Enum


class ModuleType(str, Enum):
    """Module type enumeration."""

    Magnetic = "magdeck"
    Temperature = "tempdeck"
    Thermocycler = "thermocycler"
    Heatershaker = "heatershaker"
py
1a5a7052d17fdc53b8b3d06c54aa7574b6fe6617
from modelvshuman import Plot, Evaluate
from modelvshuman import constants as c
from plotting_definition import plotting_definition_template


def run_evaluation():
    models = ["resnet50", "bagnet33", "simclr_resnet50x1"]
    datasets = c.DEFAULT_DATASETS  # or e.g. ["cue-conflict", "uniform-noise"]
    params = {"batch_size": 64, "print_predictions": True, "num_workers": 20}
    Evaluate()(models, datasets, **params)


def run_plotting():
    plot_types = c.DEFAULT_PLOT_TYPES  # or e.g. ["accuracy", "shape-bias"]
    plotting_def = plotting_definition_template
    figure_dirname = "example-figures/"
    Plot(plot_types=plot_types, plotting_definition=plotting_def,
         figure_directory_name=figure_dirname)

    # In examples/plotting_definition.py, you can edit
    # plotting_definition_template as desired: this will let
    # the toolbox know which models to plot, and which colours to use etc.


if __name__ == "__main__":
    # 1. evaluate models on out-of-distribution datasets
    run_evaluation()
    # 2. plot the evaluation results
    run_plotting()
py
1a5a7080ed2049aa53e6b0a648955dcfc5cb9b81
# Copyright (C) 2019 The Raphielscape Company LLC. # # Licensed under the Raphielscape Public License, Version 1.b (the "License"); # you may not use this file except in compliance with the License. # """ Userbot module containing various scrapers. """ import os import shutil from bs4 import BeautifulSoup import re from time import sleep from html import unescape from re import findall from datetime import datetime from selenium import webdriver from urllib.parse import quote_plus from urllib.error import HTTPError from selenium.webdriver.support.ui import Select from selenium.webdriver.chrome.options import Options from wikipedia import summary from wikipedia.exceptions import DisambiguationError, PageError from urbandict import define from requests import get from google_images_download import google_images_download from googleapiclient.discovery import build from googleapiclient.errors import HttpError from googletrans import LANGUAGES, Translator from gtts import gTTS from emoji import get_emoji_regexp from userbot import CMD_HELP, BOTLOG, BOTLOG_CHATID, YOUTUBE_API_KEY, CHROME_DRIVER, GOOGLE_CHROME_BIN from userbot.utils import register CARBONLANG = "auto" LANG = "en" @register(outgoing=True, pattern="^.carbon") async def carbon_api(e): if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"): """ A Wrapper for carbon.now.sh """ await e.edit("`Processing..`") CARBON = 'https://carbon.now.sh/?l={lang}&code={code}' global CARBONLANG textx = await e.get_reply_message() pcode = e.text if pcode[8:]: pcode = str(pcode[8:]) elif textx: pcode = str(textx.message) # Importing message to module code = quote_plus(pcode) # Converting to urlencoded await e.edit("`Meking Carbon...\n25%`") url = CARBON.format(code=code, lang=CARBONLANG) chrome_options = Options() chrome_options.add_argument("--headless") chrome_options.binary_location = GOOGLE_CHROME_BIN chrome_options.add_argument("--window-size=1920x1080") chrome_options.add_argument("--disable-dev-shm-usage") chrome_options.add_argument("--no-sandbox") chrome_options.add_argument("--disable-gpu") prefs = {'download.default_directory' : './'} chrome_options.add_experimental_option('prefs', prefs) driver = webdriver.Chrome(executable_path=CHROME_DRIVER, options=chrome_options) driver.get(url) await e.edit("`Be Patient...\n50%`") download_path = './' driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command') params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}} command_result = driver.execute("send_command", params) driver.find_element_by_xpath("//button[contains(text(),'Export')]").click() #driver.find_element_by_xpath("//button[contains(text(),'4x')]").click() #driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click() await e.edit("`Processing..\n75%`") # Waiting for downloading sleep(2.5) await e.edit("`Done Dana Done...\n100%`") file = './carbon.png' await e.edit("`Uploading..`") await e.client.send_file( e.chat_id, file, caption="<< Here's your carbon, \n Carbonised by @surajit1>> ", force_document=True, reply_to=e.message.reply_to_msg_id, ) os.remove('./IndianBot.png') driver.quit() # Removing carbon.png after uploading await e.delete() # Deleting msg
py
1a5a70ba1f6da633f1219b73766c1971c1cc5422
""" ===================================================================== Compute Power Spectral Density of inverse solution from single epochs ===================================================================== Compute PSD of dSPM inverse solution on single trial epochs restricted to a brain label. The PSD is computed using a multi-taper method with Discrete Prolate Spheroidal Sequence (DPSS) windows. """ # Author: Martin Luessi <[email protected]> # # License: BSD-3-Clause # %% import matplotlib.pyplot as plt import mne from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, compute_source_psd_epochs print(__doc__) data_path = sample.data_path() fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_raw = data_path + '/MEG/sample/sample_audvis_raw.fif' fname_event = data_path + '/MEG/sample/sample_audvis_raw-eve.fif' label_name = 'Aud-lh' fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name subjects_dir = data_path + '/subjects' event_id, tmin, tmax = 1, -0.2, 0.5 snr = 1.0 # use smaller SNR for raw data lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Load data inverse_operator = read_inverse_operator(fname_inv) label = mne.read_label(fname_label) raw = mne.io.read_raw_fif(fname_raw) events = mne.read_events(fname_event) # Set up pick list include = [] raw.info['bads'] += ['EEG 053'] # bads + 1 more # pick MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, include=include, exclude='bads') # Read epochs epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6)) # define frequencies of interest fmin, fmax = 0., 70. bandwidth = 4. # bandwidth of the windows in Hz # %% # Compute source space PSD in label # --------------------------------- # # ..note:: By using "return_generator=True" stcs will be a generator object # instead of a list. This allows us so to iterate without having to # keep everything in memory. n_epochs_use = 10 stcs = compute_source_psd_epochs(epochs[:n_epochs_use], inverse_operator, lambda2=lambda2, method=method, fmin=fmin, fmax=fmax, bandwidth=bandwidth, label=label, return_generator=True, verbose=True) # compute average PSD over the first 10 epochs psd_avg = 0. for i, stc in enumerate(stcs): psd_avg += stc.data psd_avg /= n_epochs_use freqs = stc.times # the frequencies are stored here stc.data = psd_avg # overwrite the last epoch's data with the average # %% # Visualize the 10 Hz PSD: brain = stc.plot(initial_time=10., hemi='lh', views='lat', # 10 HZ clim=dict(kind='value', lims=(20, 40, 60)), smoothing_steps=3, subjects_dir=subjects_dir) brain.add_label(label, borders=True, color='k') # %% # Visualize the entire spectrum: fig, ax = plt.subplots() ax.plot(freqs, psd_avg.mean(axis=0)) ax.set_xlabel('Freq (Hz)') ax.set_xlim(stc.times[[0, -1]]) ax.set_ylabel('Power Spectral Density')
py
1a5a70f73499672d9c53db28a2efadaac03620a9
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: game.py # ------------------- # Divine Oasis # Text Based RPG Game # By wsngamerz # ------------------- import divineoasis import logging import logging.config import os import platform import pyglet import sys from divineoasis.assets import Assets, Directories from divineoasis.config import Config from divineoasis.colours import Colours from divineoasis.scene_manager import SceneManager from pyglet.window import Window class DivineOasis: def __init__(self, debug: bool = False): self.debug = debug if self.debug: if platform.system() == "Windows": # Set larger console os.system("mode con: cols=200 lines=9999") if platform.system() != "Linux": import pyglet_ffmpeg pyglet_ffmpeg.load_ffmpeg() # Enable Colours using black magic os.system("") # Setup Logging self.game_logger = self.setup_logging(debug) # Get basic system information self.system_data = {} self.system_info() # Basic classes self.game_config = Config() self.game_config.load() self.game_assets = Assets(self.game_config.get("language.lang")) # setup Pyglet pyglet.options['audio'] = ('openal', 'pulse', 'directsound', 'silent') vsync_enabled = self.game_config.get("graphics.vsync") self.window = Window(1280, 720) self.window.set_vsync(vsync_enabled) # TODO: Fix fullscreen mode # self.window.set_fullscreen(self.game_config.get("fullscreen")) self.window.set_caption(self.game_assets.get("lang.title.main_title")) fps_limit = self.game_config.get("graphics.fps") self.scene_manager = SceneManager(self.game_assets, self.window) if vsync_enabled: pyglet.clock.schedule(self.scene_manager.update) else: pyglet.clock.schedule_interval(self.scene_manager.update, 1.0 / fps_limit) def start(self): self.game_logger.info(f"Starting Divine Oasis { divineoasis.__version__ }") # Start Pyglet loop pyglet.app.run() @staticmethod def setup_logging(debug: bool): if debug: level = "DEBUG" else: level = "INFO" logging.config.dictConfig({ "version": 1, "disable_existing_loggers": False, "formatters": { "standard": { "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s" } }, "handlers": { "default": { "class": "logging.StreamHandler", "formatter": "standard" } }, "loggers": { "": { "handlers": ["default"], "propagate": True, "level": level } } }) logging.addLevelName(logging.DEBUG, Colours.BOLD + Colours.BRIGHT_CYAN + "DEBUG" + Colours.RESET) logging.addLevelName(logging.INFO, Colours.BOLD + Colours.BRIGHT_BLUE + "INFO" + Colours.RESET) logging.addLevelName(logging.WARNING, Colours.BOLD + Colours.BRIGHT_YELLOW + "WARNING" + Colours.RESET) logging.addLevelName(logging.ERROR, Colours.BOLD + Colours.BRIGHT_RED + "ERROR" + Colours.RESET) logging.addLevelName(logging.CRITICAL, Colours.BOLD + Colours.BRIGHT_RED + Colours.BLINK + "CRITICAL" + Colours.RESET) return logging.getLogger(__name__) def system_info(self): self.system_data = { "arguments": sys.argv, "python_version": sys.version, "os": platform.system(), "os_release": platform.release(), "os_version": platform.version(), "os_arch": platform.machine(), "os_platform": platform.platform() } self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*= Debug Information =*=*=*=*=*=*=*=*=*=*=*=") self.game_logger.debug(f" Arguments: { self.system_data['arguments'] }") self.game_logger.debug(f" Python Version: { self.system_data['python_version'] }") self.game_logger.debug(f" OS: { self.system_data['os'] }") self.game_logger.debug(f" OS Version: { self.system_data['os_version'] }") self.game_logger.debug(f" OS Release: { self.system_data['os_release'] }") 
self.game_logger.debug(f" OS Architecture: { self.system_data['os_arch'] }") self.game_logger.debug(f" OS Platform: { self.system_data['os_platform'] }") self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*=*=* Directories *=*=*=*=*=*=*=*=*=*=*=*=*=") self.game_logger.debug(f" Application Root: { Directories().application_root }") self.game_logger.debug(f" Assets Directory: { Directories().assets_directory }") self.game_logger.debug(f" Data Directory: { Directories().data_directory }") self.game_logger.debug(f" Config Location: { Directories().config_location }") self.game_logger.debug("=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=")
py
1a5a75e6dd70666b903e415bb1b7b033e50e2dc4
""" Programmer : EOF File : config.py Date : 2016.01.06 E-mail : [email protected] License : MIT License Description : This is a configure file for this project. """ DEBUG_MODEL = True USING_CASCADE = False # training set directory for face and non-face images TRAINING_FACE = "E:/JasonLeasterGraduationProject/FaceDetection/TrainingImages/FACES/" TRAINING_NONFACE = "E:/JasonLeasterGraduationProject/FaceDetection/TrainingImages/NFACES/" # test set directory for face and non-face images TEST_FACE = "./TrainingImages/FACES/" TEST_NONFACE = "./TrainingImages/NFACES/" # single image for testing TEST_IMG = "./Test/soccer.gif" FEATURE_FILE_TRAINING = "./features/features_train.cache" FEATURE_FILE_TESTING = "./features/features_test.cache" FEATURE_FILE_SUBSET = "./features/features_train_subset" FEATURE_FILE_SUBSET_0 = "./features/features_train_subset0.cache" FEATURE_FILE_SUBSET_1 = "./features/features_train_subset1.cache" # For parallel PROCESS_NUM = 2 ADABOOST_CACHE_FILE = "./model/adaboost_classifier.cache" ROC_FILE = "./model/roc.cache" FIGURES = "./figure/" # image size in the training set 19 * 19 TRAINING_IMG_HEIGHT = 19 TRAINING_IMG_WIDTH = 19 # How many different types of Haar-feature FEATURE_TYPE_NUM = 5 # How many number of features that a single training image have FEATURE_NUM = 37862 #FEATURE_NUM = 16373 #FEATURE_NUM = 49608 # number of positive and negative sample will be used in the training process POSITIVE_SAMPLE = 4800 NEGATIVE_SAMPLE = 9000 SAMPLE_NUM = POSITIVE_SAMPLE + NEGATIVE_SAMPLE TESTING_POSITIVE_SAMPLE = 20 TESTING_NEGATIVE_SAMPLE = 20 TESTING_SAMPLE_NUM = TESTING_NEGATIVE_SAMPLE + TESTING_POSITIVE_SAMPLE LABEL_POSITIVE = +1 LABEL_NEGATIVE = -1 WHITE = 255 BLACK = 0 EXPECTED_TPR = 0.999 EXPECTED_FPR = 0.0005 # for CASCADE EXPECTED_FPR_PRE_LAYYER = 0.1 EXPECTED_TPR_PRE_LAYYER = 0.999 # the threshold range of adaboost. (from -inf to +inf) AB_TH_MIN = -15 AB_TH_MAX = +15 HAAR_FEATURE_TYPE_I = "I" HAAR_FEATURE_TYPE_II = "II" HAAR_FEATURE_TYPE_III = "III" HAAR_FEATURE_TYPE_IV = "IV" HAAR_FEATURE_TYPE_V = "V" AB_TH = -3. OVER_LAP_TH = 0.1 MAX_WEAK_NUM = 12 CASACADE_LIMIT = 3 ADABOOST_LIMIT = 150 SEARCH_WIN_STEP = 4 DETECT_START = 1. DETECT_END = 2. DETECT_STEP = 0.2
py
1a5a763bf9dc5af9881a769bb6407ced29462c1a
from flask_restful import reqparse, abort, Resource

from api.models.user import User
from api.utils.errors import ValidationError


class AuthLogin(Resource):
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('email', type=str, help='You need to enter your e-mail address', required=True)
        parser.add_argument('password', type=str, help='You need to enter your password', required=True)

        args = parser.parse_args()

        email = args.get('email')
        password = args.get('password')

        try:
            token = User.validate(email, password)
            return {'token': token}
        except ValidationError as e:
            abort(400, message="There was an error while trying to log you in -> {0}".format(e))


class AuthRegister(Resource):
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('fullname', type=str, help='You need to enter your full name', required=True)
        parser.add_argument('email', type=str, help='You need to enter your e-mail address', required=True)
        parser.add_argument('password', type=str, help='You need to enter your chosen password', required=True)
        parser.add_argument('password_conf', type=str, help='You need to enter the confirm password field', required=True)
        parser.add_argument('street_number', type=str, help='Address Street Number')
        parser.add_argument('route', type=str, help='Address route')
        parser.add_argument('locality', type=str, help='Address House Name')
        parser.add_argument('postal_town', type=str, help='Address Town')
        parser.add_argument('administrative_area_level_2', type=str, help='Address Area')
        parser.add_argument('administrative_area_level_1', type=str, help='Address Area')
        parser.add_argument('country', type=str, help='Address Country')
        parser.add_argument('postal_code', type=str, help='Address Postal Code')

        args = parser.parse_args()

        email = args.get('email')
        password = args.get('password')
        password_conf = args.get('password_conf')
        fullname = args.get('fullname')
        street_number = args.get('street_number')
        route = args.get('route')
        locality = args.get('locality')
        postal_town = args.get('postal_town')
        administrative_area_level_2 = args.get('administrative_area_level_2')
        administrative_area_level_1 = args.get('administrative_area_level_1')
        country = args.get('country')
        postal_code = args.get('postal_code')

        try:
            User.create(
                email=email,
                password=password,
                password_conf=password_conf,
                fullname=fullname,
                street_number=street_number,
                route=route,
                locality=locality,
                postal_town=postal_town,
                administrative_area_level_2=administrative_area_level_2,
                administrative_area_level_1=administrative_area_level_1,
                country=country,
                postal_code=postal_code
            )
            return {'message': 'Successfully created your account.'}
        except ValidationError as e:
            abort(400, message="There was an error while trying to create your account -> {0}".format(e))
py
1a5a765eb3be299e13c2fa9a347c69d32a4b586e
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/ads/googleads_v1/proto/services/billing_setup_service.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.ads.google_ads.v1.proto.resources import billing_setup_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_billing__setup__pb2 from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='google/ads/googleads_v1/proto/services/billing_setup_service.proto', package='google.ads.googleads.v1.services', syntax='proto3', serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB\030BillingSetupServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'), serialized_pb=_b('\nBgoogle/ads/googleads_v1/proto/services/billing_setup_service.proto\x12 google.ads.googleads.v1.services\x1a;google/ads/googleads_v1/proto/resources/billing_setup.proto\x1a\x1cgoogle/api/annotations.proto\"/\n\x16GetBillingSetupRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"|\n\x19MutateBillingSetupRequest\x12\x13\n\x0b\x63ustomer_id\x18\x01 \x01(\t\x12J\n\toperation\x18\x02 \x01(\x0b\x32\x37.google.ads.googleads.v1.services.BillingSetupOperation\"y\n\x15\x42illingSetupOperation\x12\x41\n\x06\x63reate\x18\x02 \x01(\x0b\x32/.google.ads.googleads.v1.resources.BillingSetupH\x00\x12\x10\n\x06remove\x18\x01 \x01(\tH\x00\x42\x0b\n\toperation\"h\n\x1aMutateBillingSetupResponse\x12J\n\x06result\x18\x01 \x01(\x0b\x32:.google.ads.googleads.v1.services.MutateBillingSetupResult\"1\n\x18MutateBillingSetupResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\x9e\x03\n\x13\x42illingSetupService\x12\xb5\x01\n\x0fGetBillingSetup\x12\x38.google.ads.googleads.v1.services.GetBillingSetupRequest\x1a/.google.ads.googleads.v1.resources.BillingSetup\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{resource_name=customers/*/billingSetups/*}\x12\xce\x01\n\x12MutateBillingSetup\x12;.google.ads.googleads.v1.services.MutateBillingSetupRequest\x1a<.google.ads.googleads.v1.services.MutateBillingSetupResponse\"=\x82\xd3\xe4\x93\x02\x37\"2/v1/customers/{customer_id=*}/billingSetups:mutate:\x01*B\xff\x01\n$com.google.ads.googleads.v1.servicesB\x18\x42illingSetupServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3') , dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_billing__setup__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,]) _GETBILLINGSETUPREQUEST = _descriptor.Descriptor( name='GetBillingSetupRequest', full_name='google.ads.googleads.v1.services.GetBillingSetupRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_name', full_name='google.ads.googleads.v1.services.GetBillingSetupRequest.resource_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=195, serialized_end=242, ) _MUTATEBILLINGSETUPREQUEST = _descriptor.Descriptor( name='MutateBillingSetupRequest', full_name='google.ads.googleads.v1.services.MutateBillingSetupRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='customer_id', full_name='google.ads.googleads.v1.services.MutateBillingSetupRequest.customer_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='operation', full_name='google.ads.googleads.v1.services.MutateBillingSetupRequest.operation', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=244, serialized_end=368, ) _BILLINGSETUPOPERATION = _descriptor.Descriptor( name='BillingSetupOperation', full_name='google.ads.googleads.v1.services.BillingSetupOperation', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='create', full_name='google.ads.googleads.v1.services.BillingSetupOperation.create', index=0, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='remove', full_name='google.ads.googleads.v1.services.BillingSetupOperation.remove', index=1, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='operation', full_name='google.ads.googleads.v1.services.BillingSetupOperation.operation', index=0, containing_type=None, fields=[]), ], serialized_start=370, serialized_end=491, ) _MUTATEBILLINGSETUPRESPONSE = _descriptor.Descriptor( name='MutateBillingSetupResponse', full_name='google.ads.googleads.v1.services.MutateBillingSetupResponse', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='result', full_name='google.ads.googleads.v1.services.MutateBillingSetupResponse.result', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', 
extension_ranges=[], oneofs=[ ], serialized_start=493, serialized_end=597, ) _MUTATEBILLINGSETUPRESULT = _descriptor.Descriptor( name='MutateBillingSetupResult', full_name='google.ads.googleads.v1.services.MutateBillingSetupResult', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='resource_name', full_name='google.ads.googleads.v1.services.MutateBillingSetupResult.resource_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=599, serialized_end=648, ) _MUTATEBILLINGSETUPREQUEST.fields_by_name['operation'].message_type = _BILLINGSETUPOPERATION _BILLINGSETUPOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_billing__setup__pb2._BILLINGSETUP _BILLINGSETUPOPERATION.oneofs_by_name['operation'].fields.append( _BILLINGSETUPOPERATION.fields_by_name['create']) _BILLINGSETUPOPERATION.fields_by_name['create'].containing_oneof = _BILLINGSETUPOPERATION.oneofs_by_name['operation'] _BILLINGSETUPOPERATION.oneofs_by_name['operation'].fields.append( _BILLINGSETUPOPERATION.fields_by_name['remove']) _BILLINGSETUPOPERATION.fields_by_name['remove'].containing_oneof = _BILLINGSETUPOPERATION.oneofs_by_name['operation'] _MUTATEBILLINGSETUPRESPONSE.fields_by_name['result'].message_type = _MUTATEBILLINGSETUPRESULT DESCRIPTOR.message_types_by_name['GetBillingSetupRequest'] = _GETBILLINGSETUPREQUEST DESCRIPTOR.message_types_by_name['MutateBillingSetupRequest'] = _MUTATEBILLINGSETUPREQUEST DESCRIPTOR.message_types_by_name['BillingSetupOperation'] = _BILLINGSETUPOPERATION DESCRIPTOR.message_types_by_name['MutateBillingSetupResponse'] = _MUTATEBILLINGSETUPRESPONSE DESCRIPTOR.message_types_by_name['MutateBillingSetupResult'] = _MUTATEBILLINGSETUPRESULT _sym_db.RegisterFileDescriptor(DESCRIPTOR) GetBillingSetupRequest = _reflection.GeneratedProtocolMessageType('GetBillingSetupRequest', (_message.Message,), dict( DESCRIPTOR = _GETBILLINGSETUPREQUEST, __module__ = 'google.ads.googleads_v1.proto.services.billing_setup_service_pb2' , __doc__ = """Request message for [BillingSetupService.GetBillingSetup][google.ads.googleads.v1.services.BillingSetupService.GetBillingSetup]. Attributes: resource_name: The resource name of the billing setup to fetch. """, # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetBillingSetupRequest) )) _sym_db.RegisterMessage(GetBillingSetupRequest) MutateBillingSetupRequest = _reflection.GeneratedProtocolMessageType('MutateBillingSetupRequest', (_message.Message,), dict( DESCRIPTOR = _MUTATEBILLINGSETUPREQUEST, __module__ = 'google.ads.googleads_v1.proto.services.billing_setup_service_pb2' , __doc__ = """Request message for billing setup mutate operations. Attributes: customer_id: Id of the customer to apply the billing setup mutate operation to. operation: The operation to perform. 
""", # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.MutateBillingSetupRequest) )) _sym_db.RegisterMessage(MutateBillingSetupRequest) BillingSetupOperation = _reflection.GeneratedProtocolMessageType('BillingSetupOperation', (_message.Message,), dict( DESCRIPTOR = _BILLINGSETUPOPERATION, __module__ = 'google.ads.googleads_v1.proto.services.billing_setup_service_pb2' , __doc__ = """A single operation on a billing setup, which describes the cancellation of an existing billing setup. Attributes: operation: Only one of these operations can be set. "Update" operations are not supported. create: Creates a billing setup. No resource name is expected for the new billing setup. remove: Resource name of the billing setup to remove. A setup cannot be removed unless it is in a pending state or its scheduled start time is in the future. The resource name looks like ``customers/{customer_id}/billingSetups/{billing_id}``. """, # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.BillingSetupOperation) )) _sym_db.RegisterMessage(BillingSetupOperation) MutateBillingSetupResponse = _reflection.GeneratedProtocolMessageType('MutateBillingSetupResponse', (_message.Message,), dict( DESCRIPTOR = _MUTATEBILLINGSETUPRESPONSE, __module__ = 'google.ads.googleads_v1.proto.services.billing_setup_service_pb2' , __doc__ = """Response message for a billing setup operation. Attributes: result: A result that identifies the resource affected by the mutate request. """, # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.MutateBillingSetupResponse) )) _sym_db.RegisterMessage(MutateBillingSetupResponse) MutateBillingSetupResult = _reflection.GeneratedProtocolMessageType('MutateBillingSetupResult', (_message.Message,), dict( DESCRIPTOR = _MUTATEBILLINGSETUPRESULT, __module__ = 'google.ads.googleads_v1.proto.services.billing_setup_service_pb2' , __doc__ = """Result for a single billing setup mutate. Attributes: resource_name: Returned for successful operations. """, # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.MutateBillingSetupResult) )) _sym_db.RegisterMessage(MutateBillingSetupResult) DESCRIPTOR._options = None _BILLINGSETUPSERVICE = _descriptor.ServiceDescriptor( name='BillingSetupService', full_name='google.ads.googleads.v1.services.BillingSetupService', file=DESCRIPTOR, index=0, serialized_options=None, serialized_start=651, serialized_end=1065, methods=[ _descriptor.MethodDescriptor( name='GetBillingSetup', full_name='google.ads.googleads.v1.services.BillingSetupService.GetBillingSetup', index=0, containing_service=None, input_type=_GETBILLINGSETUPREQUEST, output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_billing__setup__pb2._BILLINGSETUP, serialized_options=_b('\202\323\344\223\0021\022//v1/{resource_name=customers/*/billingSetups/*}'), ), _descriptor.MethodDescriptor( name='MutateBillingSetup', full_name='google.ads.googleads.v1.services.BillingSetupService.MutateBillingSetup', index=1, containing_service=None, input_type=_MUTATEBILLINGSETUPREQUEST, output_type=_MUTATEBILLINGSETUPRESPONSE, serialized_options=_b('\202\323\344\223\0027\"2/v1/customers/{customer_id=*}/billingSetups:mutate:\001*'), ), ]) _sym_db.RegisterServiceDescriptor(_BILLINGSETUPSERVICE) DESCRIPTOR.services_by_name['BillingSetupService'] = _BILLINGSETUPSERVICE # @@protoc_insertion_point(module_scope)
py
1a5a76f8a1f205013b93d6932f0b7dbcb05042f3
import os
import torch
import torch.nn as nn
from torch.autograd import Variable

import dataset84
import model
from unet import UNet, CNNEncoder


def main():
    # init conv net
    print("init net")
    unet = UNet(3, 1)
    if os.path.exists("./unet.pkl"):
        unet.load_state_dict(torch.load("./unet.pkl"))
        print("load unet")
    unet.cuda()

    cnn = CNNEncoder()
    if os.path.exists("./cnn.pkl"):
        cnn.load_state_dict(torch.load("./cnn.pkl"))
        print("load cnn")
    cnn.cuda()

    # init dataset
    print("init dataset")
    data_loader = dataset84.jump_data_loader()

    # init optimizer
    unet_optimizer = torch.optim.Adam(unet.parameters(), lr=0.001)
    cnn_optimizer = torch.optim.Adam(cnn.parameters(), lr=0.001)
    criterion = nn.MSELoss()

    # train
    print("training...")
    for epoch in range(1000):
        for i, (images, press_times) in enumerate(data_loader):
            images = Variable(images).cuda()
            press_times = Variable(press_times.float()).cuda()

            masks = unet(images)
            segmentations = images * masks
            predict_press_times = cnn(segmentations)

            loss = criterion(predict_press_times, press_times)

            unet_optimizer.zero_grad()
            cnn_optimizer.zero_grad()
            loss.backward()
            unet_optimizer.step()
            cnn_optimizer.step()

            if (i + 1) % 10 == 0:
                print("epoch:", epoch, "step:", i, "loss:", loss.data[0])
            if (epoch + 1) % 5 == 0 and i == 0:
                torch.save(unet.state_dict(), "./unet.pkl")
                torch.save(cnn.state_dict(), "./cnn.pkl")
                print("save model")


if __name__ == '__main__':
    main()
py
1a5a77d114f6614593a8d912af11876cefb59b14
from django.template.response import TemplateResponse
from rest_framework.settings import api_settings
from django.core.paginator import Paginator
from rest_framework import viewsets, permissions

from . import models
from . import serializers


class ProductViewset(viewsets.ModelViewSet):
    permission_classes = [permissions.DjangoModelPermissions]
    serializer_class = serializers.ProductSerializer
    filterset_fields = {
        'id': ['exact'],
        'name': ['exact', 'contains'],
        'description': ['exact', 'contains'],
        'team': ['exact'],
        'creator': ['exact'],
        'imagesets': ['exact'],
        'annotationtype': ['exact'],
    }

    def get_queryset(self):
        user = self.request.user
        return models.Product.objects.filter(team__in=user.team_set.all()).select_related('creator', 'team').order_by('id')

    def create(self, request):
        user = self.request.user
        if "creator" not in request.data:
            request.data["creator"] = user.id
        response = super().create(request)
        return response

    def list(self, request, *args, **kwargs):
        if "api" in request.META['PATH_INFO']:
            return super(ProductViewset, self).list(request, *args, **kwargs)
        else:
            products = self.filter_queryset(self.get_queryset()).order_by('team', 'id')

            current_query = request.META['QUERY_STRING']
            if "page" not in request.query_params:
                current_query += "&page=1"
                page_id = 1
            else:
                page_id = int(request.query_params.get('page', 1))
            limit = int(request.query_params.get('limit', api_settings.PAGE_SIZE))

            paginator = Paginator(products, limit)
            page = paginator.get_page(page_id)

            previous_query = first_query = current_query.replace("&page=" + str(page_id), "&page=1")
            if page.has_previous():
                previous_query = current_query.replace("&page=" + str(page_id), "&page={}".format(page.previous_page_number()))
            next_query = last_query = current_query.replace("&page=" + str(page_id), "&page={}".format(paginator.num_pages))
            if page.has_next():
                next_query = current_query.replace("&page=" + str(page_id), "&page={}".format(page.next_page_number()))

            return TemplateResponse(request, 'base/explore.html', {
                'mode': 'products',
                'products': page,  # to separate what kind of stuff is displayed in the view
                'paginator': page,  # for page stuff
                'first_query': first_query,
                'previous_query': previous_query,
                'next_query': next_query,
                'last_query': last_query,
                #'filter': self.filterset_class
            })
py
1a5a77d6bf0a35536f4c4824195e50d0b4c38577
import tensorflow as tf
from layers import *


def encoder(input):
    # Create a conv network with 3 conv layers and 1 FC layer
    # Conv 1: filter: [3, 3, 1], stride: [2, 2], relu
    # Conv 2: filter: [3, 3, 8], stride: [2, 2], relu
    # Conv 3: filter: [3, 3, 8], stride: [2, 2], relu
    # FC: output_dim: 100, no non-linearity
    raise NotImplementedError


def decoder(input):
    # Create a deconv network with 1 FC layer and 3 deconv layers
    # FC: output dim: 128, relu
    # Reshape to [batch_size, 4, 4, 8]
    # Deconv 1: filter: [3, 3, 8], stride: [2, 2], relu
    # Deconv 2: filter: [8, 8, 1], stride: [2, 2], padding: valid, relu
    # Deconv 3: filter: [7, 7, 1], stride: [1, 1], padding: valid, sigmoid
    raise NotImplementedError


def autoencoder(input_shape):
    # Define place holder with input shape
    # Define variable scope for autoencoder
    with tf.variable_scope('autoencoder') as scope:
        # Pass input to encoder to obtain encoding
        # Pass encoding into decoder to obtain reconstructed image
        # Return input image (placeholder) and reconstructed image
        pass
py
1a5a77eeedc81eef41973d3c68e5d3d88e459b35
""" MNIST example with training and validation monitoring using TensorboardX and Tensorboard. Requirements: TensorboardX (https://github.com/lanpa/tensorboard-pytorch): `pip install tensorboardX` Tensorboard: `pip install tensorflow` (or just install tensorboard without the rest of tensorflow) Usage: Start tensorboard: ```bash tensorboard --logdir=/tmp/tensorboard_logs/ ``` Run the example: ```bash python mnist_with_tensorboardx.py --log_dir=/tmp/tensorboard_logs ``` """ from __future__ import print_function from argparse import ArgumentParser import torch from torch.utils.data import DataLoader from torch import nn import torch.nn.functional as F from torch.optim import SGD from torchvision.datasets import MNIST from torchvision.transforms import Compose, ToTensor, Normalize try: from tensorboardX import SummaryWriter except ImportError: raise RuntimeError("No tensorboardX package is found. Please install with the command: \npip install tensorboardX") from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator from ignite.metrics import Accuracy, Loss class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=-1) def get_data_loaders(train_batch_size, val_batch_size): data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True) val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False) return train_loader, val_loader def create_summary_writer(model, data_loader, log_dir): writer = SummaryWriter(logdir=log_dir) data_loader_iter = iter(data_loader) x, y = next(data_loader_iter) try: writer.add_graph(model, x) except Exception as e: print("Failed to save model graph: {}".format(e)) return writer def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir): train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) model = Net() writer = create_summary_writer(model, train_loader, log_dir) device = 'cpu' if torch.cuda.is_available(): device = 'cuda' optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device) evaluator = create_supervised_evaluator(model, metrics={'accuracy': Accuracy(), 'nll': Loss(F.nll_loss)}, device=device) @trainer.on(Events.ITERATION_COMPLETED(every=log_interval)) def log_training_loss(engine): print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}" "".format(engine.state.epoch, iter, len(train_loader), engine.state.output)) writer.add_scalar("training/loss", engine.state.output, engine.state.iteration) @trainer.on(Events.EPOCH_COMPLETED) def log_training_results(engine): evaluator.run(train_loader) metrics = evaluator.state.metrics avg_accuracy = metrics['accuracy'] avg_nll = metrics['nll'] print("Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" .format(engine.state.epoch, avg_accuracy, avg_nll)) writer.add_scalar("training/avg_loss", 
avg_nll, engine.state.epoch) writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch) @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(engine): evaluator.run(val_loader) metrics = evaluator.state.metrics avg_accuracy = metrics['accuracy'] avg_nll = metrics['nll'] print("Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}" .format(engine.state.epoch, avg_accuracy, avg_nll)) writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch) writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch) # kick everything off trainer.run(train_loader, max_epochs=epochs) writer.close() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument('--batch_size', type=int, default=64, help='input batch size for training (default: 64)') parser.add_argument('--val_batch_size', type=int, default=1000, help='input batch size for validation (default: 1000)') parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train (default: 10)') parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)') parser.add_argument('--momentum', type=float, default=0.5, help='SGD momentum (default: 0.5)') parser.add_argument('--log_interval', type=int, default=10, help='how many batches to wait before logging training status') parser.add_argument("--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output") args = parser.parse_args() run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir)
py
1a5a781cb3e3ca196ddebdb17768b59026646bbb
import csv import email.message import json import logging import pathlib import re import zipfile from typing import ( IO, TYPE_CHECKING, Collection, Container, Iterable, Iterator, List, Optional, Tuple, Union, ) from pip._vendor.packaging.requirements import Requirement from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet from pip._vendor.packaging.utils import NormalizedName from pip._vendor.packaging.version import LegacyVersion, Version from pip._internal.exceptions import NoneMetadataError from pip._internal.locations import site_packages, user_site from pip._internal.models.direct_url import ( DIRECT_URL_METADATA_NAME, DirectUrl, DirectUrlValidationError, ) from pip._internal.utils.compat import stdlib_pkgs # TODO: Move definition here. from pip._internal.utils.egg_link import ( egg_link_path_from_location, egg_link_path_from_sys_path, ) from pip._internal.utils.misc import is_local, normalize_path from pip._internal.utils.urls import url_to_path if TYPE_CHECKING: from typing import Protocol else: Protocol = object DistributionVersion = Union[LegacyVersion, Version] InfoPath = Union[str, pathlib.PurePosixPath] logger = logging.getLogger(__name__) class BaseEntryPoint(Protocol): @property def name(self) -> str: raise NotImplementedError() @property def value(self) -> str: raise NotImplementedError() @property def group(self) -> str: raise NotImplementedError() def _convert_installed_files_path( entry: Tuple[str, ...], info: Tuple[str, ...], ) -> str: """Convert a legacy installed-files.txt path into modern RECORD path. The legacy format stores paths relative to the info directory, while the modern format stores paths relative to the package root, e.g. the site-packages directory. :param entry: Path parts of the installed-files.txt entry. :param info: Path parts of the egg-info directory relative to package root. :returns: The converted entry. For best compatibility with symlinks, this does not use ``abspath()`` or ``Path.resolve()``, but tries to work with path parts: 1. While ``entry`` starts with ``..``, remove the equal amounts of parts from ``info``; if ``info`` is empty, start appending ``..`` instead. 2. Join the two directly. """ while entry and entry[0] == "..": if not info or info[-1] == "..": info += ("..",) else: info = info[:-1] entry = entry[1:] return str(pathlib.Path(*info, *entry)) class BaseDistribution(Protocol): def __repr__(self) -> str: return f"{self.raw_name} {self.version} ({self.location})" def __str__(self) -> str: return f"{self.raw_name} {self.version}" @property def location(self) -> Optional[str]: """Where the distribution is loaded from. A string value is not necessarily a filesystem path, since distributions can be loaded from other sources, e.g. arbitrary zip archives. ``None`` means the distribution is created in-memory. Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If this is a symbolic link, we want to preserve the relative path between it and files in the distribution. """ raise NotImplementedError() @property def editable_project_location(self) -> Optional[str]: """The project location for editable distributions. This is the directory where pyproject.toml or setup.py is located. None if the distribution is not installed in editable mode. """ # TODO: this property is relatively costly to compute, memoize it ? 
direct_url = self.direct_url if direct_url: if direct_url.is_local_editable(): return url_to_path(direct_url.url) else: # Search for an .egg-link file by walking sys.path, as it was # done before by dist_is_editable(). egg_link_path = egg_link_path_from_sys_path(self.raw_name) if egg_link_path: # TODO: get project location from second line of egg_link file # (https://github.com/pypa/pip/issues/10243) return self.location return None @property def installed_location(self) -> Optional[str]: """The distribution's "installed" location. This should generally be a ``site-packages`` directory. This is usually ``dist.location``, except for legacy develop-installed packages, where ``dist.location`` is the source code location, and this is where the ``.egg-link`` file is. The returned location is normalized (in particular, with symlinks removed). """ egg_link = egg_link_path_from_location(self.raw_name) if egg_link: location = egg_link elif self.location: location = self.location else: return None return normalize_path(location) @property def info_location(self) -> Optional[str]: """Location of the .[egg|dist]-info directory or file. Similarly to ``location``, a string value is not necessarily a filesystem path. ``None`` means the distribution is created in-memory. For a modern .dist-info installation on disk, this should be something like ``{location}/{raw_name}-{version}.dist-info``. Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If this is a symbolic link, we want to preserve the relative path between it and other files in the distribution. """ raise NotImplementedError() @property def installed_by_distutils(self) -> bool: """Whether this distribution is installed with legacy distutils format. A distribution installed with "raw" distutils not patched by setuptools uses one single file at ``info_location`` to store metadata. We need to treat this specially on uninstallation. """ info_location = self.info_location if not info_location: return False return pathlib.Path(info_location).is_file() @property def installed_as_egg(self) -> bool: """Whether this distribution is installed as an egg. This usually indicates the distribution was installed by (older versions of) easy_install. """ location = self.location if not location: return False return location.endswith(".egg") @property def installed_with_setuptools_egg_info(self) -> bool: """Whether this distribution is installed with the ``.egg-info`` format. This usually indicates the distribution was installed with setuptools with an old pip version or with ``single-version-externally-managed``. Note that this ensure the metadata store is a directory. distutils can also installs an ``.egg-info``, but as a file, not a directory. This property is *False* for that case. Also see ``installed_by_distutils``. """ info_location = self.info_location if not info_location: return False if not info_location.endswith(".egg-info"): return False return pathlib.Path(info_location).is_dir() @property def installed_with_dist_info(self) -> bool: """Whether this distribution is installed with the "modern format". This indicates a "modern" installation, e.g. storing metadata in the ``.dist-info`` directory. This applies to installations made by setuptools (but through pip, not directly), or anything using the standardized build backend interface (PEP 517). 
""" info_location = self.info_location if not info_location: return False if not info_location.endswith(".dist-info"): return False return pathlib.Path(info_location).is_dir() @property def canonical_name(self) -> NormalizedName: raise NotImplementedError() @property def version(self) -> DistributionVersion: raise NotImplementedError() @property def setuptools_filename(self) -> str: """Convert a project name to its setuptools-compatible filename. This is a copy of ``pkg_resources.to_filename()`` for compatibility. """ return self.raw_name.replace("-", "_") @property def direct_url(self) -> Optional[DirectUrl]: """Obtain a DirectUrl from this distribution. Returns None if the distribution has no `direct_url.json` metadata, or if `direct_url.json` is invalid. """ try: content = self.read_text(DIRECT_URL_METADATA_NAME) except FileNotFoundError: return None try: return DirectUrl.from_json(content) except ( UnicodeDecodeError, json.JSONDecodeError, DirectUrlValidationError, ) as e: logger.warning( "Error parsing %s for %s: %s", DIRECT_URL_METADATA_NAME, self.canonical_name, e, ) return None @property def installer(self) -> str: try: installer_text = self.read_text("INSTALLER") except (OSError, ValueError, NoneMetadataError): return "" # Fail silently if the installer file cannot be read. for line in installer_text.splitlines(): cleaned_line = line.strip() if cleaned_line: return cleaned_line return "" @property def editable(self) -> bool: return bool(self.editable_project_location) @property def local(self) -> bool: """If distribution is installed in the current virtual environment. Always True if we're not in a virtualenv. """ if self.installed_location is None: return False return is_local(self.installed_location) @property def in_usersite(self) -> bool: if self.installed_location is None or user_site is None: return False return self.installed_location.startswith(normalize_path(user_site)) @property def in_site_packages(self) -> bool: if self.installed_location is None or site_packages is None: return False return self.installed_location.startswith(normalize_path(site_packages)) def is_file(self, path: InfoPath) -> bool: """Check whether an entry in the info directory is a file.""" raise NotImplementedError() def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]: """Iterate through a directory in the info directory. Each item yielded would be a path relative to the info directory. :raise FileNotFoundError: If ``name`` does not exist in the directory. :raise NotADirectoryError: If ``name`` does not point to a directory. """ raise NotImplementedError() def read_text(self, path: InfoPath) -> str: """Read a file in the info directory. :raise FileNotFoundError: If ``name`` does not exist in the directory. :raise NoneMetadataError: If ``name`` exists in the info directory, but cannot be read. """ raise NotImplementedError() def iter_entry_points(self) -> Iterable[BaseEntryPoint]: raise NotImplementedError() @property def metadata(self) -> email.message.Message: """Metadata of distribution parsed from e.g. METADATA or PKG-INFO. This should return an empty message if the metadata file is unavailable. :raises NoneMetadataError: If the metadata file is available, but does not contain valid metadata. 
""" raise NotImplementedError() @property def metadata_version(self) -> Optional[str]: """Value of "Metadata-Version:" in distribution metadata, if available.""" return self.metadata.get("Metadata-Version") @property def raw_name(self) -> str: """Value of "Name:" in distribution metadata.""" # The metadata should NEVER be missing the Name: key, but if it somehow # does, fall back to the known canonical name. return self.metadata.get("Name", self.canonical_name) @property def requires_python(self) -> SpecifierSet: """Value of "Requires-Python:" in distribution metadata. If the key does not exist or contains an invalid value, an empty SpecifierSet should be returned. """ value = self.metadata.get("Requires-Python") if value is None: return SpecifierSet() try: # Convert to str to satisfy the type checker; this can be a Header object. spec = SpecifierSet(str(value)) except InvalidSpecifier as e: message = "Package %r has an invalid Requires-Python: %s" logger.warning(message, self.raw_name, e) return SpecifierSet() return spec def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: """Dependencies of this distribution. For modern .dist-info distributions, this is the collection of "Requires-Dist:" entries in distribution metadata. """ raise NotImplementedError() def iter_provided_extras(self) -> Iterable[str]: """Extras provided by this distribution. For modern .dist-info distributions, this is the collection of "Provides-Extra:" entries in distribution metadata. """ raise NotImplementedError() def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]: try: text = self.read_text("RECORD") except FileNotFoundError: return None # This extra Path-str cast normalizes entries. return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines())) def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]: try: text = self.read_text("installed-files.txt") except FileNotFoundError: return None paths = (p for p in text.splitlines(keepends=False) if p) root = self.location info = self.info_location if root is None or info is None: return paths try: info_rel = pathlib.Path(info).relative_to(root) except ValueError: # info is not relative to root. return paths if not info_rel.parts: # info *is* root. return paths return ( _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts) for p in paths ) def iter_declared_entries(self) -> Optional[Iterator[str]]: """Iterate through file entires declared in this distribution. For modern .dist-info distributions, this is the files listed in the ``RECORD`` metadata file. For legacy setuptools distributions, this comes from ``installed-files.txt``, with entries normalized to be compatible with the format used by ``RECORD``. :return: An iterator for listed entries, or None if the distribution contains neither ``RECORD`` nor ``installed-files.txt``. """ return ( self._iter_declared_entries_from_record() or self._iter_declared_entries_from_legacy() ) class BaseEnvironment: """An environment containing distributions to introspect.""" @classmethod def default(cls) -> "BaseEnvironment": raise NotImplementedError() @classmethod def from_paths(cls, paths: Optional[List[str]]) -> "BaseEnvironment": raise NotImplementedError() def get_distribution(self, name: str) -> Optional["BaseDistribution"]: """Given a requirement name, return the installed distributions. The name may not be normalized. The implementation must canonicalize it for lookup. 
""" raise NotImplementedError() def _iter_distributions(self) -> Iterator["BaseDistribution"]: """Iterate through installed distributions. This function should be implemented by subclass, but never called directly. Use the public ``iter_distribution()`` instead, which implements additional logic to make sure the distributions are valid. """ raise NotImplementedError() def iter_distributions(self) -> Iterator["BaseDistribution"]: """Iterate through installed distributions.""" for dist in self._iter_distributions(): # Make sure the distribution actually comes from a valid Python # packaging distribution. Pip's AdjacentTempDirectory leaves folders # e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The # valid project name pattern is taken from PEP 508. project_name_valid = re.match( r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", dist.canonical_name, flags=re.IGNORECASE, ) if not project_name_valid: logger.warning( "Ignoring invalid distribution %s (%s)", dist.canonical_name, dist.location, ) continue yield dist def iter_installed_distributions( self, local_only: bool = True, skip: Container[str] = stdlib_pkgs, include_editables: bool = True, editables_only: bool = False, user_only: bool = False, ) -> Iterator[BaseDistribution]: """Return a list of installed distributions. :param local_only: If True (default), only return installations local to the current virtualenv, if in a virtualenv. :param skip: An iterable of canonicalized project names to ignore; defaults to ``stdlib_pkgs``. :param include_editables: If False, don't report editables. :param editables_only: If True, only report editables. :param user_only: If True, only report installations in the user site directory. """ it = self.iter_distributions() if local_only: it = (d for d in it if d.local) if not include_editables: it = (d for d in it if not d.editable) if editables_only: it = (d for d in it if d.editable) if user_only: it = (d for d in it if d.in_usersite) return (d for d in it if d.canonical_name not in skip) class Wheel(Protocol): location: str def as_zipfile(self) -> zipfile.ZipFile: raise NotImplementedError() class FilesystemWheel(Wheel): def __init__(self, location: str) -> None: self.location = location def as_zipfile(self) -> zipfile.ZipFile: return zipfile.ZipFile(self.location, allowZip64=True) class MemoryWheel(Wheel): def __init__(self, location: str, stream: IO[bytes]) -> None: self.location = location self.stream = stream def as_zipfile(self) -> zipfile.ZipFile: return zipfile.ZipFile(self.stream, allowZip64=True)
py
1a5a783aa9031ec4409365b376a0473bb1ac2664
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2021/4/3 0008 13:32
from function_scheduling_distributed_framework.publishers.base_publisher import AbstractPublisher
from function_scheduling_distributed_framework.utils import RedisMixin


class RedisStreamPublisher(AbstractPublisher, RedisMixin):
    """
    Implemented with the redis stream structure as the broker. Requires redis 5.0 or above;
    the redis stream structure is redis's message queue and is far more capable than the list structure.
    """
    _has__check_redis_version = False

    def _check_redis_version(self):
        redis_server_info_dict = self.redis_db_frame_version3.info()
        if float(redis_server_info_dict['redis_version'][0]) < 5:
            raise EnvironmentError('The redis server must be version 5.0 or above to support the stream data structure; '
                                   'please upgrade the server, or otherwise use the REDIS_ACK_ABLE mode based on the redis list structure')
        if self.redis_db_frame_version3.type(self._queue_name) == 'list':
            raise EnvironmentError(f'A key named {self._queue_name} already exists and its type is list; '
                                   f'you must choose another queue name or delete this list-typed key. '
                                   f'RedisStreamConsumer uses the stream data structure')
        self._has__check_redis_version = True

    def concrete_realization_of_publish(self, msg):
        # The redis server must be 5.0 or above, and this key must be of type stream, not list.
        if not self._has__check_redis_version:
            self._check_redis_version()
        self.redis_db_frame_version3.xadd(self._queue_name, {"": msg})

    def clear(self):
        self.redis_db_frame.delete(self._queue_name)
        self.logger.warning(f'Cleared the messages in queue {self._queue_name}')

    def get_message_count(self):
        # nb_print(self.redis_db7,self._queue_name)
        return self.redis_db_frame_version3.xlen(self._queue_name)

    def close(self):
        # self.redis_db7.connection_pool.disconnect()
        pass
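For reference, a standalone sketch of the raw redis-py stream commands this publisher builds on, using a plain redis.Redis client instead of the framework's RedisMixin; host, port and queue name below are placeholders:

import json
import redis

r = redis.Redis(host="127.0.0.1", port=6379, decode_responses=True)
queue_name = "demo_stream_queue"

# XADD appends one stream entry; the publisher above stores the whole serialized
# message under a single empty field name.
r.xadd(queue_name, {"": json.dumps({"x": 1, "y": 2})})

print(r.xlen(queue_name))              # entry count, as used by get_message_count()
print(r.xrange(queue_name, count=1))   # peek at the oldest entry without consuming it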
py
1a5a79131f7e7257846dd278a135d9442e29a953
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # noqa from django.conf.urls import url from gcloud.periodictask import api urlpatterns = [ url(r'^api/enabled/(?P<biz_cc_id>\d+)/(?P<task_id>\d+)/$', api.set_enabled_for_periodic_task), url(r'^api/cron/(?P<biz_cc_id>\d+)/(?P<task_id>\d+)/$', api.modify_cron), url(r'^api/constants/(?P<biz_cc_id>\d+)/(?P<task_id>\d+)/$', api.modify_constants) ]
py
1a5a79718436e52a84e4f5f68e269ffe6cfd2df4
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables __all__ = ['RegisteredPrefix'] class RegisteredPrefix(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, peering_name: Optional[pulumi.Input[str]] = None, prefix: Optional[pulumi.Input[str]] = None, registered_prefix_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ The customer's prefix that is registered by the peering service provider. API Version: 2020-10-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] peering_name: The name of the peering. :param pulumi.Input[str] prefix: The customer's prefix from which traffic originates. :param pulumi.Input[str] registered_prefix_name: The name of the registered prefix. :param pulumi.Input[str] resource_group_name: The name of the resource group. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if peering_name is None and not opts.urn: raise TypeError("Missing required property 'peering_name'") __props__['peering_name'] = peering_name __props__['prefix'] = prefix __props__['registered_prefix_name'] = registered_prefix_name if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['error_message'] = None __props__['name'] = None __props__['peering_service_prefix_key'] = None __props__['prefix_validation_state'] = None __props__['provisioning_state'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:peering/latest:RegisteredPrefix"), pulumi.Alias(type_="azure-nextgen:peering/v20200101preview:RegisteredPrefix"), pulumi.Alias(type_="azure-nextgen:peering/v20200401:RegisteredPrefix"), pulumi.Alias(type_="azure-nextgen:peering/v20201001:RegisteredPrefix")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(RegisteredPrefix, __self__).__init__( 'azure-nextgen:peering:RegisteredPrefix', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'RegisteredPrefix': """ Get an existing RegisteredPrefix resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. 
:param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return RegisteredPrefix(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="errorMessage") def error_message(self) -> pulumi.Output[str]: """ The error message associated with the validation state, if any. """ return pulumi.get(self, "error_message") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the resource. """ return pulumi.get(self, "name") @property @pulumi.getter(name="peeringServicePrefixKey") def peering_service_prefix_key(self) -> pulumi.Output[str]: """ The peering service prefix key that is to be shared with the customer. """ return pulumi.get(self, "peering_service_prefix_key") @property @pulumi.getter def prefix(self) -> pulumi.Output[Optional[str]]: """ The customer's prefix from which traffic originates. """ return pulumi.get(self, "prefix") @property @pulumi.getter(name="prefixValidationState") def prefix_validation_state(self) -> pulumi.Output[str]: """ The prefix validation state. """ return pulumi.get(self, "prefix_validation_state") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioning state of the resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of the resource. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
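A minimal usage sketch for the generated resource above; the import path, resource names and prefix are assumptions, and a configured Pulumi program with Azure credentials is taken as given:

import pulumi
# Import path is a guess based on the provider package name; adjust to the actual SDK layout.
from pulumi_azure_nextgen.peering import RegisteredPrefix

registered = RegisteredPrefix(
    "exampleRegisteredPrefix",
    peering_name="examplePeering",
    registered_prefix_name="examplePrefix",
    resource_group_name="example-rg",
    prefix="192.0.2.0/24",   # documentation prefix, stands in for a real customer prefix
)

pulumi.export("prefix_key", registered.peering_service_prefix_key)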
py
1a5a79b8581acb05b32bdb5926b74c5cd25540ff
from abstract.instruccion import *
from tools.console_text import *
from tools.tabla_tipos import *
from instruccion.create_column import *
from storage import jsonMode as funciones
from error.errores import *
from tools.tabla_simbolos import *


class create_table(instruccion):
    def __init__(self, id_table, columnas, inherits_s, line, column, num_nodo):
        super().__init__(line, column)
        self.id_table = id_table
        self.columnas = columnas
        self.inherits_s = inherits_s

        # CREATE TABLE AST node
        self.nodo = nodo_AST('CREATE TABLE', num_nodo)
        self.nodo.hijos.append(nodo_AST('CREATE TABLE', num_nodo+1))
        self.nodo.hijos.append(nodo_AST(id_table, num_nodo+2))
        self.nodo.hijos.append(nodo_AST('(', num_nodo+3))
        for columna in columnas:
            self.nodo.hijos.append(columna.nodo)
        self.nodo.hijos.append(nodo_AST(')', num_nodo+4))
        if(inherits_s != None):
            self.nodo.hijos.append(inherits_s.nodo)

        # Grammar report
        self.grammar_ = '<TR><TD> INSTRUCCION ::= CREATE TABLE ' + id_table + ' ( COLUMNAS ) INHERITS </TD><TD> new create_table(' + id_table + ', COLUMNAS, INHERITS); </TD></TR>\n'
        self.grammar_ += '<TR><TD> COLUMNAS ::= COLUMNA </TD><TD> COLUMNAS = []; </TD></TR>\n'
        for columna in columnas:
            self.grammar_ += columna.grammar_
        if inherits_s != None:
            self.grammar_ += inherits_s.grammar_
        else:
            self.grammar_ += '<TR><TD> INHERITS ::= EPSILON </TD><TD> INHERITS = None; </TD></TR>\n'

    def ejecutar(self):
        use_actual_db = get_actual_use()

        # Count the number of columns in the table
        count_rows = 0
        for row in self.columnas:
            if isinstance(row, create_column):
                count_rows += 1

        # Create the table
        new_table = funciones.createTable(use_actual_db, self.id_table, count_rows)
        # Return value: 0 operation succeeded, 1 error during the operation, 2 database does not exist, 3 table already exists.
        if new_table == 0:
            # Create the symbol-table entry for the table
            new_tb = symbol_tb(self.id_table)
            ts.add_tb(use_actual_db, new_tb)

            # Create the columns
            for row in self.columnas:
                if isinstance(row, create_column):
                    row.ejecutar(self.id_table)

            add_text("Tabla creada con exito - " + self.id_table + ' - en base de datos: ' + use_actual_db + '\n')
        elif new_table == 1:
            errores.append(nodo_error(self.line, self.column, 'Tabla no puedo ser creada con exito - ' + self.id_table + ' -', 'Semántico'))
            add_text('Tabla no puedo ser creada con exito - ' + self.id_table + ' -\n')
        elif new_table == 2:
            errores.append(nodo_error(self.line, self.column, 'No existe la base de datos - ' + use_actual_db + ' - ', 'Semántico'))
            add_text('No existe la base de datos - ' + use_actual_db + ' - \n')
        elif new_table == 3:
            errores.append(nodo_error(self.line, self.column, 'Ya existe una tabla con el nombre - ' + self.id_table + ' -', 'Semántico'))
            add_text('Ya existe una tabla con el nombre - ' + self.id_table + ' - \n')
py
1a5a79ed0eea006343e9a1ac1a6a6eb8b0aa9b8a
# encoding: utf-8 """ @version: v1.0 @author: Richard @license: Apache Licence @contact: [email protected] @site: @software: PyCharm @time: 2019/9/22 12:20 """ from pprint import pprint as pp
py
1a5a7a55ad8a62d3eedcf22ab4c5cc66f0621482
from collections import defaultdict from decimal import Decimal from django.db.models import F from django.test import TransactionTestCase from capone.api.actions import create_transaction from capone.api.actions import credit from capone.api.actions import debit from capone.api.actions import void_transaction from capone.api.queries import get_balances_for_object from capone.models import Ledger from capone.models import LedgerBalance from capone.models import LedgerEntry from capone.models import Transaction from capone.tests.factories import LedgerFactory from capone.tests.factories import OrderFactory from capone.tests.factories import UserFactory from capone.tests.models import Order from capone.utils import rebuild_ledger_balances class TestLedgerBalances(TransactionTestCase): """ Test that `LedgerBalances` are automatically created and updated. """ amount = Decimal('50.00') def setUp(self): self.order_1, self.order_2 = OrderFactory.create_batch(2) self.ar_ledger = LedgerFactory(name='A/R') self.cash_ledger = LedgerFactory(name='Cash') self.other_ledger = LedgerFactory(name='Other') self.user = UserFactory() def tearDown(self): Transaction.objects.all().delete() ( Ledger.objects .filter(id__in=(self.ar_ledger.id, self.cash_ledger.id)) .delete() ) self.order_1.delete() self.order_2.delete() self.user.delete() def assert_objects_have_ledger_balances(self, *object_ledger_balances): obj_to_ledger_balances = defaultdict(dict) for obj, ledger, balance in object_ledger_balances: if balance is not None: obj_to_ledger_balances[obj][ledger] = balance for obj, expected_balances in obj_to_ledger_balances.items(): actual_balances = get_balances_for_object(obj) self.assertEqual(actual_balances, expected_balances) self.assertNotIn(self.other_ledger, actual_balances) self.assertEqual(actual_balances[self.other_ledger], Decimal(0)) def add_transaction(self, orders): return create_transaction( self.user, evidence=orders, ledger_entries=[ LedgerEntry( ledger=self.ar_ledger, amount=credit(self.amount)), LedgerEntry( ledger=self.cash_ledger, amount=debit(self.amount)), ], ) def test_no_balances(self): self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, None), (self.order_1, self.cash_ledger, None), (self.order_2, self.ar_ledger, None), (self.order_2, self.cash_ledger, None), ) def test_ledger_balance_update(self): self.add_transaction([self.order_1]) self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount)), (self.order_1, self.cash_ledger, debit(self.amount)), (self.order_2, self.ar_ledger, None), (self.order_2, self.cash_ledger, None), ) self.add_transaction([self.order_2]) self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount)), (self.order_1, self.cash_ledger, debit(self.amount)), (self.order_2, self.ar_ledger, credit(self.amount)), (self.order_2, self.cash_ledger, debit(self.amount)), ) self.add_transaction([self.order_1]) self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount) * 2), (self.order_1, self.cash_ledger, debit(self.amount) * 2), (self.order_2, self.ar_ledger, credit(self.amount)), (self.order_2, self.cash_ledger, debit(self.amount)), ) transaction = self.add_transaction([self.order_1, self.order_2]) self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount) * 3), (self.order_1, self.cash_ledger, debit(self.amount) * 3), (self.order_2, self.ar_ledger, credit(self.amount) * 2), (self.order_2, self.cash_ledger, 
debit(self.amount) * 2), ) void_transaction(transaction, self.user) self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount) * 2), (self.order_1, self.cash_ledger, debit(self.amount) * 2), (self.order_2, self.ar_ledger, credit(self.amount)), (self.order_2, self.cash_ledger, debit(self.amount)), ) def test_rebuild_ledger_balance(self): rebuild_ledger_balances() self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, None), (self.order_1, self.cash_ledger, None), (self.order_2, self.ar_ledger, None), (self.order_2, self.cash_ledger, None), ) self.add_transaction([self.order_1]) rebuild_ledger_balances() self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount)), (self.order_1, self.cash_ledger, debit(self.amount)), (self.order_2, self.ar_ledger, None), (self.order_2, self.cash_ledger, None), ) self.add_transaction([self.order_2]) rebuild_ledger_balances() self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount)), (self.order_1, self.cash_ledger, debit(self.amount)), (self.order_2, self.ar_ledger, credit(self.amount)), (self.order_2, self.cash_ledger, debit(self.amount)), ) self.add_transaction([self.order_1]) rebuild_ledger_balances() self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount) * 2), (self.order_1, self.cash_ledger, debit(self.amount) * 2), (self.order_2, self.ar_ledger, credit(self.amount)), (self.order_2, self.cash_ledger, debit(self.amount)), ) transaction = self.add_transaction([self.order_1, self.order_2]) self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount) * 3), (self.order_1, self.cash_ledger, debit(self.amount) * 3), (self.order_2, self.ar_ledger, credit(self.amount) * 2), (self.order_2, self.cash_ledger, debit(self.amount) * 2), ) void_transaction(transaction, self.user) rebuild_ledger_balances() self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount) * 2), (self.order_1, self.cash_ledger, debit(self.amount) * 2), (self.order_2, self.ar_ledger, credit(self.amount)), (self.order_2, self.cash_ledger, debit(self.amount)), ) LedgerBalance.objects.update(balance=Decimal('1.00')) LedgerBalance.objects.first().delete() rebuild_ledger_balances() self.assert_objects_have_ledger_balances( (self.order_1, self.ar_ledger, credit(self.amount) * 2), (self.order_1, self.cash_ledger, debit(self.amount) * 2), (self.order_2, self.ar_ledger, credit(self.amount)), (self.order_2, self.cash_ledger, debit(self.amount)), ) def test_ledger_balances_filtering(self): Order.objects.update(amount=self.amount * 2) def all_cash_orders(): return set( Order.objects .filter( id__in=(self.order_1.id, self.order_2.id), ledger_balances__ledger=self.cash_ledger, ledger_balances__balance=F('amount'), ) ) self.assertEqual(all_cash_orders(), set()) self.add_transaction([self.order_1]) self.assertEqual(all_cash_orders(), set()) self.add_transaction([self.order_1]) self.assertEqual(all_cash_orders(), {self.order_1}) self.add_transaction([self.order_2]) self.assertEqual(all_cash_orders(), {self.order_1}) self.add_transaction([self.order_2]) self.assertEqual(all_cash_orders(), {self.order_1, self.order_2})
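A tiny sketch of the lookup behaviour the assertions above rely on: a ledger with no entries for an object should read as zero rather than raising. This only mirrors what get_balances_for_object() is expected to return conceptually; it is not capone's actual implementation:

from collections import defaultdict
from decimal import Decimal

balances = defaultdict(Decimal)        # balance map for one evidence object
balances["cash"] += Decimal("50.00")   # one posted entry

print("a/r" in balances)               # False -- nothing posted to this ledger yet
print(balances["a/r"])                 # 0     -- missing ledgers default to Decimal()
print(balances["cash"])                # 50.00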
py
1a5a7ab4a884240ca5a5d726a631d54c875f2b08
from enum import Enum class DataSections(Enum): RACE = 0 GENDER = 1 JOB = 2 SENIORITY = 3 SALARY = 4
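A short usage sketch for the enum above; the sample record is made up, and the import below assumes a hypothetical module name for the file:

from data_sections import DataSections  # hypothetical module name

# Index into a row of raw data by section.
row = ["White", "F", "Engineer", "Senior", "85000"]
print(row[DataSections.SALARY.value])   # -> 85000

# Map a raw integer back to its section name.
print(DataSections(3).name)             # -> SENIORITY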
py
1a5a7acca133007279711f5cfb100ebd87312f24
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2019, John McNamara, [email protected] # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename('chart_crossing02.xlsx') def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({'type': 'column'}) chart.axis_ids = [43812352, 43814272] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column('A1', data[0]) worksheet.write_column('B1', data[1]) worksheet.write_column('C1', data[2]) chart.add_series({'values': '=Sheet1!$A$1:$A$5'}) chart.add_series({'values': '=Sheet1!$B$1:$B$5'}) chart.add_series({'values': '=Sheet1!$C$1:$C$5'}) chart.set_x_axis({'crossing': 3}) chart.set_y_axis({'crossing': 8}) worksheet.insert_chart('E9', chart) workbook.close() self.assertExcelEqual()
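The same chart can be produced through XlsxWriter's public API outside the internal comparison harness; this sketch writes an ordinary file (the output name is arbitrary) and shows that 'crossing' sets where the opposite axis crosses the one being configured:

import xlsxwriter

workbook = xlsxwriter.Workbook("chart_crossing_demo.xlsx")
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})

data = [[1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15]]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])

for col in ("A", "B", "C"):
    chart.add_series({"values": "=Sheet1!${0}$1:${0}$5".format(col)})

chart.set_x_axis({"crossing": 3})   # y-axis crosses the x-axis at category 3
chart.set_y_axis({"crossing": 8})   # x-axis crosses the y-axis at value 8

worksheet.insert_chart("E9", chart)
workbook.close()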
py
1a5a7d60b2894702b760d6c756fba970d8597166
import urllib.request
from bs4 import BeautifulSoup
from assets import data
from assets import functions
from models.Fish import Fish

page = functions.scrape_file("fish.html")

table = page.find('table', {"class": "wikitable"})
tableRows = table.find_all('tr')

rowCount = 0

for row in tableRows:
    rowCount = rowCount + 1
    if rowCount != 1:
        rowData = row.find_all('td')
        dataCount = 0
        for dataCol in rowData:
            dataCount = dataCount + 1
            ## NAME
            if dataCount == 1:
                fishName = dataCol.text
                print(fishName)
            ## LOCATION
            if dataCount == 2:
                fishLocation = dataCol.text
            ## BUY
            if dataCount == 3:
                fishBuy = dataCol.text
            ## SELL
            if dataCount == 4:
                fishSell = dataCol.text
            ## RARE SELL
            if dataCount == 5:
                fishRareSell = dataCol.text

        print( '-------' )
        fish = Fish(fishName.rstrip(), fishLocation.rstrip(), '', '', fishBuy.rstrip(), fishSell.rstrip(), fishRareSell.rstrip())
        functions.add_object_json_to_file(fish, "fish_1.json")
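A compact alternative sketch for the per-row handling above, reusing tableRows, Fish and functions from the script: skip the header row and unpack the five cells positionally instead of counting with rowCount/dataCount (it assumes each data row has at least five <td> cells, which the original loop also relies on):

for row in tableRows[1:]:
    cells = [td.text.rstrip() for td in row.find_all('td')]
    if len(cells) < 5:
        continue  # defensive: skip malformed rows
    name, location, buy, sell, rare_sell = cells[:5]
    fish = Fish(name, location, '', '', buy, sell, rare_sell)
    functions.add_object_json_to_file(fish, "fish_1.json")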
py
1a5a7e3f9aad3f0be5b7a6a2661007462472c465
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/furniture/modern/shared_searchable_desk_01.iff" result.attribute_template_id = 6 result.stfName("frn_n","frn_searchable_desk_01") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
py
1a5a7fd41fe1bc8036557e62c1c7b97f236cdb64
r""" Orthogonal arrays (OA) This module gathers some construction related to orthogonal arrays (or transversal designs). One can build an `OA(k,n)` (or check that it can be built) from the Sage console with ``designs.orthogonal_arrays.build``:: sage: OA = designs.orthogonal_arrays.build(4,8) See also the modules :mod:`~sage.combinat.designs.orthogonal_arrays_build_recursive` or :mod:`~sage.combinat.designs.orthogonal_arrays_find_recursive` for recursive constructions. This module defines the following functions: .. csv-table:: :class: contentstable :widths: 30, 70 :delim: | :meth:`orthogonal_array` | Return an orthogonal array of parameters `k,n,t`. :meth:`transversal_design` | Return a transversal design of parameters `k,n`. :meth:`incomplete_orthogonal_array` | Return an `OA(k,n)-\sum_{1\leq i\leq x} OA(k,s_i)`. .. csv-table:: :class: contentstable :widths: 30, 70 :delim: | :meth:`is_transversal_design` | Check that a given set of blocks ``B`` is a transversal design. :meth:`~sage.combinat.designs.designs_pyx.is_orthogonal_array` | Check that the integer matrix `OA` is an `OA(k,n,t)`. :meth:`wilson_construction` | Return a `OA(k,rm+u)` from a truncated `OA(k+s,r)` by Wilson's construction. :meth:`TD_product` | Return the product of two transversal designs. :meth:`OA_find_disjoint_blocks` | Return `x` disjoint blocks contained in a given `OA(k,n)`. :meth:`OA_relabel` | Return a relabelled version of the OA. :meth:`OA_from_quasi_difference_matrix` | Return an Orthogonal Array from a Quasi-Difference matrix :meth:`OA_from_Vmt` | Return an Orthogonal Array from a `V(m,t)` :meth:`OA_from_PBD` | Return an `OA(k,n)` from a PBD :meth:`OA_n_times_2_pow_c_from_matrix` | Return an `OA(k, \vert G\vert \cdot 2^c)` from a constrained `(G,k-1,2)`-difference matrix. :meth:`OA_from_wider_OA` | Return the first `k` columns of `OA`. :meth:`QDM_from_Vmt` | Return a QDM a `V(m,t)` REFERENCES: .. [CD96] Making the MOLS table Charles Colbourn and Jeffrey Dinitz Computational and constructive design theory vol 368,pages 67-134 1996 Functions --------- """ from __future__ import print_function from __future__ import absolute_import from sage.misc.cachefunc import cached_function from sage.categories.sets_cat import EmptySetError from sage.misc.unknown import Unknown from .designs_pyx import is_orthogonal_array from .group_divisible_designs import GroupDivisibleDesign from .designs_pyx import _OA_cache_set, _OA_cache_get, _OA_cache_construction_available def transversal_design(k,n,resolvable=False,check=True,existence=False): r""" Return a transversal design of parameters `k,n`. A transversal design of parameters `k, n` is a collection `\mathcal{S}` of subsets of `V = V_1 \cup \cdots \cup V_k` (where the *groups* `V_i` are disjoint and have cardinality `n`) such that: * Any `S \in \mathcal{S}` has cardinality `k` and intersects each group on exactly one element. * Any two elements from distincts groups are contained in exactly one element of `\mathcal{S}`. More general definitions sometimes involve a `\lambda` parameter, and we assume here that `\lambda=1`. For more information on transversal designs, see `<http://mathworld.wolfram.com/TransversalDesign.html>`_. INPUT: - `n,k` -- integers. If ``k is None`` it is set to the largest value available. - ``resolvable`` (boolean) -- set to ``True`` if you want the design to be resolvable (see :meth:`sage.combinat.designs.incidence_structures.IncidenceStructure.is_resolvable`). 
The `n` classes of the resolvable design are obtained as the first `n` blocks, then the next `n` blocks, etc ... Set to ``False`` by default. - ``check`` -- (boolean) Whether to check that output is correct before returning it. As this is expected to be useless (but we are cautious guys), you may want to disable it whenever you want speed. Set to ``True`` by default. - ``existence`` (boolean) -- instead of building the design, return: - ``True`` -- meaning that Sage knows how to build the design - ``Unknown`` -- meaning that Sage does not know how to build the design, but that the design may exist (see :mod:`sage.misc.unknown`). - ``False`` -- meaning that the design does not exist. .. NOTE:: When ``k=None`` and ``existence=True`` the function returns an integer, i.e. the largest `k` such that we can build a `TD(k,n)`. OUTPUT: The kind of output depends on the input: - if ``existence=False`` (the default) then the output is a list of lists that represent a `TD(k,n)` with `V_1=\{0,\dots,n-1\},\dots,V_k=\{(k-1)n,\dots,kn-1\}` - if ``existence=True`` and ``k`` is an integer, then the function returns a troolean: either ``True``, ``Unknown`` or ``False`` - if ``existence=True`` and ``k=None`` then the output is the largest value of ``k`` for which Sage knows how to compute a `TD(k,n)`. .. SEEALSO:: :func:`orthogonal_array` -- a tranversal design `TD(k,n)` is equivalent to an orthogonal array `OA(k,n,2)`. EXAMPLES:: sage: TD = designs.transversal_design(5,5); TD Transversal Design TD(5,5) sage: TD.blocks() [[0, 5, 10, 15, 20], [0, 6, 12, 18, 24], [0, 7, 14, 16, 23], [0, 8, 11, 19, 22], [0, 9, 13, 17, 21], [1, 5, 14, 18, 22], [1, 6, 11, 16, 21], [1, 7, 13, 19, 20], [1, 8, 10, 17, 24], [1, 9, 12, 15, 23], [2, 5, 13, 16, 24], [2, 6, 10, 19, 23], [2, 7, 12, 17, 22], [2, 8, 14, 15, 21], [2, 9, 11, 18, 20], [3, 5, 12, 19, 21], [3, 6, 14, 17, 20], [3, 7, 11, 15, 24], [3, 8, 13, 18, 23], [3, 9, 10, 16, 22], [4, 5, 11, 17, 23], [4, 6, 13, 15, 22], [4, 7, 10, 18, 21], [4, 8, 12, 16, 20], [4, 9, 14, 19, 24]] Some examples of the maximal number of transversal Sage is able to build:: sage: TD_4_10 = designs.transversal_design(4,10) sage: designs.transversal_design(5,10,existence=True) Unknown For prime powers, there is an explicit construction which gives a `TD(n+1,n)`:: sage: designs.transversal_design(4, 3, existence=True) True sage: designs.transversal_design(674, 673, existence=True) True For other values of ``n`` it depends:: sage: designs.transversal_design(7, 6, existence=True) False sage: designs.transversal_design(4, 6, existence=True) Unknown sage: designs.transversal_design(3, 6, existence=True) True sage: designs.transversal_design(11, 10, existence=True) False sage: designs.transversal_design(4, 10, existence=True) True sage: designs.transversal_design(5, 10, existence=True) Unknown sage: designs.transversal_design(7, 20, existence=True) Unknown sage: designs.transversal_design(6, 12, existence=True) True sage: designs.transversal_design(7, 12, existence=True) True sage: designs.transversal_design(8, 12, existence=True) Unknown sage: designs.transversal_design(6, 20, existence = True) True sage: designs.transversal_design(7, 20, existence = True) Unknown If you ask for a transversal design that Sage is not able to build then an ``EmptySetError`` or a ``NotImplementedError`` is raised:: sage: designs.transversal_design(47, 100) Traceback (most recent call last): ... NotImplementedError: I don't know how to build a TD(47,100)! 
sage: designs.transversal_design(55, 54) Traceback (most recent call last): ... EmptySetError: There exists no TD(55,54)! Those two errors correspond respectively to the cases where Sage answer ``Unknown`` or ``False`` when the parameter ``existence`` is set to ``True``:: sage: designs.transversal_design(47, 100, existence=True) Unknown sage: designs.transversal_design(55, 54, existence=True) False If for a given `n` you want to know the largest `k` for which Sage is able to build a `TD(k,n)` just call the function with `k` set to ``None`` and ``existence`` set to ``True`` as follows:: sage: designs.transversal_design(None, 6, existence=True) 3 sage: designs.transversal_design(None, 20, existence=True) 6 sage: designs.transversal_design(None, 30, existence=True) 6 sage: designs.transversal_design(None, 120, existence=True) 9 TESTS: The case when `n=1`:: sage: designs.transversal_design(5,1).blocks() [[0, 1, 2, 3, 4]] Obtained through Wilson's decomposition:: sage: _ = designs.transversal_design(4,38) Obtained through product decomposition:: sage: _ = designs.transversal_design(6,60) sage: _ = designs.transversal_design(5,60) # checks some tricky divisibility error For small values of the parameter ``n`` we check the coherence of the function :func:`transversal_design`:: sage: for n in xrange(2,25): # long time -- 15 secs ....: i = 2 ....: while designs.transversal_design(i, n, existence=True) is True: ....: i += 1 ....: _ = designs.transversal_design(i-1, n) ....: assert designs.transversal_design(None, n, existence=True) == i - 1 ....: j = i ....: while designs.transversal_design(j, n, existence=True) is Unknown: ....: try: ....: _ = designs.transversal_design(j, n) ....: raise AssertionError("no NotImplementedError") ....: except NotImplementedError: ....: pass ....: j += 1 ....: k = j ....: while k < n+4: ....: assert designs.transversal_design(k, n, existence=True) is False ....: try: ....: _ = designs.transversal_design(k, n) ....: raise AssertionError("no EmptySetError") ....: except EmptySetError: ....: pass ....: k += 1 ....: print("%2d: (%2d, %2d)"%(n,i,j)) 2: ( 4, 4) 3: ( 5, 5) 4: ( 6, 6) 5: ( 7, 7) 6: ( 4, 7) 7: ( 9, 9) 8: (10, 10) 9: (11, 11) 10: ( 5, 11) 11: (13, 13) 12: ( 8, 14) 13: (15, 15) 14: ( 7, 15) 15: ( 7, 17) 16: (18, 18) 17: (19, 19) 18: ( 8, 20) 19: (21, 21) 20: ( 7, 22) 21: ( 8, 22) 22: ( 6, 23) 23: (25, 25) 24: (10, 26) The special case `n=1`:: sage: designs.transversal_design(3, 1).blocks() [[0, 1, 2]] sage: designs.transversal_design(None, 1, existence=True) +Infinity sage: designs.transversal_design(None, 1) Traceback (most recent call last): ... 
ValueError: there is no upper bound on k when 0<=n<=1 Resolvable TD:: sage: k,n = 5,15 sage: TD = designs.transversal_design(k,n,resolvable=True) sage: TD.is_resolvable() True sage: r = designs.transversal_design(None,n,resolvable=True,existence=True) sage: non_r = designs.transversal_design(None,n,existence=True) sage: r + 1 == non_r True """ if resolvable: if existence: return orthogonal_array(k,n,resolvable=True,existence=True) else: OA = orthogonal_array(k,n,resolvable=True,check=False) # the call to TransversalDesign will sort the block so we can not # rely on the order *after* the call blocks = [[i*n+c for i,c in enumerate(B)] for B in OA] classes = [blocks[i:i+n] for i in range(0,n*n,n)] TD = TransversalDesign(blocks,k,n,check=check,copy=False) TD._classes = classes return TD # Is k is None we find the largest available if k is None: if n == 0 or n == 1: if existence: from sage.rings.infinity import Infinity return Infinity raise ValueError("there is no upper bound on k when 0<=n<=1") k = orthogonal_array(None,n,existence=True) if existence: return k if existence and _OA_cache_get(k,n) is not None: return _OA_cache_get(k,n) may_be_available = _OA_cache_construction_available(k,n) is not False if n == 1: if existence: return True TD = [range(k)] elif k >= n+2: if existence: return False raise EmptySetError("No Transversal Design exists when k>=n+2 if n>=2") # Section 6.6 of [Stinson2004] elif orthogonal_array(k, n, existence=True) is not Unknown: # Forwarding non-existence results if orthogonal_array(k, n, existence=True): if existence: return True else: if existence: return False raise EmptySetError("There exists no TD({},{})!".format(k,n)) OA = orthogonal_array(k,n, check = False) TD = [[i*n+c for i,c in enumerate(l)] for l in OA] else: if existence: return Unknown raise NotImplementedError("I don't know how to build a TD({},{})!".format(k,n)) return TransversalDesign(TD,k,n,check=check) class TransversalDesign(GroupDivisibleDesign): r""" Class for Transversal Designs INPUT: - ``blocks`` -- collection of blocks - ``k,n`` (integers) -- parameters of the transversal design. They can be set to ``None`` (default) in which case their value is determined by the blocks. - ``check`` (boolean) -- whether to check that the design is indeed a transversal design with the right parameters. Set to ``True`` by default. EXAMPLES:: sage: designs.transversal_design(None,5) Transversal Design TD(6,5) sage: designs.transversal_design(None,30) Transversal Design TD(6,30) sage: designs.transversal_design(None,36) Transversal Design TD(10,36) """ def __init__(self, blocks, k=None,n=None,check=True,**kwds): r""" Constructor of the class EXAMPLES:: sage: designs.transversal_design(None,5) Transversal Design TD(6,5) """ from math import sqrt if k is None: if blocks: k=len(blocks[0]) else: k=0 if n is None: n = round(sqrt(len(blocks))) self._n = n self._k = k if check: assert is_transversal_design(blocks,k,n) GroupDivisibleDesign.__init__(self, k*n, [range(i*n,(i+1)*n) for i in range(k)], blocks, check=False, **kwds) def __repr__(self): r""" Returns a string describing the transversal design. EXAMPLES:: sage: designs.transversal_design(None,5) Transversal Design TD(6,5) sage: designs.transversal_design(None,30) Transversal Design TD(6,30) sage: designs.transversal_design(None,36) Transversal Design TD(10,36) """ return "Transversal Design TD({},{})".format(self._k,self._n) def is_transversal_design(B,k,n, verbose=False): r""" Check that a given set of blocks ``B`` is a transversal design. 
See :func:`~sage.combinat.designs.orthogonal_arrays.transversal_design` for a definition. INPUT: - ``B`` -- the list of blocks - ``k, n`` -- integers - ``verbose`` (boolean) -- whether to display information about what is going wrong. .. NOTE:: The tranversal design must have `\{0, \ldots, kn-1\}` as a ground set, partitioned as `k` sets of size `n`: `\{0, \ldots, k-1\} \sqcup \{k, \ldots, 2k-1\} \sqcup \cdots \sqcup \{k(n-1), \ldots, kn-1\}`. EXAMPLES:: sage: TD = designs.transversal_design(5, 5, check=True) # indirect doctest sage: from sage.combinat.designs.orthogonal_arrays import is_transversal_design sage: is_transversal_design(TD, 5, 5) True sage: is_transversal_design(TD, 4, 4) False """ return is_orthogonal_array([[x%n for x in R] for R in B],k,n,verbose=verbose) def wilson_construction(OA,k,r,m,u,check=True,explain_construction=False): r""" Returns a `OA(k,rm+\sum_i u_i)` from a truncated `OA(k+s,r)` by Wilson's construction. **Simple form:** Let `OA` be a truncated `OA(k+s,r)` with `s` truncated columns of sizes `u_1,...,u_s`, whose blocks have sizes in `\{k+b_1,...,k+b_t\}`. If there exist: - An `OA(k,m+b_i) - b_i.OA(k,1)` for every `1\leq i\leq t` - An `OA(k,u_i)` for every `1\leq i\leq s` Then there exists an `OA(k,rm+\sum u_i)`. The construction is a generalization of Lemma 3.16 in [HananiBIBD]_. **Brouwer-Van Rees form:** Let `OA` be a truncated `OA(k+s,r)` with `s` truncated columns of sizes `u_1,...,u_s`. Let the set `H_i` of the `u_i` points of column `k+i` be partitionned into `\sum_j H_{ij}`. Let `m_{ij}` be integers such that: - For `0\leq i <l` there exists an `OA(k,\sum_j m_{ij}|H_{ij}|)` - For any block `B\in OA` intersecting the sets `H_{ij(i)}` there exists an `OA(k,m+\sum_i m_{ij})-\sum_i OA(k,m_{ij(j)})`. Then there exists an `OA(k,rm+\sum_{i,j}m_{ij})`. This construction appears in [BvR82]_. INPUT: - ``OA`` -- an incomplete orthogonal array with `k+s` columns. The elements of a column of size `c` must belong to `\{0,...,c\}`. The missing entries of a block are represented by ``None`` values. If ``OA=None``, it is defined as a truncated orthogonal arrays with `k+s` columns. - ``k,r,m`` (integers) - ``u`` (list) -- two cases depending on the form to use: - Simple form: a list of length `s` such that column ``k+i`` has size ``u[i]``. The untruncated points of column ``k+i`` are assumed to be ``[0,...,u[i]-1]``. - Brouwer-Van Rees form: a list of length `s` such that ``u[i]`` is the list of pairs `(m_{i0},|H_{i0}|),...,(m_{ip_i},|H_{ip_i}|)`. The untruncated points of column ``k+i`` are assumed to be `[0,...,u_i-1]` where `u_i=\sum_j |H_{ip_i}|`. Besides, the first `|H_{i0}|` points represent `H_{i0}`, the next `|H_{i1}|` points represent `H_{i1}`, etc... - ``explain_construction`` (boolean) -- return a string describing the construction. - ``check`` (boolean) -- whether to check that output is correct before returning it. As this is expected to be useless (but we are cautious guys), you may want to disable it whenever you want speed. Set to ``True`` by default. REFERENCE: .. [HananiBIBD] Balanced incomplete block designs and related designs, Haim Hanani, Discrete Mathematics 11.3 (1975) pages 255-369. 
EXAMPLES:: sage: from sage.combinat.designs.orthogonal_arrays import wilson_construction sage: from sage.combinat.designs.orthogonal_arrays import OA_relabel sage: from sage.combinat.designs.orthogonal_arrays_find_recursive import find_wilson_decomposition_with_one_truncated_group sage: total = 0 sage: for k in range(3,8): ....: for n in range(1,30): ....: if find_wilson_decomposition_with_one_truncated_group(k,n): ....: total += 1 ....: f, args = find_wilson_decomposition_with_one_truncated_group(k,n) ....: _ = f(*args) sage: total 41 sage: print(designs.orthogonal_arrays.explain_construction(7,58)) Wilson's construction n=8.7+1+1 with master design OA(7+2,8) sage: print(designs.orthogonal_arrays.explain_construction(9,115)) Wilson's construction n=13.8+11 with master design OA(9+1,13) sage: print(wilson_construction(None,5,11,21,[[(5,5)]],explain_construction=True)) Brouwer-van Rees construction n=11.21+(5.5) with master design OA(5+1,11) sage: print(wilson_construction(None,71,17,21,[[(4,9),(1,1)],[(9,9),(1,1)]],explain_construction=True)) Brouwer-van Rees construction n=17.21+(9.4+1.1)+(9.9+1.1) with master design OA(71+2,17) An example using the Brouwer-van Rees generalization:: sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array sage: from sage.combinat.designs.orthogonal_arrays import wilson_construction sage: OA = designs.orthogonal_arrays.build(6,11) sage: OA = [[x if (i<5 or x<5) else None for i,x in enumerate(R)] for R in OA] sage: OAb = wilson_construction(OA,5,11,21,[[(5,5)]]) sage: is_orthogonal_array(OAb,5,256) True """ # Converting the input to Brouwer-Van Rees form try: if u: int(u[0]) except TypeError: pass else: u = [[(1,uu)] for uu in u] n_trunc = len(u) if explain_construction: if not u: return ("Product of orthogonal arrays n={}.{}").format(r,m) elif all(len(uu) == 1 and uu[0][0] == 1 for uu in u): return ("Wilson's construction n={}.{}+{} with master design OA({}+{},{})" .format(r, m, "+".join(str(x) for ((_,x),) in u), k, n_trunc, r)) else: return ("Brouwer-van Rees construction n={}.{}+{} with master design OA({}+{},{})" .format(r, m, "+".join("(" + "+".join(str(x)+"."+str(mul) for mul,x in uu) + ")" for uu in u), k, n_trunc, r)) if OA is None: master_design = orthogonal_array(k+n_trunc,r,check=False) matrix = [range(r)]*k for uu in u: uu = sum(x[1] for x in uu) matrix.append(range(uu)+[None]*(r-uu)) master_design = OA_relabel(master_design, k+n_trunc, r, matrix=matrix) else: master_design = OA for c in u: assert all(m_ij>=0 and h_size>=0 for m_ij,h_size in c) assert sum(h_size for m_ij,h_size in c) <= r # Associates a point ij from a truncated column k+i to # # - its corresponding multiplier # - its corresponding set of points in the final design. 
point_to_mij = [] point_to_point_set = [] n=r*m for i,partition in enumerate(u): column_i_point_to_mij = [] column_i_point_to_point_set = [] for mij,h_size in partition: for _ in range(h_size): column_i_point_to_mij.append(mij) column_i_point_to_point_set.append(range(n,n+mij)) n+=mij point_to_mij.append(column_i_point_to_mij) point_to_point_set.append(column_i_point_to_point_set) # the set of ij associated with each block block_to_ij = lambda B: ((i,j) for i,j in enumerate(B[k:]) if j is not None) # The different profiles (set of mij associated with each block) block_profiles = set(tuple(point_to_mij[i][j] for i,j in block_to_ij(B)) for B in master_design) # For each block meeting multipliers m_ij(0),...,m_ij(s) we need a # OA(k,m+\sum m_{ij(i)})-\sum OA(k,\sum m_{ij(i)}) OA_incomplete = {profile: incomplete_orthogonal_array(k, m+sum(profile), profile) for profile in block_profiles} # For each truncated column k+i partitionned into H_{i0},...,H_{ip_i} we # need a OA(k,\sum_j m_{ij} * |H_{ij}|) OA_k_u = {sum(c): orthogonal_array(k, sum(c)) for c in point_to_mij} # Building the actual design ! OA = [] for B in master_design: # The missing entries belong to the last n_trunc columns assert all(x is not None for x in B[:k]) # We replace the block of profile m_{ij(0)},...,m_{ij(s)} with a # OA(k,m+\sum_i m_ij(i)) properly relabelled matrix = [range(i*m,(i+1)*m) for i in B[:k]] profile = [] for i,j in block_to_ij(B): profile.append(point_to_mij[i][j]) for C in matrix: C.extend(point_to_point_set[i][j]) OA.extend(OA_relabel(OA_incomplete[tuple(profile)],k,m+sum(profile),matrix=matrix)) # The missing OA(k,uu) for i in range(n_trunc): length = sum(point_to_mij[i]) OA.extend(OA_relabel(OA_k_u[length], k, length, matrix=[sum(point_to_point_set[i],[])]*k)) if check: from .designs_pyx import is_orthogonal_array assert is_orthogonal_array(OA,k,n,2) return OA def TD_product(k,TD1,n1,TD2,n2, check=True): r""" Return the product of two transversal designs. From a transversal design `TD_1` of parameters `k,n_1` and a transversal design `TD_2` of parameters `k,n_2`, this function returns a transversal design of parameters `k,n` where `n=n_1\times n_2`. Formally, if the groups of `TD_1` are `V^1_1,\dots,V^1_k` and the groups of `TD_2` are `V^2_1,\dots,V^2_k`, the groups of the product design are `V^1_1\times V^2_1,\dots,V^1_k\times V^2_k` and its blocks are the `\{(x^1_1,x^2_1),\dots,(x^1_k,x^2_k)\}` where `\{x^1_1,\dots,x^1_k\}` is a block of `TD_1` and `\{x^2_1,\dots,x^2_k\}` is a block of `TD_2`. INPUT: - ``TD1, TD2`` -- transversal designs. - ``k,n1,n2`` (integers) -- see above. - ``check`` (boolean) -- Whether to check that output is correct before returning it. As this is expected to be useless (but we are cautious guys), you may want to disable it whenever you want speed. Set to ``True`` by default. .. NOTE:: This function uses transversal designs with `V_1=\{0,\dots,n-1\},\dots,V_k=\{(k-1)n,\dots,kn-1\}` both as input and ouptut. EXAMPLES:: sage: from sage.combinat.designs.orthogonal_arrays import TD_product sage: TD1 = designs.transversal_design(6,7) sage: TD2 = designs.transversal_design(6,12) sage: TD6_84 = TD_product(6,TD1,7,TD2,12) """ N = n1*n2 TD = [] for X1 in TD1: for X2 in TD2: TD.append([x1*n2+(x2%n2) for x1,x2 in zip(X1,X2)]) if check: assert is_transversal_design(TD,k,N) return TD def orthogonal_array(k,n,t=2,resolvable=False, check=True,existence=False,explain_construction=False): r""" Return an orthogonal array of parameters `k,n,t`. 
An orthogonal array of parameters `k,n,t` is a matrix with `k` columns filled with integers from `[n]` in such a way that for any `t` columns, each of the `n^t` possible rows occurs exactly once. In particular, the matrix has `n^t` rows. More general definitions sometimes involve a `\lambda` parameter, and we assume here that `\lambda=1`. An orthogonal array is said to be *resolvable* if it corresponds to a resolvable transversal design (see :meth:`sage.combinat.designs.incidence_structures.IncidenceStructure.is_resolvable`). For more information on orthogonal arrays, see :wikipedia:`Orthogonal_array`. INPUT: - ``k`` -- (integer) number of columns. If ``k=None`` it is set to the largest value available. - ``n`` -- (integer) number of symbols - ``t`` -- (integer; default: 2) -- strength of the array - ``resolvable`` (boolean) -- set to ``True`` if you want the design to be resolvable. The `n` classes of the resolvable design are obtained as the first `n` blocks, then the next `n` blocks, etc ... Set to ``False`` by default. - ``check`` -- (boolean) Whether to check that output is correct before returning it. As this is expected to be useless (but we are cautious guys), you may want to disable it whenever you want speed. Set to ``True`` by default. - ``existence`` (boolean) -- instead of building the design, return: - ``True`` -- meaning that Sage knows how to build the design - ``Unknown`` -- meaning that Sage does not know how to build the design, but that the design may exist (see :mod:`sage.misc.unknown`). - ``False`` -- meaning that the design does not exist. .. NOTE:: When ``k=None`` and ``existence=True`` the function returns an integer, i.e. the largest `k` such that we can build a `OA(k,n)`. - ``explain_construction`` (boolean) -- return a string describing the construction. OUTPUT: The kind of output depends on the input: - if ``existence=False`` (the default) then the output is a list of lists that represent an orthogonal array with parameters ``k`` and ``n`` - if ``existence=True`` and ``k`` is an integer, then the function returns a troolean: either ``True``, ``Unknown`` or ``False`` - if ``existence=True`` and ``k=None`` then the output is the largest value of ``k`` for which Sage knows how to compute a `TD(k,n)`. .. NOTE:: This method implements theorems from [Stinson2004]_. See the code's documentation for details. .. SEEALSO:: When `t=2` an orthogonal array is also a transversal design (see :func:`transversal_design`) and a family of mutually orthogonal latin squares (see :func:`~sage.combinat.designs.latin_squares.mutually_orthogonal_latin_squares`). 
TESTS: The special cases `n=0,1`:: sage: designs.orthogonal_arrays.build(3,0) [] sage: designs.orthogonal_arrays.build(3,1) [[0, 0, 0]] sage: designs.orthogonal_arrays.largest_available_k(0) +Infinity sage: designs.orthogonal_arrays.largest_available_k(1) +Infinity sage: designs.orthogonal_arrays.build(16,0) [] sage: designs.orthogonal_arrays.build(16,1) [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] when `t>2` and `k=None`:: sage: t = 3 sage: designs.orthogonal_arrays.largest_available_k(5,t=t) == t True sage: _ = designs.orthogonal_arrays.build(t,5,t) """ assert n>=0, "n(={}) must be nonnegative".format(n) # A resolvable OA(k,n) is an OA(k+1,n) if resolvable: assert t==2, "resolvable designs are only handled when t=2" if existence and k is not None: return orthogonal_array(k+1,n,existence=True) if k is None: k = orthogonal_array(None,n,existence=True)-1 if existence: return k OA = sorted(orthogonal_array(k+1,n,check=check)) return [B[1:] for B in OA] # If k is set to None we find the largest value available if k is None: if existence: return largest_available_k(n,t) elif n == 0 or n == 1: raise ValueError("there is no upper bound on k when 0<=n<=1") else: k = largest_available_k(n,t) if k < t: raise ValueError("undefined for k<t") if existence and _OA_cache_get(k,n) is not None and t == 2: return _OA_cache_get(k,n) from .block_design import projective_plane from .latin_squares import mutually_orthogonal_latin_squares from .database import OA_constructions, MOLS_constructions, QDM from .orthogonal_arrays_find_recursive import find_recursive_construction from .difference_matrices import difference_matrix may_be_available = _OA_cache_construction_available(k,n) is not False if n <= 1: if existence: return True if explain_construction: return "Trivial construction" OA = [[0]*k]*n elif k >= n+t: # When t=2 then k<n+t as it is equivalent to the existence of n-1 MOLS. # When t>2 the submatrix defined by the rows whose first t-2 elements # are 0s yields a OA with t=2 and k-(t-2) columns. Thus k-(t-2) < n+2, # i.e. k<n+t. 
if existence: return False msg = "There exists no OA({},{}) as k(={})>n+t-1={}".format(k,n,k,n+t-1) if explain_construction: return msg raise EmptySetError(msg) elif k <= t: if existence: return True if explain_construction: return "Trivial construction [n]^k" from itertools import product return [list(x) for x in product(range(n), repeat=k)] elif t != 2: if existence: return Unknown msg = "Only trivial orthogonal arrays are implemented for t>=2" if explain_construction: return msg raise NotImplementedError(msg) elif k <= 3: if existence: return True if explain_construction: return "Cyclic latin square" return [[i,j,(i+j)%n] for i in xrange(n) for j in xrange(n)] # projective spaces are equivalent to OA(n+1,n,2) elif (projective_plane(n, existence=True) or (k == n+1 and projective_plane(n, existence=True) is False)): _OA_cache_set(n+1,n,projective_plane(n, existence=True)) if k == n+1: if existence: return projective_plane(n, existence=True) if explain_construction: return "From a projective plane of order {}".format(n) from .block_design import projective_plane_to_OA p = projective_plane(n, check=False) OA = projective_plane_to_OA(p, check=False) else: if existence: return True if explain_construction: return "From a projective plane of order {}".format(n) from .block_design import projective_plane_to_OA p = projective_plane(n, check=False) OA = [l[:k] for l in projective_plane_to_OA(p, check=False)] # Constructions from the database (OA) elif may_be_available and n in OA_constructions and k <= OA_constructions[n][0]: _OA_cache_set(OA_constructions[n][0],n,True) if existence: return True if explain_construction: return "the database contains an OA({},{})".format(OA_constructions[n][0],n) _, construction = OA_constructions[n] OA = OA_from_wider_OA(construction(),k) # Constructions from the database II (MOLS: Section 6.5.1 from [Stinson2004]) elif may_be_available and n in MOLS_constructions and k-2 <= MOLS_constructions[n][0]: _OA_cache_set(MOLS_constructions[n][0]+2,n,True) if existence: return True elif explain_construction: return "the database contains {} MOLS of order {}".format(MOLS_constructions[n][0],n) else: construction = MOLS_constructions[n][1] mols = construction() OA = [[i,j]+[m[i,j] for m in mols] for i in range(n) for j in range(n)] OA = OA_from_wider_OA(OA,k) # Constructions from the database III (Quasi-difference matrices) elif (may_be_available and (n,1) in QDM and any(kk>=k and mu<=lmbda and (orthogonal_array(k,u,existence=True) is True) for (_,lmbda,mu,u),(kk,_) in QDM[n,1].items())): _OA_cache_set(k,n,True) for (nn,lmbda,mu,u),(kk,f) in QDM[n,1].items(): if (kk>=k and mu<=lmbda and (orthogonal_array(k,u,existence=True) is True)): if existence: return True elif explain_construction: return "the database contains a ({},{};{},{};{})-quasi difference matrix".format(nn,k,lmbda,mu,u) G,M = f() M = [R[:k] for R in M] OA = OA_from_quasi_difference_matrix(M,G,add_col=False) break # From Difference Matrices elif may_be_available and difference_matrix(n,k-1,existence=True): _OA_cache_set(k,n,True) if existence: return True if explain_construction: return "from a ({},{})-difference matrix".format(n,k-1) G,M = difference_matrix(n,k-1) OA = OA_from_quasi_difference_matrix(M,G,add_col=True) elif may_be_available and find_recursive_construction(k,n): _OA_cache_set(k,n,True) if existence: return True f,args = find_recursive_construction(k,n) if explain_construction: return f(*args,explain_construction=True) OA = f(*args) else: _OA_cache_set(k,n,Unknown) if existence: return Unknown 
elif explain_construction: return "No idea" raise NotImplementedError("I don't know how to build an OA({},{})!".format(k,n)) if check: assert is_orthogonal_array(OA,k,n,t,verbose=1), "Sage built an incorrect OA({},{}) O_o".format(k,n) return OA def largest_available_k(n,t=2): r""" Return the largest `k` such that Sage can build an `OA(k,n)`. INPUT: - ``n`` (integer) - ``t`` -- (integer; default: 2) -- strength of the array EXAMPLE:: sage: designs.orthogonal_arrays.largest_available_k(0) +Infinity sage: designs.orthogonal_arrays.largest_available_k(1) +Infinity sage: designs.orthogonal_arrays.largest_available_k(10) 4 sage: designs.orthogonal_arrays.largest_available_k(27) 28 sage: designs.orthogonal_arrays.largest_available_k(100) 10 sage: designs.orthogonal_arrays.largest_available_k(-1) Traceback (most recent call last): ... ValueError: n(=-1) was expected to be >=0 """ from .block_design import projective_plane if n<0: raise ValueError("n(={}) was expected to be >=0".format(n)) if t<0: raise ValueError("t(={}) was expected to be >=0".format(t)) if n == 0 or n == 1: from sage.rings.infinity import Infinity return Infinity elif t == 2: if projective_plane(n,existence=True): return n+1 else: k=1 while _OA_cache_construction_available(k+1,n) is True: k=k+1 else: k=t-1 while orthogonal_array(k+1,n,t,existence=True) is True: k += 1 return k def incomplete_orthogonal_array(k,n,holes,resolvable=False, existence=False): r""" Return an `OA(k,n)-\sum_{1\leq i\leq x} OA(k,s_i)`. An `OA(k,n)-\sum_{1\leq i\leq x} OA(k,s_i)` is an orthogonal array from which have been removed disjoint `OA(k,s_1),...,OA(k,s_x)`. If there exist `OA(k,s_1),...,OA(k,s_x)` they can be used to fill the holes and give rise to an `OA(k,n)`. A very useful particular case (see e.g. the Wilson construction in :func:`wilson_construction`) is when all `s_i=1`. In that case the incomplete design is a `OA(k,n)-x.OA(k,1)`. Such design is equivalent to transversal design `TD(k,n)` from which has been removed `x` disjoint blocks. INPUT: - ``k,n`` (integers) - ``holes`` (list of integers) -- respective sizes of the holes to be found. - ``resolvable`` (boolean) -- set to ``True`` if you want the design to be resolvable. The classes of the resolvable design are obtained as the first `n` blocks, then the next `n` blocks, etc ... Set to ``False`` by default. - ``existence`` (boolean) -- instead of building the design, return: - ``True`` -- meaning that Sage knows how to build the design - ``Unknown`` -- meaning that Sage does not know how to build the design, but that the design may exist (see :mod:`sage.misc.unknown`). - ``False`` -- meaning that the design does not exist. .. NOTE:: By convention, the ground set is always `V = \{0, ..., n-1\}`. If all holes have size 1, in the incomplete orthogonal array returned by this function the holes are `\{n-1, ..., n-s_1\}^k`, `\{n-s_1-1,...,n-s_1-s_2\}^k`, etc. More generally, if ``holes`` is equal to `u1,...,uk`, the `i`-th hole is the set of points `\{n-\sum_{j\geq i}u_j,...,n-\sum_{j\geq i+1}u_j\}^k`. .. 
SEEALSO:: :func:`OA_find_disjoint_blocks` EXAMPLES:: sage: IOA = designs.incomplete_orthogonal_array(3,3,[1,1,1]) sage: IOA [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]] sage: missing_blocks = [[0,0,0],[1,1,1],[2,2,2]] sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array sage: is_orthogonal_array(IOA + missing_blocks,3,3,2) True TESTS: Affine planes and projective planes:: sage: for q in xrange(2,100): ....: if is_prime_power(q): ....: assert designs.incomplete_orthogonal_array(q,q,[1]*q,existence=True) ....: assert not designs.incomplete_orthogonal_array(q+1,q,[1]*2,existence=True) Further tests:: sage: designs.incomplete_orthogonal_array(8,4,[1,1,1],existence=True) False sage: designs.incomplete_orthogonal_array(5,10,[1,1,1],existence=True) Unknown sage: designs.incomplete_orthogonal_array(5,10,[1,1,1]) Traceback (most recent call last): ... NotImplementedError: I don't know how to build an OA(5,10)! sage: designs.incomplete_orthogonal_array(4,3,[1,1]) Traceback (most recent call last): ... EmptySetError: There is no OA(n+1,n) - 2.OA(n+1,1) as all blocks intersect in a projective plane. sage: n=10 sage: k=designs.orthogonal_arrays.largest_available_k(n) sage: designs.incomplete_orthogonal_array(k,n,[1,1,1],existence=True) True sage: _ = designs.incomplete_orthogonal_array(k,n,[1,1,1]) sage: _ = designs.incomplete_orthogonal_array(k,n,[1]) A resolvable `OA(k,n)-n.OA(k,1)`. We check that extending each class and adding the `[i,i,...]` blocks turns it into an `OA(k+1,n)`.:: sage: from sage.combinat.designs.orthogonal_arrays import is_orthogonal_array sage: k,n=5,7 sage: OA = designs.incomplete_orthogonal_array(k,n,[1]*n,resolvable=True) sage: classes = [OA[i*n:(i+1)*n] for i in range(n-1)] sage: for classs in classes: # The design is resolvable ! ....: assert(len(set(col))==n for col in zip(*classs)) sage: OA.extend([[i]*(k) for i in range(n)]) sage: for i,R in enumerate(OA): ....: R.append(i//n) sage: is_orthogonal_array(OA,k+1,n) True Non-existent resolvable incomplete OA:: sage: designs.incomplete_orthogonal_array(9,13,[1]*10,resolvable=True,existence=True) False sage: designs.incomplete_orthogonal_array(9,13,[1]*10,resolvable=True) Traceback (most recent call last): ... EmptySetError: There is no resolvable incomplete OA(9,13) whose holes' sizes sum to 10<n(=13) Error message for big holes:: sage: designs.incomplete_orthogonal_array(6,4*9,[9,9,8]) Traceback (most recent call last): ... 
NotImplementedError: I was not able to build this OA(6,36)-OA(6,8)-2.OA(6,9) 10 holes of size 9 through the product construction:: sage: iOA = designs.incomplete_orthogonal_array(10,153,[9]*10) # long time sage: OA9 = designs.orthogonal_arrays.build(10,9) # long time sage: for i in range(10): # long time ....: iOA.extend([[153-9*(i+1)+x for x in B] for B in OA9]) # long time sage: is_orthogonal_array(iOA,10,153) # long time True An `OA(9,82)-OA(9,9)-OA(9,1)`:: sage: ioa = designs.incomplete_orthogonal_array(9,82,[9,1]) sage: ioa.extend([[x+72 for x in B] for B in designs.orthogonal_arrays.build(9,9)]) sage: ioa.extend([[x+81 for x in B] for B in designs.orthogonal_arrays.build(9,1)]) sage: is_orthogonal_array(ioa,9,82,verbose=1) True An `OA(9,82)-OA(9,9)-2.OA(9,1)` in different orders:: sage: ioa = designs.incomplete_orthogonal_array(9,82,[1,9,1]) sage: ioa.extend([[x+71 for x in B] for B in designs.orthogonal_arrays.build(9,1)]) sage: ioa.extend([[x+72 for x in B] for B in designs.orthogonal_arrays.build(9,9)]) sage: ioa.extend([[x+81 for x in B] for B in designs.orthogonal_arrays.build(9,1)]) sage: is_orthogonal_array(ioa,9,82,verbose=1) True sage: ioa = designs.incomplete_orthogonal_array(9,82,[9,1,1]) sage: ioa.extend([[x+71 for x in B] for B in designs.orthogonal_arrays.build(9,9)]) sage: ioa.extend([[x+80 for x in B] for B in designs.orthogonal_arrays.build(9,1)]) sage: ioa.extend([[x+81 for x in B] for B in designs.orthogonal_arrays.build(9,1)]) sage: is_orthogonal_array(ioa,9,82,verbose=1) True Three holes of size 1:: sage: ioa = designs.incomplete_orthogonal_array(3,6,[1,1,1]) sage: ioa.extend([[i]*3 for i in [3,4,5]]) sage: is_orthogonal_array(ioa,3,6,verbose=1) True REFERENCES: .. [BvR82] More mutually orthogonal Latin squares, Andries Brouwer and John van Rees Discrete Mathematics vol.39, num.3, pages 263-281 1982 http://oai.cwi.nl/oai/asset/304/0304A.pdf """ from sage.combinat.designs.database import QDM for h in holes: if h<0: raise ValueError("Holes must have size >=0, but {} was in the list").format(h) holes = [h for h in holes if h>0] if not holes: return orthogonal_array(k,n,existence=existence,resolvable=resolvable) sum_of_holes = sum(holes) number_of_holes = len(holes) max_hole = max(holes) min_hole = min(holes) if sum_of_holes > n: if existence: return False raise EmptySetError("The total size of holes must be smaller or equal than the size of the ground set") if (max_hole == 1 and resolvable and sum_of_holes != n): if existence: return False raise EmptySetError("There is no resolvable incomplete OA({},{}) whose holes' sizes sum to {}<n(={})".format(k,n,sum_of_holes,n)) # resolvable OA(k,n)-n.OA(k,1) ==> equivalent to OA(k+1,n) if max_hole==1 and resolvable: if existence: return orthogonal_array(k+1,n,existence=True) OA = sorted(orthogonal_array(k+1,n)) OA = [B[1:] for B in OA] # We now relabel the points so that the last n blocks are the [i,i,...] 
relabel = [[0]*n for _ in range(k)] for i,B in enumerate(OA[-n:]): for ii,xx in enumerate(B): relabel[ii][xx] = i OA = [[relabel[i][xx] for i,xx in enumerate(B)] for B in OA] # Let's drop the last blocks assert all(OA[-n+i] == [i]*k for i in range(n)), "The last n blocks should be [i,i,...]" return OA[:-n] # Easy case elif max_hole==1 and number_of_holes <= 1: if existence: return orthogonal_array(k,n,existence=True) OA = orthogonal_array(k,n) independent_set = OA[:number_of_holes] # This is lemma 2.3 from [BvR82]_ # # If k>3 and n>(k-1)u and there exists an OA(k,n)-OA(k,u), then there exists # an OA(k,n)-OA(k,u)-2.OA(k,1) elif (k >= 3 and 2 <= number_of_holes <= 3 and n > (k-1)*max_hole and holes.count(1) == number_of_holes-1 and incomplete_orthogonal_array(k,n,[max_hole],existence=True)): if existence: return True # The 1<=?<=2 other holes of size 1 can be picked greedily as the # conflict graph is regular and not complete (see proof of lemma 2.3) # # This code is a bit awkward for max_hole may be equal to 1, and the # holes have to be correctly ordered in the output. IOA = incomplete_orthogonal_array(k,n,[max_hole]) # place the big hole where it belongs i = holes.index(max_hole) holes[i] = [[ii]*k for ii in range(n-max_hole,n)] # place the first hole of size 1 i = holes.index(1) for h1 in IOA: if all(x<n-max_hole for x in h1): break holes[i] = [h1] IOA.remove(h1) # place the potential second hole of size 1 if number_of_holes == 3: i = holes.index(1) for h2 in IOA: if all(h1[j] != x and x<n-max_hole for j,x in enumerate(h2)): break holes[i] = [h2] IOA.remove(h2) holes = sum(holes,[]) holes = map(list,zip(*holes)) # Building the relabel matrix for l in holes: for i in range(n): if i not in l: l.insert(0,i) for i in range(len(holes)): holes[i] = {v:i for i,v in enumerate(holes[i])} IOA = OA_relabel(IOA,k,n,matrix=holes) return IOA elif max_hole==1 and number_of_holes >= 2 and k == n+1: if existence: return False raise EmptySetError(("There is no OA(n+1,n) - {}.OA(n+1,1) as all blocks " "intersect in a projective plane.").format(number_of_holes)) # Holes of size 1 from OA(k+1,n) elif max_hole==1 and orthogonal_array(k+1,n,existence=True): if existence: return True OA = orthogonal_array(k+1,n) independent_set = [B[:-1] for B in OA if B[-1] == 0][:number_of_holes] OA = [B[:-1] for B in OA] elif max_hole==1 and orthogonal_array(k,n,existence=True): OA = orthogonal_array(k,n) try: independent_set = OA_find_disjoint_blocks(OA,k,n,number_of_holes) except ValueError: if existence: return Unknown raise NotImplementedError("I was not able to build this OA({},{})-{}.OA({},1)".format(k,n,number_of_holes,k)) if existence: return True independent_set = OA_find_disjoint_blocks(OA,k,n,number_of_holes) elif max_hole==1 and not orthogonal_array(k,n,existence=True): return orthogonal_array(k,n,existence=existence) # From a quasi-difference matrix elif number_of_holes==1 and any(uu==sum_of_holes and mu<=1 and lmbda==1 and k<=kk+1 for (nn,lmbda,mu,uu),(kk,_) in QDM.get((n,1),{}).iteritems()): for (nn,lmbda,mu,uu),(kk,f) in QDM[n,1].iteritems(): if uu==sum_of_holes and mu<=1 and lmbda==1 and k<=kk+1: break G,M = f() OA = OA_from_quasi_difference_matrix(M,G,fill_hole=False) return [B[:k] for B in OA] # Equal holes [h,h,...] with h>1 through OA product construction # # (i.e. 
OA(k,n1)-x.OA(k,1) and OA(k,n2) ==> OA(k,n1.n2)-x.OA(k,n2) ) elif (min_hole > 1 and max_hole == min_hole and n%min_hole == 0 and # h divides n orthogonal_array(k,min_hole,existence=True) and # OA(k,h) incomplete_orthogonal_array(k,n//min_hole,[1]*number_of_holes,existence=True)): # OA(k,n/h)-x.OA(k,1) if existence: return True h = min_hole iOA1 = incomplete_orthogonal_array(k,n//holes[0],[1]*number_of_holes) iOA2 = orthogonal_array(k,h) return [[B1[i]*h+B2[i] for i in range(k)] for B1 in iOA1 for B2 in iOA2] else: if existence: return Unknown # format the list of holes f = lambda x: "" if x == 1 else "{}.".format(x) holes_string = "".join("-{}OA({},{})".format(f(holes.count(x)),k,x) for x in sorted(set(holes))) raise NotImplementedError("I was not able to build this OA({},{}){}".format(k,n,holes_string)) assert number_of_holes == len(independent_set) for B in independent_set: OA.remove(B) OA = OA_relabel(OA,k,n,blocks=independent_set) return OA def OA_find_disjoint_blocks(OA,k,n,x): r""" Return `x` disjoint blocks contained in a given `OA(k,n)`. `x` blocks of an `OA` are said to be disjoint if they all have different values for a every given index, i.e. if they correspond to disjoint blocks in the `TD` assciated with the `OA`. INPUT: - ``OA`` -- an orthogonal array - ``k,n,x`` (integers) .. SEEALSO:: :func:`incomplete_orthogonal_array` EXAMPLES:: sage: from sage.combinat.designs.orthogonal_arrays import OA_find_disjoint_blocks sage: k=3;n=4;x=3 sage: Bs = OA_find_disjoint_blocks(designs.orthogonal_arrays.build(k,n),k,n,x) sage: assert len(Bs) == x sage: for i in range(k): ....: assert len(set([B[i] for B in Bs])) == x sage: OA_find_disjoint_blocks(designs.orthogonal_arrays.build(k,n),k,n,5) Traceback (most recent call last): ... ValueError: There does not exist 5 disjoint blocks in this OA(3,4) """ # Computing an independent set of order x with a Linear Program from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException p = MixedIntegerLinearProgram() b = p.new_variable(binary=True) p.add_constraint(p.sum(b[i] for i in range(len(OA))) == x) # t[i][j] lists of blocks of the OA whose i'th component is j t = [[[] for _ in range(n)] for _ in range(k)] for c,B in enumerate(OA): for i,j in enumerate(B): t[i][j].append(c) for R in t: for L in R: p.add_constraint(p.sum(b[i] for i in L) <= 1) try: p.solve() except MIPSolverException: raise ValueError("There does not exist {} disjoint blocks in this OA({},{})".format(x,k,n)) b = p.get_values(b) independent_set = [OA[i] for i,v in b.items() if v] return independent_set def OA_relabel(OA,k,n,blocks=tuple(),matrix=None): r""" Return a relabelled version of the OA. INPUT: - ``OA`` -- an OA, or rather a list of blocks of length `k`, each of which contains integers from `0` to `n-1`. - ``k,n`` (integers) - ``blocks`` (list of blocks) -- relabels the integers of the OA from `[0..n-1]` into `[0..n-1]` in such a way that the `i` blocks from ``block`` are respectively relabeled as ``[n-i,...,n-i]``, ..., ``[n-1,...,n-1]``. Thus, the blocks from this list are expected to have disjoint values for each coordinate. If set to the empty list (default) no such relabelling is performed. - ``matrix`` -- a matrix of dimensions `k,n` such that if the i th coordinate of a block is `x`, this `x` will be relabelled with ``matrix[i][x]``. This is not necessarily an integer between `0` and `n-1`, and it is not necessarily an integer either. This is performed *after* the previous relabelling. If set to ``None`` (default) no such relabelling is performed. .. 
NOTE:: A ``None`` coordinate in one block remains a ``None`` coordinate in the final block. EXAMPLES:: sage: from sage.combinat.designs.orthogonal_arrays import OA_relabel sage: OA = designs.orthogonal_arrays.build(3,2) sage: OA_relabel(OA,3,2,matrix=[["A","B"],["C","D"],["E","F"]]) [['A', 'C', 'E'], ['A', 'D', 'F'], ['B', 'C', 'F'], ['B', 'D', 'E']] sage: TD = OA_relabel(OA,3,2,matrix=[[0,1],[2,3],[4,5]]); TD [[0, 2, 4], [0, 3, 5], [1, 2, 5], [1, 3, 4]] sage: from sage.combinat.designs.orthogonal_arrays import is_transversal_design sage: is_transversal_design(TD,3,2) True Making sure that ``[2,2,2,2]`` is a block of `OA(4,3)`. We do this by relabelling block ``[0,0,0,0]`` which belongs to the design:: sage: designs.orthogonal_arrays.build(4,3) [[0, 0, 0, 0], [0, 1, 2, 1], [0, 2, 1, 2], [1, 0, 2, 2], [1, 1, 1, 0], [1, 2, 0, 1], [2, 0, 1, 1], [2, 1, 0, 2], [2, 2, 2, 0]] sage: OA_relabel(designs.orthogonal_arrays.build(4,3),4,3,blocks=[[0,0,0,0]]) [[2, 2, 2, 2], [2, 0, 1, 0], [2, 1, 0, 1], [0, 2, 1, 1], [0, 0, 0, 2], [0, 1, 2, 0], [1, 2, 0, 0], [1, 0, 2, 1], [1, 1, 1, 2]] TESTS:: sage: OA_relabel(designs.orthogonal_arrays.build(3,2),3,2,blocks=[[0,1],[0,1]]) Traceback (most recent call last): ... RuntimeError: Two block have the same coordinate for one of the k dimensions """ if blocks: l = [] for i,B in enumerate(zip(*blocks)): # the blocks are disjoint if len(B) != len(set(B)): raise RuntimeError("Two block have the same coordinate for one of the k dimensions") l.append(dict(zip([xx for xx in range(n) if xx not in B] + list(B),range(n)))) OA = [[l[i][x] for i,x in enumerate(R)] for R in OA] if matrix: OA = [[matrix[i][j] if j is not None else None for i,j in enumerate(R)] for R in OA] return OA def OA_n_times_2_pow_c_from_matrix(k,c,G,A,Y,check=True): r""" Return an `OA(k, |G| \cdot 2^c)` from a constrained `(G,k-1,2)`-difference matrix. This construction appears in [AbelCheng1994]_ and [AbelThesis]_. Let `G` be an additive Abelian group. We denote by `H` a `GF(2)`-hyperplane in `GF(2^c)`. Let `A` be a `(k-1) \times 2|G|` array with entries in `G \times GF(2^c)` and `Y` be a vector with `k-1` entries in `GF(2^c)`. Let `B` and `C` be respectively the part of the array that belong to `G` and `GF(2^c)`. The input `A` and `Y` must satisfy the following conditions. For any `i \neq j` and `g \in G`: - there are exactly two values of `s` such that `B_{i,s} - B_{j,s} = g` (i.e. `B` is a `(G,k-1,2)`-difference matrix), - let `s_1` and `s_2` denote the two values of `s` given above, then exactly one of `C_{i,s_1} - C_{j,s_1}` and `C_{i,s_2} - C_{j,s_2}` belongs to the `GF(2)`-hyperplane `(Y_i - Y_j) \cdot H` (we implicitely assumed that `Y_i \not= Y_j`). Under these conditions, it is easy to check that the array whose `k-1` rows of length `|G|\cdot 2^c` indexed by `1 \leq i \leq k-1` given by `A_{i,s} + (0, Y_i \cdot v)` where `1\leq s \leq 2|G|,v\in H` is a `(G \times GF(2^c),k-1,1)`-difference matrix. INPUT: - ``k,c`` (integers) -- integers - ``G`` -- an additive Abelian group - ``A`` -- a matrix with entries in `G \times GF(2^c)` - ``Y`` -- a vector with entries in `GF(2^c)` - ``check`` -- (boolean) Whether to check that output is correct before returning it. As this is expected to be useless (but we are cautious guys), you may want to disable it whenever you want speed. Set to ``True`` by default. .. NOTE:: By convention, a multiplicative generator `w` of `GF(2^c)^*` is fixed (inside the function). The hyperplane `H` is the one spanned by `w^0, w^1, \ldots, w^{c-1}`. 
The `GF(2^c)` part of the input matrix `A` and vector `Y` are given in the following form: the integer `i` corresponds to the element `w^i` and ``None`` corresponds to `0`. .. SEEALSO:: Several examples use this construction: - :func:`~sage.combinat.designs.database.OA_9_40` - :func:`~sage.combinat.designs.database.OA_11_80` - :func:`~sage.combinat.designs.database.OA_15_112` - :func:`~sage.combinat.designs.database.OA_11_160` - :func:`~sage.combinat.designs.database.OA_16_176` - :func:`~sage.combinat.designs.database.OA_16_208` - :func:`~sage.combinat.designs.database.OA_15_224` - :func:`~sage.combinat.designs.database.OA_20_352` - :func:`~sage.combinat.designs.database.OA_20_416` - :func:`~sage.combinat.designs.database.OA_20_544` - :func:`~sage.combinat.designs.database.OA_11_640` - :func:`~sage.combinat.designs.database.OA_15_896` EXAMPLE:: sage: from sage.combinat.designs.orthogonal_arrays import OA_n_times_2_pow_c_from_matrix sage: from sage.combinat.designs.designs_pyx import is_orthogonal_array sage: A = [ ....: [(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None),(0,None)], ....: [(0,None),(1,None), (2,2), (3,2), (4,2),(2,None),(3,None),(4,None), (0,2), (1,2)], ....: [(0,None), (2,5), (4,5), (1,2), (3,6), (3,4), (0,0), (2,1), (4,1), (1,6)], ....: [(0,None), (3,4), (1,4), (4,0), (2,5),(3,None), (1,0), (4,1), (2,2), (0,3)], ....: ] sage: Y = [None, 0, 1, 6] sage: OA = OA_n_times_2_pow_c_from_matrix(5,3,GF(5),A,Y) sage: is_orthogonal_array(OA,5,40,2) True sage: A[0][0] = (1,None) sage: OA_n_times_2_pow_c_from_matrix(5,3,GF(5),A,Y) Traceback (most recent call last): ... ValueError: the first part of the matrix A must be a (G,k-1,2)-difference matrix sage: A[0][0] = (0,0) sage: OA_n_times_2_pow_c_from_matrix(5,3,GF(5),A,Y) Traceback (most recent call last): ... ValueError: B_2,0 - B_0,0 = B_2,6 - B_0,6 but the associated part of the matrix C does not satisfies the required condition REFERENCES: .. [AbelThesis] On the Existence of Balanced Incomplete Block Designs and Transversal Designs, Julian R. Abel, PhD Thesis, University of New South Wales, 1995 .. [AbelCheng1994] \R.J.R. Abel and Y.W. 
Cheng, Some new MOLS of order 2np for p a prime power, The Australasian Journal of Combinatorics, vol 10 (1994) """ from sage.rings.finite_rings.finite_field_constructor import FiniteField from sage.rings.integer import Integer from itertools import izip,combinations from .designs_pyx import is_difference_matrix G_card = G.cardinality() if len(A) != k-1 or any(len(a) != 2*G_card for a in A): raise ValueError("A must be a (k-1) x (2|G|) array") if len(Y) != k-1: raise ValueError("Y must be a (k-1)-vector") F = FiniteField(2**c,'w') GG = G.cartesian_product(F) # dictionary from integers to elments of GF(2^c): i -> w^i, None -> 0 w = F.multiplicative_generator() r = {i:w**i for i in xrange(2**c-1)} r[None] = F.zero() # check that the first part of the matrix A is a (G,k-1,2)-difference matrix B = [[G(a) for a,b in R] for R in A] if check and not is_difference_matrix(zip(*B),G,k-1,2): raise ValueError("the first part of the matrix A must be a " "(G,k-1,2)-difference matrix") # convert: # the matrix A to a matrix over G \times GF(2^c) # the vector Y to a vector over GF(2^c) A = [[GG((G(a),r[b])) for a,b in R] for R in A] Y = [r[b] for b in Y] # make the list of the elements of GF(2^c) which belong to the # GF(2)-subspace <w^0,...,w^(c-2)> (that is the GF(2)-hyperplane orthogonal # to w^(c-1)) H = [sum((r[i] for i in S), F.zero()) for s in range(c) for S in combinations(range(c-1),s)] assert len(H) == 2**(c-1) # check that the second part of the matrix A satisfy the conditions if check: G_card = G.cardinality() for i in range(len(B)): for j in range(i): g_to_col_indices = {g: [] for g in G} Hij = set([(Y[i] - Y[j]) * v for v in H]) for s in range(2 * G_card): g_to_col_indices[B[i][s] - B[j][s]].append(s) for s1,s2 in g_to_col_indices.itervalues(): v1 = A[i][s1][1] - A[j][s1][1] v2 = A[i][s2][1] - A[j][s2][1] if (v1 in Hij) == (v2 in Hij): raise ValueError("B_{},{} - B_{},{} = B_{},{} - B_{},{} but" " the associated part of the matrix C does not satisfies" " the required condition".format(i,s1,j,s1,i,s2,j,s2)) # build the quasi difference matrix and return the associated OA Mb = [[e+GG((G.zero(),x*v)) for v in H for e in R] for x,R in izip(Y,A)] return OA_from_quasi_difference_matrix(zip(*Mb),GG,add_col=True) def OA_from_quasi_difference_matrix(M,G,add_col=True,fill_hole=True): r""" Return an Orthogonal Array from a Quasi-Difference matrix **Difference Matrices** Let `G` be a group of order `g`. A *difference matrix* `M` is a `g\times k` matrix with entries from `G` such that for any `1\leq i < j < k` the set `\{d_{li}-d_{lj}:1\leq l \leq g\}` is equal to `G`. By concatenating the `g` matrices `M+x` (where `x\in G`), one obtains a matrix of size `g^2\times x` which is also an `OA(k,g)`. **Quasi-difference Matrices** A quasi-difference matrix is a difference matrix with missing entries. The construction above can be applied again in this case, where the missing entries in each column of `M` are replaced by unique values on which `G` has a trivial action. This produces an incomplete orthogonal array with a "hole" (i.e. missing rows) of size 'u' (i.e. the number of missing values per column of `M`). If there exists an `OA(k,u)`, then adding the rows of this `OA(k,u)` to the incomplete orthogonal array should lead to an OA... **Formal definition** (from the Handbook of Combinatorial Designs [DesignHandbook]_) Let `G` be an abelian group of order `n`. 
A `(n,k;\lambda,\mu;u)`-quasi-difference matrix (QDM) is a matrix `Q=(q_{ij})` with `\lambda(n-1+2u)+\mu` rows and `k` columns, with each entry either empty or containing an element of `G`. Each column contains exactly `\lambda u` entries, and each row contains at most one empty entry. Furthermore, for each `1 \leq i < j \leq k` the multiset .. MATH:: \{ q_{li} - q_{lj}: 1 \leq l \leq \lambda (n-1+2u)+\mu, \text{ with }q_{li}\text{ and }q_{lj}\text{ not empty}\} contains every nonzero element of `G` exactly `\lambda` times, and contains 0 exactly `\mu` times. **Construction** If a `(n,k;\lambda,\mu;u)`-QDM exists and `\mu \leq \lambda`, then an `ITD_\lambda (k,n+u;u)` exists. Start with a `(n,k;\lambda,\mu;u)`-QDM `A` over the group `G`. Append `\lambda-\mu` rows of zeroes. Then select `u` elements `\infty_1,\dots,\infty_u` not in `G`, and replace the empty entries, each by one of these infinite symbols, so that `\infty_i` appears exactly once in each column. Develop the resulting matrix over the group `G` (leaving infinite symbols fixed), to obtain a `\lambda (n^2+2nu)\times k` matrix `T`. Then `T` is an orthogonal array with `k` columns and index `\lambda`, having `n+u` symbols and one hole of size `u`. Adding to `T` an `OA(k,u)` with elements `\infty_1,\dots,\infty_u` yields the `ITD_\lambda(k,n+u;u)`. For more information, see the Handbook of Combinatorial Designs [DesignHandbook]_ or `<http://web.cs.du.edu/~petr/milehigh/2013/Colbourn.pdf>`_. INPUT: - ``M`` -- the difference matrix whose entries belong to ``G`` - ``G`` -- a group - ``add_col`` (boolean) -- whether to add a column to the final OA equal to `(x_1,\dots,x_g,x_1,\dots,x_g,\dots)` where `G=\{x_1,\dots,x_g\}`. - ``fill_hole`` (boolean) -- whether to return the incomplete orthogonal array, or complete it with the `OA(k,u)` (default). When ``fill_hole is None``, no block of the incomplete OA contains more than one value `\geq |G|`. EXAMPLES:: sage: _ = designs.orthogonal_arrays.build(6,20) # indirect doctest """ from itertools import izip Gn = int(G.cardinality()) k = len(M[0])+bool(add_col) G_to_int = {x:i for i,x in enumerate(G)} # A cache for addition in G G_sum = [[0]*Gn for _ in range(Gn)] for x,i in G_to_int.iteritems(): for xx,ii in G_to_int.iteritems(): G_sum[i][ii] = G_to_int[x+xx] # Convert M to integers M = [[None if x is None else G_to_int[G(x)] for x in line] for line in M] # Each line is expanded by [g+x for x in line for g in G] then relabeled # with integers. Missing values are also handled. new_M = [] for line in izip(*M): inf = Gn new_line = [] for x in line: if x is None: new_line.extend([inf]*Gn) inf = inf + 1 else: new_line.extend(G_sum[x]) new_M.append(new_line) if add_col: new_M.append([i//Gn for i in range(len(new_line))]) # new_M = transpose(new_M) new_M = zip(*new_M) # Filling holes with a smaller orthogonal array if inf > Gn and fill_hole: for L in orthogonal_array(k,inf-Gn,2): new_M.append(tuple([x+Gn for x in L])) return new_M def OA_from_Vmt(m,t,V): r""" Return an Orthogonal Array from a `V(m,t)` INPUT: - ``m,t`` (integers) - ``V`` -- the vector `V(m,t)`. .. 
SEEALSO:: - :func:`QDM_from_Vmt` - :func:`OA_from_quasi_difference_matrix` EXAMPLES:: sage: _ = designs.orthogonal_arrays.build(6,46) # indirect doctest """ from sage.rings.finite_rings.finite_field_constructor import FiniteField q = m*t+1 Fq, M = QDM_from_Vmt(m,t,V) return OA_from_quasi_difference_matrix(M,Fq,add_col = False) def QDM_from_Vmt(m,t,V): r""" Return a QDM from a `V(m,t)` **Definition** Let `q` be a prime power and let `q=mt+1` for `m,t` integers. Let `\omega` be a primitive element of `\mathbb{F}_q`. A `V(m,t)` vector is a vector `(a_1,\dots,a_{m+1}` for which, for each `1\leq k < m`, the differences .. MATH:: \{a_{i+k}-a_i:1\leq i \leq m+1,i+k\neq m+2\} represent the `m` cyclotomic classes of `\mathbb{F}_{mt+1}` (compute subscripts modulo `m+2`). In other words, for fixed `k`, is `a_{i+k}-a_i=\omega^{mx+\alpha}` and `a_{j+k}-a_j=\omega^{my+\beta}` then `\alpha\not\equiv\beta \mod{m}` *Construction of a quasi-difference matrix from a `V(m,t)` vector* Starting with a `V(m,t)` vector `(a_1,\dots,a_{m+1})`, form a single row of length `m+2` whose first entry is empty, and whose remaining entries are `(a_1,\dots,a_{m+1})`. Form `t` rows by multiplying this row by the `t` th roots, i.e. the powers of `\omega^m`. From each of these `t` rows, form `m+2` rows by taking the `m+2` cyclic shifts of the row. The result is a `(a,m+2;1,0;t)-QDM`. For more information, refer to the Handbook of Combinatorial Designs [DesignHandbook]_. INPUT: - ``m,t`` (integers) - ``V`` -- the vector `V(m,t)`. .. SEEALSO:: :func:`OA_from_quasi_difference_matrix` EXAMPLES:: sage: _ = designs.orthogonal_arrays.build(6,46) # indirect doctest """ from sage.rings.finite_rings.finite_field_constructor import FiniteField q = m*t+1 Fq = FiniteField(q, 'x') w = Fq.multiplicative_generator() M = [] wm = w**m for i in range(t): L = [None] for e in V: L.append(e*wm**i) for ii in range(m+2): M.append(L[-ii:]+L[:-ii]) # cyclic shift M.append([0]*(m+2)) return Fq, M def OA_from_PBD(k,n,PBD, check=True): r""" Return an `OA(k,n)` from a PBD **Construction** Let `\mathcal B` be a `(n,K,1)`-PBD. If there exists for every `i\in K` a `TD(k,i)-i\times TD(k,1)` (i.e. if there exist `k` idempotent MOLS), then one can obtain a `OA(k,n)` by concatenating: - A `TD(k,i)-i\times TD(k,1)` defined over the elements of `B` for every `B \in \mathcal B`. - The rows `(i,...,i)` of length `k` for every `i\in [n]`. .. NOTE:: This function raises an exception when Sage is unable to build the necessary designs. INPUT: - ``k,n`` (integers) - ``PBD`` -- a PBD on `0,...,n-1`. EXAMPLES: We start from the example VI.1.2 from the [DesignHandbook]_ to build an `OA(3,10)`:: sage: from sage.combinat.designs.orthogonal_arrays import OA_from_PBD sage: from sage.combinat.designs.designs_pyx import is_orthogonal_array sage: pbd = [[0,1,2,3],[0,4,5,6],[0,7,8,9],[1,4,7],[1,5,8], ....: [1,6,9],[2,4,9],[2,5,7],[2,6,8],[3,4,8],[3,5,9],[3,6,7]] sage: oa = OA_from_PBD(3,10,pbd) sage: is_orthogonal_array(oa, 3, 10) True But we cannot build an `OA(4,10)` for this PBD (although there exists an `OA(4,10)`:: sage: OA_from_PBD(4,10,pbd) Traceback (most recent call last): ... EmptySetError: There is no OA(n+1,n) - 3.OA(n+1,1) as all blocks intersect in a projective plane. Or an `OA(3,6)` (as the PBD has 10 points):: sage: _ = OA_from_PBD(3,6,pbd) Traceback (most recent call last): ... 
RuntimeError: PBD is not a valid Pairwise Balanced Design on [0,...,5] """ # Size of the sets of the PBD K = set(map(len,PBD)) if check: from .designs_pyx import is_pairwise_balanced_design if not is_pairwise_balanced_design(PBD, n, K): raise RuntimeError("PBD is not a valid Pairwise Balanced Design on [0,...,{}]".format(n-1)) # Building the IOA OAs = {i:incomplete_orthogonal_array(k,i,(1,)*i) for i in K} OA = [] # For every block B of the PBD we add to the OA rows covering all pairs of # (distinct) coordinates within the elements of B. for S in PBD: for B in OAs[len(S)]: OA.append([S[i] for i in B]) # Adding the 0..0, 1..1, 2..2 .... rows for i in range(n): OA.append([i]*k) if check: assert is_orthogonal_array(OA,k,n,2) return OA def OA_from_wider_OA(OA,k): r""" Return the first `k` columns of `OA`. If `OA` has `k` columns, this function returns `OA` immediately. INPUT: - ``OA`` -- an orthogonal array. - ``k`` (integer) EXAMPLES:: sage: from sage.combinat.designs.orthogonal_arrays import OA_from_wider_OA sage: OA_from_wider_OA(designs.orthogonal_arrays.build(6,20,2),1)[:5] [(19,), (19,), (19,), (19,), (19,)] sage: _ = designs.orthogonal_arrays.build(5,46) # indirect doctest """ if len(OA[0]) == k: return OA return [L[:k] for L in OA] class OAMainFunctions(): r""" Functions related to orthogonal arrays. An orthogonal array of parameters `k,n,t` is a matrix with `k` columns filled with integers from `[n]` in such a way that for any `t` columns, each of the `n^t` possible rows occurs exactly once. In particular, the matrix has `n^t` rows. For more information on orthogonal arrays, see :wikipedia:`Orthogonal_array`. From here you have access to: - :meth:`build(k,n,t=2) <build>`: return an orthogonal array with the given parameters. - :meth:`is_available(k,n,t=2) <is_available>`: answer whether there is a construction available in Sage for a given set of parameters. - :meth:`exists(k,n,t=2) <exists>`: answer whether an orthogonal array with these parameters exist. - :meth:`largest_available_k(n,t=2) <largest_available_k>`: return the largest integer `k` such that Sage knows how to build an `OA(k,n)`. - :meth:`explain_construction(k,n,t=2) <explain_construction>`: return a string that explains the construction that Sage uses to build an `OA(k,n)`. EXAMPLES:: sage: designs.orthogonal_arrays.build(3,2) [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]] sage: designs.orthogonal_arrays.build(5,5) [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [0, 2, 4, 1, 3], [0, 3, 1, 4, 2], [0, 4, 3, 2, 1], [1, 0, 4, 3, 2], [1, 1, 1, 1, 1], [1, 2, 3, 4, 0], [1, 3, 0, 2, 4], [1, 4, 2, 0, 3], [2, 0, 3, 1, 4], [2, 1, 0, 4, 3], [2, 2, 2, 2, 2], [2, 3, 4, 0, 1], [2, 4, 1, 3, 0], [3, 0, 2, 4, 1], [3, 1, 4, 2, 0], [3, 2, 1, 0, 4], [3, 3, 3, 3, 3], [3, 4, 0, 1, 2], [4, 0, 1, 2, 3], [4, 1, 3, 0, 2], [4, 2, 0, 3, 1], [4, 3, 2, 1, 0], [4, 4, 4, 4, 4]] What is the largest value of `k` for which Sage knows how to compute a `OA(k,14,2)`?:: sage: designs.orthogonal_arrays.largest_available_k(14) 6 If you ask for an orthogonal array that does not exist, then you will either obtain an ``EmptySetError`` (if it knows that such an orthogonal array does not exist) or a ``NotImplementedError``:: sage: designs.orthogonal_arrays.build(4,2) Traceback (most recent call last): ... EmptySetError: There exists no OA(4,2) as k(=4)>n+t-1=3 sage: designs.orthogonal_arrays.build(12,20) Traceback (most recent call last): ... NotImplementedError: I don't know how to build an OA(12,20)! """ def __init__(self,*args,**kwds): r""" There is nothing here. 
TESTS:: sage: designs.orthogonal_arrays(4,5) # indirect doctest Traceback (most recent call last): ... RuntimeError: This is not a function but a class. You want to call the designs.orthogonal_arrays.* functions """ raise RuntimeError("This is not a function but a class. You want to call the designs.orthogonal_arrays.* functions") largest_available_k = staticmethod(largest_available_k) @staticmethod def explain_construction(k,n,t=2): r""" Return a string describing how to builds an `OA(k,n)` INPUT: - ``k,n,t`` (integers) -- parameters of the orthogonal array. EXAMPLE:: sage: designs.orthogonal_arrays.explain_construction(9,565) "Wilson's construction n=23.24+13 with master design OA(9+1,23)" sage: designs.orthogonal_arrays.explain_construction(10,154) 'the database contains a (137,10;1,0;17)-quasi difference matrix' """ return orthogonal_array(k,n,t,explain_construction=True) @staticmethod def build(k,n,t=2,resolvable=False): r""" Return an `OA(k,n)` of strength `t` An orthogonal array of parameters `k,n,t` is a matrix with `k` columns filled with integers from `[n]` in such a way that for any `t` columns, each of the `n^t` possible rows occurs exactly once. In particular, the matrix has `n^t` rows. More general definitions sometimes involve a `\lambda` parameter, and we assume here that `\lambda=1`. For more information on orthogonal arrays, see :wikipedia:`Orthogonal_array`. INPUT: - ``k,n,t`` (integers) -- parameters of the orthogonal array. - ``resolvable`` (boolean) -- set to ``True`` if you want the design to be resolvable. The `n` classes of the resolvable design are obtained as the first `n` blocks, then the next `n` blocks, etc ... Set to ``False`` by default. EXAMPLES:: sage: designs.orthogonal_arrays.build(3,3,resolvable=True) # indirect doctest [[0, 0, 0], [1, 2, 1], [2, 1, 2], [0, 2, 2], [1, 1, 0], [2, 0, 1], [0, 1, 1], [1, 0, 2], [2, 2, 0]] sage: OA_7_50 = designs.orthogonal_arrays.build(7,50) # indirect doctest """ return orthogonal_array(k,n,t,resolvable=resolvable) @staticmethod def exists(k,n,t=2): r""" Return the existence status of an `OA(k,n)` INPUT: - ``k,n,t`` (integers) -- parameters of the orthogonal array. .. WARNING:: The function does not only return booleans, but ``True``, ``False``, or ``Unknown``. .. SEEALSO:: :meth:`is_available` EXAMPLE:: sage: designs.orthogonal_arrays.exists(3,6) # indirect doctest True sage: designs.orthogonal_arrays.exists(4,6) # indirect doctest Unknown sage: designs.orthogonal_arrays.exists(7,6) # indirect doctest False """ return orthogonal_array(k,n,t,existence=True) @staticmethod def is_available(k,n,t=2): r""" Return whether Sage can build an `OA(k,n)`. INPUT: - ``k,n,t`` (integers) -- parameters of the orthogonal array. .. SEEALSO:: :meth:`exists` EXAMPLE:: sage: designs.orthogonal_arrays.is_available(3,6) # indirect doctest True sage: designs.orthogonal_arrays.is_available(4,6) # indirect doctest False """ return orthogonal_array(k,n,t,existence=True) is True
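
# ----------------------------------------------------------------------------
# Illustration only (not part of the Sage API): a tiny pure-Python sanity check
# of the OA property for the "cyclic latin square" construction used above for
# k <= 3. In an OA(3,n) of strength 2, every ordered pair of symbols must
# appear exactly once in every pair of columns; the helper below builds the
# array [[i, j, (i+j) % n]] and asserts that property directly.
def _oa3_cyclic_sketch(n=5):
    from itertools import combinations
    OA = [[i, j, (i + j) % n] for i in range(n) for j in range(n)]
    for c1, c2 in combinations(range(3), 2):
        pairs = [(row[c1], row[c2]) for row in OA]
        assert len(set(pairs)) == n * n  # all n^2 ordered pairs occur once
    return OA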
py
1a5a8019141656c8fbcf592c17c64773dd874d18
# Generated by Django 4.0.1 on 2022-06-16 15:41

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('accreditation', '0003_alter_accreditation_type_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='accreditation_type',
            name='color',
            field=models.TextField(blank=True, choices=[('success', 'success'), ('danger', 'danger'), ('warning', 'warning'), ('dark', 'dark'), ('primary', 'primary'), ('secondary', 'secondary'), ('grey', 'grey')], max_length=50, null=True),
        ),
    ]
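
# Illustrative sketch (not part of the generated migration): the AddField above
# would correspond to a model declaration roughly like the one below. The class
# name `Accreditation_type` and the layout of the app's models.py are
# assumptions for illustration only.
#
#     class Accreditation_type(models.Model):
#         COLOR_CHOICES = [
#             ('success', 'success'), ('danger', 'danger'), ('warning', 'warning'),
#             ('dark', 'dark'), ('primary', 'primary'), ('secondary', 'secondary'),
#             ('grey', 'grey'),
#         ]
#         color = models.TextField(blank=True, null=True, max_length=50,
#                                  choices=COLOR_CHOICES)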
py
1a5a8067febede6c8383b3f6c3c7da079c7d5aad
import numpy as np
import os
import json
import time
from .utils import log
import pdb


def get_relationship_feat(committee, pairs):
    start = time.time()
    votefeat = []
    for i, cmt in enumerate(committee):
        log("\t\tprocessing: {}/{}".format(i, len(committee)))
        knn = cmt[0]
        k = knn.shape[1]
        # a pair "votes" positive if either point appears in the other's kNN list
        find0 = (knn[pairs[:, 0], :] == np.tile(pairs[:, 1:], (1, k))).any(axis=1, keepdims=True)
        find1 = (knn[pairs[:, 1], :] == np.tile(pairs[:, :1], (1, k))).any(axis=1, keepdims=True)
        votefeat.append((find0 | find1).astype(np.float32))
    log('\t\trelationship feature done. time: {}'.format(time.time() - start))
    return np.hstack(votefeat)


def cosine_similarity(feat1, feat2):
    assert feat1.shape == feat2.shape
    feat1 /= np.linalg.norm(feat1, axis=1).reshape(-1, 1)
    feat2 /= np.linalg.norm(feat2, axis=1).reshape(-1, 1)
    return np.einsum('ij,ij->i', feat1, feat2).reshape(-1, 1)  # row-wise dot


def get_affinity_feat(features, pairs):
    start = time.time()
    cosine_simi = []
    for i, feat in enumerate(features):
        log("\t\tprocessing: {}/{}".format(i, len(features)))
        cosine_simi.append(cosine_similarity(feat[pairs[:, 0], :], feat[pairs[:, 1], :]))
    log('\t\taffinity feature done. time: {}'.format(time.time() - start))
    return np.concatenate(cosine_simi, axis=1)


def intersection(array1, array2, trunk=-1):
    '''
    To find row-wise intersection size.
    Input:
        array1, array2: Nxk np array
        trunk: if out of memory, set trunk to be smaller, e.g., 100000;
            note that a small trunk will increase the processing time.
    '''
    N, k = array1.shape
    if trunk == -1:
        tile1 = np.tile(array1.reshape(N, k, 1), (1, 1, k))
        tile2 = np.tile(array2.reshape(N, 1, k), (1, k, 1))
        inter_num = ((tile1 == tile2) & (tile1 != -1) & (tile2 != -1)).sum(axis=(1, 2))
    else:
        inter_num = []
        for i in range(0, N, trunk):
            end = min(i + trunk, N)
            L = end - i
            tile1 = np.tile(array1[i:end].reshape(L, k, 1), (1, 1, k))
            tile2 = np.tile(array2[i:end].reshape(L, 1, k), (1, k, 1))
            inter_num.append(((tile1 == tile2) & (tile1 != -1) & (tile2 != -1)).sum(axis=(1, 2)))
        inter_num = np.concatenate(inter_num, axis=0)
    return inter_num


def get_structure_feat(members, pairs):
    start = time.time()
    distr_commnb = []
    for i, m in enumerate(members):
        log("\t\tprocessing: {}/{}".format(i, len(members)))
        knn = m[0]
        #comm_neighbor = np.array([len(np.intersect1d(knn[p[0]], knn[p[1]], assume_unique=True)) for p in pairs]).astype(np.float32)[:,np.newaxis]
        comm_neighbor = intersection(knn[pairs[:, 0], :], knn[pairs[:, 1], :])[:, np.newaxis]
        distr_commnb.append(comm_neighbor)
    log('\t\tstructure feature done. time: {}'.format(time.time() - start))
    return np.hstack(distr_commnb)


def create_pairs(base):
    pairs = []
    knn = base[0]
    anchor = np.tile(np.arange(len(knn)).reshape(len(knn), 1), (1, knn.shape[1]))
    selidx = np.where((knn != -1) & (knn != anchor))
    pairs = np.hstack((anchor[selidx].reshape(-1, 1), knn[selidx].reshape(-1, 1)))
    pairs = np.sort(pairs, axis=1)
    pairs = np.unique(pairs, axis=0)
    return pairs


def get_label(id_label, pairs):
    return (id_label[pairs[:, 0]] == id_label[pairs[:, 1]]).astype(np.float32)[:, np.newaxis]


def create(data_name, args, phase='test'):
    if phase == 'test':
        output = "{}/output/pairset/k{}".format(args.exp_root, args.k)
    else:
        output = "data/{}/pairset/k{}".format(data_name, args.k)
    members = [args.base] + args.committee

    # loading
    if 'affinity' in args.mediator['input'] and not os.path.isfile(output + "/affinity.npy"):
        log("\tLoading features")
        features = []
        for m in members:
            features.append(np.fromfile('data/{}/features/{}.bin'.format(data_name, m), dtype=np.float32).reshape(-1, args.feat_dim))
    if not os.path.isfile(output + "/pairs.npy") or not os.path.isfile(output + "/structure.npy"):
        log("\tLoading base KNN")
        knn_file = np.load('data/{}/knn/{}_k{}.npz'.format(data_name, args.base, args.k))
        knn_base = (knn_file['idx'], knn_file['dist'])
    if 'relationship' in args.mediator['input'] or 'structure' in args.mediator['input']:
        log("\tLoading committee KNN")
        knn_committee = []
        committee_knn_fn = ['data/{}/knn/{}_k{}.npz'.format(data_name, cmt, args.k) for cmt in args.committee]
        for cfn in committee_knn_fn:
            knn_file = np.load(cfn)
            knn_committee.append((knn_file['idx'], knn_file['dist']))

    if not os.path.isdir(output):
        os.makedirs(output)

    # get pairs
    if os.path.isfile(output + "/pairs.npy"):
        log('\tLoading pairs')
        pairs = np.load(output + "/pairs.npy")
    else:
        log('\tgetting pairs')
        pairs = create_pairs(knn_base)
        np.save(output + "/pairs.npy", pairs)
    log('\tgot {} pairs'.format(len(pairs)))

    # get features
    if 'relationship' in args.mediator['input']:
        if not os.path.isfile(output + "/relationship.npy"):
            log('\tgetting relationship features')
            relationship_feat = get_relationship_feat(knn_committee, pairs)
            np.save(output + "/relationship.npy", relationship_feat)
        else:
            log("\trelationship features exist")
    if 'affinity' in args.mediator['input']:
        if not os.path.isfile(output + "/affinity.npy"):
            log('\tgetting affinity features')
            affinity_feat = get_affinity_feat(features, pairs)
            np.save(output + "/affinity.npy", affinity_feat)
        else:
            log("\taffinity features exist")
    if 'structure' in args.mediator['input']:
        if not os.path.isfile(output + "/structure.npy"):
            log('\tgetting structure features')
            structure_feat = get_structure_feat([knn_base] + knn_committee, pairs)
            np.save(output + "/structure.npy", structure_feat)
        else:
            log("\tstructure features exist")

    # get labels when training
    if phase == 'train' or args.evaluation:
        if not os.path.isfile(output + "/pair_label.npy"):
            if not os.path.isfile("data/{}/meta.txt".format(data_name)):
                raise Exception("Meta file not exist: {}, please create meta.txt or set evaluation to False".format("data/{}/meta.txt".format(data_name)))
            with open("data/{}/meta.txt".format(data_name), 'r') as f:
                lines = f.readlines()
            log('\tgetting pairs label')
            id_label = np.array([int(l.strip()) for l in lines])
            label = get_label(id_label, pairs)
            np.save(output + "/pair_label.npy", label)
        else:
            log("\tpairs label exist")
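

if __name__ == '__main__':
    # Tiny usage sketch (illustration only, not part of the original pipeline;
    # the toy kNN table below is made up). Run as a module, e.g.
    # `python -m <package>.<this_module>`, so the relative import above resolves.
    toy_idx = np.array([[0, 1, 2],
                        [1, 0, 3],
                        [2, 0, -1],
                        [3, 1, -1]])
    toy_knn = (toy_idx, np.zeros_like(toy_idx, dtype=np.float32))
    toy_pairs = create_pairs(toy_knn)
    # each row of `toy_pairs` is a sorted (i, j) pair where j is a kNN of i
    print(toy_pairs)
    # number of shared (non -1) neighbors for each pair
    print(intersection(toy_idx[toy_pairs[:, 0]], toy_idx[toy_pairs[:, 1]]))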
py
1a5a80aaa68fd8d50dd55e25f657f459103d936f
""" Prime Developer Trial No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v1 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from fds.sdk.RecommendationListAPIforDigitalPortals.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from fds.sdk.RecommendationListAPIforDigitalPortals.exceptions import ApiAttributeError def lazy_import(): from fds.sdk.RecommendationListAPIforDigitalPortals.model.status_object import StatusObject globals()['StatusObject'] = StatusObject class ErrorMetaObject(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'status': (StatusObject,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'status': 'status', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """ErrorMetaObject - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. 
_visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) status (StatusObject): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """ErrorMetaObject - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) status (StatusObject): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
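
# Usage sketch (illustration only; not part of the generated code). This model
# is normally built from a deserialized API response rather than by hand, but
# it can also be constructed directly; `some_status_object` below is a
# hypothetical StatusObject instance.
#
#     from fds.sdk.RecommendationListAPIforDigitalPortals.model.error_meta_object import ErrorMetaObject
#     meta = ErrorMetaObject()          # `status` is optional
#     meta.status = some_status_object  # attach a StatusObject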
py
1a5a80ae6fe6b8da0da5d897d18b0cdf5bfe6ff3
from logging import getLogger, StreamHandler, DEBUG, INFO
from sys import stdout


def setup_logging(debug=False):
    logger = getLogger('raptiformica')
    logger.setLevel(DEBUG if debug else INFO)
    console_handler = StreamHandler(stdout)
    logger.addHandler(console_handler)
    return logger
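
# Usage sketch (not a definitive part of the module above; assumes it is run
# directly, so the guard keeps imports side-effect free). Messages go to stdout
# through the handler configured by setup_logging.
if __name__ == '__main__':
    logger = setup_logging(debug=True)
    logger.debug("debug messages pass because debug=True sets the DEBUG level")
    logger.info("info messages are always emitted by this logger")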
py
1a5a812775984d725ccbc98d77fbd6b65a903eae
#!/usr/bin/env python
"""
Southern California Earthquake Center Broadband Platform
Copyright 2010-2016 Southern California Earthquake Center

Generic container for Broadband's modules
$Id: module.py 1778 2017-01-09 21:47:59Z fsilva $
"""
from __future__ import division, print_function

# Import Python modules
import os
import shutil

# Import Broadband modules
from genslip import Genslip
from ucrmg import UCrmg
from jbsim import Jbsim
from lf_seismograms import LFSeismograms
from hfsims import Hfsims
from syn1D import Syn1D
from uc_stitch import UCStitch
from bbtoolbox import BBToolbox
from uc_site import UCSite
from wcc_siteamp import WccSiteamp
from rotd50 import RotD50
from fas import FAS
from obs_seismograms import ObsSeismograms
from copy_seismograms import CopySeismograms
from gen_plots import GenPlots
from gp_gof import GPGof
from sdsu_mogof import SDSUMOGoF
from gmpe_plot import GMPEPlot
from gmpe_comparison import GMPEComparison
from calculate_gmpe import CalculateGMPE
from match import Match
from plot_seis import PlotSeis
from plot_map import Plot_Map
from genhtml import GenHTML
from exsim import ExSim
from csm import CSM
from rmg import RMG
from as16 import AS16
from rzz2015 import RZZ2015
from rzz2015_gmpe import RZZ2015GMPE
from rotd100 import RotD100
from anderson_gof import AndersonGOF
from irikura import Irikura


class Module(object):

    def __init__(self):
        self.module_name = ""
        self.module_args = []
        self.kw_args = dict()
        self.files_to_stage = []

    def setName(self, name):
        self.module_name = name

    def getName(self):
        return self.module_name

    def addArg(self, arg):
        self.module_args.append(arg)

    def addArgs(self, args):
        for arg in args:
            self.module_args.append(arg)

    def setArgs(self, args):
        self.module_args = []
        self.addArgs(args)

    def addKeywordArg(self, keyword, value):
        self.kw_args[keyword] = value

    def addStageFile(self, file_to_stage):
        self.files_to_stage.append(file_to_stage)

    def addStageFiles(self, files):
        for file_to_stage in files:
            self.files_to_stage.append(file_to_stage)

    def resetStageFiles(self):
        self.files_to_stage = []

    def getStageFiles(self):
        return self.files_to_stage

    def stage(self, stage_dir):
        for file_to_stage in self.files_to_stage:
            if os.path.dirname(file_to_stage) == stage_dir:
                # File is already there, skip it
                continue
            if os.path.exists(os.path.join(stage_dir,
                                           os.path.basename(file_to_stage))):
                # File is already there, skip it
                continue
            # print("Staging: %s to %s" % (file, stage_dir))
            shutil.copy2(file_to_stage, stage_dir)

    def getArgs(self):
        return self.module_args

    def getKeywordArgs(self):
        return self.kw_args

    def instantiate(self, sim_id):
        print()
        #print(self.module_name)
        #for arg in self.module_args:
        #    print arg
        #for kw_arg in self.kw_args.keys():
        #    print "keyword %s: value %s" % (kw_arg, self.kw_args[kw_arg])
        #    print kw_arg.__class__
        self.kw_args['sim_id'] = sim_id
        #kwargs = {"simID" : sim_id}
        #return globals()[self.module_name](*self.module_args, simID=my_simID)
        return globals()[self.module_name](*self.module_args, **self.kw_args)
py
1a5a823c3cf06e5770b7b98fb14a96fc0222b249
import math

DATA_PATHA = "../data"

train_pkl = DATA_PATHA + "/data_train.pkl"
val_pkl = DATA_PATHA + "/data_val.pkl"
test_pkl = DATA_PATHA + "/data_test.pkl"
information = DATA_PATHA + "/information.pkl"

# parameters
observation = 3 * 60 * 60 - 1
print("observation time", observation)
n_time_interval = 6
print("the number of time interval:", n_time_interval)
time_interval = math.ceil((observation + 1) * 1.0 / n_time_interval)  # round up (ceil)
print("time interval:", time_interval)
lmax = 2
py
1a5a8347822baeaf7a5ebc4f69b9c46673cfc5e3
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import time
import os
from fully_conv_nets import VGGNet, FCNs

import argparse

parser = argparse.ArgumentParser(description="This program trains a Neural Network to detect either cars or roads. It can also load a pretrained model to predict new roads and cars.")
parser.add_argument('type', choices=['roads', 'cars'], help="Choose the type of model to train/load.")
parser.add_argument('-v', '--validate', action='store_true', help="When passing v, a model will be loaded and validated using validation images. If this argument is not passed, then a new model will be trained and stored in the models folder.")
parser.add_argument('-p', '--persist', action='store_true', help="Persist image.")
parser.add_argument('-s', '--show', action='store_true', help="Show image.")
args = parser.parse_args()

n_class = 2

batch_size = 2
epochs = 200
lr = 1e-4
momentum = 0
w_decay = 1e-5
step_size = 50
gamma = 0.5

if args.type == 'roads':
    configs = "roads-CrossEnt_batch{}_epoch{}_RMSprop_scheduler-step{}-gamma{}_lr{}_momentum{}_w_decay{}".format(batch_size, epochs, step_size, gamma, lr, momentum, w_decay)
    raw_imgs_dir = 'dataset/raw_imgs'
    masks_dir = 'dataset/masks'
    model_to_load = "FCNs-BCEWithLogits_batch3_epoch90_RMSprop_scheduler-step50-gamma0.5_lr0.0001_momentum0_w_decay1e-05"
    validation_imgs = 'dataset/validation_imgs'
    validation_masks = 'dataset/validation_masks'
    predictions_path = 'dataset/road_preds/prediction_'
    pred_imgs = 'dataset/road_pred_imgs'
else:
    configs = "cars-CrossEnt_batch{}_epoch{}_RMSprop_scheduler-step{}-gamma{}_lr{}_momentum{}_w_decay{}".format(batch_size, epochs, step_size, gamma, lr, momentum, w_decay)
    raw_imgs_dir = 'dataset/car_raw_imgs'
    masks_dir = 'dataset/car_masks'
    model_to_load = "cars-CrossEnt_batch2_epoch100_RMSprop_scheduler-step50-gamma0.5_lr0.0001_momentum0_w_decay1e-05"
    validation_imgs = 'dataset/validation_imgs'
    validation_masks = 'dataset/validation_masks'
    predictions_path = 'dataset/car_preds/prediction_'
    pred_imgs = 'dataset/car_pred_imgs'

# create dir for model
model_dir = "models"
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
model_path = os.path.join(model_dir, configs)

use_gpu = torch.cuda.is_available()

vgg_model = VGGNet(requires_grad=True, remove_fc=True, model='vgg16')
from lib import LastModel
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class, last_layer=LastModel(32, n_class))

if use_gpu:
    ts = time.time()
    vgg_model = vgg_model.cuda()
    fcn_model = fcn_model.cuda()
    print("Finish cuda loading, time elapsed {}".format(time.time() - ts))

criterion = nn.CrossEntropyLoss()
optimizer = optim.RMSprop(fcn_model.parameters(), lr=lr, momentum=momentum, weight_decay=w_decay)
scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)  # decay LR by a factor of 0.5 every 30 epochs

training_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)
])

from lib import TensorDataset
dataset = TensorDataset(raw_imgs_dir, masks_dir, args.type, transform=training_transform)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)


def train():
    for epoch in range(epochs):
        scheduler.step()
        ts = time.time()
        for iter, batch in enumerate(loader):
            optimizer.zero_grad()

            if use_gpu:
                inputs = Variable(batch['X'].cuda())
                labels = Variable(batch['Y'].cuda())
            else:
                inputs, labels = Variable(batch['X']), Variable(batch['Y'])

            outputs = fcn_model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            if iter % 10 == 0:
                print("epoch{}, iter{}, loss: {}".format(epoch, iter, loss.item()))

        print("Finish epoch {}, time elapsed {}".format(epoch, time.time() - ts))
        torch.save(fcn_model, model_path)


if __name__ == "__main__":
    if args.validate:
        fcn_model = torch.load("models/" + model_to_load)
    else:
        train()

    validation_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    validation_dataset = TensorDataset(validation_imgs, validation_masks, args.type, transform=validation_transform)
    loader = torch.utils.data.DataLoader(validation_dataset)

    import pickle
    from lib import get_simple_masked_img

    for idx, batch in enumerate(loader):
        if use_gpu:
            inputs = Variable(batch['X'].cuda())
            labels = Variable(batch['Y'].cuda())
        else:
            inputs, labels = Variable(batch['X']), Variable(batch['Y'])

        y_val_pred = fcn_model(inputs)
        str_idx = str(idx + 1)
        img_name = ('0' * (7 - len(str_idx) + 1)) + str_idx + '.png'
        raw_img = Image.open(validation_imgs + "/" + img_name).convert('RGB')
        get_simple_masked_img(y_val_pred[0], raw_img, pred_imgs, img_name, args.persist, args.show)

        with open(predictions_path + str_idx + '.pred', 'wb') as handle:
            pickle.dump((y_val_pred[0], raw_img), handle, protocol=pickle.HIGHEST_PROTOCOL)
py
1a5a852a149b5aabbabb66f44d3ca05802184eff
# -*- coding: utf-8 -*-
"""
Created on Sat May 18 12:31:06 2019

@author: MAGESHWARAN
"""

import cv2
import numpy as np

# load_image and display_image are used in the __main__ block below; they are
# assumed to be provided by the same image_processing helper module.
from image_processing import one_over_other, load_image, display_image


def detect_edges(image, kernel=np.ones((5, 5), dtype=np.uint8)):
    """ Perform Edge detection on the image using Morphology Gradient

        Inputs:
            image (np.array) : input Image
            kernel (np.array): Filter to be used on the image

        Output:
            result (np.array) : Image with Edges detected
    """
    image = image.astype(np.uint8)
    result = cv2.morphologyEx(image, cv2.MORPH_GRADIENT, kernel)
    return result


def gradients(image, method="laplacian", ksize=5, **kwargs):
    """ Perform Edge detection on the image using sobel or laplace methods

        Inputs:
            image (np.array) : input Image
            method (string) : either sobel or laplacian
            ksize (int) : Size of the kernel to be used
            axis (int) : 0 for sobel operation in 'x' axis
                         1 for sobel operation in 'y' axis
                         2 for sobel operation in 'x,y' axis

        Output:
            result (np.array) : Image with Edges detected
    """
    if method == "sobel":
        axis = kwargs.pop("axis")

        if axis == 0:
            sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=ksize)
            return sobel_x

        elif axis == 1:
            sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=ksize)
            return sobel_y

        else:
            sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=ksize)
            sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=ksize)
            sobel = one_over_other(sobel_x, sobel_y, blend=True)
            return sobel

    elif method == "laplacian":
        laplacian = cv2.Laplacian(image, cv2.CV_64F, ksize=ksize)
        return laplacian


if __name__ == "__main__":

    # ------------------------- Edge Detection --------------------------------
    image = load_image("./data/binary.jpg", gray=True)
    edge_detected = detect_edges(image)

    # Sobel operator for edge detection
    sudoku = load_image("./data/sudoku.jpg", gray=True)
    sobel = gradients(sudoku, method="sobel", axis=2, ksize=5)
    display_image(sobel, gray=True)
    cv2.imwrite("./data/sobel_xy.jpg", sobel)
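
# Usage sketch (not a definitive part of the module above): loads a grayscale
# image directly with OpenCV and runs both helpers. The file path is a
# placeholder; cv2.convertScaleAbs maps the float64 Laplacian back to 8-bit
# for saving.
if __name__ == "__main__":
    img = cv2.imread("./data/sudoku.jpg", cv2.IMREAD_GRAYSCALE)
    morph_edges = detect_edges(img)                               # morphological gradient
    laplacian_edges = gradients(img, method="laplacian", ksize=5) # Laplacian edges
    sobel_x = gradients(img, method="sobel", axis=0, ksize=5)     # Sobel along x
    cv2.imwrite("./data/laplacian_example.jpg", cv2.convertScaleAbs(laplacian_edges))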
py
1a5a85c813d8552dbb30e3ae6b63dcb5d950ff4a
from octopus.core import app
from octopus.modules.jper import models
from octopus.lib import http, dates
import json


class JPERException(Exception):
    pass


class JPERConnectionException(JPERException):
    pass


class JPERAuthException(JPERException):
    pass


class ValidationException(JPERException):
    pass


class JPER(object):

    # FilesAndJATS = "http://router.jisc.ac.uk/packages/FilesAndJATS"
    #FilesAndJATS = "https://pubrouter.jisc.ac.uk/FilesAndJATS"
    FilesAndJATS = "https://datahub.deepgreen.org/FilesAndJATS"

    def __init__(self, api_key=None, base_url=None):
        self.api_key = api_key if api_key is not None else app.config.get("JPER_API_KEY")
        self.base_url = base_url if base_url is not None else app.config.get("JPER_BASE_URL")
        if self.base_url.endswith("/"):
            self.base_url = self.base_url[:-1]

    def _url(self, endpoint=None, id=None, auth=True, params=None, url=None):
        if url is None:
            url = self.base_url
        if url.endswith("/"):
            url = url[:-1]

        if endpoint is not None:
            url += "/" + endpoint

        if id is not None:
            url += "/" + http.quote(id)

        if auth:
            if params is None:
                params = {}
            if self.api_key is not None and self.api_key != "":
                params["api_key"] = self.api_key

        args = []
        for k, v in params.items():
            args.append(k + "=" + http.quote(str(v)))

        if len(args) > 0:
            if "?" not in url:
                url += "?"
            else:
                url += "&"
            qs = "&".join(args)
            url += qs

        return url

    def validate(self, notification, file_handle=None):
        # turn the notification into a json string
        data = None
        if isinstance(notification, models.IncomingNotification):
            data = notification.json()
        else:
            data = json.dumps(notification)

        # get the url that we are going to send to
        url = self._url("validate")

        # 2016-06-20 TD : switch SSL verification off
        verify = False

        resp = None
        if file_handle is None:
            # if there is no file handle supplied, send the metadata-only notification
            resp = http.post(url, data=data, headers={"Content-Type" : "application/json"}, verify=verify)
        else:
            # otherwise send both parts as a multipart message
            files = [
                ("metadata", ("metadata.json", data, "application/json")),
                ("content", ("content.zip", file_handle, "application/zip"))
            ]
            resp = http.post(url, files=files, verify=verify)

        if resp is None:
            raise JPERConnectionException("Unable to communicate with the JPER API")

        if resp.status_code == 401:
            raise JPERAuthException("Could not authenticate with JPER with your API key")

        if resp.status_code == 400:
            raise ValidationException(resp.json().get("error"))

        return True

    def create_notification(self, notification, file_handle=None):
        # turn the notification into a json string
        data = None
        if isinstance(notification, models.IncomingNotification):
            data = notification.json()
        else:
            data = json.dumps(notification)

        # get the url that we are going to send to
        url = self._url("notification")

        # 2016-06-20 TD : switch SSL verification off
        verify = False

        resp = None
        if file_handle is None:
            # if there is no file handle supplied, send the metadata-only notification
            resp = http.post(url, data=data, headers={"Content-Type" : "application/json"}, verify=verify)
        else:
            # otherwise send both parts as a multipart message
            files = [
                ("metadata", ("metadata.json", data, "application/json")),
                ("content", ("content.zip", file_handle, "application/zip"))
            ]
            resp = http.post(url, files=files, verify=verify)

        if resp is None:
            raise JPERConnectionException("Unable to communicate with the JPER API")

        if resp.status_code == 401:
            raise JPERAuthException("Could not authenticate with JPER with your API key")

        if resp.status_code == 400:
            raise ValidationException(resp.json().get("error"))

        # extract the useful information from the acceptance response
        acc = resp.json()
        id = acc.get("id")
        loc = acc.get("location")

        return id, loc

    def get_notification(self, notification_id=None, location=None):
        # get the url that we are going to send to
        if notification_id is not None:
            url = self._url("notification", id=notification_id)
        elif location is not None:
            url = location
        else:
            raise JPERException("You must supply either the notification_id or the location")

        # 2016-06-20 TD : switch SSL verification off
        verify = False

        # get the response object
        resp = http.get(url, verify=verify)

        if resp is None:
            raise JPERConnectionException("Unable to communicate with the JPER API")

        if resp.status_code == 404:
            return None

        if resp.status_code != 200:
            raise JPERException("Received unexpected status code from {y}: {x}".format(x=resp.status_code, y=url))

        j = resp.json()
        if "provider" in j:
            return models.ProviderOutgoingNotification(j)
        else:
            return models.OutgoingNotification(j)

    def get_content(self, url, chunk_size=8096):
        # just sort out the api_key
        url = self._url(url=url)

        # 2016-06-20 TD : switch SSL verification off
        verify = False

        # get the response object
        resp, content, downloaded_bytes = http.get_stream(url, read_stream=False, verify=verify)

        # check for errors or problems with the response
        if resp is None:
            raise JPERConnectionException("Unable to communicate with the JPER API")

        if resp.status_code == 401:
            raise JPERAuthException("Could not authenticate with JPER with your API key")

        if resp.status_code != 200:
            raise JPERException("Received unexpected status code from {y}: {x}".format(x=resp.status_code, y=url))

        # return the response object, in case the caller wants access to headers, etc.
        return resp.iter_content(chunk_size=chunk_size), resp.headers

    def list_notifications(self, since, page=None, page_size=None, repository_id=None):
        # check that the since date is valid, and get it into the right format
        if not hasattr(since, "strftime"):
            since = dates.parse(since)
        since = since.strftime("%Y-%m-%dT%H:%M:%SZ")

        # make the url params into an object
        params = {"since" : since}
        if page is not None:
            try:
                params["page"] = str(page)
            except:
                raise JPERException("Unable to convert page argument to string")
        if page_size is not None:
            try:
                params["pageSize"] = str(page_size)
            except:
                raise JPERException("Unable to convert page_size argument to string")

        # get the url, which may contain the repository id if it is not None
        url = self._url("routed", id=repository_id, params=params)

        # 2016-06-20 TD : switch SSL verification off
        verify = False

        # get the response object
        resp = http.get(url, verify=verify)

        # check for errors or problems with the response
        if resp is None:
            raise JPERConnectionException("Unable to communicate with the JPER API")

        if resp.status_code == 401:
            raise JPERAuthException("Could not authenticate with JPER with your API key")

        if resp.status_code == 400:
            raise JPERException(resp.json().get("error"))

        if resp.status_code != 200:
            raise JPERException("Received unexpected status code from {y}: {x} ".format(x=resp.status_code, y=url))

        # create the notification list object
        j = resp.json()
        return models.NotificationList(j)

    def iterate_notifications(self, since, repository_id=None, page_size=100):
        page = 1
        while True:
            nl = self.list_notifications(since, page=page, page_size=page_size, repository_id=repository_id)

            if len(nl.notifications) == 0:
                break

            for n in nl.notifications:
                yield n

            if page * page_size >= nl.total:
                break

            page += 1

    def record_retrieval(self, notification_id, content_id=None):
        # FIXME: not yet implemented, while waiting to see how retrieval finally
        # works
        pass
py
1a5a86b9e23ca212ba73480f23e37f811e8f92f1
import unittest import random import time import pickle import warnings from math import log, exp, pi, fsum, sin from functools import reduce from test import test_support class TestBasicOps(unittest.TestCase): # Superclass with tests common to all generators. # Subclasses must arrange for self.gen to retrieve the Random instance # to be tested. def randomlist(self, n): """Helper function to make a list of random numbers""" return [self.gen.random() for i in xrange(n)] def test_autoseed(self): self.gen.seed() state1 = self.gen.getstate() time.sleep(0.1) self.gen.seed() # diffent seeds at different times state2 = self.gen.getstate() self.assertNotEqual(state1, state2) def test_saverestore(self): N = 1000 self.gen.seed() state = self.gen.getstate() randseq = self.randomlist(N) self.gen.setstate(state) # should regenerate the same sequence self.assertEqual(randseq, self.randomlist(N)) def test_seedargs(self): for arg in [None, 0, 0L, 1, 1L, -1, -1L, 10**20, -(10**20), 3.14, 1+2j, 'a', tuple('abc')]: self.gen.seed(arg) for arg in [range(3), dict(one=1)]: self.assertRaises(TypeError, self.gen.seed, arg) self.assertRaises(TypeError, self.gen.seed, 1, 2) self.assertRaises(TypeError, type(self.gen), []) def test_jumpahead(self): self.gen.seed() state1 = self.gen.getstate() self.gen.jumpahead(100) state2 = self.gen.getstate() # s/b distinct from state1 self.assertNotEqual(state1, state2) self.gen.jumpahead(100) state3 = self.gen.getstate() # s/b distinct from state2 self.assertNotEqual(state2, state3) with test_support.check_py3k_warnings(quiet=True): self.assertRaises(TypeError, self.gen.jumpahead) # needs an arg self.assertRaises(TypeError, self.gen.jumpahead, 2, 3) # too many def test_jumpahead_produces_valid_state(self): # From http://bugs.python.org/issue14591. self.gen.seed(199210368) self.gen.jumpahead(13550674232554645900) for i in range(500): val = self.gen.random() self.assertLess(val, 1.0) def test_sample(self): # For the entire allowable range of 0 <= k <= N, validate that # the sample is of the correct length and contains only unique items N = 100 population = xrange(N) for k in xrange(N+1): s = self.gen.sample(population, k) self.assertEqual(len(s), k) uniq = set(s) self.assertEqual(len(uniq), k) self.assertTrue(uniq <= set(population)) self.assertEqual(self.gen.sample([], 0), []) # test edge case N==k==0 def test_sample_distribution(self): # For the entire allowable range of 0 <= k <= N, validate that # sample generates all possible permutations n = 5 pop = range(n) trials = 10000 # large num prevents false negatives without slowing normal case def factorial(n): return reduce(int.__mul__, xrange(1, n), 1) for k in xrange(n): expected = factorial(n) // factorial(n-k) perms = {} for i in xrange(trials): perms[tuple(self.gen.sample(pop, k))] = None if len(perms) == expected: break else: self.fail() def test_sample_inputs(self): # SF bug #801342 -- population can be any iterable defining __len__() self.gen.sample(set(range(20)), 2) self.gen.sample(range(20), 2) self.gen.sample(xrange(20), 2) self.gen.sample(str('abcdefghijklmnopqrst'), 2) self.gen.sample(tuple('abcdefghijklmnopqrst'), 2) def test_sample_on_dicts(self): self.gen.sample(dict.fromkeys('abcdefghijklmnopqrst'), 2) # SF bug #1460340 -- random.sample can raise KeyError a = dict.fromkeys(range(10)+range(10,100,2)+range(100,110)) self.gen.sample(a, 3) # A followup to bug #1460340: sampling from a dict could return # a subset of its keys or of its values, depending on the size of # the subset requested. 
N = 30 d = dict((i, complex(i, i)) for i in xrange(N)) for k in xrange(N+1): samp = self.gen.sample(d, k) # Verify that we got ints back (keys); the values are complex. for x in samp: self.assertTrue(type(x) is int) samp.sort() self.assertEqual(samp, range(N)) def test_gauss(self): # Ensure that the seed() method initializes all the hidden state. In # particular, through 2.2.1 it failed to reset a piece of state used # by (and only by) the .gauss() method. for seed in 1, 12, 123, 1234, 12345, 123456, 654321: self.gen.seed(seed) x1 = self.gen.random() y1 = self.gen.gauss(0, 1) self.gen.seed(seed) x2 = self.gen.random() y2 = self.gen.gauss(0, 1) self.assertEqual(x1, x2) self.assertEqual(y1, y2) def test_pickling(self): state = pickle.dumps(self.gen) origseq = [self.gen.random() for i in xrange(10)] newgen = pickle.loads(state) restoredseq = [newgen.random() for i in xrange(10)] self.assertEqual(origseq, restoredseq) def test_bug_1727780(self): # verify that version-2-pickles can be loaded # fine, whether they are created on 32-bit or 64-bit # platforms, and that version-3-pickles load fine. files = [("randv2_32.pck", 780), ("randv2_64.pck", 866), ("randv3.pck", 343)] for file, value in files: f = open(test_support.findfile(file),"rb") r = pickle.load(f) f.close() self.assertEqual(r.randrange(1000), value) class WichmannHill_TestBasicOps(TestBasicOps): gen = random.WichmannHill() def test_setstate_first_arg(self): self.assertRaises(ValueError, self.gen.setstate, (2, None, None)) def test_strong_jumpahead(self): # tests that jumpahead(n) semantics correspond to n calls to random() N = 1000 s = self.gen.getstate() self.gen.jumpahead(N) r1 = self.gen.random() # now do it the slow way self.gen.setstate(s) for i in xrange(N): self.gen.random() r2 = self.gen.random() self.assertEqual(r1, r2) def test_gauss_with_whseed(self): # Ensure that the seed() method initializes all the hidden state. In # particular, through 2.2.1 it failed to reset a piece of state used # by (and only by) the .gauss() method. for seed in 1, 12, 123, 1234, 12345, 123456, 654321: self.gen.whseed(seed) x1 = self.gen.random() y1 = self.gen.gauss(0, 1) self.gen.whseed(seed) x2 = self.gen.random() y2 = self.gen.gauss(0, 1) self.assertEqual(x1, x2) self.assertEqual(y1, y2) def test_bigrand(self): # Verify warnings are raised when randrange is too large for random() with warnings.catch_warnings(): warnings.filterwarnings("error", "Underlying random") self.assertRaises(UserWarning, self.gen.randrange, 2**60) class SystemRandom_TestBasicOps(TestBasicOps): gen = random.SystemRandom() def test_autoseed(self): # Doesn't need to do anything except not fail self.gen.seed() def test_saverestore(self): self.assertRaises(NotImplementedError, self.gen.getstate) self.assertRaises(NotImplementedError, self.gen.setstate, None) def test_seedargs(self): # Doesn't need to do anything except not fail self.gen.seed(100) def test_jumpahead(self): # Doesn't need to do anything except not fail self.gen.jumpahead(100) def test_gauss(self): self.gen.gauss_next = None self.gen.seed(100) self.assertEqual(self.gen.gauss_next, None) def test_pickling(self): self.assertRaises(NotImplementedError, pickle.dumps, self.gen) def test_53_bits_per_float(self): # This should pass whenever a C double has 53 bit precision. 
span = 2 ** 53 cum = 0 for i in xrange(100): cum |= int(self.gen.random() * span) self.assertEqual(cum, span-1) def test_bigrand(self): # The randrange routine should build-up the required number of bits # in stages so that all bit positions are active. span = 2 ** 500 cum = 0 for i in xrange(100): r = self.gen.randrange(span) self.assertTrue(0 <= r < span) cum |= r self.assertEqual(cum, span-1) def test_bigrand_ranges(self): for i in [40,80, 160, 200, 211, 250, 375, 512, 550]: start = self.gen.randrange(2 ** (i-2)) stop = self.gen.randrange(2 ** i) if stop <= start: continue self.assertTrue(start <= self.gen.randrange(start, stop) < stop) def test_rangelimits(self): for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]: self.assertEqual(set(range(start,stop)), set([self.gen.randrange(start,stop) for i in xrange(100)])) def test_genrandbits(self): # Verify ranges for k in xrange(1, 1000): self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k) # Verify all bits active getbits = self.gen.getrandbits for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]: cum = 0 for i in xrange(100): cum |= getbits(span) self.assertEqual(cum, 2**span-1) # Verify argument checking self.assertRaises(TypeError, self.gen.getrandbits) self.assertRaises(TypeError, self.gen.getrandbits, 1, 2) self.assertRaises(ValueError, self.gen.getrandbits, 0) self.assertRaises(ValueError, self.gen.getrandbits, -1) self.assertRaises(TypeError, self.gen.getrandbits, 10.1) def test_randbelow_logic(self, _log=log, int=int): # check bitcount transition points: 2**i and 2**(i+1)-1 # show that: k = int(1.001 + _log(n, 2)) # is equal to or one greater than the number of bits in n for i in xrange(1, 1000): n = 1L << i # check an exact power of two numbits = i+1 k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) self.assertTrue(n == 2**(k-1)) n += n - 1 # check 1 below the next power of two k = int(1.00001 + _log(n, 2)) self.assertIn(k, [numbits, numbits+1]) self.assertTrue(2**k > n > 2**(k-2)) n -= n >> 15 # check a little farther below the next power of two k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) # note the stronger assertion self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion class MersenneTwister_TestBasicOps(TestBasicOps): gen = random.Random() def test_setstate_first_arg(self): self.assertRaises(ValueError, self.gen.setstate, (1, None, None)) def test_setstate_middle_arg(self): # Wrong type, s/b tuple self.assertRaises(TypeError, self.gen.setstate, (2, None, None)) # Wrong length, s/b 625 self.assertRaises(ValueError, self.gen.setstate, (2, (1,2,3), None)) # Wrong type, s/b tuple of 625 ints self.assertRaises(TypeError, self.gen.setstate, (2, ('a',)*625, None)) # Last element s/b an int also self.assertRaises(TypeError, self.gen.setstate, (2, (0,)*624+('a',), None)) def test_referenceImplementation(self): # Compare the python implementation with results from the original # code. Create 2000 53-bit precision random floats. Compare only # the last ten entries to show that the independent implementations # are tracking. 
Here is the main() function needed to create the # list of expected random numbers: # void main(void){ # int i; # unsigned long init[4]={61731, 24903, 614, 42143}, length=4; # init_by_array(init, length); # for (i=0; i<2000; i++) { # printf("%.15f ", genrand_res53()); # if (i%5==4) printf("\n"); # } # } expected = [0.45839803073713259, 0.86057815201978782, 0.92848331726782152, 0.35932681119782461, 0.081823493762449573, 0.14332226470169329, 0.084297823823520024, 0.53814864671831453, 0.089215024911993401, 0.78486196105372907] self.gen.seed(61731L + (24903L<<32) + (614L<<64) + (42143L<<96)) actual = self.randomlist(2000)[-10:] for a, e in zip(actual, expected): self.assertAlmostEqual(a,e,places=14) def test_strong_reference_implementation(self): # Like test_referenceImplementation, but checks for exact bit-level # equality. This should pass on any box where C double contains # at least 53 bits of precision (the underlying algorithm suffers # no rounding errors -- all results are exact). from math import ldexp expected = [0x0eab3258d2231fL, 0x1b89db315277a5L, 0x1db622a5518016L, 0x0b7f9af0d575bfL, 0x029e4c4db82240L, 0x04961892f5d673L, 0x02b291598e4589L, 0x11388382c15694L, 0x02dad977c9e1feL, 0x191d96d4d334c6L] self.gen.seed(61731L + (24903L<<32) + (614L<<64) + (42143L<<96)) actual = self.randomlist(2000)[-10:] for a, e in zip(actual, expected): self.assertEqual(long(ldexp(a, 53)), e) def test_long_seed(self): # This is most interesting to run in debug mode, just to make sure # nothing blows up. Under the covers, a dynamically resized array # is allocated, consuming space proportional to the number of bits # in the seed. Unfortunately, that's a quadratic-time algorithm, # so don't make this horribly big. seed = (1L << (10000 * 8)) - 1 # about 10K bytes self.gen.seed(seed) def test_53_bits_per_float(self): # This should pass whenever a C double has 53 bit precision. span = 2 ** 53 cum = 0 for i in xrange(100): cum |= int(self.gen.random() * span) self.assertEqual(cum, span-1) def test_bigrand(self): # The randrange routine should build-up the required number of bits # in stages so that all bit positions are active. 
span = 2 ** 500 cum = 0 for i in xrange(100): r = self.gen.randrange(span) self.assertTrue(0 <= r < span) cum |= r self.assertEqual(cum, span-1) def test_bigrand_ranges(self): for i in [40,80, 160, 200, 211, 250, 375, 512, 550]: start = self.gen.randrange(2 ** (i-2)) stop = self.gen.randrange(2 ** i) if stop <= start: continue self.assertTrue(start <= self.gen.randrange(start, stop) < stop) def test_rangelimits(self): for start, stop in [(-2,0), (-(2**60)-2,-(2**60)), (2**60,2**60+2)]: self.assertEqual(set(range(start,stop)), set([self.gen.randrange(start,stop) for i in xrange(100)])) def test_genrandbits(self): # Verify cross-platform repeatability self.gen.seed(1234567) self.assertEqual(self.gen.getrandbits(100), 97904845777343510404718956115L) # Verify ranges for k in xrange(1, 1000): self.assertTrue(0 <= self.gen.getrandbits(k) < 2**k) # Verify all bits active getbits = self.gen.getrandbits for span in [1, 2, 3, 4, 31, 32, 32, 52, 53, 54, 119, 127, 128, 129]: cum = 0 for i in xrange(100): cum |= getbits(span) self.assertEqual(cum, 2**span-1) # Verify argument checking self.assertRaises(TypeError, self.gen.getrandbits) self.assertRaises(TypeError, self.gen.getrandbits, 'a') self.assertRaises(TypeError, self.gen.getrandbits, 1, 2) self.assertRaises(ValueError, self.gen.getrandbits, 0) self.assertRaises(ValueError, self.gen.getrandbits, -1) def test_randbelow_logic(self, _log=log, int=int): # check bitcount transition points: 2**i and 2**(i+1)-1 # show that: k = int(1.001 + _log(n, 2)) # is equal to or one greater than the number of bits in n for i in xrange(1, 1000): n = 1L << i # check an exact power of two numbits = i+1 k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) self.assertTrue(n == 2**(k-1)) n += n - 1 # check 1 below the next power of two k = int(1.00001 + _log(n, 2)) self.assertIn(k, [numbits, numbits+1]) self.assertTrue(2**k > n > 2**(k-2)) n -= n >> 15 # check a little farther below the next power of two k = int(1.00001 + _log(n, 2)) self.assertEqual(k, numbits) # note the stronger assertion self.assertTrue(2**k > n > 2**(k-1)) # note the stronger assertion def test_randrange_bug_1590891(self): start = 1000000000000 stop = -100000000000000000000 step = -200 x = self.gen.randrange(start, stop, step) self.assertTrue(stop < x <= start) self.assertEqual((x+stop)%step, 0) def gamma(z, sqrt2pi=(2.0*pi)**0.5): # Reflection to right half of complex plane if z < 0.5: return pi / sin(pi*z) / gamma(1.0-z) # Lanczos approximation with g=7 az = z + (7.0 - 0.5) return az ** (z-0.5) / exp(az) * sqrt2pi * fsum([ 0.9999999999995183, 676.5203681218835 / z, -1259.139216722289 / (z+1.0), 771.3234287757674 / (z+2.0), -176.6150291498386 / (z+3.0), 12.50734324009056 / (z+4.0), -0.1385710331296526 / (z+5.0), 0.9934937113930748e-05 / (z+6.0), 0.1659470187408462e-06 / (z+7.0), ]) class TestDistributions(unittest.TestCase): def test_zeroinputs(self): # Verify that distributions can handle a series of zero inputs' g = random.Random() x = [g.random() for i in xrange(50)] + [0.0]*5 g.random = x[:].pop; g.uniform(1,10) g.random = x[:].pop; g.paretovariate(1.0) g.random = x[:].pop; g.expovariate(1.0) g.random = x[:].pop; g.weibullvariate(1.0, 1.0) g.random = x[:].pop; g.vonmisesvariate(1.0, 1.0) g.random = x[:].pop; g.normalvariate(0.0, 1.0) g.random = x[:].pop; g.gauss(0.0, 1.0) g.random = x[:].pop; g.lognormvariate(0.0, 1.0) g.random = x[:].pop; g.vonmisesvariate(0.0, 1.0) g.random = x[:].pop; g.gammavariate(0.01, 1.0) g.random = x[:].pop; g.gammavariate(1.0, 1.0) g.random = x[:].pop; 
g.gammavariate(200.0, 1.0) g.random = x[:].pop; g.betavariate(3.0, 3.0) g.random = x[:].pop; g.triangular(0.0, 1.0, 1.0/3.0) def test_avg_std(self): # Use integration to test distribution average and standard deviation. # Only works for distributions which do not consume variates in pairs g = random.Random() N = 5000 x = [i/float(N) for i in xrange(1,N)] for variate, args, mu, sigmasqrd in [ (g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12), (g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0), (g.expovariate, (1.5,), 1/1.5, 1/1.5**2), (g.vonmisesvariate, (1.23, 0), pi, pi**2/3), (g.paretovariate, (5.0,), 5.0/(5.0-1), 5.0/((5.0-1)**2*(5.0-2))), (g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0), gamma(1+2/3.0)-gamma(1+1/3.0)**2) ]: g.random = x[:].pop y = [] for i in xrange(len(x)): try: y.append(variate(*args)) except IndexError: pass s1 = s2 = 0 for e in y: s1 += e s2 += (e - mu) ** 2 N = len(y) self.assertAlmostEqual(s1/N, mu, places=2, msg='%s%r' % (variate.__name__, args)) self.assertAlmostEqual(s2/(N-1), sigmasqrd, places=2, msg='%s%r' % (variate.__name__, args)) def test_constant(self): g = random.Random() N = 100 for variate, args, expected in [ (g.uniform, (10.0, 10.0), 10.0), (g.triangular, (10.0, 10.0), 10.0), (g.triangular, (10.0, 10.0, 10.0), 10.0), (g.expovariate, (float('inf'),), 0.0), (g.vonmisesvariate, (3.0, float('inf')), 3.0), (g.gauss, (10.0, 0.0), 10.0), (g.lognormvariate, (0.0, 0.0), 1.0), (g.lognormvariate, (-float('inf'), 0.0), 0.0), (g.normalvariate, (10.0, 0.0), 10.0), (g.paretovariate, (float('inf'),), 1.0), (g.weibullvariate, (10.0, float('inf')), 10.0), (g.weibullvariate, (0.0, 10.0), 0.0), ]: for i in range(N): self.assertEqual(variate(*args), expected) def test_von_mises_range(self): # Issue 17149: von mises variates were not consistently in the # range [0, 2*PI]. g = random.Random() N = 100 for mu in 0.0, 0.1, 3.1, 6.2: for kappa in 0.0, 2.3, 500.0: for _ in range(N): sample = g.vonmisesvariate(mu, kappa) self.assertTrue( 0 <= sample <= random.TWOPI, msg=("vonmisesvariate({}, {}) produced a result {} out" " of range [0, 2*pi]").format(mu, kappa, sample)) def test_von_mises_large_kappa(self): # Issue #17141: vonmisesvariate() was hang for large kappas random.vonmisesvariate(0, 1e15) random.vonmisesvariate(0, 1e100) class TestModule(unittest.TestCase): def testMagicConstants(self): self.assertAlmostEqual(random.NV_MAGICCONST, 1.71552776992141) self.assertAlmostEqual(random.TWOPI, 6.28318530718) self.assertAlmostEqual(random.LOG4, 1.38629436111989) self.assertAlmostEqual(random.SG_MAGICCONST, 2.50407739677627) def test__all__(self): # tests validity but not completeness of the __all__ list self.assertTrue(set(random.__all__) <= set(dir(random))) def test_random_subclass_with_kwargs(self): # SF bug #1486663 -- this used to erroneously raise a TypeError class Subclass(random.Random): def __init__(self, newarg=None): random.Random.__init__(self) Subclass(newarg=1) def test_main(verbose=None): testclasses = [WichmannHill_TestBasicOps, MersenneTwister_TestBasicOps, TestDistributions, TestModule] try: random.SystemRandom().random() except NotImplementedError: pass else: testclasses.append(SystemRandom_TestBasicOps) test_support.run_unittest(*testclasses) # verify reference counting import sys if verbose and hasattr(sys, "gettotalrefcount"): counts = [None] * 5 for i in xrange(len(counts)): test_support.run_unittest(*testclasses) counts[i] = sys.gettotalrefcount() print counts if __name__ == "__main__": test_main(verbose=True)
py
1a5a8721fdb448c91e26b85f7edaca4a7ead7714
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals import os import unittest import frappe from frappe.utils import cint from frappe.model.naming import revert_series_if_last, make_autoname, parse_naming_series class TestDocument(unittest.TestCase): def test_get_return_empty_list_for_table_field_if_none(self): d = frappe.get_doc({"doctype":"User"}) self.assertEqual(d.get("roles"), []) def test_load(self): d = frappe.get_doc("DocType", "User") self.assertEqual(d.doctype, "DocType") self.assertEqual(d.name, "User") self.assertEqual(d.allow_rename, 1) self.assertTrue(isinstance(d.fields, list)) self.assertTrue(isinstance(d.permissions, list)) self.assertTrue(filter(lambda d: d.fieldname=="email", d.fields)) def test_load_single(self): d = frappe.get_doc("Website Settings", "Website Settings") self.assertEqual(d.name, "Website Settings") self.assertEqual(d.doctype, "Website Settings") self.assertTrue(d.disable_signup in (0, 1)) def test_insert(self): d = frappe.get_doc({ "doctype":"Event", "subject":"test-doc-test-event 1", "starts_on": "2014-01-01", "event_type": "Public" }) d.insert() self.assertTrue(d.name.startswith("EV")) self.assertEqual(frappe.db.get_value("Event", d.name, "subject"), "test-doc-test-event 1") # test if default values are added self.assertEqual(d.send_reminder, 1) return d def test_insert_with_child(self): d = frappe.get_doc({ "doctype":"Event", "subject":"test-doc-test-event 2", "starts_on": "2014-01-01", "event_type": "Public" }) d.insert() self.assertTrue(d.name.startswith("EV")) self.assertEqual(frappe.db.get_value("Event", d.name, "subject"), "test-doc-test-event 2") def test_update(self): d = self.test_insert() d.subject = "subject changed" d.save() self.assertEqual(frappe.db.get_value(d.doctype, d.name, "subject"), "subject changed") def test_value_changed(self): d = self.test_insert() d.subject = "subject changed again" d.save() self.assertTrue(d.has_value_changed('subject')) self.assertFalse(d.has_value_changed('event_type')) def test_mandatory(self): # TODO: recheck if it is OK to force delete frappe.delete_doc_if_exists("User", "[email protected]", 1) d = frappe.get_doc({ "doctype": "User", "email": "[email protected]", }) self.assertRaises(frappe.MandatoryError, d.insert) d.set("first_name", "Test Mandatory") d.insert() self.assertEqual(frappe.db.get_value("User", d.name), d.name) def test_conflict_validation(self): d1 = self.test_insert() d2 = frappe.get_doc(d1.doctype, d1.name) d1.save() self.assertRaises(frappe.TimestampMismatchError, d2.save) def test_conflict_validation_single(self): d1 = frappe.get_doc("Website Settings", "Website Settings") d1.home_page = "test-web-page-1" d2 = frappe.get_doc("Website Settings", "Website Settings") d2.home_page = "test-web-page-1" d1.save() self.assertRaises(frappe.TimestampMismatchError, d2.save) def test_permission(self): frappe.set_user("Guest") self.assertRaises(frappe.PermissionError, self.test_insert) frappe.set_user("Administrator") def test_permission_single(self): frappe.set_user("Guest") d = frappe.get_doc("Website Settings", "Website Settings") self.assertRaises(frappe.PermissionError, d.save) frappe.set_user("Administrator") def test_link_validation(self): frappe.delete_doc_if_exists("User", "[email protected]", 1) d = frappe.get_doc({ "doctype": "User", "email": "[email protected]", "first_name": "Link Validation", "roles": [ { "role": "ABC" } ] }) self.assertRaises(frappe.LinkValidationError, d.insert) d.roles = [] 
d.append("roles", { "role": "System Manager" }) d.insert() self.assertEqual(frappe.db.get_value("User", d.name), d.name) def test_validate(self): d = self.test_insert() d.starts_on = "2014-01-01" d.ends_on = "2013-01-01" self.assertRaises(frappe.ValidationError, d.validate) self.assertRaises(frappe.ValidationError, d.run_method, "validate") self.assertRaises(frappe.ValidationError, d.save) def test_update_after_submit(self): d = self.test_insert() d.starts_on = "2014-09-09" self.assertRaises(frappe.UpdateAfterSubmitError, d.validate_update_after_submit) d.meta.get_field("starts_on").allow_on_submit = 1 d.validate_update_after_submit() d.meta.get_field("starts_on").allow_on_submit = 0 # when comparing date(2014, 1, 1) and "2014-01-01" d.reload() d.starts_on = "2014-01-01" d.validate_update_after_submit() def test_varchar_length(self): d = self.test_insert() d.subject = "abcde"*100 self.assertRaises(frappe.CharacterLengthExceededError, d.save) def test_xss_filter(self): d = self.test_insert() # script xss = '<script>alert("XSS")</script>' escaped_xss = xss.replace('<', '&lt;').replace('>', '&gt;') d.subject += xss d.save() d.reload() self.assertTrue(xss not in d.subject) self.assertTrue(escaped_xss in d.subject) # onload xss = '<div onload="alert("XSS")">Test</div>' escaped_xss = '<div>Test</div>' d.subject += xss d.save() d.reload() self.assertTrue(xss not in d.subject) self.assertTrue(escaped_xss in d.subject) # css attributes xss = '<div style="something: doesn\'t work; color: red;">Test</div>' escaped_xss = '<div style="">Test</div>' d.subject += xss d.save() d.reload() self.assertTrue(xss not in d.subject) self.assertTrue(escaped_xss in d.subject) def test_naming_series(self): data = ["TEST-", "TEST/17-18/.test_data./.####", "TEST.YYYY.MM.####"] for series in data: name = make_autoname(series) prefix = series if ".#" in series: prefix = series.rsplit('.',1)[0] prefix = parse_naming_series(prefix) old_current = frappe.db.get_value('Series', prefix, "current", order_by="name") revert_series_if_last(series, name) new_current = cint(frappe.db.get_value('Series', prefix, "current", order_by="name")) self.assertEqual(cint(old_current) - 1, new_current) def test_non_negative_check(self): frappe.delete_doc_if_exists("Currency", "Frappe Coin", 1) d = frappe.get_doc({ 'doctype': 'Currency', 'currency_name': 'Frappe Coin', 'smallest_currency_fraction_value': -1 }) self.assertRaises(frappe.NonNegativeError, d.insert) d.set('smallest_currency_fraction_value', 1) d.insert() self.assertEqual(frappe.db.get_value("Currency", d.name), d.name) frappe.delete_doc_if_exists("Currency", "Frappe Coin", 1)
py
1a5a877760ad61e7cfc19e7e2ea34362f9d173a6
""" Common Python utilities for interacting with the dashboard infra. """ import argparse import datetime import json import logging import os import sys def print_log(msg, dec_char='*'): padding = max(list(map(len, str(msg).split('\n')))) decorate = dec_char * (padding + 4) print(f'{decorate}\n{msg}\n{decorate}') def validate_json(dirname, *fields, filename='status.json'): if not check_file_exists(dirname, filename): return {'success': False, 'message': 'No {} in {}'.format(filename, dirname)} fp = read_json(dirname, filename) for required_field in fields: if required_field not in fp: return {'success': False, 'message': '{} in {} has no \'{}\' field'.format(filename, dirname, required_field)} return fp def check_file_exists(dirname, filename): dirname = os.path.expanduser(dirname) full_name = os.path.join(dirname, filename) return os.path.isfile(full_name) def idemp_mkdir(dirname): '''Creates a directory in an idempotent fashion.''' dirname = os.path.expanduser(dirname) os.makedirs(dirname, exist_ok=True) def prepare_out_file(dirname, filename): dirname = os.path.expanduser(dirname) full_name = os.path.join(dirname, filename) if not check_file_exists(dirname, filename): os.makedirs(os.path.dirname(full_name), exist_ok=True) return full_name def read_json(dirname, filename): dirname = os.path.expanduser(dirname) with open(os.path.join(dirname, filename)) as json_file: data = json.load(json_file) return data def write_json(dirname, filename, obj): filename = prepare_out_file(dirname, filename) with open(filename, 'w') as outfile: json.dump(obj, outfile) def read_config(dirname): return read_json(dirname, 'config.json') def write_status(output_dir, success, message): write_json(output_dir, 'status.json', { 'success': success, 'message': message }) def write_summary(output_dir, title, value): write_json(output_dir, 'summary.json', { 'title': title, 'value': value }) def get_timestamp(): time = datetime.datetime.now() return time.strftime('%m-%d-%Y-%H%M') def parse_timestamp(data): return datetime.datetime.strptime(data['timestamp'], '%m-%d-%Y-%H%M') def time_difference(entry1, entry2): ''' Returns a datetime object corresponding to the difference in timestamps between two data entries. (Entry 1 time - entry 2 time) ''' return parse_timestamp(entry1) - parse_timestamp(entry2) def sort_data(data_dir): '''Sorts all data files in the given directory by timestamp.''' data_dir = os.path.expanduser(data_dir) all_data = [] for _, _, files in os.walk(data_dir): for name in files: data = read_json(data_dir, name) all_data.append(data) return sorted(all_data, key=parse_timestamp) def gather_stats(sorted_data, fields): ''' Expects input in the form of a list of data objects with timestamp fields (like those returned by sort_data). For each entry, this looks up entry[field[0]][field[1]]... for all entries that have all the fields, skipping those that don't. Returns a pair (list of entry values, list of corresponding entry timestamps) ''' stats = [] times = [] for entry in sorted_data: stat = entry not_present = False for field in fields: if field not in stat: not_present = True break stat = stat[field] if not_present: continue times.append(parse_timestamp(entry)) stats.append(stat) return (stats, times) def traverse_fields(entry, ignore_fields=None): """ Returns a list of sets of nested fields (one set per level of nesting) of a JSON data entry produced by a benchmark analysis script. Ignores the 'detailed' field by default (as old data files will not have detailed summaries). 
Set ignore_fields to a non-None value to avoid the defaults. """ ignore_set = {'timestamp', 'detailed', 'start_time', 'end_time', 'time_delta', 'success', 'run_cpu_telemetry', 'run_gpu_telemetry'} if ignore_fields is not None: ignore_set = set(ignore_fields) level_fields = {field for field in entry.keys() if field not in ignore_set} values_to_check = [entry[field] for field in level_fields if isinstance(entry[field], dict)] tail = [] max_len = 0 for value in values_to_check: next_fields = traverse_fields(value) tail.append(next_fields) if len(next_fields) > max_len: max_len = len(next_fields) # combine all the field lists (union of each level's sets) final_tail = [] for i in range(max_len): u = set({}) final_tail.append(u.union(*[fields_list[i] for fields_list in tail if len(fields_list) > i])) return [level_fields] + final_tail def invoke_main(main_func, *arg_names): """ Generates an argument parser for arg_names and calls main_func with the arguments it parses. Arguments are assumed to be string-typed. The argument names should be Python-valid names. If main_func returns a value, this function assumes it to be a return code. If not, this function will exit with code 0 after invoking main """ parser = argparse.ArgumentParser() for arg_name in arg_names: name = arg_name parser.add_argument('--{}'.format(name.replace('_', '-')), required=True, type=str) args = parser.parse_args() ret = main_func(*[getattr(args, name) for name in arg_names]) if ret is None: sys.exit(0) sys.exit(ret) def render_exception(e): return logging.Formatter.formatException(e, sys.exc_info())
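
# Usage sketch (not a definitive part of the module above): how a benchmark
# script might wire these helpers together. The directories and the main()
# body are placeholders; only the helper signatures come from this module.
def main(config_dir, output_dir):
    config = read_config(config_dir)          # reads <config_dir>/config.json
    idemp_mkdir(output_dir)
    write_status(output_dir, True, 'ran with config: {}'.format(config))
    write_summary(output_dir, 'Example benchmark', 42)


if __name__ == '__main__':
    # Invoked as: python this_script.py --config-dir ~/cfg --output-dir ~/out
    invoke_main(main, 'config_dir', 'output_dir')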
py
1a5a87c916ef27afd70fdda32b451cfa26b85e0d
from inspect import signature from collections import namedtuple import time import numpy as np import pandas as pd from functools import singledispatch ##################### # utils ##################### class Timer(): def __init__(self): self.times = [time.time()] self.total_time = 0.0 def __call__(self, include_in_total=True): self.times.append(time.time()) delta_t = self.times[-1] - self.times[-2] if include_in_total: self.total_time += delta_t return delta_t localtime = lambda: time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) class TableLogger(): def append(self, output): if not hasattr(self, 'keys'): self.keys = output.keys() print(*(f'{k:>12s}' for k in self.keys)) filtered = [output[k] for k in self.keys] print(*(f'{v:12.4f}' if isinstance(v, np.float) else f'{v:12}' for v in filtered)) ##################### ## data preprocessing ##################### cifar10_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255 cifar10_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255 def normalise(x, mean=cifar10_mean, std=cifar10_std): x, mean, std = [np.array(a, np.float32) for a in (x, mean, std)] x -= mean*255 x *= 1.0/(255*std) return x def pad(x, border=4): return np.pad(x, [(0, 0), (border, border), (border, border), (0, 0)], mode='reflect') def transpose(x, source='NHWC', target='NCHW'): return x.transpose([source.index(d) for d in target]) ##################### ## data augmentation ##################### class Crop(namedtuple('Crop', ('h', 'w'))): def __call__(self, x, x0, y0): return x[:,y0:y0+self.h,x0:x0+self.w] def options(self, x_shape): C, H, W = x_shape return {'x0': range(W+1-self.w), 'y0': range(H+1-self.h)} def output_shape(self, x_shape): C, H, W = x_shape return (C, self.h, self.w) class FlipLR(namedtuple('FlipLR', ())): def __call__(self, x, choice): return x[:, :, ::-1].copy() if choice else x def options(self, x_shape): return {'choice': [True, False]} class Cutout(namedtuple('Cutout', ('h', 'w'))): def __call__(self, x, x0, y0): x = x.copy() x[:,y0:y0+self.h,x0:x0+self.w].fill(0.0) return x def options(self, x_shape): C, H, W = x_shape return {'x0': range(W+1-self.w), 'y0': range(H+1-self.h)} class Transform(): def __init__(self, dataset, transforms): self.dataset, self.transforms = dataset, transforms self.choices = None def __len__(self): return len(self.dataset) def __getitem__(self, index): data, labels = self.dataset[index] for choices, f in zip(self.choices, self.transforms): args = {k: v[index] for (k,v) in choices.items()} data = f(data, **args) return data, labels def set_random_choices(self): self.choices = [] x_shape = self.dataset[0][0].shape N = len(self) for t in self.transforms: options = t.options(x_shape) x_shape = t.output_shape(x_shape) if hasattr(t, 'output_shape') else x_shape self.choices.append({k:np.random.choice(v, size=N) for (k,v) in options.items()}) ##################### ## dict utils ##################### union = lambda *dicts: {k: v for d in dicts for (k, v) in d.items()} def path_iter(nested_dict, pfx=()): for name, val in nested_dict.items(): if isinstance(val, dict): yield from path_iter(val, (*pfx, name)) else: yield ((*pfx, name), val) ##################### ## graph building ##################### sep='_' RelativePath = namedtuple('RelativePath', ('parts')) rel_path = lambda *parts: RelativePath(parts) def build_graph(net): net = dict(path_iter(net)) default_inputs = [[('input',)]]+[[k] for k in net.keys()] with_default_inputs = lambda vals: (val if 
isinstance(val, tuple) else (val, default_inputs[idx]) for idx,val in enumerate(vals)) parts = lambda path, pfx: tuple(pfx) + path.parts if isinstance(path, RelativePath) else (path,) if isinstance(path, str) else path return {sep.join((*pfx, name)): (val, [sep.join(parts(x, pfx)) for x in inputs]) for (*pfx, name), (val, inputs) in zip(net.keys(), with_default_inputs(net.values()))} ##################### ## training utils ##################### @singledispatch def cat(*xs): raise NotImplementedError @singledispatch def to_numpy(x): raise NotImplementedError class PiecewiseLinear(namedtuple('PiecewiseLinear', ('knots', 'vals'))): def __call__(self, t): return np.interp([t], self.knots, self.vals)[0] class StatsLogger(): def __init__(self, keys): self._stats = {k:[] for k in keys} def append(self, output): for k,v in self._stats.items(): v.append(output[k].detach()) def stats(self, key): return cat(*self._stats[key]) def mean(self, key): return np.mean(to_numpy(self.stats(key)), dtype=np.float) def run_batches(model, batches, training, optimizer_step=None, stats=None): stats = stats or StatsLogger(('loss', 'correct')) model.train(training) for batch in batches: output = model(batch) stats.append(output) if training: output['loss'].sum().backward() optimizer_step() model.zero_grad() return stats def train_epoch(model, train_batches, test_batches, optimizer_step, timer, test_time_in_total=True): train_stats, train_time = run_batches(model, train_batches, True, optimizer_step), timer() test_stats, test_time = run_batches(model, test_batches, False), timer(test_time_in_total) return { 'train time': train_time, 'train loss': train_stats.mean('loss'), 'train acc': train_stats.mean('correct'), 'test time': test_time, 'test loss': test_stats.mean('loss'), 'test acc': test_stats.mean('correct'), 'total time': timer.total_time, } def train(model, optimizer, train_batches, test_batches, epochs, loggers=(), test_time_in_total=True, timer=None): timer = timer or Timer() for epoch in range(epochs): epoch_stats = train_epoch(model, train_batches, test_batches, optimizer.step, timer, test_time_in_total=test_time_in_total) summary = union({'epoch': epoch+1, 'lr': optimizer.param_values()['lr']*train_batches.batch_size}, epoch_stats) for logger in loggers: logger.append(summary) return summary ##################### ## network visualisation (requires pydot) ##################### class ColorMap(dict): palette = ( 'bebada,ffffb3,fb8072,8dd3c7,80b1d3,fdb462,b3de69,fccde5,bc80bd,ccebc5,ffed6f,1f78b4,33a02c,e31a1c,ff7f00,' '4dddf8,e66493,b07b87,4e90e3,dea05e,d0c281,f0e189,e9e8b1,e0eb71,bbd2a4,6ed641,57eb9c,3ca4d4,92d5e7,b15928' ).split(',') def __missing__(self, key): self[key] = self.palette[len(self) % len(self.palette)] return self[key] def make_pydot(nodes, edges, direction='LR', sep=sep, **kwargs): import pydot parent = lambda path: path[:-1] stub = lambda path: path[-1] class Subgraphs(dict): def __missing__(self, path): subgraph = pydot.Cluster(sep.join(path), label=stub(path), style='rounded, filled', fillcolor='#77777744') self[parent(path)].add_subgraph(subgraph) return subgraph subgraphs = Subgraphs() subgraphs[()] = g = pydot.Dot(rankdir=direction, directed=True, **kwargs) g.set_node_defaults( shape='box', style='rounded, filled', fillcolor='#ffffff') for node, attr in nodes: path = tuple(node.split(sep)) subgraphs[parent(path)].add_node( pydot.Node(name=node, label=stub(path), **attr)) for src, dst, attr in edges: g.add_edge(pydot.Edge(src, dst, **attr)) return g get_params = lambda mod: {p.name: 
getattr(mod, p.name, '?') for p in signature(type(mod)).parameters.values()} class DotGraph(): colors = ColorMap() def __init__(self, net, size=15, direction='LR'): graph = build_graph(net) self.nodes = [(k, { 'tooltip': '%s %.1000r' % (type(n).__name__, get_params(n)), 'fillcolor': '#'+self.colors[type(n)], }) for k, (n, i) in graph.items()] self.edges = [(src, k, {}) for (k, (n, i)) in graph.items() for src in i] self.size, self.direction = size, direction def dot_graph(self, **kwargs): return make_pydot(self.nodes, self.edges, size=self.size, direction=self.direction, **kwargs) def svg(self, **kwargs): return self.dot_graph(**kwargs).create(format='svg').decode('utf-8') try: import pydot def _repr_svg_(self): return self.svg() except ImportError: def __repr__(self): return 'pydot is needed for network visualisation' walk = lambda dict_, key: walk(dict_, dict_[key]) if key in dict_ else key def remove_by_type(net, node_type): #remove identity nodes for more compact visualisations graph = build_graph(net) remap = {k: i[0] for k,(v,i) in graph.items() if isinstance(v, node_type)} return {k: (v, [walk(remap, x) for x in i]) for k, (v,i) in graph.items() if not isinstance(v, node_type)}
py
1a5a87fbae40a6b1a1a7b691bc545284e07041b9
import unittest import numpy import pytest import dpnp as cupy from tests.third_party.cupy import testing # from cupy import util def astype_without_warning(x, dtype, *args, **kwargs): dtype = numpy.dtype(dtype) if x.dtype.kind == 'c' and dtype.kind not in ['b', 'c']: with testing.assert_warns(numpy.ComplexWarning): return x.astype(dtype, *args, **kwargs) else: return x.astype(dtype, *args, **kwargs) @testing.gpu class TestArrayCopyAndView(unittest.TestCase): @testing.numpy_cupy_array_equal() def test_view(self, xp): a = testing.shaped_arange((4,), xp, dtype=numpy.float32) b = a.view(dtype=numpy.int32) b[:] = 0 return a @testing.for_dtypes([numpy.int16, numpy.int64]) @testing.numpy_cupy_array_equal() def test_view_itemsize(self, xp, dtype): a = testing.shaped_arange((4,), xp, dtype=numpy.int32) b = a.view(dtype=dtype) return b @testing.numpy_cupy_array_equal() def test_view_0d(self, xp): a = xp.array(1.5, dtype=numpy.float32) return a.view(dtype=numpy.int32) @testing.for_dtypes([numpy.int16, numpy.int64]) def test_view_0d_raise(self, dtype): for xp in (numpy, cupy): a = xp.array(3, dtype=numpy.int32) with pytest.raises(ValueError): a.view(dtype=dtype) @testing.for_dtypes([numpy.int16, numpy.int64]) def test_view_non_contiguous_raise(self, dtype): for xp in (numpy, cupy): a = testing.shaped_arange((2, 2, 2), xp, dtype=numpy.int32) a = a.transpose(0, 2, 1) with pytest.raises(ValueError): a.view(dtype=dtype) @testing.numpy_cupy_array_equal() def test_flatten(self, xp): a = testing.shaped_arange((2, 3, 4), xp) return a.flatten() @testing.numpy_cupy_array_equal() def test_flatten_copied(self, xp): a = testing.shaped_arange((4,), xp) b = a.flatten() a[:] = 1 return b @testing.numpy_cupy_array_equal() def test_transposed_flatten(self, xp): a = testing.shaped_arange((2, 3, 4), xp).transpose(2, 0, 1) return a.flatten() @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_fill(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) a.fill(1) return a @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_fill_with_numpy_scalar_ndarray(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) a.fill(numpy.ones((), dtype=dtype)) return a @testing.for_all_dtypes() def test_fill_with_numpy_nonscalar_ndarray(self, dtype): a = testing.shaped_arange((2, 3, 4), cupy, dtype) with self.assertRaises(ValueError): a.fill(numpy.ones((1,), dtype=dtype)) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_transposed_fill(self, xp, dtype): a = testing.shaped_arange((2, 3, 4), xp, dtype) b = a.transpose(2, 0, 1) b.fill(1) return b @testing.for_orders(['C', 'F', 'A', 'K', None]) @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) @testing.numpy_cupy_array_equal() def test_astype(self, xp, src_dtype, dst_dtype, order): a = testing.shaped_arange((2, 3, 4), xp, src_dtype) return astype_without_warning(a, dst_dtype, order=order) @testing.for_orders('CFAK') @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) def test_astype_type(self, src_dtype, dst_dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, src_dtype) b = astype_without_warning(a, dst_dtype, order=order) a_cpu = testing.shaped_arange((2, 3, 4), numpy, src_dtype) b_cpu = astype_without_warning(a_cpu, dst_dtype, order=order) self.assertEqual(b.dtype.type, b_cpu.dtype.type) @testing.for_orders('CAK') @testing.for_all_dtypes() def test_astype_type_c_contiguous_no_copy(self, dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, dtype) b = a.astype(dtype, order=order, 
copy=False) self.assertTrue(b is a) @testing.for_orders('FAK') @testing.for_all_dtypes() def test_astype_type_f_contiguous_no_copy(self, dtype, order): a = testing.shaped_arange((2, 3, 4), cupy, dtype) a = cupy.asfortranarray(a) b = a.astype(dtype, order=order, copy=False) self.assertTrue(b is a) @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) @testing.numpy_cupy_array_equal() def test_astype_strides(self, xp, src_dtype, dst_dtype): src = xp.empty((1, 2, 3), dtype=src_dtype) return numpy.array( astype_without_warning(src, dst_dtype, order='K').strides) @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) @testing.numpy_cupy_array_equal() def test_astype_strides_negative(self, xp, src_dtype, dst_dtype): src = xp.empty((2, 3), dtype=src_dtype)[::-1, :] return numpy.array( astype_without_warning(src, dst_dtype, order='K').strides) @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) @testing.numpy_cupy_array_equal() def test_astype_strides_swapped(self, xp, src_dtype, dst_dtype): src = xp.swapaxes(xp.empty((2, 3, 4), dtype=src_dtype), 1, 0) return numpy.array( astype_without_warning(src, dst_dtype, order='K').strides) @testing.for_all_dtypes_combination(('src_dtype', 'dst_dtype')) @testing.numpy_cupy_array_equal() def test_astype_strides_broadcast(self, xp, src_dtype, dst_dtype): src, _ = xp.broadcast_arrays(xp.empty((2,), dtype=src_dtype), xp.empty((2, 3, 2), dtype=src_dtype)) return numpy.array( astype_without_warning(src, dst_dtype, order='K').strides) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_diagonal1(self, xp, dtype): a = testing.shaped_arange((3, 4, 5), xp, dtype) return a.diagonal(1, 2, 0) @testing.for_all_dtypes() @testing.numpy_cupy_array_equal() def test_diagonal2(self, xp, dtype): a = testing.shaped_arange((3, 4, 5), xp, dtype) return a.diagonal(-1, 2, 0) # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') @testing.for_orders('CF') @testing.for_dtypes([numpy.int16, numpy.int64, numpy.float16, numpy.float64]) @testing.numpy_cupy_array_equal() def test_isinstance_numpy_copy(self, xp, dtype, order): a = numpy.arange(100, dtype=dtype).reshape(10, 10, order=order) b = xp.empty(a.shape, dtype=dtype, order=order) b[:] = a return b # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') def test_isinstance_numpy_copy_wrong_dtype(self): for xp in (numpy, cupy): a = numpy.arange(100, dtype=numpy.float64).reshape(10, 10) b = cupy.empty(a.shape, dtype=numpy.int32) with pytest.raises(ValueError): b[:] = a # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') def test_isinstance_numpy_copy_wrong_shape(self): for xp in (numpy, cupy): a = numpy.arange(100, dtype=numpy.float64).reshape(10, 10) b = cupy.empty(100, dtype=a.dtype) with pytest.raises(ValueError): b[:] = a # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') @testing.numpy_cupy_array_equal() def test_isinstance_numpy_copy_not_slice(self, xp): a = xp.arange(5, dtype=numpy.float64) a[a < 3] = 0 return a @testing.parameterize( {'src_order': 'C'}, {'src_order': 'F'}, ) @testing.gpu class TestNumPyArrayCopyView(unittest.TestCase): # @unittest.skipUnless(util.ENABLE_SLICE_COPY, 'Special copy disabled') @testing.for_orders('CF') @testing.for_dtypes([numpy.int16, numpy.int64, numpy.float16, numpy.float64]) @testing.numpy_cupy_array_equal() def test_isinstance_numpy_view_copy_f(self, xp, dtype, order): a = numpy.arange(100, dtype=dtype).reshape( 10, 10, order=self.src_order) a = a[2:5, 1:8] b = 
xp.empty(a.shape, dtype=dtype, order=order) b[:] = a return b
py
1a5a87fbc2ecf658467b0164d5fb020de6133abc
# from framework.utils.analyzer_pydantic import ModelFieldEx import inspect from dataclasses import field from typing import Any, Dict, List, Optional, Type, TypedDict from fastapi import Query from pydantic import BaseConfig, BaseModel, Field from pydantic.fields import FieldInfo, ModelField, NoArgAnyCallable, Union, Validator from pydantic.utils import smart_deepcopy class DataclassFieldMeta(TypedDict): default: Any default_factory: Any init: bool repr: bool hash: bool compare: bool metadata: Any class PydanticFieldMeta(TypedDict): name: str type: Type default: Any default_factory: Any title: str alias: str description: str const: bool gt: float ge: float lt: float le: float multiple_of: float min_items: int max_items: int min_length: int max_length: int regex: str # extra allow_mutation: bool # fastapi deprecated: str class ModelFieldEx: """https://www.apps-gcp.com/openapi_learn_the_basics/ に近づけたい""" kind: inspect._ParameterKind index: int description: str def __init__( self, *, name: str, type_: Type[Any] = Any, # type: ignore kind: inspect._ParameterKind, default: Any = inspect._empty, # type: ignore # common required: bool = True, index: int = -1, alias: str = None, description: str = "", meta: Any = None, # pydantic default_factory: Optional[NoArgAnyCallable] = None, # class_validators: Optional[Dict[str, Validator]] = None, # model_config: Type[BaseConfig] = BaseConfig = None, # field_info: Optional[FieldInfo] = None, # sqlalchemy # column_type=None, relation_type: str = "", # "ONETOONE"|"MANYTOONE"|"ONETOMANY"|"MANYTOMANY"|"" is_primary_key: bool = False, foreign_keys: List[str] = [], is_unique: bool = False, is_index: bool = False, is_nullable: bool = False, is_system: bool = False, ) -> None: if default is inspect._empty and not default_factory: # type: ignore assert required == True type_ = Any if type_ is inspect._empty else type_ # type: ignore self.name = name self.type_ = type_ # self.class_validators = class_validators # self.model_config = model_config self.default = default self.default_factory = default_factory self.required = required self.alias = alias or name # self.field_info = field_info self.kind = kind self.index = index self.description = description or "" self.meta = meta # orm fields # self.column_type = column_type self.foreign_keys = foreign_keys self.relation_type = relation_type self.is_primary_key = is_primary_key self.is_unique = is_unique self.is_nullable = is_nullable self.is_system = is_system self.is_index = is_index def get_meta_or_default(self, undefined: Any = inspect._empty) -> Any: # type: ignore return self.meta or self.get_default(undefined=undefined) def get_real_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore """定義されたままのメタ情報を取得する""" raise NotImplementedError() return self.meta def get_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore """標準化されたpydanticのfieldinfoなどのメタ情報を取得する""" return self.meta def get_orm_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore """標準化されたsqlalchemyのcolumnなどのメタ情報を取得する""" raise NotImplementedError() return self.meta def get_fulltextsearch_mata(self, undefined: Any = inspect._empty) -> Any: # type: ignore """標準化された全文検索に関するメタを取得する""" raise NotImplementedError() return self.meta def get_default(self, undefined: Any = inspect._empty) -> Any: # type: ignore """デフォルト値かdefault_factoryに生成されたデフォルト値かemptyを返す。emptyは任意の値を返すことができる。""" if self.required: return undefined # TODO:pydanticのフィールドはdeppcopyしない。 if isinstance(self.default, FieldInfo): return self.default return ( 
self.default_factory() if self.default_factory else smart_deepcopy(self.default) ) def __str__(self) -> str: name = self.name type_ = self.type_ default = self.default default_factory = self.default_factory required = self.required alias = self.alias field_info = self.field_info kind = self.kind index = self.index description = self.description return f"{self.__class__!r}({name=},{type_=},{default=},{default_factory=},{required=},{alias=},{field_info=},{kind=},{index=},{description=})" @classmethod def from_parameter(cls, parameter: inspect.Parameter): return cls.from_annotation_info( name=parameter.name, annotation=parameter.annotation, default=parameter.default, ) @classmethod def from_annotation_info( cls, name: str, annotation: Any = inspect._empty, default: Any = inspect._empty # type: ignore ): annotation = Any if annotation is inspect._empty else annotation # type: ignore parameter = inspect.Parameter(name=name, annotation=annotation, default=default) # type: ignore raise NotImplementedError() @classmethod def from_pydantic_modelfield(cls, field: ModelField): raise NotImplementedError()
py
1a5a8875b9d0c5043ee6201d9d790dbbcd09a228
import logging
from abc import abstractmethod

import numpy as np

from srl.base.define import (
    ContinuousAction,
    EnvObservationType,
    Info,
    RLAction,
    RLActionType,
    RLObservation,
    RLObservationType,
)
from srl.base.env.base import EnvRun, SpaceBase
from srl.base.rl.base import RLConfig, RLWorker

logger = logging.getLogger(__name__)


class ContinuousActionConfig(RLConfig):
    @property
    def action_type(self) -> RLActionType:
        return RLActionType.CONTINUOUS

    def _set_config_by_env(
        self,
        env: EnvRun,
        env_action_space: SpaceBase,
        env_observation_space: SpaceBase,
        env_observation_type: EnvObservationType,
    ) -> None:
        n, low, high = env_action_space.get_action_continuous_info()
        self._action_num = n
        self._action_low = low
        self._action_high = high

        if self.observation_type == RLObservationType.DISCRETE:
            shape, low, high = env_observation_space.get_observation_discrete_info()
        elif self.observation_type == RLObservationType.CONTINUOUS:
            shape, low, high = env_observation_space.get_observation_continuous_info()
        else:
            shape = (0,)
            low = np.array([0])
            high = np.array([0])
        self._observation_shape = shape
        self._observation_low = low
        self._observation_high = high

    @property
    def action_num(self) -> int:
        return self._action_num

    @property
    def action_low(self) -> np.ndarray:
        return self._action_low

    @property
    def action_high(self) -> np.ndarray:
        return self._action_high

    @property
    def observation_shape(self) -> tuple:
        return self._observation_shape

    @property
    def observation_low(self) -> np.ndarray:
        return self._observation_low

    @property
    def observation_high(self) -> np.ndarray:
        return self._observation_high


class ContinuousActionWorker(RLWorker):
    @abstractmethod
    def call_on_reset(self, state: np.ndarray) -> None:
        raise NotImplementedError()

    def _call_on_reset(self, state: RLObservation, env: EnvRun) -> None:
        self.call_on_reset(state)

    @abstractmethod
    def call_policy(self, state: np.ndarray) -> ContinuousAction:
        raise NotImplementedError()

    def _call_policy(self, state: RLObservation, env: EnvRun) -> RLAction:
        return self.call_policy(state)

    @abstractmethod
    def call_on_step(self, next_state: np.ndarray, reward: float, done: bool) -> Info:
        raise NotImplementedError()

    def _call_on_step(self, next_state: RLObservation, reward: float, done: bool, env: EnvRun) -> Info:
        return self.call_on_step(next_state, reward, done)

    @abstractmethod
    def call_render(self, env: EnvRun) -> Info:
        raise NotImplementedError()

    def _call_render(self, env: EnvRun) -> Info:
        return self.call_render(env)
py
1a5a89bae27308760e9f72430baf65c94d29da17
import copy import json import re import unittest from django.contrib import admin from django.contrib.auth import get_permission_codename from django.contrib.auth.models import Permission from django.template import RequestContext from django.utils.encoding import force_str from django.utils.html import escape from django.utils.http import urlencode, urlunquote from cms.api import add_plugin, create_page, create_title from cms.models import CMSPlugin, Page, Title from cms.utils.urlutils import admin_reverse from djangocms_text_ckeditor.cms_plugins import TextPlugin from djangocms_text_ckeditor.compat import get_page_placeholders from djangocms_text_ckeditor.models import Text from djangocms_text_ckeditor.utils import ( _plugin_tags_to_html, _render_cms_plugin, plugin_tags_to_admin_html, plugin_tags_to_id_list, plugin_to_tag, ) from tests.test_app.cms_plugins import DummyChildPlugin, DummyParentPlugin from .base import BaseTestCase try: from djangocms_transfer.exporter import export_page HAS_DJANGOCMS_TRANSFER = True except ImportError: HAS_DJANGOCMS_TRANSFER = False try: import djangocms_translations # noqa HAS_DJANGOCMS_TRANSLATIONS = True except ImportError: HAS_DJANGOCMS_TRANSLATIONS = False class PluginActionsTestCase(BaseTestCase): def get_custom_admin_url(self, plugin_class, name): plugin_type = plugin_class.__name__.lower() url_name = f'{plugin_class.model._meta.app_label}_{plugin_type}_{name}' return admin_reverse(url_name) def _add_child_plugin(self, text_plugin, plugin_type='PicturePlugin', data_suffix=None): name = f'{plugin_type} record' if data_suffix is not None: name = f'{name} {data_suffix}' basic_plugins = { 'LinkPlugin': { 'name': name, 'external_link': 'https://www.django-cms.org', }, 'PreviewDisabledPlugin': {}, 'SekizaiPlugin': {}, } if plugin_type == 'PicturePlugin': data = {'caption_text': name, 'picture': self.create_filer_image_object()} else: data = basic_plugins[plugin_type] plugin = add_plugin( text_plugin.placeholder, plugin_type, 'en', target=text_plugin, **data, ) return plugin def _add_text_plugin(self, placeholder, plugin_type='TextPlugin'): text_plugin = add_plugin( placeholder, plugin_type, 'en', body='Hello World', ) return text_plugin def _replace_plugin_contents(self, text, new_plugin_content): def _do_replace(obj, match): return plugin_to_tag(obj, content=new_plugin_content) return _plugin_tags_to_html(text, output_func=_do_replace) def add_plugin_to_text(self, text_plugin, plugin): text_plugin.body = f'{text_plugin.body} {plugin_to_tag(plugin)}' text_plugin.save() return text_plugin def _give_permission(self, user, model, permission_type, save=True): codename = get_permission_codename(permission_type, model._meta) user.user_permissions.add(Permission.objects.get(codename=codename)) def _give_cms_permissions(self, user): for perm_type in ['add', 'change', 'delete']: for model in [Page, Title]: self._give_permission(user, model, perm_type) def get_page_admin(self): admin.autodiscover() return admin.site._registry[Page] def get_post_request(self, data): return self.get_request(post_data=data) def get_plugin_id_from_response(self, response): url = urlunquote(response.url) # Ideal case, this looks like: # /en/admin/cms/page/edit-plugin/1/ return re.findall(r'\d+', url)[0] def test_add_and_edit_plugin(self): """ Test that you can add a text plugin """ admin = self.get_superuser() simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') endpoint = 
self.get_add_plugin_uri(simple_placeholder, 'TextPlugin') with self.login_user_context(admin): response = self.client.get(endpoint) text_plugin_pk = self.get_plugin_id_from_response(response) self.assertIn('?delete-on-cancel', response.url) self.assertEqual(response.status_code, 302) # Assert "ghost" plugin has been created self.assertObjectExist(CMSPlugin.objects.all(), pk=text_plugin_pk) cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk) text_plugin_class = cms_plugin.get_plugin_class_instance() # Assert "real" plugin has not been created yet self.assertObjectDoesNotExist(Text.objects.all(), pk=text_plugin_pk) add_url = response.url with self.login_user_context(admin): request = self.get_request() action_token = text_plugin_class.get_action_token(request, cms_plugin) response = self.client.get(add_url) self.assertEqual(response.status_code, 200) # Assert cancel token is present self.assertContains(response, action_token) with self.login_user_context(admin): data = {'body': 'Hello world'} response = self.client.post(add_url, data) self.assertEqual(response.status_code, 200) # Assert "real" plugin has been created yet self.assertObjectExist(Text.objects.all(), pk=text_plugin_pk) text_plugin = Text.objects.get(pk=text_plugin_pk) # Assert the text was correctly saved self.assertEqual(text_plugin.body, 'Hello world') def test_add_and_cancel_plugin(self): """ Test that you can add a text plugin """ simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin') with self.login_user_context(self.get_superuser()): response = self.client.get(endpoint) self.assertEqual(response.status_code, 302) # Point to the newly created text plugin text_plugin_pk = self.get_plugin_id_from_response(response) cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk) text_plugin_class = cms_plugin.get_plugin_class_instance() # Assert "ghost" plugin has been created self.assertObjectExist(CMSPlugin.objects.all(), pk=text_plugin_pk) with self.login_user_context(self.get_superuser()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, cms_plugin) data = {'token': action_token} request = self.get_post_request(data) response = text_plugin_class.delete_on_cancel(request) self.assertEqual(response.status_code, 204) # Assert "ghost" plugin has been removed self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=text_plugin_pk) # Assert "real" plugin was never created self.assertObjectDoesNotExist(Text.objects.all(), pk=text_plugin_pk) # Assert user can't delete a non "ghost" plugin text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) with self.login_user_context(self.get_superuser()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, text_plugin) data = {'token': action_token} request = self.get_post_request(data) response = text_plugin_class.delete_on_cancel(request) self.assertEqual(response.status_code, 400) def test_copy_referenced_plugins(self): """ Test that copy+pasting a child plugin between text editors creates proper copies of the child plugin and messes no other data up """ simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') def _get_text_plugin_with_children(): text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body='Text plugin we copy child plugins to', 
) _add_child_plugins_to_text_plugin(text_plugin) return text_plugin def _add_child_plugins_to_text_plugin(text_plugin): child_plugin_1 = add_plugin( simple_placeholder, 'PicturePlugin', 'en', target=text_plugin, picture=self.create_filer_image_object(), caption_text='Child plugin one', ) child_plugin_2 = add_plugin( simple_placeholder, 'PicturePlugin', 'en', target=text_plugin, picture=self.create_filer_image_object(), caption_text='Child plugin two', ) self.add_plugin_to_text(text_plugin, child_plugin_1) self.add_plugin_to_text(text_plugin, child_plugin_2) def _copy_child_plugins_from_text(text_plugin_source, text_plugin_destination): for child_plugin in text_plugin_source.cmsplugin_set.all(): text_plugin_destination.body += ' ' + plugin_to_tag(child_plugin) text_plugin_destination.save() _run_clean_and_copy(text_plugin_destination) def _run_clean_and_copy(text_plugin): text_plugin.clean_plugins() text_plugin.copy_referenced_plugins() def _get_common_children_ids(text_plugin_one, text_plugin_two): original_children_ids = set(plugin_tags_to_id_list(text_plugin_one.body)) copied_children_ids = set(plugin_tags_to_id_list(text_plugin_two.body)) return original_children_ids.intersection(copied_children_ids) text_plugin_copy_from = _get_text_plugin_with_children() text_plugin_copy_to = _get_text_plugin_with_children() _copy_child_plugins_from_text(text_plugin_copy_from, text_plugin_copy_to) self.assertEqual(text_plugin_copy_from.cmsplugin_set.count(), 2) self.assertEqual(text_plugin_copy_to.cmsplugin_set.count(), 4) _run_clean_and_copy(text_plugin_copy_from) _run_clean_and_copy(text_plugin_copy_to) self.assertEqual(text_plugin_copy_from.cmsplugin_set.count(), 2) self.assertEqual(text_plugin_copy_to.cmsplugin_set.count(), 4) common_children_ids = _get_common_children_ids(text_plugin_copy_from, text_plugin_copy_to) self.assertFalse(common_children_ids) def test_add_and_cancel_child_plugin(self): """ Test that you can add a text plugin """ admin = self.get_superuser() simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) text_plugin_class = text_plugin.get_plugin_class_instance() child_plugin_1 = add_plugin( simple_placeholder, 'PicturePlugin', 'en', target=text_plugin, picture=self.create_filer_image_object(), caption_text='Foo', ) child_plugin_2 = add_plugin( simple_placeholder, 'PicturePlugin', 'en', target=text_plugin, picture=self.create_filer_image_object(), caption_text='Foo', ) child_plugin_3 = add_plugin( simple_placeholder, 'PicturePlugin', 'en', target=text_plugin, picture=self.create_filer_image_object(), caption_text='Foo', ) child_plugin_4 = add_plugin( simple_placeholder, 'PicturePlugin', 'en', target=text_plugin, picture=self.create_filer_image_object(), caption_text='Foo', ) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_1) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_4) with self.login_user_context(admin): request = self.get_request() action_token = text_plugin_class.get_action_token(request, text_plugin) # Assert user is unable to delete a saved child plugin data = {'token': action_token, 'child_plugins': [child_plugin_1.pk]} request = self.get_post_request(data) response = text_plugin_class.delete_on_cancel(request) self.assertEqual(response.status_code, 400) self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_1.pk) # Assert user is unable to delete if 
plugins array contains # an unsaved plugin. plugin_ids = [ child_plugin_1.pk, child_plugin_2.pk, child_plugin_3.pk, child_plugin_4.pk, ] data = {'token': action_token, 'child_plugins': plugin_ids} request = self.get_post_request(data) response = text_plugin_class.delete_on_cancel(request) self.assertEqual(response.status_code, 400) self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_1.pk) self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_2.pk) self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_3.pk) self.assertObjectExist(CMSPlugin.objects.all(), pk=child_plugin_4.pk) plugin_ids = [ child_plugin_2.pk, child_plugin_3.pk, ] data = {'token': action_token, 'child_plugins': plugin_ids} request = self.get_post_request(data) response = text_plugin_class.delete_on_cancel(request) self.assertEqual(response.status_code, 204) self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=child_plugin_2.pk) self.assertObjectDoesNotExist(CMSPlugin.objects.all(), pk=child_plugin_3.pk) def test_action_token_per_session(self): # Assert that a cancel token for the same plugin # is different per user session. simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) text_plugin_class = text_plugin.get_plugin_class_instance() with self.login_user_context(self.get_superuser()): request = self.get_request() action_token_1 = text_plugin_class.get_action_token(request, text_plugin) with self.login_user_context(self.get_superuser()): request = self.get_request() action_token_2 = text_plugin_class.get_action_token(request, text_plugin) self.assertNotEqual(action_token_1, action_token_2) def test_add_and_cancel_plugin_permissions(self): simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') endpoint = self.get_add_plugin_uri(simple_placeholder, 'TextPlugin') with self.login_user_context(self.user): response = self.client.post(endpoint, {}) self.assertEqual(response.status_code, 302) # Point to the newly created text plugin text_plugin_pk = self.get_plugin_id_from_response(response) cms_plugin = CMSPlugin.objects.get(pk=text_plugin_pk) text_plugin_class = cms_plugin.get_plugin_class_instance() endpoint = self.get_custom_admin_url(TextPlugin, 'delete_on_cancel') # Assert a standard user (no staff) can't delete ghost plugin with self.login_user_context(self.get_standard_user()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, cms_plugin) data = {'token': action_token} response = self.client.post(endpoint, data) self.assertEqual(response.status_code, 403) staff_user = self._create_user('addonly-staff', is_staff=True, is_superuser=False) self._give_cms_permissions(staff_user) self._give_permission(staff_user, text_plugin_class.model, 'add') with self.login_user_context(staff_user): request = self.get_request() action_token = text_plugin_class.get_action_token(request, cms_plugin) data = {'token': action_token} response = self.client.post(endpoint, data) self.assertEqual(response.status_code, 204) def test_change_form_has_rendered_plugin_content(self): """ When the text form is rendered in the admin, the child plugins are rendered as their contents passed as initial data to the text field. 
""" simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) child_plugins = [ self._add_child_plugin(text_plugin), self._add_child_plugin(text_plugin), ] for plugin in child_plugins: text_plugin = self.add_plugin_to_text(text_plugin, plugin) with self.login_user_context(self.get_superuser()): request = self.get_request() context = RequestContext(request) context['request'] = request text_with_rendered_plugins = plugin_tags_to_admin_html( text=text_plugin.body, context=context, ) endpoint = self.get_change_plugin_uri(text_plugin) response = self.client.get(endpoint) self.assertEqual(response.status_code, 200) self.assertEqual( response.context['adminform'].form['body'].value(), text_with_rendered_plugins, ) self.assertContains( response, escape(text_with_rendered_plugins), html=False, ) def test_user_cant_edit_child_plugins_directly(self): """ No user regardless of permissions can modify the contents of a child plugin directly in the text plugin text. """ simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) child_plugins = [ self._add_child_plugin(text_plugin), self._add_child_plugin(text_plugin), ] for plugin in child_plugins: text_plugin = self.add_plugin_to_text(text_plugin, plugin) with self.login_user_context(self.get_superuser()): expected_text = text_plugin.body # This returns the child plugins with their content # overridden to <img src=""> overridden_text = self._replace_plugin_contents( text_plugin.body, new_plugin_content='<img src="">', ) endpoint = self.get_change_plugin_uri(text_plugin) response = self.client.post(endpoint, {'body': overridden_text}) text_plugin.refresh_from_db() self.assertEqual(response.status_code, 200) self.assertXMLEqual(text_plugin.body, expected_text) def test_render_child_plugin_endpoint(self): simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) text_plugin_class = text_plugin.get_plugin_class_instance() child_plugin = self._add_child_plugin(text_plugin) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin) with self.login_user_context(self.get_superuser()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, text_plugin) endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin') endpoint += f'?token={action_token}&plugin={child_plugin.pk}' response = self.client.get(endpoint) self.assertEqual(response.status_code, 200) context = RequestContext(request) context['request'] = request rendered_content = _render_cms_plugin(child_plugin, context) rendered_child_plugin = plugin_to_tag( child_plugin, content=rendered_content, admin=True, ) self.assertEqual(force_str(response.content), rendered_child_plugin) child_plugin = self._add_child_plugin(text_plugin, plugin_type='PreviewDisabledPlugin') text_plugin = self.add_plugin_to_text(text_plugin, child_plugin) with self.login_user_context(self.get_superuser()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, text_plugin) endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin') endpoint += 
f'?token={action_token}&plugin={child_plugin.pk}' response = self.client.get(endpoint) self.assertEqual(response.status_code, 200) # it is important that we do not add any extra whitespace inside of # <cms-plugin></cms-plugin> rendered_child_plugin = ('<cms-plugin render-plugin=false ' 'alt="Preview Disabled Plugin - 3 ' '"title="Preview Disabled Plugin - 3" ' 'id="3"><span>Preview is disabled for this plugin</span>' '</cms-plugin>') self.assertEqual(force_str(response.content), rendered_child_plugin) def test_render_child_plugin_endpoint_calls_context_processors(self): simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) text_plugin_class = text_plugin.get_plugin_class_instance() child_plugin = self._add_child_plugin( text_plugin, plugin_type='SekizaiPlugin', ) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin) with self.login_user_context(self.get_superuser()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, text_plugin) endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin') endpoint += f'?token={action_token}&plugin={child_plugin.pk}' response = self.client.get(endpoint) self.assertEqual(response.status_code, 200) context = RequestContext(request) context['request'] = request rendered_content = _render_cms_plugin(child_plugin, context) rendered_child_plugin = plugin_to_tag( child_plugin, content=rendered_content, admin=True, ) self.assertEqual(force_str(response.content), rendered_child_plugin) def test_render_child_plugin_permissions(self): """ Users can't render a child plugin without change permissions on the placeholder attached object and the text plugin. """ simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) text_plugin_class = text_plugin.get_plugin_class_instance() child_plugin = self._add_child_plugin(text_plugin) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin) with self.login_user_context(self.get_standard_user()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, text_plugin) endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin') endpoint += f'?token={action_token}&plugin={child_plugin.pk}' response = self.client.get(endpoint) self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403, html=True) def test_render_child_plugin_token_validation(self): """ Users can only render a child plugin if the token was created in the current session and it's text plugin matches the child plugin parent. """ simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the first", ) text_plugin_class = text_plugin.get_plugin_class_instance() child_plugin = self._add_child_plugin(text_plugin) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin) # Tokens are unique per session. # Users can't render a child plugin with a token # from another session. 
with self.login_user_context(self.get_superuser()): request = self.get_request() with self.login_user_context(self.get_superuser()): action_token = text_plugin_class.get_action_token(request, text_plugin) endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin') endpoint += f'?token={action_token}&plugin={child_plugin.pk}' response = self.client.get(endpoint) self.assertEqual(response.status_code, 400) self.assertEqual(force_str(response.content), 'Unable to process your request. Invalid token.') text_plugin_2 = add_plugin( simple_placeholder, 'TextPlugin', 'en', body="I'm the second", ) # Tokens are unique per text plugin. # User can't render a child plugin for a token whose text plugin # does not match the plugin's parent. with self.login_user_context(self.get_superuser()): request = self.get_request() action_token = text_plugin_class.get_action_token(request, text_plugin_2) endpoint = self.get_custom_admin_url(TextPlugin, 'render_plugin') endpoint += f'?token={action_token}&plugin={child_plugin.pk}' response = self.client.get(endpoint) self.assertEqual(response.status_code, 400) self.assertEqual(force_str(response.content), 'Unable to process your request.') def test_custom_ckeditor_body_css_classes(self): simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') parent_plugin = add_plugin( simple_placeholder, DummyParentPlugin, 'en', label=DummyParentPlugin._ckeditor_body_class_label_trigger, ) child_plugin = add_plugin( simple_placeholder, DummyChildPlugin, 'en', target=parent_plugin, ) text_plugin = add_plugin( simple_placeholder, 'TextPlugin', 'en', body='Content', target=child_plugin, ) with self.login_user_context(self.get_superuser()): change_endpoint = self.get_change_plugin_uri(text_plugin) response = self.client.get(change_endpoint) self.assertContains(response, DummyParentPlugin._ckeditor_body_class) self.assertContains(response, DummyChildPlugin.child_ckeditor_body_css_class) def test_render_plugin(self): simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = self._add_text_plugin(simple_placeholder) for i in range(0, 10): plugin = self._add_child_plugin( text_plugin, plugin_type='LinkPlugin', data_suffix=i, ) text_plugin = self.add_plugin_to_text(text_plugin, plugin) with self.assertNumQueries(2): request = self.get_request() context = RequestContext(request) context['request'] = request rendered = _render_cms_plugin(text_plugin, context) for i in range(0, 10): self.assertTrue('LinkPlugin record %d' % i in rendered) def test_render_extended_plugin(self): simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = self._add_text_plugin(simple_placeholder, 'ExtendedTextPlugin') for i in range(0, 10): plugin = self._add_child_plugin( text_plugin, plugin_type='LinkPlugin', data_suffix=i, ) text_plugin = self.add_plugin_to_text(text_plugin, plugin) with self.assertNumQueries(2): request = self.get_request() context = RequestContext(request) context['request'] = request rendered = _render_cms_plugin(text_plugin, context) for i in range(0, 10): self.assertTrue('LinkPlugin record %d' % i in rendered) def test_copy_plugin_integrity(self): """ Test that copying of textplugins replaces references to copied plugins """ simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = 
get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin = self._add_text_plugin(simple_placeholder) child_plugin_1 = self._add_child_plugin( text_plugin, plugin_type='LinkPlugin', ) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_1) child_plugin_2 = self._add_child_plugin( text_plugin, plugin_type='LinkPlugin', ) text_plugin = self.add_plugin_to_text(text_plugin, child_plugin_2) # create a page translation to copy plugins to translation = create_title( 'fr', 'test-page-fr', simple_page, slug='test-page-fr', ) self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 3) self.assertEqual(CMSPlugin.objects.filter(language=translation.language).count(), 0) data = { 'source_placeholder_id': simple_placeholder.pk, 'target_placeholder_id': simple_placeholder.pk, 'target_language': translation.language, 'source_language': 'en', } endpoint = self.get_admin_url(Page, 'copy_plugins') endpoint += '?' + urlencode({'cms_path': '/en/'}) with self.login_user_context(self.user): response = self.client.post(endpoint, data) self.assertEqual(response.status_code, 200) self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 3) self.assertEqual(CMSPlugin.objects.filter(language=translation.language).count(), 3) plugins = list(CMSPlugin.objects.all()) new_plugin = plugins[3].get_plugin_instance()[0] idlist = sorted(plugin_tags_to_id_list(new_plugin.body)) expected = sorted([plugins[4].pk, plugins[5].pk]) self.assertEqual(idlist, expected) def test_copy_plugin_callback(self): simple_page = create_page('test page', 'page.html', 'en') simple_placeholder = get_page_placeholders(simple_page, 'en').get(slot='content') text_plugin_1 = self._add_text_plugin(simple_placeholder) child_plugin_1_a = self._add_child_plugin( text_plugin_1, plugin_type='LinkPlugin', ) text_plugin_1 = self.add_plugin_to_text(text_plugin_1, child_plugin_1_a) child_plugin_1_b = self._add_child_plugin( text_plugin_1, plugin_type='LinkPlugin', ) text_plugin_1 = self.add_plugin_to_text(text_plugin_1, child_plugin_1_b) text_plugin_2 = copy.copy(text_plugin_1) text_plugin_2.pk = None text_plugin_2.save() child_plugin_2_a = self._add_child_plugin( text_plugin_2, plugin_type='LinkPlugin', ) child_plugin_2_b = self._add_child_plugin( text_plugin_2, plugin_type='LinkPlugin', ) source_map = { child_plugin_1_a.pk: child_plugin_2_a, child_plugin_1_b.pk: child_plugin_2_b, } TextPlugin.do_post_copy(text_plugin_2, source_map) text_plugin_2.refresh_from_db() idlist = sorted(plugin_tags_to_id_list(text_plugin_2.body)) expected = sorted([child_plugin_2_a.pk, child_plugin_2_b.pk]) self.assertEqual(idlist, expected) def test_plugin_tags_to_id_list(self): pairs = ( ('<cms-plugin id="1"></cms-plugin><cms-plugin id="2"></cms-plugin>', [1, 2]), ('<cms-plugin alt="<h1>markup</h1>" id="1"></cms-plugin><cms-plugin id="1"></cms-plugin>', [1, 1]), ) for markup, expected in pairs: self.assertEqual(plugin_tags_to_id_list(markup), expected) def test_text_plugin_xss(self): page = create_page('test page', 'page.html', 'en') placeholder = get_page_placeholders(page, 'en').get(slot='content') plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body') endpoint = self.get_change_plugin_uri(plugin) with self.login_user_context(self.user): data = { 'body': ( '<div onload="do_evil_stuff();">divcontent</div><a href="javascript:do_evil_stuff();">acontent</a>' ), } response = self.client.post(endpoint, data) self.assertEqual(response.status_code, 200) self.assertEqual(self.reload(plugin).body, '<div>divcontent</div><a>acontent</a>') 
@unittest.skipUnless( HAS_DJANGOCMS_TRANSLATIONS and HAS_DJANGOCMS_TRANSFER, 'Optional dependencies for tests are not installed.', ) class DjangoCMSTranslationsIntegrationTestCase(BaseTestCase): def setUp(self): super().setUp() self.page = create_page('test page', 'page.html', 'en', published=True) self.placeholder = get_page_placeholders(self.page, 'en').get(slot='content') def _export_page(self): return json.loads(export_page(self.page, 'en')) def test_textfield_without_children(self): raw_content = '<p>Please <a href="http://www.google.com">CLICK ON LINK1</a> to go to link1.</p>' add_plugin(self.placeholder, 'TextPlugin', 'en', body=raw_content) plugin = self._export_page()[0]['plugins'][0] result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data']) self.assertEqual(result, raw_content) self.assertEqual(children_included_in_this_content, []) result = TextPlugin.set_translation_import_content(result, plugin) self.assertDictEqual(result, {}) def test_textfield_with_children(self): parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='') child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1') parent_body = ( '<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "' 'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1.</p>' ).format(child1.pk) parent.body = parent_body parent.save() plugin = self._export_page()[0]['plugins'][0] result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data']) expected = ( parent_body .replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1) ) self.assertEqual(result, expected) self.assertEqual(children_included_in_this_content, [child1.pk]) result = TextPlugin.set_translation_import_content(result, plugin) self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1'}) def test_textfield_with_multiple_children(self): parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='') child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1') child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2') parent_body = ( '<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "' 'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1 ' 'or <cms-plugin alt="Dummy Link Plugin - dummy link object "' 'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link2.</p>' ).format(child1.pk, child2.pk) parent.body = parent_body parent.save() plugin = self._export_page()[0]['plugins'][0] result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data']) expected = ( parent_body .replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1) .replace('></cms-plugin>', '>CLICK ON LINK2</cms-plugin>', 1) ) self.assertEqual(result, expected) self.assertEqual(children_included_in_this_content, [child1.pk, child2.pk]) result = TextPlugin.set_translation_import_content(result, plugin) self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1', child2.pk: 'CLICK ON LINK2'}) def test_textfield_with_multiple_children_one_deleted(self): parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='') child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1') child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2') parent_body 
= ( '<p>Please <cms-plugin alt="Dummy Link Plugin - dummy link object "' 'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link1 ' 'or <cms-plugin alt="Dummy Link Plugin - dummy link object "' 'title="Dummy Link Plugin - dummy link object" id="{}"></cms-plugin> to go to link2.</p>' ).format(child1.pk, child2.pk) parent.body = parent_body parent.save() plugin = self._export_page()[0]['plugins'][0] child1.delete() result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data']) expected = ( '<p>Please to go to link1 ' 'or <cms-plugin alt="Dummy Link Plugin - dummy link object "' 'title="Dummy Link Plugin - dummy link object" id="{}">CLICK ON LINK2</cms-plugin> to go to link2.</p>' ).format(child2.pk) self.assertEqual(result, expected) self.assertEqual(children_included_in_this_content, [child2.pk]) result = TextPlugin.set_translation_import_content(result, plugin) self.assertDictEqual(result, {child2.pk: 'CLICK ON LINK2'}) def test_textfield_with_untranslatable_children(self): parent = add_plugin(self.placeholder, 'TextPlugin', 'en', body='') child1 = add_plugin(self.placeholder, 'DummySpacerPlugin', 'en', target=parent) parent_body = ( '<p>This is cool <cms-plugin alt="Dummy Spacer Plugin - dummy spacer object "' 'title="Dummy Spacer Plugin - dummy spacer object" id="{}"></cms-plugin> this is nice</p>' ).format(child1.pk) parent.body = parent_body parent.save() plugin = self._export_page()[0]['plugins'][0] result, children_included_in_this_content = TextPlugin.get_translation_export_content('body', plugin['data']) expected = ( parent_body ) self.assertEqual(result, expected) self.assertEqual(children_included_in_this_content, [child1.pk]) result = TextPlugin.set_translation_import_content(result, plugin) self.assertDictEqual(result, {child1.pk: ''})
py
1a5a89cc0d24b08e0109c0bf0bd7d7c209c2bb7c
""" Tests of modelling class. """ import unittest import symengine as se from neurolib.models.multimodel.builder.base.neural_mass import NeuralMass from neurolib.utils.stimulus import OrnsteinUhlenbeckProcess, ZeroInput class MassTest(NeuralMass): required_params = ["a", "b"] num_state_variables = 1 num_noise_variables = 2 helper_variables = ["helper_test"] python_callbacks = ["test_callback"] _noise_input = [ZeroInput(), ZeroInput()] class TestNeuralMass(unittest.TestCase): PARAMS = {"a": 1.2, "b": 11.9} def test_init(self): mass = MassTest(self.PARAMS) self.assertTrue(isinstance(mass, NeuralMass)) self.assertTrue(isinstance(mass.__str__(), str)) self.assertEqual(mass.__str__(), mass.__repr__()) self.assertTrue(isinstance(mass.describe(), dict)) mass._initialize_state_vector() self.assertEqual(len(mass.initial_state), mass.num_state_variables) self.assertTrue(hasattr(mass, "DESCRIPTION_FIELD")) self.assertTrue(all(hasattr(mass, field) for field in mass.DESCRIPTION_FIELD)) self.assertTrue(hasattr(mass, "_derivatives")) self.assertTrue(hasattr(mass, "_validate_params")) self.assertTrue(hasattr(mass, "_validate_callbacks")) self.assertTrue(hasattr(mass, "_initialize_state_vector")) self.assertTrue(all(isinstance(symb, se.Symbol) for symb in mass.helper_symbols.values())) # callbacks are UndefFunction for now self.assertTrue(all(isinstance(callback, se.UndefFunction) for callback in mass.callback_functions.values())) def test_derivatives(self): mass = MassTest(self.PARAMS) self.assertRaises(NotImplementedError, mass._derivatives) def test_validate_params(self): mass = MassTest(self.PARAMS) self.assertDictEqual(self.PARAMS, mass.params) def test_update_params(self): UPDATE_WITH = {"a": 2.4, "noise_0": {"seed": 12}} mass = MassTest(self.PARAMS) mass.index = 0 mass.init_mass(start_idx_for_noise=0) self.assertEqual(mass.params["a"], self.PARAMS["a"]) mass.update_params(UPDATE_WITH) self.assertEqual(mass.params["a"], UPDATE_WITH["a"]) self.assertEqual(mass.params["noise_0"]["seed"], UPDATE_WITH["noise_0"]["seed"]) def test_init_mass(self): mass = MassTest(self.PARAMS) self.assertFalse(mass.initialised) mass.index = 0 mass.init_mass(start_idx_for_noise=6) self.assertTrue(mass.initialised) self.assertListEqual(mass.initial_state, [0.0] * mass.num_state_variables) self.assertListEqual(mass.noise_input_idx, [6, 7]) def test_set_noise_input(self): mass = MassTest(self.PARAMS) self.assertTrue(all(isinstance(noise, ZeroInput) for noise in mass.noise_input)) mass.noise_input = [OrnsteinUhlenbeckProcess(0.0, 0.0, 1.0), ZeroInput()] self.assertTrue(isinstance(mass.noise_input[0], OrnsteinUhlenbeckProcess)) self.assertTrue(isinstance(mass.noise_input[1], ZeroInput)) def test_unwrap_state_vector(self): mass = MassTest(self.PARAMS) mass.idx_state_var = 0 self.assertTrue(hasattr(mass, "_unwrap_state_vector")) state_vec = mass._unwrap_state_vector() self.assertTrue(isinstance(state_vec, list)) self.assertEqual(len(state_vec), mass.num_state_variables) self.assertTrue(all(isinstance(vec, se.Function) for vec in state_vec)) if __name__ == "__main__": unittest.main()
py
1a5a8a997a86420749ba409fa45ddfbd6341f1cf
from typing import List
import inspect
from functools import partial, wraps
from json import JSONEncoder


class JsonSerializable(JSONEncoder):
    _klasses: List[type] = []

    def __init__(self, kls):
        super().__init__()
        self.__kls = kls
        self._klasses.append(kls)

    def get_json_members(self):
        return inspect.getmembers(
            self.__kls,
            lambda o: isinstance(o, JsonProperty)
        )

    def scan_properties(self, o):
        for name, property in self.get_json_members():
            value = getattr(o, name)
            if value.__class__ in self._klasses:
                value = value.default()
            elif isinstance(value, (list, tuple)):
                value = [
                    v.default() if v.__class__ in self._klasses else v
                    for v in value
                ]
            yield name, value

    def default(self, o):
        if isinstance(o, self.__kls):
            return dict(self.scan_properties(o))
        return super().default(o)

    def __call__(self, *args, **kwargs):
        @wraps(self.__kls)
        def wrapped(cls):
            cls.__json__ = True
            instance = cls(*args, **kwargs)
            # setattr(inspect, 'default', partial(self.default, instance))
            setattr(instance, 'default', partial(self.default, instance))
            return instance
        return wrapped(self.__kls)


class JsonProperty(property):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


json_property = JsonProperty
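# --- illustrative usage sketch, not part of the original module ---
# JsonSerializable wraps a class and is then called with that class's constructor
# arguments, while json_property marks the attributes that should be serialised.
# The Point class below is a hypothetical example, not something defined in this repo.
import json


class Point:
    def __init__(self, x, y):
        self._x, self._y = x, y

    @json_property
    def x(self):
        return self._x

    @json_property
    def y(self):
        return self._y


point = JsonSerializable(Point)(1, 2)
# The wrapper attaches a `default` callable to the instance that walks its
# json_property members, so the instance can be reduced to a plain dict.
print(json.dumps(point.default()))  # {"x": 1, "y": 2}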
py
1a5a8aa452c25c3343f51846ea461301d2db5ce8
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somap = 0
maior = 0
for l in range(0, 3):
    for a in range(0, 3):
        matriz[l][a] = int(input(f'Enter a value for [{l}, {a}]: '))
print('-='*30)
for r in range(0, 3):
    for i in range(0, 3):
        print(f'[{matriz[r][i]:^5}]', end='')
    '''
    if matriz[r][i] % 2 == 0:
        somap += e
    '''
    print()
print('-='*30)
for d in matriz:
    for e in d:
        if e % 2 == 0:
            somap += e
print(f'The sum of the even values is {somap}')
somat = matriz[0][2] + matriz[1][2] + matriz[2][2]
'''
for k in range(0, 3):
    somat += matriz[k][2]
'''
print(f'The sum of the values in the third column is {somat}.')
for f in matriz[1]:
    if maior == 0 or f > maior:
        maior = f
print(f'The largest value in the second row is {maior}')
py
1a5a8ab21d287d7d763ab7108f93e48bc1c7d53e
import FWCore.ParameterSet.Config as cms

from Validation.HGCalValidation.hgcalDigiValidationEEDefault_cfi import hgcalDigiValidationEEDefault as _hgcalDigiValidationEEDefault
hgcalDigiValidationEE = _hgcalDigiValidationEEDefault.clone()
py
1a5a8adead1cb71fc1ce82743a3dbfda36a128eb
from typing import List, Optional


class Heap:
    def __init__(self, A: Optional[List[int]] = None) -> None:
        """
        A heap represents a nearly-complete binary tree that maintains a heap property
        between parents and children. Let ~ represent an inequality, then the heap
        property maintains that A[i] ~ A[child[i]] and A[parent[i]] ~ A[i].

        Note: usually heaps are in-place and operations have constant space complexity.
        """
        # Use None as the default to avoid sharing one mutable list between instances.
        A = A if A is not None else []
        # Array containing the elements of the heap.
        self.A = A
        # Length of the complete array.
        self.length = len(A)

    def compare(self, i: int, j: int) -> bool:
        """
        Takes values and performs a comparison based on relationship ~.

        :param i: First value.
        :type i: int
        :param j: Second value.
        :type j: int
        :return: True if the relationship is held, False otherwise.
        :rtype: bool
        """
        raise NotImplementedError

    def left(self, i) -> int:
        """
        Takes a node index and returns the left child.

        :param i: Node index.
        :type i: int
        :return: Index of the left node.
        :rtype: int
        """
        assert i >= 0
        # 0-based indexing: the left child of node i lives at 2*i + 1.
        return 2*i + 1

    def right(self, i) -> int:
        """
        Takes a node index and returns the right child.

        :param i: Node index.
        :type i: int
        :return: Index of the right node.
        :rtype: int
        """
        assert i >= 0
        # 0-based indexing: the right child of node i lives at 2*i + 2.
        return 2*i + 2

    def swap(self, i: int, j: int) -> None:
        """
        Swaps two elements in the heap if the indices are valid.

        :param i: Position of first element.
        :type i: int
        :param j: Position of second element.
        :type j: int
        :return: None.
        """
        if 0 <= i < self.length and 0 <= j < self.length:
            A = self.A
            t = A[i]
            A[i] = A[j]
            A[j] = t

    def heapify(self, i: int) -> None:
        """
        Heapify takes an index and ensures the heap property for node i
        if the left and right children are heaps.

        :param i: Index of a node to heapify.
        :type i: int
        :rtype: None
        """
        assert 0 <= i < self.length
        A = self.A
        l = self.left(i)
        r = self.right(i)
        if r < self.length:
            # Case 1: all indices are valid.
            if not self.compare(A[i], A[l]) and self.compare(A[l], A[r]):
                # Case 1a: left ~ parent and left ~ right
                self.swap(i, l)
                self.heapify(l)
            elif not self.compare(A[i], A[r]):
                # Case 1b: right ~ parent and right ~ left
                self.swap(i, r)
                self.heapify(r)
            # Case 1c: parent ~ left and parent ~ right, so the heap property is maintained.
        elif l < self.length:
            # Case 2: the right index is not valid, but all others are.
            if not self.compare(A[i], A[l]):
                self.swap(i, l)
            # The left child is the last node in the array, so the recursion ends here.
        # Case 3: parent is a leaf node.


def make_heap(A: List[int]) -> Heap:
    """
    Constructs a Heap from an unsorted array of elements.
    Takes O(n*lg(n)) time complexity and O(n) space.

    :param A: An array of values to add to the heap.
    :return: A Heap with values from the array.
    """
    heap = Heap(A)
    # Heapify from the last internal node down to the root; the original
    # range(len(A) // 2, 0) was empty and never ran.
    for i in range(len(A) // 2 - 1, -1, -1):
        heap.heapify(i)
    return heap
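# --- illustrative sketch, not part of the original module ---
# Heap.compare is abstract, so the ordering relation ~ is fixed by a subclass.
# MaxHeap below is a hypothetical example of such a subclass; note that make_heap
# as written instantiates the abstract Heap itself, so here the subclass is
# heapified directly instead.
class MaxHeap(Heap):
    def compare(self, i: int, j: int) -> bool:
        # For a max-heap, ~ is ">=": a parent dominates its children.
        return i >= j


if __name__ == '__main__':
    heap = MaxHeap([3, 1, 4, 1, 5, 9, 2, 6])
    for i in range(heap.length // 2 - 1, -1, -1):
        heap.heapify(i)
    # Prints a reordering of the input that satisfies the max-heap property.
    print(heap.A)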
py
1a5a8b449f577b1b341dc5ade5916df34e0b626d
"""This is a test to test the paraview proxy manager API.""" from paraview import servermanager import sys servermanager.Connect() sources = servermanager.sources.__dict__ for source in sources: try: sys.stderr.write('Creating %s...'%(source)) s = sources[source]() s.UpdateVTKObjects() sys.stderr.write('ok\n') except: sys.stderr.write('failed\n') raise RuntimeError('ERROR: Failed to create %s'%(source))
py
1a5a8b457fbefc4576690e03ab6914dc7f0a46a2
# Generated by Django 3.2.11 on 2022-02-01 20:30 from django.conf import settings import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.db.models.deletion import django.utils.timezone import uuid class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0012_alter_user_first_name_max_length'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('user_type', models.CharField(choices=[('EVENT MANAGER', 'Event Manager'), ('CUSTOMER', 'Customer')], default='Super Admin', max_length=13, verbose_name='User Type')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], options={ 'verbose_name': 'System Admin', 'verbose_name_plural': 'System Admins', }, managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Customer', fields=[ ], options={ 'verbose_name': 'Customer', 'verbose_name_plural': 'Customers', 'proxy': True, 'indexes': [], 'constraints': [], }, bases=('users.user',), managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), migrations.CreateModel( name='Manager', fields=[ ], options={ 'verbose_name': 'Manager', 'verbose_name_plural': 'Managers', 'proxy': True, 'indexes': [], 'constraints': [], }, bases=('users.user',), managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), ]
py
1a5a8e266137118d2546a03b9d9b96f5bbb7a1d7
#!/usr/bin/env python

from distutils.core import setup

LONG_DESCRIPTION = \
'''
This program is a basic python conversion of Mick Watson's Ideel.
It reads one or more input FASTA files and for each file it will use prodigal
for rapid annotation, then run diamond blast, then compare the query length
to hit length.

It was built with the help of 'Bionitio'
'''

setup(
    name='pydeel',
    version='0.2.0.0',
    author='Alistair Legione',
    author_email='[email protected]',
    packages=['pydeel'],
    package_dir={'pydeel': 'pydeel'},
    entry_points={
        'console_scripts': ['pydeel = pydeel.pydeel:main']
    },
    url='https://github.com/alegione/pydeel',
    license='LICENSE',
    description=('Assembly completion by annotation assessment'),
    long_description=(LONG_DESCRIPTION),
    install_requires=["argparse", "pandas", "altair", "seaborn", "selenium", "datetime", "Bio"],
)
py
1a5a8e279da09025b7a2a0bd8d72a9e73f7f2aa3
from bead.exceptions import InvalidArchive from .test import TestCase, chdir from . import workspace as m import os import zipfile from .archive import Archive from . import layouts from . import tech write_file = tech.fs.write_file ensure_directory = tech.fs.ensure_directory temp_dir = tech.fs.temp_dir timestamp = tech.timestamp.timestamp Path = tech.fs.Path A_KIND = 'an arbitrary identifier that is not used by chance' class Test_create(TestCase): def test_valid(self): self.when_a_new_workspace_is_created() self.then_directory_is_a_valid_bead_dir() def test_has_no_inputs(self): self.when_a_new_workspace_is_created() self.then_workspace_has_no_inputs() def test_of_specified_kind(self): self.when_a_new_workspace_is_created() self.then_workspace_is_of_specified_kind() # implementation __workspace_dir = None @property def workspace(self): return m.Workspace(self.__workspace_dir) def when_a_new_workspace_is_created(self): self.__workspace_dir = self.new_temp_dir() / 'new_workspace' self.workspace.create(A_KIND) def then_directory_is_a_valid_bead_dir(self): assert self.workspace.is_valid def then_workspace_has_no_inputs(self): assert not self.workspace.has_input('bead1') assert not self.workspace.is_loaded('bead1') assert not self.workspace.inputs def then_workspace_is_of_specified_kind(self): assert A_KIND == self.workspace.kind class Test_for_current_working_directory(TestCase): def test_non_workspace(self): root = self.new_temp_dir() with chdir(root): ws = m.Workspace.for_current_working_directory() assert tech.fs.Path(root) == ws.directory def test_workspace_root(self): root = self.new_temp_dir() / 'new_workspace' workspace = m.Workspace(root) workspace.create(A_KIND) with chdir(root): ws = m.Workspace.for_current_working_directory() assert tech.fs.Path(root) == ws.directory def test_workspace_above_root(self): root = self.new_temp_dir() / 'new_workspace' workspace = m.Workspace(root) workspace.create(A_KIND) with chdir(root / layouts.Workspace.INPUT): ws = m.Workspace.for_current_working_directory() assert tech.fs.Path(root) == ws.directory class Test_pack(TestCase): def test_creates_valid_archive(self): self.given_a_workspace() self.when_archived() self.then_archive_is_valid_bead() def test_archives_all_content(self): self.given_a_workspace() self.when_archived() self.then_archive_contains_files_from_bead_directory() def test_not_saved_content(self): self.given_a_workspace() self.when_archived() self.then_archive_does_not_contain_workspace_meta_and_temp_files() def test_archive_has_comment(self): self.given_a_workspace() self.when_archived() self.then_archive_has_comment() # implementation __workspace_dir = None __zipfile = None __SOURCE1 = b's1' __SOURCE2 = b's2' __OUTPUT1 = b'o1' assert __SOURCE2 != __SOURCE1 __BEAD_COMMENT = 'custom bead comment' @property def workspace(self): return m.Workspace(self.__workspace_dir) def given_a_workspace(self): self.__workspace_dir = self.new_temp_dir() / 'workspace' self.workspace.create(A_KIND) layout = layouts.Workspace write_file( self.__workspace_dir / layout.TEMP / 'README', 'temporary directory') write_file( self.__workspace_dir / layout.OUTPUT / 'output1', self.__OUTPUT1) write_file(self.__workspace_dir / 'source1', self.__SOURCE1) ensure_directory(self.__workspace_dir / 'subdir') write_file(self.__workspace_dir / 'subdir/source2', self.__SOURCE2) def when_archived(self): self.__zipfile = self.new_temp_dir() / 'bead.zip' self.workspace.pack(self.__zipfile, timestamp(), self.__BEAD_COMMENT) def 
then_archive_contains_files_from_bead_directory(self): with zipfile.ZipFile(self.__zipfile) as z: layout = layouts.Archive assert self.__OUTPUT1 == z.read(layout.DATA / 'output1') assert self.__SOURCE1 == z.read(layout.CODE / 'source1') assert self.__SOURCE2 == z.read(layout.CODE / 'subdir/source2') files = z.namelist() assert layout.BEAD_META in files assert layout.MANIFEST in files def then_archive_is_valid_bead(self): bead = Archive(self.__zipfile) bead.validate() def then_archive_has_comment(self): with zipfile.ZipFile(self.__zipfile) as z: assert self.__BEAD_COMMENT == z.comment.decode('utf-8') def then_archive_does_not_contain_workspace_meta_and_temp_files(self): def does_not_contain(workspace_path): with zipfile.ZipFile(self.__zipfile) as z: archive_path = layouts.Archive.CODE / workspace_path self.assertRaises(KeyError, z.getinfo, archive_path) does_not_contain(layouts.Workspace.BEAD_META) does_not_contain(layouts.Workspace.TEMP / 'README') class Test_pack_stability(TestCase): def test_directory_name_data_and_timestamp_determines_content_ids(self): TS = '20150910T093724802366+0200' # note: it is important to create the same bead in # two different directories def make_bead(): output = self.new_temp_dir() / 'bead.zip' ws = m.Workspace(self.new_temp_dir() / 'a bead') ws.create(A_KIND) write_file(ws.directory / 'source1', 'code to produce output') write_file(ws.directory / 'output/output1', TS) ws.pack(output, TS, comment='') return Archive(output) bead1 = make_bead() bead2 = make_bead() assert bead1.content_id == bead2.content_id def make_bead(path, filespecs): with temp_dir() as root: workspace = m.Workspace(root / 'workspace') workspace.create(A_KIND) for filename, content in filespecs.items(): write_file(workspace.directory / filename, content) workspace.pack(path, timestamp(), 'no comment') class Test_load(TestCase): def test_makes_bead_files_available_under_input(self): self.given_a_workspace() self.when_loading_a_bead() self.then_data_files_in_bead_are_available_in_workspace() def test_loaded_inputs_are_read_only(self): self.given_a_workspace() self.when_loading_a_bead() self.then_extracted_files_under_input_are_readonly() def test_load_adds_input_to_bead_meta(self): self.given_a_workspace() self.when_loading_a_bead() self.then_input_info_is_added_to_bead_meta() def test_loading_more_than_one_bead(self): self.given_a_workspace() self.when_loading_a_bead() self.then_another_bead_can_be_loaded() # implementation __workspace_dir = None @property def workspace(self): return m.Workspace(self.__workspace_dir) def given_a_workspace(self): self.__workspace_dir = self.new_temp_dir() / 'workspace' self.workspace.create(A_KIND) def _load_a_bead(self, input_nick): path_of_bead_to_load = self.new_temp_dir() / 'bead.zip' make_bead( path_of_bead_to_load, { 'output/output1': f'data for {input_nick}'.encode('utf-8') } ) self.workspace.load(input_nick, Archive(path_of_bead_to_load)) def when_loading_a_bead(self): self._load_a_bead('bead1') def then_data_files_in_bead_are_available_in_workspace(self): with open(self.__workspace_dir / 'input/bead1/output1', 'rb') as f: assert b'data for bead1' == f.read() def then_extracted_files_under_input_are_readonly(self): root = self.__workspace_dir / 'input/bead1' assert os.path.exists(root) self.assertRaises(IOError, open, root / 'output1', 'ab') # also folders are read only - this does not work on Windows if os.name == 'posix': self.assertRaises(IOError, open, root / 'new-file', 'wb') def then_input_info_is_added_to_bead_meta(self): assert 
self.workspace.has_input('bead1') assert self.workspace.is_loaded('bead1') def then_another_bead_can_be_loaded(self): self._load_a_bead('bead2') class Test_input_map(TestCase): def test_default_value(self, workspace_with_input, input_nick): assert input_nick == workspace_with_input.get_input_bead_name(input_nick) def test_define(self, workspace_with_input, input_nick): bead_name = f'{input_nick}2' workspace_with_input.set_input_bead_name(input_nick, bead_name) assert bead_name == workspace_with_input.get_input_bead_name(input_nick) def test_update(self, workspace_with_input, input_nick): workspace_with_input.set_input_bead_name(input_nick, f'{input_nick}2') bead_name = f'{input_nick}42' workspace_with_input.set_input_bead_name(input_nick, bead_name) assert bead_name == workspace_with_input.get_input_bead_name(input_nick) def test_independent_update(self, workspace_with_input, input_nick): input_nick2 = f'{input_nick}2' self.add_input(workspace_with_input, input_nick2) workspace_with_input.set_input_bead_name(input_nick, f'{input_nick}1111') workspace_with_input.set_input_bead_name(input_nick2, f'{input_nick2}222') assert f'{input_nick}1111' == workspace_with_input.get_input_bead_name(input_nick) assert f'{input_nick2}222' == workspace_with_input.get_input_bead_name(input_nick2) # implementation def workspace_dir(self): return self.new_temp_dir() / 'workspace' def workspace(self, workspace_dir): workspace = m.Workspace(workspace_dir) workspace.create(A_KIND) return workspace def input_nick(self): return 'input_nick' def add_input(self, workspace, input_nick): workspace.add_input(input_nick, A_KIND, 'content_id', timestamp()) def workspace_with_input(self, workspace, input_nick): self.add_input(workspace, input_nick) return workspace def unzip(archive_path, directory): ensure_directory(directory) with zipfile.ZipFile(archive_path) as z: z.extractall(directory) def zip_up(directory, archive_path): with zipfile.ZipFile(archive_path, 'w') as z: def add(path, zip_path): if os.path.isdir(path): for name in os.listdir(path): add(path / name, zip_path / name) else: z.write(path, zip_path) add(directory, Path('/')) class Test_is_valid(TestCase): # fixtures def workspace(self): workspace = m.Workspace(self.new_temp_dir() / 'workspace') workspace.create(A_KIND) return workspace def timestamp(self): return '20150930T093724802366+0200' def archive_path(self, workspace, timestamp): archive_path = self.new_temp_dir() / 'bead.zip' workspace.pack(archive_path, timestamp, comment=archive_path) return archive_path def archive_with_two_files_path(self, workspace, timestamp): write_file(workspace.directory / 'code1', 'code1') write_file(workspace.directory / 'output/data1', 'data1') return self.archive_path(workspace, timestamp) def unzipped_archive_path(self, archive_with_two_files_path): path = self.new_temp_dir() unzip(archive_with_two_files_path, path) return path def archive(self, archive_path): return Archive(archive_path) # tests def test_newly_created_bead_is_valid(self, archive_with_two_files_path): Archive(archive_with_two_files_path).validate() def test_adding_a_data_file_to_an_archive_makes_bead_invalid(self, archive_path): with zipfile.ZipFile(archive_path, 'a') as z: z.writestr(layouts.Archive.DATA / 'extra_file', b'something') self.assertRaises(InvalidArchive, Archive(archive_path).validate) def test_adding_a_code_file_to_an_archive_makes_bead_invalid(self, archive_path): with zipfile.ZipFile(archive_path, 'a') as z: z.writestr(layouts.Archive.CODE / 'extra_file', b'something') 
self.assertRaises(InvalidArchive, Archive(archive_path).validate) def test_unzipping_and_zipping_an_archive_remains_valid(self, unzipped_archive_path): rezipped_archive_path = self.new_temp_dir() / 'rezipped_archive.zip' zip_up(unzipped_archive_path, rezipped_archive_path) Archive(rezipped_archive_path).validate() def test_deleting_a_file_in_the_manifest_makes_the_bead_invalid(self, unzipped_archive_path): os.remove(unzipped_archive_path / layouts.Archive.CODE / 'code1') modified_archive_path = self.new_temp_dir() / 'modified_archive.zip' zip_up(unzipped_archive_path, modified_archive_path) self.assertRaises(InvalidArchive, Archive(modified_archive_path).validate) def test_changing_a_file_makes_the_bead_invalid(self, unzipped_archive_path): write_file(unzipped_archive_path / layouts.Archive.CODE / 'code1', b'HACKED') modified_archive_path = self.new_temp_dir() / 'modified_archive.zip' zip_up(unzipped_archive_path, modified_archive_path) self.assertRaises(InvalidArchive, Archive(modified_archive_path).validate)
py
1a5a8e418285ee3578c530b9c6d53f412b44162b
"""Module to read Instagram statistics and upload them to Dropbox for safekeeping""" import pandas import dropbox from accounts import token as db_token from os import listdir from os.path import isfile, join from get_stats import run def read_overall_stats(filename): """Reads csv file and returns it as a pandas data frame""" pandas_df = pandas.read_csv(".\\stats\\" + filename, parse_dates=['DATE']) print pandas_df return pandas_df def upload_to_dropbox(dataframe, path, token): """uploads pandas dataframe to dropbox as csv""" dbx = dropbox.Dropbox(token) df_string = dataframe.to_csv(index=False) db_bytes = bytes(df_string) dbx.files_upload( f=db_bytes, path=path, mode=dropbox.files.WriteMode.overwrite ) def all_files_upload(): """Gets files from stats folder and uploads them sends them to dropbox function""" file_data_path = './stats/' file_list = [f for f in listdir(file_data_path) if isfile(join(file_data_path, f))] for individual_file in file_list: upload_path = "/individual_reports/" + individual_file if 'instaPyStats.csv' in individual_file: upload_path = "/" + individual_file if '.csv' in individual_file: data_frame = read_overall_stats(individual_file) upload_to_dropbox(data_frame, upload_path, db_token) if __name__ == '__main__': run() # runs the 'Get stats' code all_files_upload()
py
1a5a8e610abfedc8ec94a6ffcbf0b63d2542ea4a
from datetime import date, datetime
from typing import Generator, Optional

import pendulum  # type: ignore


def month_range(start: date, to: Optional[date] = None) -> Generator[date, None, None]:
    start_month = pendulum.instance(datetime(start.year, start.month, start.day)).start_of("month").date()

    end_month = pendulum.instance(datetime.today()).start_of("month").date()
    if to:
        end_month = pendulum.instance(datetime(to.year, to.month, to.day)).start_of("month").date()

    if end_month < start_month:
        raise ValueError("to must be later than start")

    current_month = start_month
    while current_month <= end_month:
        yield current_month
        current_month = current_month.add(months=1)
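# --- usage sketch (added for illustration; not part of the original module) ---
# Iterates the generator above for an arbitrary, made-up date range. Both
# endpoints are normalised to the first day of their month, so this prints
# 2021-11-01, 2021-12-01, 2022-01-01 and 2022-02-01.
if __name__ == "__main__":
    for month in month_range(date(2021, 11, 15), to=date(2022, 2, 3)):
        print(month)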
py
1a5a8f473431bec08ab0a9306204e26a17cb0a0b
import sys
import toml
import nltk
import logging
from typing import List
from pathlib import Path

logging.basicConfig(
    format="%(asctime)s (PID %(process)d) [%(levelname)s] %(filename)s:%(lineno)d %(message)s",
    level=logging.INFO,
    handlers=[logging.StreamHandler(sys.stdout)],
)

BASE_DIR = Path(__file__).parent.parent.absolute()

with open(BASE_DIR / "pyproject.toml", "r", encoding="utf-8") as f:
    CONFIG = toml.load(f)

TOKENS: List[str] = []
if not (BASE_DIR / "tokens.txt").exists():
    logging.error("No tokens.txt file found. Please create one.")
else:
    with open(BASE_DIR / "tokens.txt") as f:
        TOKENS = f.read().strip().split("\n")

nltk.download("wordnet")
nltk.download("omw-1.4")
py
1a5a8fef3b659a59647a9d1ba0be08263f69c76b
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc from __future__ import unicode_literals from snapshottest import Snapshot snapshots = Snapshot() snapshots['TestComposites.test_composites[non_launchable_in_memory_instance_lazy_repository] 1'] = { 'pipelineOrError': { '__typename': 'Pipeline', 'name': 'composites_pipeline', 'solidHandles': [ { 'handleID': 'add_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'add_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_four' } } ] } ] } }, { 'handleID': 'add_four.adder_1', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_2.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'div_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 
'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'div_2' } } } ], 'solids': [ { 'name': 'div_1' }, { 'name': 'div_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'add_four' } } ] } ], 'name': 'div_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four.div_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'div_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_2' } } ] } ] } }, { 'handleID': 'div_four.div_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'div_1' } } ] } ], 'name': 'div_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } } ] } } snapshots['TestComposites.test_composites[non_launchable_in_memory_instance_managed_grpc_env] 1'] = { 'pipelineOrError': { '__typename': 'Pipeline', 'name': 'composites_pipeline', 'solidHandles': [ { 'handleID': 'add_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'add_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_four' } } ] } ] } }, { 'handleID': 'add_four.adder_1', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 
'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_2.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'div_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'div_2' } } } ], 'solids': [ { 'name': 'div_1' }, { 'name': 'div_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'add_four' } } ] } ], 'name': 'div_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four.div_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'div_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_2' } } ] } ] } }, { 'handleID': 'div_four.div_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'div_1' } } ] } ], 'name': 'div_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } } ] } } snapshots['TestComposites.test_composites[non_launchable_in_memory_instance_multi_location] 1'] = { 'pipelineOrError': { '__typename': 'Pipeline', 'name': 'composites_pipeline', 'solidHandles': [ { 'handleID': 'add_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'add_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_four' } } ] } ] } }, { 'handleID': 'add_four.adder_1', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 
'handleID': 'add_four.adder_2', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_2.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'div_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'div_2' } } } ], 'solids': [ { 'name': 'div_1' }, { 'name': 'div_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'add_four' } } ] } ], 'name': 'div_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four.div_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'div_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_2' } } ] } ] } }, { 'handleID': 'div_four.div_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'div_1' } } ] } ], 'name': 'div_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } } ] } } snapshots['TestComposites.test_composites[non_launchable_sqlite_instance_deployed_grpc_env] 1'] = { 'pipelineOrError': { '__typename': 'Pipeline', 'name': 'composites_pipeline', 'solidHandles': [ { 'handleID': 'add_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'add_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_four' } } ] } ] } }, { 'handleID': 'add_four.adder_1', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 
'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_2.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'div_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'div_2' } } } ], 'solids': [ { 'name': 'div_1' }, { 'name': 'div_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'add_four' } } ] } ], 'name': 'div_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four.div_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'div_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_2' } } ] } ] } }, { 'handleID': 'div_four.div_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'div_1' } } ] } ], 'name': 'div_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } } ] } } snapshots['TestComposites.test_composites[non_launchable_sqlite_instance_lazy_repository] 1'] = { 'pipelineOrError': { '__typename': 'Pipeline', 'name': 'composites_pipeline', 'solidHandles': [ { 'handleID': 'add_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 
'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'add_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_four' } } ] } ] } }, { 'handleID': 'add_four.adder_1', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_2.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'div_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'div_2' } } } ], 'solids': [ { 'name': 'div_1' }, { 'name': 'div_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'add_four' } } ] } ], 'name': 'div_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four.div_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'div_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_2' } } ] } ] } }, { 'handleID': 'div_four.div_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 
'solid': { 'name': 'div_1' } } ] } ], 'name': 'div_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } } ] } } snapshots['TestComposites.test_composites[non_launchable_sqlite_instance_managed_grpc_env] 1'] = { 'pipelineOrError': { '__typename': 'Pipeline', 'name': 'composites_pipeline', 'solidHandles': [ { 'handleID': 'add_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'add_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_four' } } ] } ] } }, { 'handleID': 'add_four.adder_1', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_2.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'div_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 
'name': 'result' }, 'solid': { 'name': 'div_2' } } } ], 'solids': [ { 'name': 'div_1' }, { 'name': 'div_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'add_four' } } ] } ], 'name': 'div_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four.div_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'div_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_2' } } ] } ] } }, { 'handleID': 'div_four.div_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'div_1' } } ] } ], 'name': 'div_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } } ] } } snapshots['TestComposites.test_composites[non_launchable_sqlite_instance_multi_location] 1'] = { 'pipelineOrError': { '__typename': 'Pipeline', 'name': 'composites_pipeline', 'solidHandles': [ { 'handleID': 'add_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'add_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_four' } } ] } ] } }, { 'handleID': 'add_four.adder_1', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_1.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'adder_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'adder_2' } } } ], 'solids': [ { 'name': 'adder_1' }, { 'name': 'adder_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'add_four.adder_2.adder_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ 
] } ], 'name': 'adder_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'adder_2' } } ] } ] } }, { 'handleID': 'add_four.adder_2.adder_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'adder_1' } } ] } ], 'name': 'adder_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four', 'solid': { 'definition': { 'inputMappings': [ { 'definition': { 'name': 'num' }, 'mappedInput': { 'definition': { 'name': 'num' }, 'solid': { 'name': 'div_1' } } } ], 'outputMappings': [ { 'definition': { 'name': 'result' }, 'mappedOutput': { 'definition': { 'name': 'result' }, 'solid': { 'name': 'div_2' } } } ], 'solids': [ { 'name': 'div_1' }, { 'name': 'div_2' } ] }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'add_four' } } ] } ], 'name': 'div_four', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } }, { 'handleID': 'div_four.div_1', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ ] } ], 'name': 'div_1', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ { 'solid': { 'name': 'div_2' } } ] } ] } }, { 'handleID': 'div_four.div_2', 'solid': { 'definition': { }, 'inputs': [ { 'definition': { 'name': 'num' }, 'dependsOn': [ { 'solid': { 'name': 'div_1' } } ] } ], 'name': 'div_2', 'outputs': [ { 'definition': { 'name': 'result' }, 'dependedBy': [ ] } ] } } ] } }
py
1a5a90c0290feabf8268b5fc7995523921a571b5
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from scipy.optimize import linear_sum_assignment
from . import kalman_filter


INFTY_COST = 1e+5


def min_cost_matching(
        distance_metric, max_distance, tracks, detections, track_indices=None,
        detection_indices=None):
    """Solve linear assignment problem.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        The distance metric is given a list of tracks and detections as well as
        a list of N track indices and M detection indices. The metric should
        return the NxM dimensional cost matrix, where element (i, j) is the
        association cost between the i-th track in the given track indices and
        the j-th detection in the given detection_indices.
    max_distance : float
        Gating threshold. Associations with cost larger than this value are
        disregarded.
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : List[int]
        List of track indices that maps rows in `cost_matrix` to tracks in
        `tracks` (see description above).
    detection_indices : List[int]
        List of detection indices that maps columns in `cost_matrix` to
        detections in `detections` (see description above).

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Returns a tuple with the following three entries:
        * A list of matched track and detection indices.
        * A list of unmatched track indices.
        * A list of unmatched detection indices.

    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    if len(detection_indices) == 0 or len(track_indices) == 0:
        return [], track_indices, detection_indices  # Nothing to match.

    cost_matrix = distance_metric(
        tracks, detections, track_indices, detection_indices)
    cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5

    indices = linear_sum_assignment(cost_matrix)
    indices = np.asarray(indices)
    indices = np.transpose(indices)
    matches, unmatched_tracks, unmatched_detections = [], [], []
    for col, detection_idx in enumerate(detection_indices):
        if col not in indices[:, 1]:
            unmatched_detections.append(detection_idx)
    for row, track_idx in enumerate(track_indices):
        if row not in indices[:, 0]:
            unmatched_tracks.append(track_idx)
    for row, col in indices:
        track_idx = track_indices[row]
        detection_idx = detection_indices[col]
        if cost_matrix[row, col] > max_distance:
            unmatched_tracks.append(track_idx)
            unmatched_detections.append(detection_idx)
        else:
            matches.append((track_idx, detection_idx))
    return matches, unmatched_tracks, unmatched_detections


def matching_cascade(
        distance_metric, max_distance, cascade_depth, tracks, detections,
        track_indices=None, detection_indices=None):
    """Run matching cascade.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        The distance metric is given a list of tracks and detections as well as
        a list of N track indices and M detection indices. The metric should
        return the NxM dimensional cost matrix, where element (i, j) is the
        association cost between the i-th track in the given track indices and
        the j-th detection in the given detection indices.
    max_distance : float
        Gating threshold. Associations with cost larger than this value are
        disregarded.  # distance threshold
    cascade_depth: int
        The cascade depth, should be set to the maximum track age.
        # set the cascade depth to the maximum track age
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : Optional[List[int]]
        List of track indices that maps rows in `cost_matrix` to tracks in
        `tracks` (see description above). Defaults to all tracks.
    detection_indices : Optional[List[int]]
        List of detection indices that maps columns in `cost_matrix` to
        detections in `detections` (see description above). Defaults to all
        detections.

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Returns a tuple with the following three entries:
        * A list of matched track and detection indices.
        * A list of unmatched track indices.
        * A list of unmatched detection indices.

    """
    if track_indices is None:
        track_indices = list(range(len(tracks)))
    if detection_indices is None:
        detection_indices = list(range(len(detections)))

    unmatched_detections = detection_indices
    matches = []
    for level in range(cascade_depth):
        if len(unmatched_detections) == 0:  # No detections left
            break

        track_indices_l = [
            k for k in track_indices
            if tracks[k].time_since_update == 1 + level
        ]
        if len(track_indices_l) == 0:  # Nothing to match at this level
            continue

        matches_l, _, unmatched_detections = \
            min_cost_matching(
                distance_metric, max_distance, tracks, detections,
                track_indices_l, unmatched_detections)
        matches += matches_l
    unmatched_tracks = list(set(track_indices) - set(k for k, _ in matches))
    return matches, unmatched_tracks, unmatched_detections


def gate_cost_matrix(
        kf, cost_matrix, tracks, detections, track_indices, detection_indices,
        gated_cost=INFTY_COST, only_position=False):
    """Invalidate infeasible entries in cost matrix based on the state
    distributions obtained by Kalman filtering.
    Selects the entries to discard based on the obtained state information.

    Parameters
    ----------
    kf : The Kalman filter.
    cost_matrix : ndarray
        The NxM dimensional cost matrix, where N is the number of track indices
        and M is the number of detection indices, such that entry (i, j) is the
        association cost between `tracks[track_indices[i]]` and
        `detections[detection_indices[j]]`.
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : List[int]
        List of track indices that maps rows in `cost_matrix` to tracks in
        `tracks` (see description above).
    detection_indices : List[int]
        List of detection indices that maps columns in `cost_matrix` to
        detections in `detections` (see description above).
    gated_cost : Optional[float]
        Entries in the cost matrix corresponding to infeasible associations are
        set to this value. Defaults to a very large value.
    only_position : Optional[bool]
        If True, only the x, y position of the state distribution is considered
        during gating. Defaults to False.

    Returns
    -------
    ndarray
        Returns the modified cost matrix.

    """
    gating_dim = 2 if only_position else 4
    gating_threshold = kalman_filter.chi2inv95[gating_dim]
    measurements = np.asarray(
        [detections[i].to_xyah() for i in detection_indices])
    for row, track_idx in enumerate(track_indices):
        track = tracks[track_idx]
        gating_distance = kf.gating_distance(
            track.mean, track.covariance, measurements, only_position)
        cost_matrix[row, gating_distance > gating_threshold] = gated_cost
    return cost_matrix
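# --- usage sketch (added for illustration; not part of the original module) ---
# min_cost_matching() only needs objects that the supplied metric understands,
# so this demo uses stand-in 2-D points and a toy Euclidean metric; the real
# tracker passes Track/Detection instances and an appearance- or IoU-based
# metric instead.
def _demo_min_cost_matching():
    """Toy end-to-end call of min_cost_matching() (illustration only)."""
    class _Point(object):
        """Stand-in for a track or detection: just a 2-D position."""
        def __init__(self, x, y):
            self.pos = np.array([x, y], dtype=float)

    def _euclidean_metric(tracks, detections, track_indices, detection_indices):
        # Build the NxM cost matrix expected by min_cost_matching.
        cost = np.zeros((len(track_indices), len(detection_indices)))
        for i, t in enumerate(track_indices):
            for j, d in enumerate(detection_indices):
                cost[i, j] = np.linalg.norm(tracks[t].pos - detections[d].pos)
        return cost

    tracks = [_Point(0.0, 0.0), _Point(5.0, 5.0)]
    detections = [_Point(5.2, 4.9), _Point(0.1, 0.0)]
    matches, unmatched_tracks, unmatched_detections = min_cost_matching(
        _euclidean_metric, max_distance=1.0, tracks=tracks, detections=detections)
    # Expected: matches == [(0, 1), (1, 0)] and no unmatched entries.
    return matches, unmatched_tracks, unmatched_detections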
py
1a5a917228de96a0e0ac689d7355be89756f2c47
import sys
import argparse
from svtools.external_cmd import ExternalCmd


class BedpeSort(ExternalCmd):
    def __init__(self):
        super(BedpeSort, self).__init__('bedpesort', 'bin/bedpesort')


def description():
    return 'sort a BEDPE file'


def epilog():
    return 'To read in stdin and output to a file, use /dev/stdin or - as the first positional argument.'


def add_arguments_to_parser(parser):
    parser.add_argument('input', metavar='<BEDPE file>', nargs='?', help='BEDPE file to sort')
    parser.add_argument('output', metavar='<output file>', nargs='?', help='output file to write to')
    parser.set_defaults(entry_point=run_from_args)


def command_parser():
    parser = argparse.ArgumentParser(description=description())
    add_arguments_to_parser(parser)
    return parser


def run_from_args(args):
    opts = list()
    if args.input:
        opts.append(args.input)
    if args.output:
        opts.append(args.output)
    sort_cmd_runner = BedpeSort()
    sort_cmd_runner.run_cmd_with_options(opts)


if __name__ == "__main__":
    parser = command_parser()
    args = parser.parse_args()
    sys.exit(args.entry_point(args))
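# --- usage sketch (added for illustration; not part of the original module) ---
# Hypothetical programmatic equivalent of `bedpesort <input> <output>`; the
# file names are placeholders and the external bedpesort binary must be
# available for run_cmd_with_options() to succeed.
def sort_bedpe_file(in_path, out_path):
    args = command_parser().parse_args([in_path, out_path])
    return args.entry_point(args)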
py
1a5a929b3fa32a1c8d33bb0e7d8a55bd50e25a81
import asyncio from weakref import ref from decimal import Decimal import re import threading import traceback, sys from typing import TYPE_CHECKING, List from kivy.app import App from kivy.cache import Cache from kivy.clock import Clock from kivy.compat import string_types from kivy.properties import (ObjectProperty, DictProperty, NumericProperty, ListProperty, StringProperty) from kivy.uix.recycleview import RecycleView from kivy.uix.label import Label from kivy.uix.behaviors import ToggleButtonBehavior from kivy.uix.image import Image from kivy.lang import Builder from kivy.factory import Factory from kivy.utils import platform from electrum.bitcoin import TYPE_ADDRESS from electrum.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN from electrum import bitcoin, constants from electrum.transaction import TxOutput, Transaction, tx_from_str from electrum.util import send_exception_to_crash_reporter, parse_URI, InvalidBitcoinURI from electrum.util import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT, TxMinedInfo, get_request_status, pr_expiration_values from electrum.plugin import run_hook from electrum.wallet import InternalAddressCorruption from electrum import simple_config from electrum.lnaddr import lndecode from electrum.lnutil import RECEIVED, SENT, PaymentFailure from .dialogs.question import Question from .dialogs.lightning_open_channel import LightningOpenChannelDialog from electrum.gui.kivy.i18n import _ if TYPE_CHECKING: from electrum.gui.kivy.main_window import ElectrumWindow class HistoryRecycleView(RecycleView): pass class RequestRecycleView(RecycleView): pass class PaymentRecycleView(RecycleView): pass class CScreen(Factory.Screen): __events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave') action_view = ObjectProperty(None) loaded = False kvname = None app = App.get_running_app() # type: ElectrumWindow def _change_action_view(self): app = App.get_running_app() action_bar = app.root.manager.current_screen.ids.action_bar _action_view = self.action_view if (not _action_view) or _action_view.parent: return action_bar.clear_widgets() action_bar.add_widget(_action_view) def on_enter(self): # FIXME: use a proper event don't use animation time of screen Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25) pass def update(self): pass @profiler def load_screen(self): self.screen = Builder.load_file('electrum/gui/kivy/uix/ui_screens/' + self.kvname + '.kv') self.add_widget(self.screen) self.loaded = True self.update() setattr(self.app, self.kvname + '_screen', self) def on_activate(self): if self.kvname and not self.loaded: self.load_screen() #Clock.schedule_once(lambda dt: self._change_action_view()) def on_leave(self): self.dispatch('on_deactivate') def on_deactivate(self): pass # note: this list needs to be kept in sync with another in qt TX_ICONS = [ "unconfirmed", "close", "unconfirmed", "close", "clock1", "clock2", "clock3", "clock4", "clock5", "confirmed", ] class HistoryScreen(CScreen): tab = ObjectProperty(None) kvname = 'history' cards = {} def __init__(self, **kwargs): self.ra_dialog = None super(HistoryScreen, self).__init__(**kwargs) def show_item(self, obj): key = obj.key tx = self.app.wallet.db.get_transaction(key) if not tx: return self.app.tx_dialog(tx) def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance): is_lightning = tx_item.get('lightning', False) timestamp = tx_item['timestamp'] key = tx_item.get('txid') or 
tx_item['payment_hash'] if is_lightning: status = 0 txpos = tx_item['txpos'] status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp)) icon = "atlas://electrum/gui/kivy/theming/light/lightning" message = tx_item['label'] fee_msat = tx_item['fee_msat'] fee = int(fee_msat/1000) if fee_msat else None fee_text = '' if fee is None else 'fee: %d sat'%fee else: tx_hash = tx_item['txid'] conf = tx_item['confirmations'] txpos = tx_item['txpos_in_block'] or 0 height = tx_item['height'] tx_mined_info = TxMinedInfo(height=tx_item['height'], conf=tx_item['confirmations'], timestamp=tx_item['timestamp']) status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info) icon = "atlas://electrum/gui/kivy/theming/light/" + TX_ICONS[status] message = tx_item['label'] or tx_hash fee = tx_item['fee_sat'] fee_text = '' if fee is None else 'fee: %d sat'%fee ri = {} ri['screen'] = self ri['key'] = key ri['icon'] = icon ri['date'] = status_str ri['message'] = message ri['fee_text'] = fee_text value = tx_item['value'].value if value is not None: ri['is_mine'] = value <= 0 ri['amount'] = self.app.format_amount(value, is_diff = True) if 'fiat_value' in tx_item: ri['quote_text'] = str(tx_item['fiat_value']) return ri def update(self, see_all=False): wallet = self.app.wallet if wallet is None: return history = sorted(wallet.get_full_history(self.app.fx).values(), key=lambda x: x.get('timestamp') or float('inf'), reverse=True) history_card = self.screen.ids.history_container history_card.data = [self.get_card(item) for item in history] class SendScreen(CScreen): kvname = 'send' payment_request = None payment_request_queued = None parsed_URI = None def set_URI(self, text): if not self.app.wallet: self.payment_request_queued = text return try: uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop) except InvalidBitcoinURI as e: self.app.show_info(_("Error parsing URI") + f":\n{e}") return self.parsed_URI = uri amount = uri.get('amount') self.screen.address = uri.get('address', '') self.screen.message = uri.get('message', '') self.screen.amount = self.app.format_amount_and_units(amount) if amount else '' self.payment_request = None self.screen.is_lightning = False def set_ln_invoice(self, invoice): try: lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP) except Exception as e: self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == '' return self.screen.address = invoice self.screen.message = dict(lnaddr.tags).get('d', None) self.screen.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else '' self.payment_request = None self.screen.is_lightning = True def update(self): if not self.loaded: return if self.app.wallet and self.payment_request_queued: self.set_URI(self.payment_request_queued) self.payment_request_queued = None _list = self.app.wallet.get_invoices() _list = [x for x in _list if x and x.get('status') != PR_PAID or x.get('rhash') in self.app.wallet.lnworker.logs] payments_container = self.screen.ids.payments_container payments_container.data = [self.get_card(item) for item in _list] def show_item(self, obj): self.app.show_invoice(obj.is_lightning, obj.key) def get_card(self, item): invoice_type = item['type'] status, status_str = get_request_status(item) # convert to str if invoice_type == PR_TYPE_LN: key = item['rhash'] log = self.app.wallet.lnworker.logs.get(key) if item['status'] == PR_INFLIGHT and log: status_str += '... 
(%d)'%len(log) elif invoice_type == PR_TYPE_ONCHAIN: key = item['id'] else: raise Exception('unknown invoice type') return { 'is_lightning': invoice_type == PR_TYPE_LN, 'is_bip70': 'bip70' in item, 'screen': self, 'status': status, 'status_str': status_str, 'key': key, 'memo': item['message'], 'amount': self.app.format_amount_and_units(item['amount'] or 0), } def do_clear(self): self.screen.amount = '' self.screen.message = '' self.screen.address = '' self.payment_request = None self.screen.locked = False self.parsed_URI = None def set_request(self, pr): self.screen.address = pr.get_requestor() amount = pr.get_amount() self.screen.amount = self.app.format_amount_and_units(amount) if amount else '' self.screen.message = pr.get_memo() self.screen.locked = True self.payment_request = pr def do_paste(self): data = self.app._clipboard.paste().strip() if not data: self.app.show_info(_("Clipboard is empty")) return # try to decode as transaction try: raw_tx = tx_from_str(data) tx = Transaction(raw_tx) tx.deserialize() except: tx = None if tx: self.app.tx_dialog(tx) return lower = data.lower() if lower.startswith('lightning:ln'): lower = lower[10:] # try to decode as URI/address if lower.startswith('ln'): self.set_ln_invoice(lower) else: self.set_URI(data) def read_invoice(self): address = str(self.screen.address) if not address: self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Bitcoin address or a payment request')) return if not self.screen.amount: self.app.show_error(_('Please enter an amount')) return try: amount = self.app.get_amount(self.screen.amount) except: self.app.show_error(_('Invalid amount') + ':\n' + self.screen.amount) return message = self.screen.message if self.screen.is_lightning: return self.app.wallet.lnworker.parse_bech32_invoice(address) else: if not bitcoin.is_address(address): self.app.show_error(_('Invalid Bitcoin Address') + ':\n' + address) return outputs = [TxOutput(TYPE_ADDRESS, address, amount)] return self.app.wallet.create_invoice(outputs, message, self.payment_request, self.parsed_URI) def do_save(self): invoice = self.read_invoice() if not invoice: return self.app.wallet.save_invoice(invoice) self.do_clear() self.update() def do_pay(self): invoice = self.read_invoice() if not invoice: return self.app.wallet.save_invoice(invoice) self.do_clear() self.update() self.do_pay_invoice(invoice) def do_pay_invoice(self, invoice): if invoice['type'] == PR_TYPE_LN: self._do_pay_lightning(invoice) return elif invoice['type'] == PR_TYPE_ONCHAIN: do_pay = lambda rbf: self._do_pay_onchain(invoice, rbf) if self.app.electrum_config.get('use_rbf'): d = Question(_('Should this transaction be replaceable?'), do_pay) d.open() else: do_pay(False) else: raise Exception('unknown invoice type') def _do_pay_lightning(self, invoice): attempts = 10 threading.Thread(target=self.app.wallet.lnworker.pay, args=(invoice['invoice'], invoice['amount'], attempts)).start() def _do_pay_onchain(self, invoice, rbf): # make unsigned transaction outputs = invoice['outputs'] # type: List[TxOutput] amount = sum(map(lambda x: x.value, outputs)) coins = self.app.wallet.get_spendable_coins(None) try: tx = self.app.wallet.make_unsigned_transaction(coins, outputs, None) except NotEnoughFunds: self.app.show_error(_("Not enough funds")) return except Exception as e: traceback.print_exc(file=sys.stdout) self.app.show_error(repr(e)) return if rbf: tx.set_rbf(True) fee = tx.get_fee() msg = [ _("Amount to be sent") + ": " + self.app.format_amount_and_units(amount), _("Mining fee") + ": " + 
self.app.format_amount_and_units(fee), ] x_fee = run_hook('get_tx_extra_fee', self.app.wallet, tx) if x_fee: x_fee_address, x_fee_amount = x_fee msg.append(_("Additional fees") + ": " + self.app.format_amount_and_units(x_fee_amount)) feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE if fee > feerate_warning * tx.estimated_size() / 1000: msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high.")) msg.append(_("Enter your PIN code to proceed")) self.app.protected('\n'.join(msg), self.send_tx, (tx, invoice)) def send_tx(self, tx, invoice, password): if self.app.wallet.has_password() and password is None: return def on_success(tx): if tx.is_complete(): self.app.broadcast(tx, invoice) else: self.app.tx_dialog(tx) def on_failure(error): self.app.show_error(error) if self.app.wallet.can_sign(tx): self.app.show_info("Signing...") self.app.sign_tx(tx, password, on_success, on_failure) else: self.app.tx_dialog(tx) class ReceiveScreen(CScreen): kvname = 'receive' def __init__(self, **kwargs): super(ReceiveScreen, self).__init__(**kwargs) Clock.schedule_interval(lambda dt: self.update(), 5) def expiry(self): return self.app.electrum_config.get('request_expiry', 3600) # 1 hour def clear(self): self.screen.address = '' self.screen.amount = '' self.screen.message = '' self.screen.lnaddr = '' def set_address(self, addr): self.screen.address = addr def on_address(self, addr): req = self.app.wallet.get_request(addr) self.screen.status = '' if req: self.screen.message = req.get('memo', '') amount = req.get('amount') self.screen.amount = self.app.format_amount_and_units(amount) if amount else '' status = req.get('status', PR_UNKNOWN) self.screen.status = _('Payment received') if status == PR_PAID else '' def get_URI(self): from electrum.util import create_bip21_uri amount = self.screen.amount if amount: a, u = self.screen.amount.split() assert u == self.app.base_unit amount = Decimal(a) * pow(10, self.app.decimal_point()) return create_bip21_uri(self.screen.address, amount, self.screen.message) def do_copy(self): uri = self.get_URI() self.app._clipboard.copy(uri) self.app.show_info(_('Request copied to clipboard')) def new_request(self, lightning): amount = self.screen.amount amount = self.app.get_amount(amount) if amount else 0 message = self.screen.message if lightning: key = self.app.wallet.lnworker.add_request(amount, message, self.expiry()) else: addr = self.screen.address or self.app.wallet.get_unused_address() if not addr: self.app.show_info(_('No address available. 
Please remove some of your pending requests.')) return self.screen.address = addr req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry()) self.app.wallet.add_payment_request(req) key = addr self.clear() self.update() self.app.show_request(lightning, key) def get_card(self, req): is_lightning = req.get('type') == PR_TYPE_LN if not is_lightning: address = req['address'] key = address else: key = req['rhash'] address = req['invoice'] amount = req.get('amount') description = req.get('memo', '') status, status_str = get_request_status(req) ci = {} ci['screen'] = self ci['address'] = address ci['is_lightning'] = is_lightning ci['key'] = key ci['amount'] = self.app.format_amount_and_units(amount) if amount else '' ci['memo'] = description ci['status'] = status_str ci['is_expired'] = status == PR_EXPIRED return ci def update(self): if not self.loaded: return _list = self.app.wallet.get_sorted_requests() requests_container = self.screen.ids.requests_container requests_container.data = [self.get_card(item) for item in _list if item.get('status') != PR_PAID] def show_item(self, obj): self.app.show_request(obj.is_lightning, obj.key) def expiration_dialog(self, obj): from .dialogs.choice_dialog import ChoiceDialog def callback(c): self.app.electrum_config.set_key('request_expiry', c) d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback) d.open() def clear_requests_dialog(self): expired = [req for req in self.app.wallet.get_sorted_requests() if req['status'] == PR_EXPIRED] if len(expired) == 0: return def callback(c): if c: for req in expired: is_lightning = req.get('lightning', False) key = req['rhash'] if is_lightning else req['address'] self.app.wallet.delete_request(key) self.update() d = Question(_('Delete expired requests?'), callback) d.open() class TabbedCarousel(Factory.TabbedPanel): '''Custom TabbedPanel using a carousel used in the Main Screen ''' carousel = ObjectProperty(None) def animate_tab_to_center(self, value): scrlv = self._tab_strip.parent if not scrlv: return idx = self.tab_list.index(value) n = len(self.tab_list) if idx in [0, 1]: scroll_x = 1 elif idx in [n-1, n-2]: scroll_x = 0 else: scroll_x = 1. 
* (n - idx - 1) / (n - 1) mation = Factory.Animation(scroll_x=scroll_x, d=.25) mation.cancel_all(scrlv) mation.start(scrlv) def on_current_tab(self, instance, value): self.animate_tab_to_center(value) def on_index(self, instance, value): current_slide = instance.current_slide if not hasattr(current_slide, 'tab'): return tab = current_slide.tab ct = self.current_tab try: if ct.text != tab.text: carousel = self.carousel carousel.slides[ct.slide].dispatch('on_leave') self.switch_to(tab) carousel.slides[tab.slide].dispatch('on_enter') except AttributeError: current_slide.dispatch('on_enter') def switch_to(self, header): # we have to replace the functionality of the original switch_to if not header: return if not hasattr(header, 'slide'): header.content = self.carousel super(TabbedCarousel, self).switch_to(header) try: tab = self.tab_list[-1] except IndexError: return self._current_tab = tab tab.state = 'down' return carousel = self.carousel self.current_tab.state = "normal" header.state = 'down' self._current_tab = header # set the carousel to load the appropriate slide # saved in the screen attribute of the tab head slide = carousel.slides[header.slide] if carousel.current_slide != slide: carousel.current_slide.dispatch('on_leave') carousel.load_slide(slide) slide.dispatch('on_enter') def add_widget(self, widget, index=0): if isinstance(widget, Factory.CScreen): self.carousel.add_widget(widget) return super(TabbedCarousel, self).add_widget(widget, index=index)
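# Minimal usage sketch (illustrative, not part of the original module): a new screen only needs
# to set `kvname`; `CScreen.load_screen()` then loads
# electrum/gui/kivy/uix/ui_screens/<kvname>.kv on first activation and calls `update()`.
# The class name and kv file below are hypothetical.
class AddressScreen(CScreen):
    kvname = 'address'  # expects electrum/gui/kivy/uix/ui_screens/address.kv

    def update(self):
        if not self.loaded:
            return
        # refresh widgets defined in address.kv here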
py
1a5a9300e1b2a6c07873885804b7fd30bd9e6f6b
import os
import re

regex = list()

dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + '/../regex.txt') as f:
    lines = f.readlines()

for line in lines:
    if len(line) > 10:
        # Remove the \n at the end
        regex.append(re.compile('^' + line[1:-2] + '$'))


class Pincode:
    @staticmethod
    def validate(code):
        for r in regex:
            if r.match(code) is not None:
                return True
        return False
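# Minimal usage sketch (illustrative, not part of the original file); the candidate pincodes are
# hypothetical and the outcome depends entirely on the patterns loaded from regex.txt.
if __name__ == '__main__':
    for candidate in ('110001', 'not-a-pincode'):
        print(candidate, Pincode.validate(candidate))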
py
1a5a9395f14d7da14a7bd9b8c78ae63fe86a5b6e
from __future__ import absolute_import
import numpy as np
from numpy.testing.utils import assert_allclose as numpy_allclose

from brian2 import prefs
from brian2.units.fundamentalunits import have_same_dimensions


def assert_allclose(actual, desired, rtol=4.5e8, atol=0, **kwds):
    '''
    Thin wrapper around numpy's `~numpy.testing.utils.assert_allclose` function. The tolerance depends on the
    floating point precision as defined by the `core.default_float_dtype` preference.

    Parameters
    ----------
    actual : `numpy.ndarray`
        The results to check.
    desired : `numpy.ndarray`
        The expected results.
    rtol : float, optional
        The relative tolerance which will be multiplied with the machine epsilon of the type set as
        `core.default_float_dtype`.
    atol : float, optional
        The absolute tolerance which will be multiplied with the machine epsilon of the type set as
        `core.default_float_dtype`.
    '''
    assert have_same_dimensions(actual, desired)
    eps = np.finfo(prefs['core.default_float_dtype']).eps
    rtol = eps*rtol
    atol = eps*atol
    numpy_allclose(np.asarray(actual), np.asarray(desired), rtol=rtol, atol=atol, **kwds)
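# Minimal usage sketch (illustrative, not part of the original module). With the default float64
# preference, eps is ~2.2e-16, so the default rtol=4.5e8 corresponds to a relative tolerance of
# roughly 1e-7. The arrays below are hypothetical.
if __name__ == '__main__':
    a = np.array([1.0, 2.0, 3.0])
    assert_allclose(a, a * (1 + 1e-9))   # passes: relative error 1e-9 is well below eps * 4.5e8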
py
1a5a93d595be67948828acf24c9bdd9713bb53e2
import custom_paths
from pathlib import Path
import utils
import shutil
from typing import *


# This file contains some utility functions to modify/rename/remove saved results.
# It can be used for example if the names of some experiment results should be changed.


def rename_alg(exp_name: str, old_name: str, new_name: str):
    print(f'Renaming alg "{old_name}" to "{new_name}" for {exp_name} experiments')
    results_path = Path(custom_paths.get_results_path()) / exp_name
    for task_path in results_path.iterdir():
        if utils.existsDir(task_path / old_name):
            shutil.move(task_path / old_name, task_path / new_name)


def remove_alg(exp_name: str, alg_name: str):
    print(f'Removing alg "{alg_name}" for {exp_name} experiments')
    results_path = Path(custom_paths.get_results_path()) / exp_name
    for task_path in results_path.iterdir():
        if utils.existsDir(task_path / alg_name):
            shutil.rmtree(task_path / alg_name)


def replace_in_alg_name(exp_name: str, old_name: str, new_name: str):
    print(f'Replacing "{old_name}" with "{new_name}" in alg names for {exp_name} experiments')
    results_path = Path(custom_paths.get_results_path()) / exp_name
    for task_path in results_path.iterdir():
        for alg_path in task_path.iterdir():
            alg_name = str(alg_path.name)
            new_alg_name = alg_name.replace(old_name, new_name)
            if alg_name != new_alg_name:
                shutil.move(task_path / alg_name, task_path / new_alg_name)


def process_results(exp_name: str, f: Callable):
    print(f'Applying function to results for {exp_name} experiments')
    results_path = Path(custom_paths.get_results_path()) / exp_name
    for task_path in results_path.iterdir():
        for alg_path in task_path.iterdir():
            for split_path in alg_path.iterdir():
                file_path = split_path / 'results.json'
                if utils.existsFile(file_path):
                    results = utils.deserialize(file_path, use_json=True)
                    results = f(results)
                    utils.serialize(file_path, results, use_json=True)


if __name__ == '__main__':
    pass
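# Minimal usage sketch (illustrative, not part of the original file); kept commented out because
# these calls rename and rewrite saved results on disk. The experiment and algorithm names are
# hypothetical.
# if __name__ == '__main__':
#     rename_alg('exp_sgemm', 'NN_relu', 'NN_relu_v2')
#     process_results('exp_sgemm', lambda results: dict(results, postprocessed=True))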
py
1a5a9462ed8375127a3f2991be785f50880e6437
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """BERT classification or regression finetuning runner in TF 2.x.""" import functools import json import math import os # Import libraries from absl import app from absl import flags from absl import logging import gin import tensorflow as tf from official.common import distribute_utils from official.modeling import performance from official.nlp import optimization from official.nlp.bert import bert_models from official.nlp.bert import common_flags from official.nlp.bert import configs as bert_configs from official.nlp.bert import input_pipeline from official.nlp.bert import model_saving_utils from official.utils.misc import keras_utils flags.DEFINE_enum( 'mode', 'train_and_eval', ['train_and_eval', 'export_only', 'predict'], 'One of {"train_and_eval", "export_only", "predict"}. `train_and_eval`: ' 'trains the model and evaluates in the meantime. ' '`export_only`: will take the latest checkpoint inside ' 'model_dir and export a `SavedModel`. `predict`: takes a checkpoint and ' 'restores the model to output predictions on the test set.') flags.DEFINE_string('train_data_path', None, 'Path to training data for BERT classifier.') flags.DEFINE_string('eval_data_path', None, 'Path to evaluation data for BERT classifier.') flags.DEFINE_string( 'input_meta_data_path', None, 'Path to file that contains meta data about input ' 'to be used for training and evaluation.') flags.DEFINE_integer('train_data_size', None, 'Number of training samples ' 'to use. If None, uses the full train data. ' '(default: None).') flags.DEFINE_string('predict_checkpoint_path', None, 'Path to the checkpoint for predictions.') flags.DEFINE_integer( 'num_eval_per_epoch', 1, 'Number of evaluations per epoch. The purpose of this flag is to provide ' 'more granular evaluation scores and checkpoints. 
For example, if original ' 'data has N samples and num_eval_per_epoch is n, then each epoch will be ' 'evaluated every N/n samples.') flags.DEFINE_integer('train_batch_size', 32, 'Batch size for training.') flags.DEFINE_integer('eval_batch_size', 32, 'Batch size for evaluation.') common_flags.define_common_bert_flags() FLAGS = flags.FLAGS LABEL_TYPES_MAP = {'int': tf.int64, 'float': tf.float32} def get_loss_fn(num_classes): """Gets the classification loss function.""" def classification_loss_fn(labels, logits): """Classification loss.""" labels = tf.squeeze(labels) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot( tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32) per_example_loss = -tf.reduce_sum( tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1) return tf.reduce_mean(per_example_loss) return classification_loss_fn def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size, is_training, label_type=tf.int64, include_sample_weights=False, num_samples=None): """Gets a closure to create a dataset.""" def _dataset_fn(ctx=None): """Returns tf.data.Dataset for distributed BERT pretraining.""" batch_size = ctx.get_per_replica_batch_size( global_batch_size) if ctx else global_batch_size dataset = input_pipeline.create_classifier_dataset( tf.io.gfile.glob(input_file_pattern), max_seq_length, batch_size, is_training=is_training, input_pipeline_context=ctx, label_type=label_type, include_sample_weights=include_sample_weights, num_samples=num_samples) return dataset return _dataset_fn def run_bert_classifier(strategy, bert_config, input_meta_data, model_dir, epochs, steps_per_epoch, steps_per_loop, eval_steps, warmup_steps, initial_lr, init_checkpoint, train_input_fn, eval_input_fn, training_callbacks=True, custom_callbacks=None, custom_metrics=None): """Run BERT classifier training using low-level API.""" max_seq_length = input_meta_data['max_seq_length'] num_classes = input_meta_data.get('num_labels', 1) is_regression = num_classes == 1 def _get_classifier_model(): """Gets a classifier model.""" classifier_model, core_model = ( bert_models.classifier_model( bert_config, num_classes, max_seq_length, hub_module_url=FLAGS.hub_module_url, hub_module_trainable=FLAGS.hub_module_trainable)) optimizer = optimization.create_optimizer(initial_lr, steps_per_epoch * epochs, warmup_steps, FLAGS.end_lr, FLAGS.optimizer_type) classifier_model.optimizer = performance.configure_optimizer( optimizer, use_float16=common_flags.use_float16(), use_graph_rewrite=common_flags.use_graph_rewrite(), use_experimental_api=False) return classifier_model, core_model # tf.keras.losses objects accept optional sample_weight arguments (eg. coming # from the dataset) to compute weighted loss, as used for the regression # tasks. The classification tasks, using the custom get_loss_fn don't accept # sample weights though. loss_fn = (tf.keras.losses.MeanSquaredError() if is_regression else get_loss_fn(num_classes)) # Defines evaluation metrics function, which will create metrics in the # correct device and strategy scope. if custom_metrics: metric_fn = custom_metrics elif is_regression: metric_fn = functools.partial( tf.keras.metrics.MeanSquaredError, 'mean_squared_error', dtype=tf.float32) else: metric_fn = functools.partial( tf.keras.metrics.SparseCategoricalAccuracy, 'accuracy', dtype=tf.float32) # Start training using Keras compile/fit API. 
logging.info('Training using TF 2.x Keras compile/fit API with ' 'distribution strategy.') return run_keras_compile_fit( model_dir, strategy, _get_classifier_model, train_input_fn, eval_input_fn, loss_fn, metric_fn, init_checkpoint, epochs, steps_per_epoch, steps_per_loop, eval_steps, training_callbacks=training_callbacks, custom_callbacks=custom_callbacks) def run_keras_compile_fit(model_dir, strategy, model_fn, train_input_fn, eval_input_fn, loss_fn, metric_fn, init_checkpoint, epochs, steps_per_epoch, steps_per_loop, eval_steps, training_callbacks=True, custom_callbacks=None): """Runs BERT classifier model using Keras compile/fit API.""" with strategy.scope(): training_dataset = train_input_fn() evaluation_dataset = eval_input_fn() if eval_input_fn else None bert_model, sub_model = model_fn() optimizer = bert_model.optimizer if init_checkpoint: checkpoint = tf.train.Checkpoint(model=sub_model, encoder=sub_model) checkpoint.read(init_checkpoint).assert_existing_objects_matched() if not isinstance(metric_fn, (list, tuple)): metric_fn = [metric_fn] bert_model.compile( optimizer=optimizer, loss=loss_fn, metrics=[fn() for fn in metric_fn], steps_per_execution=steps_per_loop) summary_dir = os.path.join(model_dir, 'summaries') summary_callback = tf.keras.callbacks.TensorBoard(summary_dir) checkpoint = tf.train.Checkpoint(model=bert_model, optimizer=optimizer) checkpoint_manager = tf.train.CheckpointManager( checkpoint, directory=model_dir, max_to_keep=None, step_counter=optimizer.iterations, checkpoint_interval=0) checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager) if training_callbacks: if custom_callbacks is not None: custom_callbacks += [summary_callback, checkpoint_callback] else: custom_callbacks = [summary_callback, checkpoint_callback] history = bert_model.fit( x=training_dataset, validation_data=evaluation_dataset, steps_per_epoch=steps_per_epoch, epochs=epochs, validation_steps=eval_steps, callbacks=custom_callbacks) stats = {'total_training_steps': steps_per_epoch * epochs} if 'loss' in history.history: stats['train_loss'] = history.history['loss'][-1] if 'val_accuracy' in history.history: stats['eval_metrics'] = history.history['val_accuracy'][-1] return bert_model, stats def get_predictions_and_labels(strategy, trained_model, eval_input_fn, is_regression=False, return_probs=False): """Obtains predictions of trained model on evaluation data. Note that list of labels is returned along with the predictions because the order changes on distributing dataset over TPU pods. Args: strategy: Distribution strategy. trained_model: Trained model with preloaded weights. eval_input_fn: Input function for evaluation data. is_regression: Whether it is a regression task. return_probs: Whether to return probabilities of classes. Returns: predictions: List of predictions. labels: List of gold labels corresponding to predictions. 
""" @tf.function def test_step(iterator): """Computes predictions on distributed devices.""" def _test_step_fn(inputs): """Replicated predictions.""" inputs, labels = inputs logits = trained_model(inputs, training=False) if not is_regression: probabilities = tf.nn.softmax(logits) return probabilities, labels else: return logits, labels outputs, labels = strategy.run(_test_step_fn, args=(next(iterator),)) # outputs: current batch logits as a tuple of shard logits outputs = tf.nest.map_structure(strategy.experimental_local_results, outputs) labels = tf.nest.map_structure(strategy.experimental_local_results, labels) return outputs, labels def _run_evaluation(test_iterator): """Runs evaluation steps.""" preds, golds = list(), list() try: with tf.experimental.async_scope(): while True: probabilities, labels = test_step(test_iterator) for cur_probs, cur_labels in zip(probabilities, labels): if return_probs: preds.extend(cur_probs.numpy().tolist()) else: preds.extend(tf.math.argmax(cur_probs, axis=1).numpy()) golds.extend(cur_labels.numpy().tolist()) except (StopIteration, tf.errors.OutOfRangeError): tf.experimental.async_clear_error() return preds, golds test_iter = iter(strategy.distribute_datasets_from_function(eval_input_fn)) predictions, labels = _run_evaluation(test_iter) return predictions, labels def export_classifier(model_export_path, input_meta_data, bert_config, model_dir): """Exports a trained model as a `SavedModel` for inference. Args: model_export_path: a string specifying the path to the SavedModel directory. input_meta_data: dictionary containing meta data about input and model. bert_config: Bert configuration file to define core bert layers. model_dir: The directory where the model weights and training/evaluation summaries are stored. Raises: Export path is not specified, got an empty string or None. """ if not model_export_path: raise ValueError('Export path is not specified: %s' % model_export_path) if not model_dir: raise ValueError('Export path is not specified: %s' % model_dir) # Export uses float32 for now, even if training uses mixed precision. tf.keras.mixed_precision.set_global_policy('float32') classifier_model = bert_models.classifier_model( bert_config, input_meta_data.get('num_labels', 1), hub_module_url=FLAGS.hub_module_url, hub_module_trainable=False)[0] model_saving_utils.export_bert_model( model_export_path, model=classifier_model, checkpoint_dir=model_dir) def run_bert(strategy, input_meta_data, model_config, train_input_fn=None, eval_input_fn=None, init_checkpoint=None, custom_callbacks=None, custom_metrics=None): """Run BERT training.""" # Enables XLA in Session Config. Should not be set for TPU. 
keras_utils.set_session_config(FLAGS.enable_xla) performance.set_mixed_precision_policy(common_flags.dtype(), use_experimental_api=False) epochs = FLAGS.num_train_epochs * FLAGS.num_eval_per_epoch train_data_size = ( input_meta_data['train_data_size'] // FLAGS.num_eval_per_epoch) if FLAGS.train_data_size: train_data_size = min(train_data_size, FLAGS.train_data_size) logging.info('Updated train_data_size: %s', train_data_size) steps_per_epoch = int(train_data_size / FLAGS.train_batch_size) warmup_steps = int(epochs * train_data_size * 0.1 / FLAGS.train_batch_size) eval_steps = int( math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size)) if not strategy: raise ValueError('Distribution strategy has not been specified.') if not custom_callbacks: custom_callbacks = [] if FLAGS.log_steps: custom_callbacks.append( keras_utils.TimeHistory( batch_size=FLAGS.train_batch_size, log_steps=FLAGS.log_steps, logdir=FLAGS.model_dir)) trained_model, _ = run_bert_classifier( strategy, model_config, input_meta_data, FLAGS.model_dir, epochs, steps_per_epoch, FLAGS.steps_per_loop, eval_steps, warmup_steps, FLAGS.learning_rate, init_checkpoint or FLAGS.init_checkpoint, train_input_fn, eval_input_fn, custom_callbacks=custom_callbacks, custom_metrics=custom_metrics) if FLAGS.model_export_path: model_saving_utils.export_bert_model( FLAGS.model_export_path, model=trained_model) return trained_model def custom_main(custom_callbacks=None, custom_metrics=None): """Run classification or regression. Args: custom_callbacks: list of tf.keras.Callbacks passed to training loop. custom_metrics: list of metrics passed to the training loop. """ gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param) with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: input_meta_data = json.loads(reader.read().decode('utf-8')) label_type = LABEL_TYPES_MAP[input_meta_data.get('label_type', 'int')] include_sample_weights = input_meta_data.get('has_sample_weights', False) if not FLAGS.model_dir: FLAGS.model_dir = '/tmp/bert20/' bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.mode == 'export_only': export_classifier(FLAGS.model_export_path, input_meta_data, bert_config, FLAGS.model_dir) return strategy = distribute_utils.get_distribution_strategy( distribution_strategy=FLAGS.distribution_strategy, num_gpus=FLAGS.num_gpus, tpu_address=FLAGS.tpu) eval_input_fn = get_dataset_fn( FLAGS.eval_data_path, input_meta_data['max_seq_length'], FLAGS.eval_batch_size, is_training=False, label_type=label_type, include_sample_weights=include_sample_weights) if FLAGS.mode == 'predict': num_labels = input_meta_data.get('num_labels', 1) with strategy.scope(): classifier_model = bert_models.classifier_model( bert_config, num_labels)[0] checkpoint = tf.train.Checkpoint(model=classifier_model) latest_checkpoint_file = ( FLAGS.predict_checkpoint_path or tf.train.latest_checkpoint(FLAGS.model_dir)) assert latest_checkpoint_file logging.info('Checkpoint file %s found and restoring from ' 'checkpoint', latest_checkpoint_file) checkpoint.restore( latest_checkpoint_file).assert_existing_objects_matched() preds, _ = get_predictions_and_labels( strategy, classifier_model, eval_input_fn, is_regression=(num_labels == 1), return_probs=True) output_predict_file = os.path.join(FLAGS.model_dir, 'test_results.tsv') with tf.io.gfile.GFile(output_predict_file, 'w') as writer: logging.info('***** Predict results *****') for probabilities in preds: output_line = '\t'.join( str(class_probability) for 
class_probability in probabilities) + '\n' writer.write(output_line) return if FLAGS.mode != 'train_and_eval': raise ValueError('Unsupported mode is specified: %s' % FLAGS.mode) train_input_fn = get_dataset_fn( FLAGS.train_data_path, input_meta_data['max_seq_length'], FLAGS.train_batch_size, is_training=True, label_type=label_type, include_sample_weights=include_sample_weights, num_samples=FLAGS.train_data_size) run_bert( strategy, input_meta_data, bert_config, train_input_fn, eval_input_fn, custom_callbacks=custom_callbacks, custom_metrics=custom_metrics) def main(_): custom_main(custom_callbacks=None, custom_metrics=None) if __name__ == '__main__': flags.mark_flag_as_required('bert_config_file') flags.mark_flag_as_required('input_meta_data_path') flags.mark_flag_as_required('model_dir') app.run(main)
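# Illustrative invocation (not part of the original file); all paths are hypothetical placeholders
# and flags not shown keep their defaults (several flags are defined in common_flags).
#
#   python run_classifier.py \
#     --mode=train_and_eval \
#     --input_meta_data_path=/data/task_meta_data \
#     --train_data_path=/data/task_train.tf_record \
#     --eval_data_path=/data/task_eval.tf_record \
#     --bert_config_file=/ckpt/bert_config.json \
#     --init_checkpoint=/ckpt/bert_model.ckpt \
#     --model_dir=/tmp/bert_output \
#     --train_batch_size=32 --eval_batch_size=32 --num_train_epochs=3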
py
1a5a949ca471515a028a8b1a4e7e183f8da91405
""" This file offers the methods to automatically retrieve the graph Leuconostoc gasicomitatum. The graph is automatically retrieved from the STRING repository. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:42:27.264861 The undirected graph Leuconostoc gasicomitatum has 1900 nodes and 96547 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.05352 and has 6 connected components, where the component with most nodes has 1886 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 77, the mean node degree is 101.63, and the node degree mode is 9. The top 5 most central nodes are 762550.LEGAS_0771 (degree 795), 762550.LEGAS_1828 (degree 664), 762550.LEGAS_1522 (degree 602), 762550.LEGAS_0220 (degree 548) and 762550.LEGAS_1445 (degree 544). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import LeuconostocGasicomitatum # Then load the graph graph = LeuconostocGasicomitatum() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error def LeuconostocGasicomitatum( directed: bool = False, verbose: int = 2, cache_path: str = "graphs/string", **additional_graph_kwargs: Dict ) -> EnsmallenGraph: """Return new instance of the Leuconostoc gasicomitatum graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Leuconostoc gasicomitatum graph. 
Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:42:27.264861 The undirected graph Leuconostoc gasicomitatum has 1900 nodes and 96547 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.05352 and has 6 connected components, where the component with most nodes has 1886 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 77, the mean node degree is 101.63, and the node degree mode is 9. The top 5 most central nodes are 762550.LEGAS_0771 (degree 795), 762550.LEGAS_1828 (degree 664), 762550.LEGAS_1522 (degree 602), 762550.LEGAS_0220 (degree 548) and 762550.LEGAS_1445 (degree 544). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import LeuconostocGasicomitatum # Then load the graph graph = LeuconostocGasicomitatum() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ return AutomaticallyRetrievedGraph( graph_name="LeuconostocGasicomitatum", dataset="string", directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
py
1a5a951d3c1f56a36483b684eaa476bc3f85eae6
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # S_FlexProbBootstrap [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_FlexProbBootstrap&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerFPspec). # ## Prepare the environment # + import os import os.path as path import sys sys.path.append(path.abspath('../../functions-legacy')) from collections import namedtuple from numpy import arange, array, zeros, diff, log from numpy import min as npmin, max as npmax from numpy.random import choice from scipy.io import loadmat import matplotlib.pyplot as plt from matplotlib.pyplot import figure, bar, xlim, ylim, scatter, ylabel, \ xlabel, title, xticks, yticks import matplotlib.dates as mdates plt.style.use('seaborn') from CONFIG import GLOBAL_DB, TEMPORARY_DB from ARPM_utils import save_plot, struct_to_dict, date_mtop from HistogramFP import HistogramFP from EffectiveScenarios import EffectiveScenarios from Stats import Stats from ColorCodedFP import ColorCodedFP # - # ## Upload database # + try: db = loadmat(os.path.join(GLOBAL_DB, 'db_Stocks'), squeeze_me=True) except FileNotFoundError: db = loadmat(os.path.join(TEMPORARY_DB, 'db_Stocks'), squeeze_me=True) SPX = struct_to_dict(db['SPX']) # - # ## Compute the realized time series of the S&P 500 log-returns # + SPX_ = SPX.Price_close date = SPX.Date epsi = diff(log(SPX_)) t_ = len(epsi) epsi = epsi.reshape(1,-1) date = date[1:] # - # ## FLEXIBLE PROBABILITIES FROM BOOTSTRAP # + k_ = 252 # size of subsamples q_ = 5 # number of subsamples (and frames) prob_bs = zeros((q_, t_)) ens = zeros((1, q_)) typ = namedtuple('type','Entropy') typ.Entropy = 'Exp' for q in range(q_): r = choice(arange(t_), size=k_, replace=False) prob_bs[q, r] = 1 / k_ ens[0,q] = EffectiveScenarios(prob_bs[[q],:], typ) # - # ## HFP histogram and statistics # + q_ = prob_bs.shape[0] option = namedtuple('option', 'n_bins') option.n_bins = 10*log(epsi.shape[1]) p, x = {}, {} for q in range(q_): p[q], x[q] = HistogramFP(epsi, prob_bs[[q],:], option) mu, sdev, VaR, CVaR, skewness, kurtosis = Stats(epsi, prob_bs) # - # ## Figure date_tick = arange(99, t_-1, 680) date_dt = array([date_mtop(i) for i in date]) myFmt = mdates.DateFormatter('%d-%b-%Y') # ## q=0 for q in range(2): figure() # FP profile plt.subplot2grid((3, 3), (0, 0), colspan=2) plt.gca().set_facecolor('white') bar(date_dt, prob_bs[q, :], facecolor=[0.5, 0.5, 0.5], edgecolor=[0.5, 0.5, 0.5]) xlim([min(date_dt), max(date_dt)]) xticks(date_dt[date_tick]) plt.gca().xaxis.set_major_formatter(myFmt) ylim([0, 1.1 * npmax(prob_bs[q, :])]) yticks([]) title('FLEXIBLE PROBABILITIES FROM BOOTSTRAP') ylabel('probability') TEXT = 'Effective Num.Scenarios = % 3.0f' % ens[0, q] plt.text(min(date_dt), 1.05 * npmax(prob_bs[q, :]), TEXT, horizontalalignment='left') # scatter colormap and colors CM, C = ColorCodedFP(prob_bs[[q], :], 10 ** -20, npmax(prob_bs[:5, :]), arange(0, 0.95, 0.05), 0, 1, [1, 0]) # Time series of S&P500 log-rets ax = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2) scatter(date_dt, epsi, 15, c=C, marker='.', cmap=CM) xlim([min(date_dt), max(date_dt)]) xticks(date_dt[date_tick]) plt.gca().xaxis.set_major_formatter(myFmt) ax.set_facecolor('white') ylim([1.1 * 
npmin(epsi), 1.1 * npmax(epsi)]) ylabel('returns') title('S&P') # HFP histogram plt.subplot2grid((3, 3), (1, 2), rowspan=2) plt.gca().set_facecolor('white') plt.barh(x[q][:-1], p[q][0], height=x[q][1] - x[q][0], facecolor=[0.7, 0.7, 0.7], edgecolor=[0.5, 0.5, 0.5]) xlim([0, 1.05 * npmax(p[q])]) xticks([]) yticks([]), ylim([1.1 * npmin(epsi), 1.1 * npmax(epsi)]) xlabel('probability') plt.tight_layout(); # statistics TEXT = 'Mean % 3.3f \nSdev %3.3f \nVaR %3.3f \nCVaR %3.3f \nSkew %3.3f \nKurt %3.3f' % ( mu[q], sdev[q], VaR[q], CVaR[q], skewness[q], kurtosis[q]) plt.text(0.5 * npmax(p[q]), 0.08, TEXT, horizontalalignment='left', verticalalignment='bottom'); # save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
py
1a5a9524a141285b062a867212e2de116e9b5c5e
from dataclasses import dataclass from enum import Enum from typing import Dict, List from loguru import logger from dome9 import Dome9Resource, Client, BaseDataclassRequest, APIUtils class IpListConsts(Enum): IP_LIST = 'IpList' @dataclass class IpDescriptorItem(BaseDataclassRequest): """IP list descriptor item :link https://api-v2-docs.dome9.com/#schemafalconetix-model-ipdescriptor :param ip: (Required) IP address with CIDR notation (e.g. 10.0.0.0/16 or 10.50.100.22/32) if omitted default of /32 will be assigned :type ip: str :param comment: (Optional) IP address description :type comment: str """ ip: str comment: str = None @logger.catch(reraise=True) def __post_init__(self): APIUtils.check_is_ip(self.ip) @dataclass class IpListRequest(BaseDataclassRequest): """IP list request: "Name" for name, "Description" for description and Items are list of addresses :link https://api-v2-docs.dome9.com/#schemadome9-web-api-models-iplistviewmodel :param name: (Required) IP list name :type name: str :param items: (Optional) List of IP descriptor items :type items: list(IpDescriptorItem) :param description: (Optional) IP list description :type description: str """ name: str items: List[IpDescriptorItem] = () description: str = None class IpList(Dome9Resource): def __init__(self, client: Client): super().__init__(client) def create(self, body: IpListRequest) -> Dict: """Add a new IP List :link https://api-v2-docs.dome9.com/#iplist_post :param body: IP list details :type body: IpListRequest :return https://api-v2-docs.dome9.com/#schemadome9-web-api-models-iplistviewmodel :rtype IpList """ return self._post(route=IpListConsts.IP_LIST.value, body=body) def get(self, ip_list_id: int) -> Dict: """Get an IP List by ID :link https://api-v2-docs.dome9.com/#iplist_get :param ip_list_id: ID of the IP list to get :type ip_list_id: int :return https://api-v2-docs.dome9.com/#schemadome9-web-api-models-iplistviewmodel :rtype IpList """ route = f'{IpListConsts.IP_LIST.value}/{ip_list_id}' return self._get(route=route) def update(self, ip_list_id: int, body: IpListRequest) -> None: """Update an IP list. This will override the existing IP list :link https://api-v2-docs.dome9.com/#iplist_put :param ip_list_id: ID of the IP list to update :type ip_list_id: int :param body: IP list details :type body: IpListRequest """ route = f'{IpListConsts.IP_LIST.value}/{ip_list_id}' return self._put(route=route, body=body) def delete(self, ip_list_id: int) -> None: """Delete an IP List by ID :link https://api-v2-docs.dome9.com/#iplist_delete :param ip_list_id: ID of the IP list ot delete :type ip_list_id: int """ route = f'{IpListConsts.IP_LIST.value}/{ip_list_id}' return self._delete(route=route)
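# Minimal usage sketch (illustrative, not part of the original module). The request objects below
# are the classes defined above; the client credentials and the Client constructor arguments are
# assumptions and are therefore left commented out.
if __name__ == '__main__':
    request = IpListRequest(
        name='office-egress',
        description='Addresses allowed through the office egress',
        items=[IpDescriptorItem(ip='10.50.100.22/32', comment='example host')])
    # client = Client(access_id='...', secret_key='...')   # credential handling per the dome9 SDK
    # created = IpList(client).create(request)             # POST /IpList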
py
1a5a9611f1138f95132be7c8ce55622d1170ac81
#currently takes picture snip, inverts it, adds words from picture to array and prints it import os,sys,datetime,subprocess,time,shutil,inspect import numpy as np import cv2 from PIL import ImageGrab import pyautogui as ptg import pytesseract import xlsxwriter from PIL import Image, ImageEnhance, ImageOps pytesseract.pytesseract.tesseract_cmd = 'D:\\Program Files\\Python\\Python37-32\\Lib\\site-packages\\Tesseract-OCR\\tesseract' screenWidth, screenHeight = ptg.size() #Script detail variables and logging information scriptPath = inspect.getfile(inspect.currentframe()) # script filename (usually with path) scriptDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # script directory arguments = sys.argv
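# Hedged sketch (not part of the original file) of the flow described in the header comment:
# grab a screen region, invert it, OCR the words into a list and print them. The capture box
# coordinates are hypothetical.
def grab_words(bbox=(0, 0, screenWidth // 2, screenHeight // 2)):
    snip = ImageGrab.grab(bbox=bbox)                  # screenshot of the chosen region
    inverted = ImageOps.invert(snip.convert('RGB'))   # invert so the text OCRs more reliably
    text = pytesseract.image_to_string(inverted)
    return text.split()

if __name__ == '__main__':
    print(grab_words())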
py
1a5a969615c5ce639cba28404f28926fbcdecd50
from rest_framework import serializers from .models import Card class CardSerializer(serializers.ModelSerializer): class Meta: model = Card fields = '__all__'
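# Minimal usage sketch (illustrative, not part of the original file); kept commented out because it
# needs a configured Django project, and the available fields depend on the Card model definition.
#
# card = Card.objects.first()
# CardSerializer(card).data                      # serialize a model instance to a dict
# incoming = CardSerializer(data=request.data)   # deserialize/validate client input
# incoming.is_valid(raise_exception=True)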
py
1a5a97e07e6dd5ca66ad1e34d06841ff28cb7eac
import copy import urllib import time import re from urlparse import urlparse, parse_qs, urlunparse # from xml.sax.saxutils import unescape as xml_unescape ACTION_REGEX = re.compile("(.*?)\((.*)\)") def page_redux(page): pages = [] if "|" in page: prepages = page.split("|") else: prepages = [page] ranges = [] for p in prepages: if "-" in p: ranges = ranges + [i for i in range(int(p.split("-")[0]),int(p.split("-")[1])+1) if i not in pages and i not in ranges] else: if p not in pages: pages.append(int(p)) if len(pages) < 1: return None else: return pages + ranges def merge_dicts(*dict_args): result = {} for dictionary in dict_args: result.update(dictionary) return result def to_utf8(obj): if isinstance(obj, unicode): obj = obj.encode('utf-8', 'ignore') elif isinstance(obj, dict): obj = copy.deepcopy(obj) for key, val in obj.items(): obj[key] = to_utf8(val) elif obj is not None and hasattr(obj, "__iter__"): obj = obj.__class__([to_utf8(x) for x in obj]) else: pass return obj def to_unicode(obj): if isinstance(obj, basestring): try: obj = unicode(obj, 'utf-8') except TypeError: pass elif isinstance(obj, dict): obj = copy.deepcopy(obj) for key, val in obj.items(): obj[key] = to_unicode(val) elif obj is not None and hasattr(obj, "__iter__"): obj = obj.__class__([to_unicode(x) for x in obj]) else: pass return obj def number_to_text(number_text): if not number_text.isnumeric(): return number_text if number_text == "" or None: return "" else: number_text = int(number_text) numbers = [ u"zero", u"one", u"two", u"three", u"four", u"five", u"six", u"seven", u"eight", u"nine", u"ten", u"eleven", u"twelve", u"thirteen", u"fourteen", u"fifteen", u"sixteen", u"seventeen", u"eighteen", u"nineteen" ] if number_text < 20: return numbers[number_text] else: return "" def text_to_number(text): if text.isnumeric(): return text if text == "" or None: return "" else: numbers = [ u"zero", u"one", u"two", u"three", u"four", u"five", u"six", u"seven", u"eight", u"nine", u"ten", u"eleven", u"twelve", u"thirteen", u"fourteen", u"fifteen", u"sixteen", u"seventeen", u"eighteen", u"nineteen" ] numwords = {} for idx, word in enumerate(numbers): numwords[word] = idx if text.lower() in numwords: return numwords[text.lower()] else: return "" def equals(a, b): return to_unicode(a) == to_unicode(b) def contains(a, b): return to_unicode(a) in to_unicode(b) def is_ascii(s): try: if isinstance(s, basestring): s.decode() return True except UnicodeDecodeError: pass except UnicodeEncodeError: pass return False def urlencode_path(path): path = to_utf8(path) o = urlparse(path) query = parse_qs(o.query) path = urlunparse([o.scheme, o.netloc, o.path, o.params, urllib.urlencode(query, True), o.fragment]) return path def parse_year(text): try: return text.split("-")[0].strip() except: return '0' def date_to_timestamp(date_str, format="%Y-%m-%d"): if date_str: try: tt = time.strptime(date_str, format) return int(time.mktime(tt)) except: return 0 # 1970 return None def apply_text_actions(text, dictionary): def unescape(x): # x = xml_unescape(x) # x = x.strip() x = x.replace('&dot;', '.') x = x.replace('&sbo;', '[') x = x.replace('&sbc;', ']') x = x.replace('&colon;', ':') return x splitted_text = text.split('|') if splitted_text[0] in dictionary: value = dictionary.get(splitted_text[0]) for action in splitted_text[1:]: match = ACTION_REGEX.match(action) if match: action, params = match.groups() params = [unescape(x) for x in params.split(',')] if action == "ws": value = value.replace(' ', params[0]) elif action == "replace": value = 
value.replace(params[0], params[1]) elif action == "text_to_number": value = text_to_number(value) elif action == "number_to_text": value = number_to_text(value) else: pass return value return None def apply_parameters(text, parameters): while True: try: text = text.format(**parameters) except KeyError, e: # Auto generate missing parameters if possible missing_key = e.args[0] new_val = apply_text_actions(missing_key, parameters) if new_val is not None: parameters = dict(parameters) parameters[missing_key] = new_val else: raise e else: return text
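# Minimal usage sketch (illustrative, not part of the original module) of the "key|action(args)"
# placeholder syntax resolved by apply_parameters/apply_text_actions; the parameter values are
# hypothetical.
if __name__ == '__main__':
    params = {'title': u'some show', 'season': u'three'}
    print(apply_parameters(u'{title|ws(+)} s{season|text_to_number()}', params))
    # -> some+show s3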
py
1a5a97ee3220a25871110fc7f6c07e33fee2e060
from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_login import LoginManager from dotenv import load_dotenv import os # init SQLAlchemy so we can use it later in our models db = SQLAlchemy() def create_app(): app = Flask(__name__) # with app.app_context(): basedir = os.path.abspath(os.path.dirname(__file__)) load_dotenv(os.path.join(basedir, '.env')) app.config['SERVER_NAME'] = 'local.docker:5000' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY') or 'no_secret_key_set' app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('SQLALCHEMY_DATABASE_URI') or \ 'sqlite:///' + os.path.join(basedir, 'db/threatnote.db') app.config['OTX_API_KEY'] = os.environ.get('OTX_API_KEY') app.config['SHODAN_API_KEY'] = os.environ.get('SHODAN_API_KEY') app.config['RISKIQ_USERNAME'] = os.environ.get('RISKIQ_USERNAME') app.config['RISKIQ_KEY'] = os.environ.get('RISKIQ_KEY') app.config['GREYNOISE_API_KEY'] = os.environ.get('GREYNOISE_API_KEY') app.config['EMAILREP_API_KEY'] = os.environ.get('EMAILREP_API_KEY') app.config['VT_API_KEY'] = os.environ.get('VT_API_KEY') app.config['MISP_API_KEY'] = os.environ.get('MISP_API_KEY') app.config['MISP_URL'] = os.environ.get('MISP_URL') app.config['HIBP_API_KEY'] = os.environ.get('HIBP_API_KEY') db.init_app(app) login_manager = LoginManager() login_manager.login_view = 'auth.login' login_manager.init_app(app) from models import User @login_manager.user_loader def load_user(user_id): # since the user_id is just the primary key of our user table, use it in the query for the user return User.query.get(int(user_id)) # blueprint for auth routes in our app from auth import auth as auth_blueprint app.register_blueprint(auth_blueprint) # blueprint for non-auth parts of app from main import main as main_blueprint app.register_blueprint(main_blueprint) return app
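# Minimal usage sketch (illustrative, not part of the original file); kept commented out because it
# assumes the sibling models/auth/main modules and a .env file are present, and the import path of
# this factory module is an assumption.
#
# from app import create_app
# app = create_app()
# app.run(host='0.0.0.0', port=5000)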
py
1a5a97f81729b4a1c771bc7e65fba4ec30969b76
######## # Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. from pkgutil import extend_path __path__ = extend_path(__path__, __name__)
py
1a5a98150fcdb895bfd56410cc5f2a90b0d7c3c4
# Copyright 2018 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from rest_framework import serializers class PnfInstanceSerializer(serializers.Serializer): pnfId = serializers.CharField(help_text="Identifier of the PNF.", required=True, allow_null=False) pnfName = serializers.CharField(help_text="Name of the PNF.", required=True, allow_null=True) pnfdId = serializers.CharField(help_text="Identifier of the PNFD on which the PNF is based.", required=True, allow_null=True) pnfdInfoId = serializers.CharField(help_text="Identifier of the PNFD information object related to this PNF.", required=False, allow_null=True, allow_blank=True) pnfProfileId = serializers.CharField(help_text="Identifier of the related PnfProfile in the NSD on which the PNF is based.", required=True, allow_null=True) cpInfo = serializers.CharField(help_text="Information on the external CP of the PNF.", required=False, allow_null=True, allow_blank=True) class PnfInstancesSerializer(serializers.ListSerializer): child = PnfInstanceSerializer()
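# Minimal usage sketch (illustrative, not part of the original file); kept commented out because it
# needs a configured Django/DRF settings module, and the field values are hypothetical.
#
# serializer = PnfInstanceSerializer(data={
#     'pnfId': 'pnf-001',
#     'pnfName': 'Router-A',
#     'pnfdId': 'pnfd-1',
#     'pnfProfileId': 'profile-1',
# })
# serializer.is_valid()   # -> True; cpInfo and pnfdInfoId may be omitted or blank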
py
1a5a9912440283b78bccd458573c0103db9a2d67
#!/usr/bin/python # Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved. """Provide Module Description """ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# __author__ = "Andrew Hopkinson (Oracle Cloud Solutions A-Team)" __copyright__ = "Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved." __ekitversion__ = "@VERSION@" __ekitrelease__ = "@RELEASE@" __version__ = "1.0.0.0" __date__ = "@BUILDDATE@" __status__ = "Development" __module__ = "list-imagelist-entries" # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# import datetime import getopt import json import locale import logging import operator import os import requests import sys # Import utility methods from occsutils import callRESTApi from occsutils import getPassword from occsutils import printJSON from authenticate import authenticate # Define methods def listImagelistEntries(endpoint, resourcename, cookie): basepath = '/imagelist/' params = None data = None response = callRESTApi(endpoint, basepath, resourcename, data, 'GET', params, cookie) jsonResponse = json.loads(response.text) return jsonResponse # Read Module Arguments def readModuleArgs(opts, args): moduleArgs = {} moduleArgs['endpoint'] = None moduleArgs['user'] = None moduleArgs['password'] = None moduleArgs['pwdfile'] = None moduleArgs['cookie'] = None moduleArgs['resourcename'] = None # Read Module Command Line Arguments. for opt, arg in opts: if opt in ("-e", "--endpoint"): moduleArgs['endpoint'] = arg elif opt in ("-u", "--user"): moduleArgs['user'] = arg elif opt in ("-p", "--password"): moduleArgs['password'] = arg elif opt in ("-P", "--pwdfile"): moduleArgs['pwdfile'] = arg elif opt in ("-R", "--resourcename"): moduleArgs['resourcename'] = arg elif opt in ("-C", "--cookie"): moduleArgs['cookie'] = arg return moduleArgs # Main processing function def main(argv): # Configure Parameters and Options options = 'e:u:p:P:R:C:' longOptions = ['endpoint=', 'user=', 'password=', 'pwdfile=', 'resourcename=', 'cookie='] # Get Options & Arguments try: opts, args = getopt.getopt(argv, options, longOptions) # Read Module Arguments moduleArgs = readModuleArgs(opts, args) if moduleArgs['cookie'] is None and moduleArgs['endpoint'] is not None and moduleArgs['user'] is not None: if moduleArgs['password'] is None and moduleArgs['pwdfile'] is None: moduleArgs['password'] = getPassword(moduleArgs['user']) elif moduleArgs['pwdfile'] is not None: with open(moduleArgs['pwdfile'], 'r') as f: moduleArgs['password'] = f.read().rstrip('\n') moduleArgs['cookie'] = authenticate(moduleArgs['endpoint'], moduleArgs['user'], moduleArgs['password']) if moduleArgs['cookie'] is not None: jsonObj = listImagelistEntries(moduleArgs['endpoint'], moduleArgs['resourcename'], moduleArgs['cookie']) printJSON(jsonObj) else: print ('Incorrect parameters') except getopt.GetoptError: usage() except Exception as e: print('Unknown Exception please check log file') logging.exception(e) sys.exit(1) return # Main function to kick off processing if __name__ == "__main__": main(sys.argv[1:])
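# Illustrative invocation (not part of the original script); the endpoint, user, password file and
# resource name are hypothetical placeholders.
#
#   python list-imagelist-entries.py \
#       -e https://compute.example.oraclecloud.com \
#       -u /Compute-acme/[email protected] \
#       -P ~/.opc/password.txt \
#       -R /Compute-acme/[email protected]/my_imagelist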
py
1a5a9921d7432377c313fda1e0e27cd0866dd3f8
# Imports from os import path from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init def _weights_init(m): classname = m.__class__.__name__ if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d): init.kaiming_normal_(m.weight) class LambdaLayer(nn.Module): def __init__(self, lambd): super(LambdaLayer, self).__init__() self.lambd = lambd def forward(self, x): return self.lambd(x) class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, option="A"): super(BasicBlock, self).__init__() self.conv1 = nn.Conv2d( in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False ) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=1, padding=1, bias=False ) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != planes: if option == "A": """ For CIFAR10 ResNet paper uses option A. """ self.shortcut = LambdaLayer( lambda x: F.pad( x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0, ) ) elif option == "B": self.shortcut = nn.Sequential( nn.Conv2d( in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False, ), nn.BatchNorm2d(self.expansion * planes), ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) out = F.relu(out) return out class BBN_ResNet_Cifar(nn.Module): """ResNet32 from the "BBN: Bilateral-Branch Network with Cumulative Learning for Long-Tailed Visual Recognition (CVPR 2020)" """ def __init__(self, block, num_blocks): """Initialize #FIXME Args: block ([type]): [description] num_blocks ([type]): [description] """ super(BBN_ResNet_Cifar, self).__init__() self.in_planes = 16 self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(16) self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 64, num_blocks[2] - 1, stride=2) self.cb_block = block(self.in_planes, self.in_planes, stride=1) self.rb_block = block(self.in_planes, self.in_planes, stride=1) self.apply(_weights_init) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) def load_model(self, pretrain_dir): """Load a pre-trained model Args: pretrain_dir (str): path of pretrained model """ print(f"Loading Backbone pretrain model from {pretrain_dir}......") model_dict = self.state_dict() pretrain_dict = torch.load(pretrain_dir)["state_dict_best"]["feat_model"] new_dict = OrderedDict() # Removing FC and Classifier layers for k, v in pretrain_dict.items(): if k.startswith("module"): k = k[7:] if "fc" not in k and "classifier" not in k: new_dict[k] = v model_dict.update(new_dict) self.load_state_dict(model_dict) print("Backbone model has been loaded......") def _make_layer(self, block, planes, num_blocks, stride, add_flag=True): strides = [stride] + [1] * (num_blocks - 1) layers = [] for stride in strides: layers.append(block(self.in_planes, planes, stride)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x, **kwargs): out = F.relu(self.bn1(self.conv1(x))) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) if "feature_cb" in kwargs: out = self.cb_block(out) return out elif "feature_rb" in kwargs: out = self.rb_block(out) return out out1 = self.cb_block(out) out2 = self.rb_block(out) out = torch.cat((out1, out2), dim=1) out = 
self.avgpool(out)
        out = out.view(out.shape[0], -1)
        return out


def create_model(pretrain=False, pretrain_dir=None, *args):
    """Initialize or load the model.

    Args:
        pretrain (bool, optional): Whether to load a pre-trained model. Defaults to False.
        pretrain_dir (str, optional): Directory of the pre-trained model. Defaults to None.

    Returns:
        BBN_ResNet_Cifar: The backbone model.
    """
    print("Loading ResNet 32 Feature Model.")
    resnet32 = BBN_ResNet_Cifar(BasicBlock, [5, 5, 5])
    if pretrain:
        if path.exists(pretrain_dir):
            print("===> Load Pretrain Initialization for ResNet32")
            resnet32.load_model(pretrain_dir=pretrain_dir)
        else:
            raise Exception(f"Pretrain path doesn't exist!!-{pretrain_dir}")
    else:
        print("===> Train backbone from scratch")
    return resnet32
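

# Illustrative usage sketch (not part of the original module). It assumes a
# CIFAR-sized input (3x32x32); the 128-dim feature vector comes from
# concatenating the two 64-channel bilateral branches before global average
# pooling.
if __name__ == "__main__":
    model = create_model(pretrain=False)
    dummy = torch.randn(4, 3, 32, 32)          # batch of CIFAR-like images
    features = model(dummy)                    # shape: (4, 128)
    cb_feat = model(dummy, feature_cb=True)    # conventional-branch feature map
    print(features.shape, cb_feat.shape)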
py
1a5a9ad3e78d7bb0c5df90742370e263e0d4ecfe
# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Define API Jobs.""" import copy import threading from six.moves import http_client import google.api_core.future.polling from google.cloud import exceptions from google.cloud.exceptions import NotFound from google.cloud._helpers import _datetime_from_microseconds from google.cloud.bigquery.dataset import DatasetReference from google.cloud.bigquery.external_config import ExternalConfig from google.cloud.bigquery.query import _query_param_from_api_repr from google.cloud.bigquery.query import ArrayQueryParameter from google.cloud.bigquery.query import ScalarQueryParameter from google.cloud.bigquery.query import StructQueryParameter from google.cloud.bigquery.query import UDFResource from google.cloud.bigquery.schema import SchemaField from google.cloud.bigquery.table import EncryptionConfiguration from google.cloud.bigquery.table import TableReference from google.cloud.bigquery import _helpers from google.cloud.bigquery._helpers import DEFAULT_RETRY from google.cloud.bigquery._helpers import _int_or_none _DONE_STATE = 'DONE' _STOPPED_REASON = 'stopped' _TIMEOUT_BUFFER_SECS = 0.1 _ERROR_REASON_TO_EXCEPTION = { 'accessDenied': http_client.FORBIDDEN, 'backendError': http_client.INTERNAL_SERVER_ERROR, 'billingNotEnabled': http_client.FORBIDDEN, 'billingTierLimitExceeded': http_client.BAD_REQUEST, 'blocked': http_client.FORBIDDEN, 'duplicate': http_client.CONFLICT, 'internalError': http_client.INTERNAL_SERVER_ERROR, 'invalid': http_client.BAD_REQUEST, 'invalidQuery': http_client.BAD_REQUEST, 'notFound': http_client.NOT_FOUND, 'notImplemented': http_client.NOT_IMPLEMENTED, 'quotaExceeded': http_client.FORBIDDEN, 'rateLimitExceeded': http_client.FORBIDDEN, 'resourceInUse': http_client.BAD_REQUEST, 'resourcesExceeded': http_client.BAD_REQUEST, 'responseTooLarge': http_client.FORBIDDEN, 'stopped': http_client.OK, 'tableUnavailable': http_client.BAD_REQUEST, } def _error_result_to_exception(error_result): """Maps BigQuery error reasons to an exception. The reasons and their matching HTTP status codes are documented on the `troubleshooting errors`_ page. .. _troubleshooting errors: https://cloud.google.com/bigquery\ /troubleshooting-errors :type error_result: Mapping[str, str] :param error_result: The error result from BigQuery. :rtype google.cloud.exceptions.GoogleCloudError: :returns: The mapped exception. """ reason = error_result.get('reason') status_code = _ERROR_REASON_TO_EXCEPTION.get( reason, http_client.INTERNAL_SERVER_ERROR) return exceptions.from_http_status( status_code, error_result.get('message', ''), errors=[error_result]) class Compression(object): """The compression type to use for exported files. Possible values include `GZIP`, `DEFLATE`, `SNAPPY`, and `NONE`. The default value is `NONE`. `DEFLATE` and `SNAPPY` are only supported for Avro. """ GZIP = 'GZIP' DEFLATE = 'DEFLATE' SNAPPY = 'SNAPPY' NONE = 'NONE' class CreateDisposition(object): """Specifies whether the job is allowed to create new tables. 
The following values are supported: `CREATE_IF_NEEDED`: If the table does not exist, BigQuery creates the table. `CREATE_NEVER`: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is `CREATE_IF_NEEDED`. Creation, truncation and append actions occur as one atomic update upon job completion. """ CREATE_IF_NEEDED = 'CREATE_IF_NEEDED' CREATE_NEVER = 'CREATE_NEVER' class DestinationFormat(object): """The exported file format. Possible values include `CSV`, `NEWLINE_DELIMITED_JSON` and `AVRO`. The default value is `CSV`. Tables with nested or repeated fields cannot be exported as CSV. """ CSV = 'CSV' NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' AVRO = 'AVRO' class Encoding(object): """The character encoding of the data. The supported values are `UTF_8` corresponding to `'UTF-8'` or `ISO_8859_1` corresponding to `'ISO-8559-1'`. The default value is `UTF_8`. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. """ UTF_8 = 'UTF-8' ISO_8559_1 = 'ISO-8559-1' class QueryPriority(object): """Specifies a priority for the query. Possible values include `INTERACTIVE` and `BATCH`. The default value is `INTERACTIVE`. """ INTERACTIVE = 'INTERACTIVE' BATCH = 'BATCH' class SourceFormat(object): """The format of the data files. For CSV files, specify `CSV`. For datastore backups, specify `DATASTORE_BACKUP`. For newline-delimited json, specify `NEWLINE_DELIMITED_JSON`. For Avro, specify `AVRO`. For Parquet, specify `PARQUET`. The default value is `CSV`. """ CSV = 'CSV' DATASTORE_BACKUP = 'DATASTORE_BACKUP' NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' AVRO = 'AVRO' PARQUET = 'PARQUET' class WriteDisposition(object): """Specifies the action that occurs if destination table already exists. The following values are supported: `WRITE_TRUNCATE`: If the table already exists, BigQuery overwrites the table data. `WRITE_APPEND`: If the table already exists, BigQuery appends the data to the table. `WRITE_EMPTY`: If the table already exists and contains data, a 'duplicate' error is returned in the job result. The default value is `WRITE_APPEND`. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. """ WRITE_APPEND = 'WRITE_APPEND' WRITE_TRUNCATE = 'WRITE_TRUNCATE' WRITE_EMPTY = 'WRITE_EMPTY' class _AsyncJob(google.api_core.future.polling.PollingFuture): """Base class for asynchronous jobs. :type job_id: str :param job_id: the job's ID in the project associated with the client. :type client: :class:`google.cloud.bigquery.client.Client` :param client: A client which holds credentials and project configuration. """ def __init__(self, job_id, client): super(_AsyncJob, self).__init__() self.job_id = job_id self._client = client self._properties = {} self._result_set = False self._completion_lock = threading.Lock() @property def project(self): """Project bound to the job. :rtype: str :returns: the project (derived from the client). """ return self._client.project def _require_client(self, client): """Check client or verify over-ride. :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :rtype: :class:`google.cloud.bigquery.client.Client` :returns: The client passed in or the currently bound client. 
""" if client is None: client = self._client return client @property def job_type(self): """Type of job :rtype: str :returns: one of 'load', 'copy', 'extract', 'query' """ return self._JOB_TYPE @property def path(self): """URL path for the job's APIs. :rtype: str :returns: the path based on project and job ID. """ return '/projects/%s/jobs/%s' % (self.project, self.job_id) @property def etag(self): """ETag for the job resource. :rtype: str, or ``NoneType`` :returns: the ETag (None until set from the server). """ return self._properties.get('etag') @property def self_link(self): """URL for the job resource. :rtype: str, or ``NoneType`` :returns: the URL (None until set from the server). """ return self._properties.get('selfLink') @property def user_email(self): """E-mail address of user who submitted the job. :rtype: str, or ``NoneType`` :returns: the URL (None until set from the server). """ return self._properties.get('user_email') @property def created(self): """Datetime at which the job was created. :rtype: ``datetime.datetime``, or ``NoneType`` :returns: the creation time (None until set from the server). """ statistics = self._properties.get('statistics') if statistics is not None: millis = statistics.get('creationTime') if millis is not None: return _datetime_from_microseconds(millis * 1000.0) @property def started(self): """Datetime at which the job was started. :rtype: ``datetime.datetime``, or ``NoneType`` :returns: the start time (None until set from the server). """ statistics = self._properties.get('statistics') if statistics is not None: millis = statistics.get('startTime') if millis is not None: return _datetime_from_microseconds(millis * 1000.0) @property def ended(self): """Datetime at which the job finished. :rtype: ``datetime.datetime``, or ``NoneType`` :returns: the end time (None until set from the server). """ statistics = self._properties.get('statistics') if statistics is not None: millis = statistics.get('endTime') if millis is not None: return _datetime_from_microseconds(millis * 1000.0) def _job_statistics(self): """Helper for job-type specific statistics-based properties.""" statistics = self._properties.get('statistics', {}) return statistics.get(self._JOB_TYPE, {}) @property def error_result(self): """Error information about the job as a whole. :rtype: mapping, or ``NoneType`` :returns: the error information (None until set from the server). """ status = self._properties.get('status') if status is not None: return status.get('errorResult') @property def errors(self): """Information about individual errors generated by the job. :rtype: list of mappings, or ``NoneType`` :returns: the error information (None until set from the server). """ status = self._properties.get('status') if status is not None: return status.get('errors') @property def state(self): """Status of the job. :rtype: str, or ``NoneType`` :returns: the state (None until set from the server). 
""" status = self._properties.get('status') if status is not None: return status.get('state') def _scrub_local_properties(self, cleaned): """Helper: handle subclass properties in cleaned.""" pass def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" raise NotImplementedError("Abstract") def _set_properties(self, api_response): """Update properties from resource in body of ``api_response`` :type api_response: dict :param api_response: response returned from an API call """ cleaned = api_response.copy() self._scrub_local_properties(cleaned) statistics = cleaned.get('statistics', {}) if 'creationTime' in statistics: statistics['creationTime'] = float(statistics['creationTime']) if 'startTime' in statistics: statistics['startTime'] = float(statistics['startTime']) if 'endTime' in statistics: statistics['endTime'] = float(statistics['endTime']) self._properties.clear() self._properties.update(cleaned) self._copy_configuration_properties(cleaned['configuration']) # For Future interface self._set_future_result() @classmethod def _get_resource_config(cls, resource): """Helper for :meth:`from_api_repr` :type resource: dict :param resource: resource for the job :rtype: dict :returns: tuple (string, dict), where the first element is the job ID and the second contains job-specific configuration. :raises: :class:`KeyError` if the resource has no identifier, or is missing the appropriate configuration. """ if ('jobReference' not in resource or 'jobId' not in resource['jobReference']): raise KeyError('Resource lacks required identity information: ' '["jobReference"]["jobId"]') job_id = resource['jobReference']['jobId'] if ('configuration' not in resource or cls._JOB_TYPE not in resource['configuration']): raise KeyError('Resource lacks required configuration: ' '["configuration"]["%s"]' % cls._JOB_TYPE) return job_id, resource['configuration'] def _begin(self, client=None, retry=DEFAULT_RETRY): """API call: begin the job via a POST request See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :raises: :exc:`ValueError` if the job has already begin. """ if self.state is not None: raise ValueError("Job already begun.") client = self._require_client(client) path = '/projects/%s/jobs' % (self.project,) # jobs.insert is idempotent because we ensure that every new # job has an ID. api_response = client._call_api( retry, method='POST', path=path, data=self._build_resource()) self._set_properties(api_response) def exists(self, client=None, retry=DEFAULT_RETRY): """API call: test for the existence of the job via a GET request See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: bool :returns: Boolean indicating existence of the job. 
""" client = self._require_client(client) try: client._call_api(retry, method='GET', path=self.path, query_params={'fields': 'id'}) except NotFound: return False else: return True def reload(self, client=None, retry=DEFAULT_RETRY): """API call: refresh job properties via a GET request. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. """ client = self._require_client(client) api_response = client._call_api(retry, method='GET', path=self.path) self._set_properties(api_response) def cancel(self, client=None): """API call: cancel job via a POST request See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :rtype: bool :returns: Boolean indicating that the cancel request was sent. """ client = self._require_client(client) api_response = client._connection.api_request( method='POST', path='%s/cancel' % (self.path,)) self._set_properties(api_response['job']) # The Future interface requires that we return True if the *attempt* # to cancel was successful. return True # The following methods implement the PollingFuture interface. Note that # the methods above are from the pre-Future interface and are left for # compatibility. The only "overloaded" method is :meth:`cancel`, which # satisfies both interfaces. def _set_future_result(self): """Set the result or exception from the job if it is complete.""" # This must be done in a lock to prevent the polling thread # and main thread from both executing the completion logic # at the same time. with self._completion_lock: # If the operation isn't complete or if the result has already been # set, do not call set_result/set_exception again. # Note: self._result_set is set to True in set_result and # set_exception, in case those methods are invoked directly. if self.state != _DONE_STATE or self._result_set: return if self.error_result is not None: exception = _error_result_to_exception(self.error_result) self.set_exception(exception) else: self.set_result(self) def done(self, retry=DEFAULT_RETRY): """Refresh the job and checks if it is complete. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: bool :returns: True if the job is complete, False otherwise. """ # Do not refresh is the state is already done, as the job will not # change once complete. if self.state != _DONE_STATE: self.reload(retry=retry) return self.state == _DONE_STATE def result(self, timeout=None): """Start the job and wait for it to complete and get the result. :type timeout: float :param timeout: How long (in seconds) to wait for job to complete before raising a :class:`concurrent.futures.TimeoutError`. :rtype: _AsyncJob :returns: This instance. :raises: :class:`~google.cloud.exceptions.GoogleCloudError` if the job failed or :class:`concurrent.futures.TimeoutError` if the job did not complete in the given timeout. """ if self.state is None: self._begin() # TODO: modify PollingFuture so it can pass a retry argument to done(). return super(_AsyncJob, self).result(timeout=timeout) def cancelled(self): """Check if the job has been cancelled. 
This always returns False. It's not possible to check if a job was cancelled in the API. This method is here to satisfy the interface for :class:`google.api_core.future.Future`. :rtype: bool :returns: False """ return (self.error_result is not None and self.error_result.get('reason') == _STOPPED_REASON) class _JobConfig(object): """Abstract base class for job configuration objects. Arguments: job_type (str): The key to use for the job configuration. """ def __init__(self, job_type): self._job_type = job_type self._properties = {job_type: {}} def _get_sub_prop(self, key, default=None): """Get a value in the ``self._properties[self._job_type]`` dictionary. Most job properties are inside the dictionary related to the job type (e.g. 'copy', 'extract', 'load', 'query'). Use this method to access those properties:: self._get_sub_prop('destinationTable') This is equivalent to using the ``_helper.get_sub_prop`` function:: _helper.get_sub_prop( self._properties, ['query', 'destinationTable']) Arguments: key (str): Key for the value to get in the ``self._properties[self._job_type]`` dictionary. default (object): (Optional) Default value to return if the key is not found. Defaults to ``None``. Returns: object: The value if present or the default. """ return _helpers.get_sub_prop( self._properties, [self._job_type, key], default=default) def _set_sub_prop(self, key, value): """Set a value in the ``self._properties[self._job_type]`` dictionary. Most job properties are inside the dictionary related to the job type (e.g. 'copy', 'extract', 'load', 'query'). Use this method to set those properties:: self._set_sub_prop('useLegacySql', False) This is equivalent to using the ``_helper.set_sub_prop`` function:: _helper.set_sub_prop( self._properties, ['query', 'useLegacySql'], False) Arguments: key (str): Key to set in the ``self._properties[self._job_type]`` dictionary. value (object): Value to set. """ _helpers.set_sub_prop(self._properties, [self._job_type, key], value) def to_api_repr(self): """Build an API representation of the job config. :rtype: dict :returns: A dictionary in the format used by the BigQuery API. """ return copy.deepcopy(self._properties) @classmethod def from_api_repr(cls, resource): """Factory: construct a job configuration given its API representation :type resource: dict :param resource: An extract job configuration in the same representation as is returned from the API. :rtype: :class:`google.cloud.bigquery.job._JobConfig` :returns: Configuration parsed from ``resource``. """ config = cls() config._properties = copy.deepcopy(resource) return config class LoadJobConfig(_JobConfig): """Configuration options for load jobs. All properties in this class are optional. Values which are ``None`` -> server defaults. """ def __init__(self): super(LoadJobConfig, self).__init__('load') @property def allow_jagged_rows(self): """bool: Allow missing trailing optional columns (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.allowJaggedRows """ return self._get_sub_prop('allowJaggedRows') @allow_jagged_rows.setter def allow_jagged_rows(self, value): self._set_sub_prop('allowJaggedRows', value) @property def allow_quoted_newlines(self): """bool: Allow quoted data containing newline characters (CSV only). 
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.allowQuotedNewlines """ return self._get_sub_prop('allowQuotedNewlines') @allow_quoted_newlines.setter def allow_quoted_newlines(self, value): self._set_sub_prop('allowQuotedNewlines', value) @property def autodetect(self): """bool: Automatically infer the schema from a sample of the data. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.autodetect """ return self._get_sub_prop('autodetect') @autodetect.setter def autodetect(self, value): self._set_sub_prop('autodetect', value) @property def create_disposition(self): """google.cloud.bigquery.job.CreateDisposition: Specifies behavior for creating tables. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.createDisposition """ return self._get_sub_prop('createDisposition') @create_disposition.setter def create_disposition(self, value): self._set_sub_prop('createDisposition', value) @property def encoding(self): """google.cloud.bigquery.job.Encoding: The character encoding of the data. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.encoding """ return self._get_sub_prop('encoding') @encoding.setter def encoding(self, value): self._set_sub_prop('encoding', value) @property def field_delimiter(self): """str: The separator for fields in a CSV file. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.fieldDelimiter """ return self._get_sub_prop('fieldDelimiter') @field_delimiter.setter def field_delimiter(self, value): self._set_sub_prop('fieldDelimiter', value) @property def ignore_unknown_values(self): """bool: Ignore extra values not represented in the table schema. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.ignoreUnknownValues """ return self._get_sub_prop('ignoreUnknownValues') @ignore_unknown_values.setter def ignore_unknown_values(self, value): self._set_sub_prop('ignoreUnknownValues', value) @property def max_bad_records(self): """int: Number of invalid rows to ignore. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.maxBadRecords """ return self._get_sub_prop('maxBadRecords') @max_bad_records.setter def max_bad_records(self, value): self._set_sub_prop('maxBadRecords', value) @property def null_marker(self): """str: Represents a null value (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.nullMarker """ return self._get_sub_prop('nullMarker') @null_marker.setter def null_marker(self, value): self._set_sub_prop('nullMarker', value) @property def quote_character(self): """str: Character used to quote data sections (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.quote """ return self._get_sub_prop('quote') @quote_character.setter def quote_character(self, value): self._set_sub_prop('quote', value) @property def skip_leading_rows(self): """int: Number of rows to skip when reading data (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.skipLeadingRows """ return _int_or_none(self._get_sub_prop('skipLeadingRows')) @skip_leading_rows.setter def skip_leading_rows(self, value): self._set_sub_prop('skipLeadingRows', str(value)) @property def source_format(self): """google.cloud.bigquery.job.SourceFormat: File format of the data. 
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.sourceFormat """ return self._get_sub_prop('sourceFormat') @source_format.setter def source_format(self, value): self._set_sub_prop('sourceFormat', value) @property def write_disposition(self): """google.cloud.bigquery.job.WriteDisposition: Action that occurs if the destination table already exists. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.writeDisposition """ return self._get_sub_prop('writeDisposition') @write_disposition.setter def write_disposition(self, value): self._set_sub_prop('writeDisposition', value) @property def schema(self): """List[google.cloud.bigquery.schema.SchemaField]: Schema of the destination table. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema """ schema = _helpers.get_sub_prop( self._properties, ['load', 'schema', 'fields']) if schema is None: return return [SchemaField.from_api_repr(field) for field in schema] @schema.setter def schema(self, value): if not all(hasattr(field, 'to_api_repr') for field in value): raise ValueError('Schema items must be fields') _helpers.set_sub_prop( self._properties, ['load', 'schema', 'fields'], [field.to_api_repr() for field in value]) @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or ``None`` if using default encryption. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationEncryptionConfiguration """ prop = self._get_sub_prop('destinationEncryptionConfiguration') if prop is not None: prop = EncryptionConfiguration.from_api_repr(prop) return prop @destination_encryption_configuration.setter def destination_encryption_configuration(self, value): api_repr = value if value is not None: api_repr = value.to_api_repr() self._set_sub_prop('destinationEncryptionConfiguration', api_repr) class LoadJob(_AsyncJob): """Asynchronous job for loading data into a table. Can load from Google Cloud Storage URIs or from a file. :type job_id: str :param job_id: the job's ID :type source_uris: sequence of string or ``NoneType`` :param source_uris: URIs of one or more data files to be loaded. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.sourceUris for supported URI formats. Pass None for jobs that load from a file. :type destination: :class:`google.cloud.bigquery.table.TableReference` :param destination: reference to table into which data is to be loaded. :type client: :class:`google.cloud.bigquery.client.Client` :param client: A client which holds credentials and project configuration for the dataset (which requires a project). """ _JOB_TYPE = 'load' def __init__(self, job_id, source_uris, destination, client, job_config=None): super(LoadJob, self).__init__(job_id, client) if job_config is None: job_config = LoadJobConfig() self.source_uris = source_uris self.destination = destination self._configuration = job_config @property def allow_jagged_rows(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.allow_jagged_rows`. """ return self._configuration.allow_jagged_rows @property def allow_quoted_newlines(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.allow_quoted_newlines`. 
""" return self._configuration.allow_quoted_newlines @property def autodetect(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.autodetect`. """ return self._configuration.autodetect @property def create_disposition(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.create_disposition`. """ return self._configuration.create_disposition @property def encoding(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.encoding`. """ return self._configuration.encoding @property def field_delimiter(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.field_delimiter`. """ return self._configuration.field_delimiter @property def ignore_unknown_values(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.ignore_unknown_values`. """ return self._configuration.ignore_unknown_values @property def max_bad_records(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.max_bad_records`. """ return self._configuration.max_bad_records @property def null_marker(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.null_marker`. """ return self._configuration.null_marker @property def quote_character(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.quote_character`. """ return self._configuration.quote_character @property def skip_leading_rows(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.skip_leading_rows`. """ return self._configuration.skip_leading_rows @property def source_format(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.source_format`. """ return self._configuration.source_format @property def write_disposition(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.write_disposition`. """ return self._configuration.write_disposition @property def schema(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.schema`. """ return self._configuration.schema @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or ``None`` if using default encryption. See :attr:`google.cloud.bigquery.job.LoadJobConfig.destination_encryption_configuration`. """ return self._configuration.destination_encryption_configuration @property def input_file_bytes(self): """Count of bytes loaded from source files. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). :raises: ValueError for invalid value types. """ statistics = self._properties.get('statistics') if statistics is not None: return int(statistics['load']['inputFileBytes']) @property def input_files(self): """Count of source files. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). """ statistics = self._properties.get('statistics') if statistics is not None: return int(statistics['load']['inputFiles']) @property def output_bytes(self): """Count of bytes saved to destination table. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). """ statistics = self._properties.get('statistics') if statistics is not None: return int(statistics['load']['outputBytes']) @property def output_rows(self): """Count of rows saved to destination table. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). 
""" statistics = self._properties.get('statistics') if statistics is not None: return int(statistics['load']['outputRows']) def _build_resource(self): """Generate a resource for :meth:`begin`.""" configuration = self._configuration.to_api_repr() if self.source_uris is not None: _helpers.set_sub_prop( configuration, ['load', 'sourceUris'], self.source_uris) _helpers.set_sub_prop( configuration, ['load', 'destinationTable'], self.destination.to_api_repr()) return { 'jobReference': { 'projectId': self.project, 'jobId': self.job_id, }, 'configuration': configuration, } def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" self._configuration._properties = copy.deepcopy(configuration) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.LoadJob` :returns: Job parsed from ``resource``. """ job_id, config_resource = cls._get_resource_config(resource) config = LoadJobConfig.from_api_repr(config_resource) dest_config = _helpers.get_sub_prop( config_resource, ['load', 'destinationTable']) ds_ref = DatasetReference(dest_config['projectId'], dest_config['datasetId'],) destination = TableReference(ds_ref, dest_config['tableId']) # sourceUris will be absent if this is a file upload. source_uris = _helpers.get_sub_prop( config_resource, ['load', 'sourceUris']) job = cls(job_id, source_uris, destination, client, config) job._set_properties(resource) return job class CopyJobConfig(_JobConfig): """Configuration options for copy jobs. All properties in this class are optional. Values which are ``None`` -> server defaults. """ def __init__(self): super(CopyJobConfig, self).__init__('copy') @property def create_disposition(self): """google.cloud.bigquery.job.CreateDisposition: Specifies behavior for creating tables. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.createDisposition """ return self._get_sub_prop('createDisposition') @create_disposition.setter def create_disposition(self, value): self._set_sub_prop('createDisposition', value) @property def write_disposition(self): """google.cloud.bigquery.job.WriteDisposition: Action that occurs if the destination table already exists. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.writeDisposition """ return self._get_sub_prop('writeDisposition') @write_disposition.setter def write_disposition(self, value): self._set_sub_prop('writeDisposition', value) @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or ``None`` if using default encryption. 
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.destinationEncryptionConfiguration """ prop = self._get_sub_prop('destinationEncryptionConfiguration') if prop is not None: prop = EncryptionConfiguration.from_api_repr(prop) return prop @destination_encryption_configuration.setter def destination_encryption_configuration(self, value): api_repr = value if value is not None: api_repr = value.to_api_repr() self._set_sub_prop('destinationEncryptionConfiguration', api_repr) class CopyJob(_AsyncJob): """Asynchronous job: copy data into a table from other tables. :type job_id: str :param job_id: the job's ID, within the project belonging to ``client``. :type sources: list of :class:`google.cloud.bigquery.table.TableReference` :param sources: Table into which data is to be loaded. :type destination: :class:`google.cloud.bigquery.table.TableReference` :param destination: Table into which data is to be loaded. :type client: :class:`google.cloud.bigquery.client.Client` :param client: A client which holds credentials and project configuration for the dataset (which requires a project). :type job_config: :class:`~google.cloud.bigquery.job.CopyJobConfig` :param job_config: (Optional) Extra configuration options for the copy job. """ _JOB_TYPE = 'copy' def __init__(self, job_id, sources, destination, client, job_config=None): super(CopyJob, self).__init__(job_id, client) if job_config is None: job_config = CopyJobConfig() self.destination = destination self.sources = sources self._configuration = job_config @property def create_disposition(self): """See :attr:`google.cloud.bigquery.job.CopyJobConfig.create_disposition`. """ return self._configuration.create_disposition @property def write_disposition(self): """See :attr:`google.cloud.bigquery.job.CopyJobConfig.write_disposition`. """ return self._configuration.write_disposition @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or ``None`` if using default encryption. See :attr:`google.cloud.bigquery.job.CopyJobConfig.destination_encryption_configuration`. """ return self._configuration.destination_encryption_configuration def _build_resource(self): """Generate a resource for :meth:`begin`.""" source_refs = [{ 'projectId': table.project, 'datasetId': table.dataset_id, 'tableId': table.table_id, } for table in self.sources] configuration = self._configuration.to_api_repr() _helpers.set_sub_prop( configuration, ['copy', 'sourceTables'], source_refs) _helpers.set_sub_prop( configuration, ['copy', 'destinationTable'], { 'projectId': self.destination.project, 'datasetId': self.destination.dataset_id, 'tableId': self.destination.table_id, }) return { 'jobReference': { 'projectId': self.project, 'jobId': self.job_id, }, 'configuration': configuration, } def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" self._configuration._properties = copy.deepcopy(configuration) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. 
:type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.CopyJob` :returns: Job parsed from ``resource``. """ job_id, config_resource = cls._get_resource_config(resource) config = CopyJobConfig.from_api_repr(config_resource) # Copy required fields to the job. copy_resource = config_resource['copy'] destination = TableReference.from_api_repr( copy_resource['destinationTable']) sources = [] source_configs = copy_resource.get('sourceTables') if source_configs is None: single = copy_resource.get('sourceTable') if single is None: raise KeyError( "Resource missing 'sourceTables' / 'sourceTable'") source_configs = [single] for source_config in source_configs: table_ref = TableReference.from_api_repr(source_config) sources.append(table_ref) job = cls( job_id, sources, destination, client=client, job_config=config) job._set_properties(resource) return job class ExtractJobConfig(_JobConfig): """Configuration options for extract jobs. All properties in this class are optional. Values which are ``None`` -> server defaults. """ def __init__(self): super(ExtractJobConfig, self).__init__('extract') @property def compression(self): """google.cloud.bigquery.job.Compression: Compression type to use for exported files. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.compression """ return self._get_sub_prop('compression') @compression.setter def compression(self, value): self._set_sub_prop('compression', value) @property def destination_format(self): """google.cloud.bigquery.job.DestinationFormat: Exported file format. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.destinationFormat """ return self._get_sub_prop('destinationFormat') @destination_format.setter def destination_format(self, value): self._set_sub_prop('destinationFormat', value) @property def field_delimiter(self): """str: Delimiter to use between fields in the exported data. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.fieldDelimiter """ return self._get_sub_prop('fieldDelimiter') @field_delimiter.setter def field_delimiter(self, value): self._set_sub_prop('fieldDelimiter', value) @property def print_header(self): """bool: Print a header row in the exported data. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.printHeader """ return self._get_sub_prop('printHeader') @print_header.setter def print_header(self, value): self._set_sub_prop('printHeader', value) class ExtractJob(_AsyncJob): """Asynchronous job: extract data from a table into Cloud Storage. :type job_id: str :param job_id: the job's ID :type source: :class:`google.cloud.bigquery.table.TableReference` :param source: Table into which data is to be loaded. :type destination_uris: list of string :param destination_uris: URIs describing where the extracted data will be written in Cloud Storage, using the format ``gs://<bucket_name>/<object_name_or_glob>``. :type client: :class:`google.cloud.bigquery.client.Client` :param client: A client which holds credentials and project configuration. :type job_config: :class:`~google.cloud.bigquery.job.ExtractJobConfig` :param job_config: (Optional) Extra configuration options for the extract job. 
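    Example (illustrative sketch; ``client``, ``table_ref`` and the bucket
    name are assumed to exist already)::

        destination_uri = 'gs://some-bucket/exports/table-*.csv'
        extract_job = client.extract_table(table_ref, destination_uri)
        extract_job.result()  # block until the export finishes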
""" _JOB_TYPE = 'extract' def __init__( self, job_id, source, destination_uris, client, job_config=None): super(ExtractJob, self).__init__(job_id, client) if job_config is None: job_config = ExtractJobConfig() self.source = source self.destination_uris = destination_uris self._configuration = job_config @property def compression(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.compression`. """ return self._configuration.compression @property def destination_format(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.destination_format`. """ return self._configuration.destination_format @property def field_delimiter(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.field_delimiter`. """ return self._configuration.field_delimiter @property def print_header(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.print_header`. """ return self._configuration.print_header @property def destination_uri_file_counts(self): """Return file counts from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.extract.destinationUriFileCounts :rtype: int or None :returns: number of DML rows affectd by the job, or None if job is not yet complete. """ result = self._job_statistics().get('destinationUriFileCounts') if result is not None: result = int(result) return result def _build_resource(self): """Generate a resource for :meth:`begin`.""" source_ref = { 'projectId': self.source.project, 'datasetId': self.source.dataset_id, 'tableId': self.source.table_id, } configuration = self._configuration.to_api_repr() _helpers.set_sub_prop( configuration, ['extract', 'sourceTable'], source_ref) _helpers.set_sub_prop( configuration, ['extract', 'destinationUris'], self.destination_uris) resource = { 'jobReference': { 'projectId': self.project, 'jobId': self.job_id, }, 'configuration': configuration, } return resource def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" self._configuration._properties = copy.deepcopy(configuration) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.ExtractJob` :returns: Job parsed from ``resource``. 
""" job_id, config_resource = cls._get_resource_config(resource) config = ExtractJobConfig.from_api_repr(config_resource) source_config = _helpers.get_sub_prop( config_resource, ['extract', 'sourceTable']) dataset = DatasetReference( source_config['projectId'], source_config['datasetId']) source = dataset.table(source_config['tableId']) destination_uris = _helpers.get_sub_prop( config_resource, ['extract', 'destinationUris']) job = cls( job_id, source, destination_uris, client=client, job_config=config) job._set_properties(resource) return job def _from_api_repr_query_parameters(resource): return [ _query_param_from_api_repr(mapping) for mapping in resource ] def _to_api_repr_query_parameters(value): return [ query_parameter.to_api_repr() for query_parameter in value ] def _from_api_repr_udf_resources(resource): udf_resources = [] for udf_mapping in resource: for udf_type, udf_value in udf_mapping.items(): udf_resources.append(UDFResource(udf_type, udf_value)) return udf_resources def _to_api_repr_udf_resources(value): return [ {udf_resource.udf_type: udf_resource.value} for udf_resource in value ] def _from_api_repr_table_defs(resource): return {k: ExternalConfig.from_api_repr(v) for k, v in resource.items()} def _to_api_repr_table_defs(value): return {k: ExternalConfig.to_api_repr(v) for k, v in value.items()} class QueryJobConfig(_JobConfig): """Configuration options for query jobs. All properties in this class are optional. Values which are ``None`` -> server defaults. """ def __init__(self): super(QueryJobConfig, self).__init__('query') @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or ``None`` if using default encryption. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationEncryptionConfiguration """ prop = self._get_sub_prop('destinationEncryptionConfiguration') if prop is not None: prop = EncryptionConfiguration.from_api_repr(prop) return prop @destination_encryption_configuration.setter def destination_encryption_configuration(self, value): api_repr = value if value is not None: api_repr = value.to_api_repr() self._set_sub_prop('destinationEncryptionConfiguration', api_repr) @property def allow_large_results(self): """bool: Allow large query results tables (legacy SQL, only) See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.allowLargeResults """ return self._get_sub_prop('allowLargeResults') @allow_large_results.setter def allow_large_results(self, value): self._set_sub_prop('allowLargeResults', value) @property def create_disposition(self): """google.cloud.bigquery.job.CreateDisposition: Specifies behavior for creating tables. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.createDisposition """ return self._get_sub_prop('createDisposition') @create_disposition.setter def create_disposition(self, value): self._set_sub_prop('createDisposition', value) @property def default_dataset(self): """google.cloud.bigquery.dataset.DatasetReference: the default dataset to use for unqualified table names in the query or ``None`` if not set. 
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset """ prop = self._get_sub_prop('defaultDataset') if prop is not None: prop = DatasetReference.from_api_repr(prop) return prop @default_dataset.setter def default_dataset(self, value): resource = None if value is not None: resource = value.to_api_repr() self._set_sub_prop('defaultDataset', resource) @property def destination(self): """google.cloud.bigquery.table.TableReference: table where results are written or ``None`` if not set. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable """ prop = self._get_sub_prop('destinationTable') if prop is not None: prop = TableReference.from_api_repr(prop) return prop @destination.setter def destination(self, value): resource = None if value is not None: resource = value.to_api_repr() self._set_sub_prop('destinationTable', resource) @property def dry_run(self): """bool: ``True`` if this query should be a dry run to estimate costs. See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.dryRun """ return self._properties.get('dryRun') @dry_run.setter def dry_run(self, value): self._properties['dryRun'] = value @property def flatten_results(self): """bool: Flatten nested/repeated fields in results. (Legacy SQL only) See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.flattenResults """ return self._get_sub_prop('flattenResults') @flatten_results.setter def flatten_results(self, value): self._set_sub_prop('flattenResults', value) @property def maximum_billing_tier(self): """int: Deprecated. Changes the billing tier to allow high-compute queries. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBillingTier """ return self._get_sub_prop('maximumBillingTier') @maximum_billing_tier.setter def maximum_billing_tier(self, value): self._set_sub_prop('maximumBillingTier', value) @property def maximum_bytes_billed(self): """int: Maximum bytes to be billed for this job or ``None`` if not set. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBytesBilled """ return _int_or_none(self._get_sub_prop('maximumBytesBilled')) @maximum_bytes_billed.setter def maximum_bytes_billed(self, value): self._set_sub_prop('maximumBytesBilled', str(value)) @property def priority(self): """google.cloud.bigquery.job.QueryPriority: Priority of the query. 
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.priority """ return self._get_sub_prop('priority') @priority.setter def priority(self, value): self._set_sub_prop('priority', value) @property def query_parameters(self): """List[Union[google.cloud.bigquery.query.ArrayQueryParameter, \ google.cloud.bigquery.query.ScalarQueryParameter, \ google.cloud.bigquery.query.StructQueryParameter]]: list of parameters for parameterized query (empty by default) See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.queryParameters """ prop = self._get_sub_prop('queryParameters', default=[]) return _from_api_repr_query_parameters(prop) @query_parameters.setter def query_parameters(self, values): self._set_sub_prop( 'queryParameters', _to_api_repr_query_parameters(values)) @property def udf_resources(self): """List[google.cloud.bigquery.query.UDFResource]: user defined function resources (empty by default) See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.userDefinedFunctionResources """ prop = self._get_sub_prop('userDefinedFunctionResources', default=[]) return _from_api_repr_udf_resources(prop) @udf_resources.setter def udf_resources(self, values): self._set_sub_prop( 'userDefinedFunctionResources', _to_api_repr_udf_resources(values)) @property def use_legacy_sql(self): """bool: Use legacy SQL syntax. See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.useLegacySql """ return self._get_sub_prop('useLegacySql') @use_legacy_sql.setter def use_legacy_sql(self, value): self._set_sub_prop('useLegacySql', value) @property def use_query_cache(self): """bool: Look for the query result in the cache. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.useQueryCache """ return self._get_sub_prop('useQueryCache') @use_query_cache.setter def use_query_cache(self, value): self._set_sub_prop('useQueryCache', value) @property def write_disposition(self): """google.cloud.bigquery.job.WriteDisposition: Action that occurs if the destination table already exists. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.writeDisposition """ return self._get_sub_prop('writeDisposition') @write_disposition.setter def write_disposition(self, value): self._set_sub_prop('writeDisposition', value) @property def table_definitions(self): """Dict[str, google.cloud.bigquery.external_config.ExternalConfig]: Definitions for external tables or ``None`` if not set. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions """ prop = self._get_sub_prop('tableDefinitions') if prop is not None: prop = _from_api_repr_table_defs(prop) return prop @table_definitions.setter def table_definitions(self, values): self._set_sub_prop( 'tableDefinitions', _to_api_repr_table_defs(values)) def to_api_repr(self): """Build an API representation of the query job config. Returns: dict: A dictionary in the format used by the BigQuery API. """ resource = copy.deepcopy(self._properties) # Query parameters have an addition property associated with them # to indicate if the query is using named or positional parameters. query_parameters = resource['query'].get('queryParameters') if query_parameters: if query_parameters[0].get('name') is None: resource['query']['parameterMode'] = 'POSITIONAL' else: resource['query']['parameterMode'] = 'NAMED' return resource class QueryJob(_AsyncJob): """Asynchronous job: query tables. 
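    Instances are usually obtained from
    :meth:`google.cloud.bigquery.client.Client.query` rather than being
    constructed directly.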
:type job_id: str :param job_id: the job's ID, within the project belonging to ``client``. :type query: str :param query: SQL query string :type client: :class:`google.cloud.bigquery.client.Client` :param client: A client which holds credentials and project configuration for the dataset (which requires a project). :type job_config: :class:`~google.cloud.bigquery.job.QueryJobConfig` :param job_config: (Optional) Extra configuration options for the query job. """ _JOB_TYPE = 'query' _UDF_KEY = 'userDefinedFunctionResources' def __init__(self, job_id, query, client, job_config=None): super(QueryJob, self).__init__(job_id, client) if job_config is None: job_config = QueryJobConfig() if job_config.use_legacy_sql is None: job_config.use_legacy_sql = False self.query = query self._configuration = job_config self._query_results = None self._done_timeout = None @property def allow_large_results(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.allow_large_results`. """ return self._configuration.allow_large_results @property def create_disposition(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.create_disposition`. """ return self._configuration.create_disposition @property def default_dataset(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.default_dataset`. """ return self._configuration.default_dataset @property def destination(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.destination`. """ return self._configuration.destination @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or ``None`` if using default encryption. See :attr:`google.cloud.bigquery.job.QueryJobConfig.destination_encryption_configuration`. """ return self._configuration.destination_encryption_configuration @property def dry_run(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.dry_run`. """ return self._configuration.dry_run @property def flatten_results(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.flatten_results`. """ return self._configuration.flatten_results @property def priority(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.priority`. """ return self._configuration.priority @property def query_parameters(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.query_parameters`. """ return self._configuration.query_parameters @property def udf_resources(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.udf_resources`. """ return self._configuration.udf_resources @property def use_legacy_sql(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.use_legacy_sql`. """ return self._configuration.use_legacy_sql @property def use_query_cache(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.use_query_cache`. """ return self._configuration.use_query_cache @property def write_disposition(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.write_disposition`. """ return self._configuration.write_disposition @property def maximum_billing_tier(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_billing_tier`. """ return self._configuration.maximum_billing_tier @property def maximum_bytes_billed(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_bytes_billed`. 
""" return self._configuration.maximum_bytes_billed @property def table_definitions(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.table_definitions`. """ return self._configuration.table_definitions def _build_resource(self): """Generate a resource for :meth:`begin`.""" configuration = self._configuration.to_api_repr() resource = { 'jobReference': { 'projectId': self.project, 'jobId': self.job_id, }, 'configuration': configuration, } configuration['query']['query'] = self.query return resource def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" self._configuration._properties = copy.deepcopy(configuration) self.query = _helpers.get_sub_prop(configuration, ['query', 'query']) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.QueryJob` :returns: Job parsed from ``resource``. """ job_id, config = cls._get_resource_config(resource) query = config['query']['query'] job = cls(job_id, query, client=client) job._set_properties(resource) return job @property def query_plan(self): """Return query plan from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.queryPlan :rtype: list of :class:`QueryPlanEntry` :returns: mappings describing the query plan, or an empty list if the query has not yet completed. """ plan_entries = self._job_statistics().get('queryPlan', ()) return [QueryPlanEntry.from_api_repr(entry) for entry in plan_entries] @property def total_bytes_processed(self): """Return total bytes processed from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesProcessed :rtype: int or None :returns: total bytes processed by the job, or None if job is not yet complete. """ result = self._job_statistics().get('totalBytesProcessed') if result is not None: result = int(result) return result @property def total_bytes_billed(self): """Return total bytes billed from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled :rtype: int or None :returns: total bytes processed by the job, or None if job is not yet complete. """ result = self._job_statistics().get('totalBytesBilled') if result is not None: result = int(result) return result @property def billing_tier(self): """Return billing tier from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.billingTier :rtype: int or None :returns: billing tier used by the job, or None if job is not yet complete. """ return self._job_statistics().get('billingTier') @property def cache_hit(self): """Return whether or not query results were served from cache. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.cacheHit :rtype: bool or None :returns: whether the query results were returned from cache, or None if job is not yet complete. """ return self._job_statistics().get('cacheHit') @property def num_dml_affected_rows(self): """Return the number of DML rows affected by the job. 
See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.numDmlAffectedRows :rtype: int or None :returns: number of DML rows affected by the job, or None if job is not yet complete. """ result = self._job_statistics().get('numDmlAffectedRows') if result is not None: result = int(result) return result @property def statement_type(self): """Return statement type from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.statementType :rtype: str or None :returns: type of statement used by the job, or None if job is not yet complete. """ return self._job_statistics().get('statementType') @property def referenced_tables(self): """Return referenced tables from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.referencedTables :rtype: list of dict :returns: mappings describing the query plan, or an empty list if the query has not yet completed. """ tables = [] datasets_by_project_name = {} for table in self._job_statistics().get('referencedTables', ()): t_project = table['projectId'] ds_id = table['datasetId'] t_dataset = datasets_by_project_name.get((t_project, ds_id)) if t_dataset is None: t_dataset = DatasetReference(t_project, ds_id) datasets_by_project_name[(t_project, ds_id)] = t_dataset t_name = table['tableId'] tables.append(t_dataset.table(t_name)) return tables @property def undeclared_query_parameters(self): """Return undeclared query parameters from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.undeclaredQueryParameters :rtype: list of :class:`~google.cloud.bigquery.ArrayQueryParameter`, :class:`~google.cloud.bigquery.ScalarQueryParameter`, or :class:`~google.cloud.bigquery.StructQueryParameter` :returns: undeclared parameters, or an empty list if the query has not yet completed. """ parameters = [] undeclared = self._job_statistics().get( 'undeclaredQueryParameters', ()) for parameter in undeclared: p_type = parameter['parameterType'] if 'arrayType' in p_type: klass = ArrayQueryParameter elif 'structTypes' in p_type: klass = StructQueryParameter else: klass = ScalarQueryParameter parameters.append(klass.from_api_repr(parameter)) return parameters def done(self, retry=DEFAULT_RETRY): """Refresh the job and checks if it is complete. :rtype: bool :returns: True if the job is complete, False otherwise. """ # Since the API to getQueryResults can hang up to the timeout value # (default of 10 seconds), set the timeout parameter to ensure that # the timeout from the futures API is respected. See: # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4135 timeout_ms = None if self._done_timeout is not None: # Subtract a buffer for context switching, network latency, etc. timeout = self._done_timeout - _TIMEOUT_BUFFER_SECS timeout = max(min(timeout, 10), 0) self._done_timeout -= timeout self._done_timeout = max(0, self._done_timeout) timeout_ms = int(timeout * 1000) # Do not refresh is the state is already done, as the job will not # change once complete. if self.state != _DONE_STATE: self._query_results = self._client._get_query_results( self.job_id, retry, project=self.project, timeout_ms=timeout_ms) # Only reload the job once we know the query is complete. # This will ensure that fields such as the destination table are # correctly populated. 
if self._query_results.complete: self.reload(retry=retry) return self.state == _DONE_STATE def _blocking_poll(self, timeout=None): self._done_timeout = timeout super(QueryJob, self)._blocking_poll(timeout=timeout) def result(self, timeout=None, retry=DEFAULT_RETRY): """Start the job and wait for it to complete and get the result. :type timeout: float :param timeout: How long (in seconds) to wait for job to complete before raising a :class:`concurrent.futures.TimeoutError`. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the call that retrieves rows. :rtype: :class:`~google.cloud.bigquery.table.RowIterator` :returns: Iterator of row data :class:`~google.cloud.bigquery.table.Row`-s. During each page, the iterator will have the ``total_rows`` attribute set, which counts the total number of rows **in the result set** (this is distinct from the total number of rows in the current page: ``iterator.page.num_items``). :raises: :class:`~google.cloud.exceptions.GoogleCloudError` if the job failed or :class:`concurrent.futures.TimeoutError` if the job did not complete in the given timeout. """ super(QueryJob, self).result(timeout=timeout) # Return an iterator instead of returning the job. if not self._query_results: self._query_results = self._client._get_query_results( self.job_id, retry, project=self.project) schema = self._query_results.schema dest_table = self.destination return self._client.list_rows(dest_table, selected_fields=schema, retry=retry) def to_dataframe(self): """Return a pandas DataFrame from a QueryJob Returns: A :class:`~pandas.DataFrame` populated with row data and column headers from the query results. The column headers are derived from the destination table's schema. Raises: ValueError: If the `pandas` library cannot be imported. """ return self.result().to_dataframe() def __iter__(self): return iter(self.result()) class QueryPlanEntryStep(object): """Map a single step in a query plan entry. :type kind: str :param kind: step type :type substeps: :param substeps: names of substeps """ def __init__(self, kind, substeps): self.kind = kind self.substeps = list(substeps) @classmethod def from_api_repr(cls, resource): """Factory: construct instance from the JSON repr. :type resource: dict :param resource: JSON representation of the entry :rtype: :class:`QueryPlanEntryStep` :return: new instance built from the resource """ return cls( kind=resource.get('kind'), substeps=resource.get('substeps', ()), ) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.kind == other.kind and self.substeps == other.substeps class QueryPlanEntry(object): """Map a single entry in a query plan. 
:type name: str :param name: name of the entry :type entry_id: int :param entry_id: ID of the entry :type wait_ratio_avg: float :param wait_ratio_avg: average wait ratio :type wait_ratio_max: float :param wait_ratio_max: maximum wait ratio :type read_ratio_avg: float :param read_ratio_avg: average read ratio :type read_ratio_max: float :param read_ratio_max: maximum read ratio :type compute_ratio_avg: float :param compute_ratio_avg: average compute ratio :type compute_ratio_max: float :param compute_ratio_max: maximum compute ratio :type write_ratio_avg: float :param write_ratio_avg: average write ratio :type write_ratio_max: float :param write_ratio_max: maximum write ratio :type records_read: int :param records_read: number of records read :type records_written: int :param records_written: number of records written :type status: str :param status: entry status :type steps: List(QueryPlanEntryStep) :param steps: steps in the entry """ def __init__(self, name, entry_id, wait_ratio_avg, wait_ratio_max, read_ratio_avg, read_ratio_max, compute_ratio_avg, compute_ratio_max, write_ratio_avg, write_ratio_max, records_read, records_written, status, steps): self.name = name self.entry_id = entry_id self.wait_ratio_avg = wait_ratio_avg self.wait_ratio_max = wait_ratio_max self.read_ratio_avg = read_ratio_avg self.read_ratio_max = read_ratio_max self.compute_ratio_avg = compute_ratio_avg self.compute_ratio_max = compute_ratio_max self.write_ratio_avg = write_ratio_avg self.write_ratio_max = write_ratio_max self.records_read = records_read self.records_written = records_written self.status = status self.steps = steps @classmethod def from_api_repr(cls, resource): """Factory: construct instance from the JSON repr. :type resource: dict :param resource: JSON representation of the entry :rtype: :class:`QueryPlanEntry` :return: new instance built from the resource """ records_read = resource.get('recordsRead') if records_read is not None: records_read = int(records_read) records_written = resource.get('recordsWritten') if records_written is not None: records_written = int(records_written) return cls( name=resource.get('name'), entry_id=resource.get('id'), wait_ratio_avg=resource.get('waitRatioAvg'), wait_ratio_max=resource.get('waitRatioMax'), read_ratio_avg=resource.get('readRatioAvg'), read_ratio_max=resource.get('readRatioMax'), compute_ratio_avg=resource.get('computeRatioAvg'), compute_ratio_max=resource.get('computeRatioMax'), write_ratio_avg=resource.get('writeRatioAvg'), write_ratio_max=resource.get('writeRatioMax'), records_read=records_read, records_written=records_written, status=resource.get('status'), steps=[QueryPlanEntryStep.from_api_repr(step) for step in resource.get('steps', ())], )
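

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the library itself: it assumes a
# configured `google.cloud.bigquery.Client` (credentials and project) and uses
# a trivial placeholder query.
#
#   from google.cloud import bigquery
#
#   client = bigquery.Client()
#   query_job = client.query('SELECT 1 AS x')  # returns a QueryJob
#   rows = query_job.result()                  # blocks until the job completes
#   for row in rows:
#       print(row.x)
#   # With pandas installed, the same results as a DataFrame:
#   # df = query_job.to_dataframe()
# ---------------------------------------------------------------------------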
py
1a5a9d8bdf8e2fd64a63156a71b1aacc203d585f
from h2o.estimators import H2ODeepLearningEstimator, H2OGradientBoostingEstimator, H2OGeneralizedLinearEstimator, \
    H2ONaiveBayesEstimator, H2ORandomForestEstimator
from sklearn.base import BaseEstimator
import h2o
import pandas as pd


class H2ODecorator(BaseEstimator):
    def __init__(self, est_type, est_params, nthreads=-1, mem_max='2G', target_type=None):
        # using H2O estimator classes directly does not work correctly, hence a string-to-class mapping is used
        est_map = {
            'dl': H2ODeepLearningEstimator,
            'gbm': H2OGradientBoostingEstimator,
            'glm': H2OGeneralizedLinearEstimator,
            'nb': H2ONaiveBayesEstimator,
            'rf': H2ORandomForestEstimator,
        }
        self.est_type = est_type
        self.est_params = est_params
        self.est = est_map[est_type](**est_params)
        self.nthreads = nthreads
        self.mem_max = mem_max
        self.cluster_ready = False
        self.target_type = target_type

    def _init_h2o(self):
        # lazily start (or connect to) a local H2O cluster on first use
        if self.cluster_ready:
            return
        h2o.init(nthreads=self.nthreads, max_mem_size=self.mem_max)
        self.cluster_ready = True

    def fit(self, X, y):
        self._init_h2o()
        features = h2o.H2OFrame(python_obj=X)
        if isinstance(y, pd.Series):
            _y = y.values
        else:
            _y = y
        if self.target_type is not None:
            target_type = [self.target_type]
        else:
            target_type = None
        target = h2o.H2OFrame(python_obj=_y, column_types=target_type)
        self.est.fit(features, target)
        return self

    def predict(self, X):
        self._init_h2o()
        features = h2o.H2OFrame(python_obj=X)
        pred_df = self.est.predict(features).as_data_frame()
        # prefer the 'predict' column when H2O's output includes one
        if 'predict' in pred_df.columns:
            return pred_df['predict']
        else:
            return pred_df.iloc[:, 0]

    def predict_proba(self, X):
        self._init_h2o()
        features = h2o.H2OFrame(python_obj=X)
        pred_df = self.est.predict(features).as_data_frame()
        # drop the predicted-label column so only the per-class columns remain
        if 'predict' in pred_df.columns:
            return pred_df.drop('predict', axis=1).values
        else:
            return pred_df.drop(pred_df.columns[0], axis=1).values
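

# Illustrative usage sketch (assumes a local H2O installation, pandas feature
# frames `X_train`/`X_test` and a target series `y_train`; the hyper-parameter
# values below are placeholders):
#
#   est = H2ODecorator(
#       est_type='gbm',
#       est_params={'ntrees': 50},
#       nthreads=-1,
#       mem_max='2G',
#       target_type='enum',  # treat the target column as categorical
#   )
#   est.fit(X_train, y_train)
#   preds = est.predict(X_test)
#   probas = est.predict_proba(X_test)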
py
1a5a9dc7c0fa9f96487ba87b5d26b10643c28c21
#! /usr/bin/env python # example patch call: # ./extract_patches.py -subset 202 -slices 64 -dim 64 #### ---- Imports & Dependencies ---- #### import sys import os import argparse from configparser import ConfigParser import pathlib from glob import glob from random import shuffle import SimpleITK as sitk # pip install SimpleITK from tqdm import tqdm # pip install tqdm import h5py import pandas as pd import numpy as np from scipy.spatial import distance #### ---- Argparse Utility ---- #### parser = argparse.ArgumentParser(description='Modify the patch extractor script',add_help=True) parser.add_argument('-hdf5', action="store_true", dest="hdf5", default=True, help='Save processed data to hdf5') parser.add_argument('-hu_norm', action="store_true", dest="hu_norm", default=False, help='Normalize Patch to -1000 - 400 HU') parser.add_argument('-slices', type=int, action="store", dest="slices", default=1, help='Num of tensor slices > 0, default = 1') parser.add_argument('-dim', action="store", dest="dim", type=int, default=64, help='Dimension of the patch, default = 64') parser.add_argument('-remote', action="store_true", dest="remote", default=False, help='Use if running script remote e.g. AWS') requiredNamed = parser.add_argument_group('required named arguments') requiredNamed.add_argument('-subset', action="store", dest="subset", type=lambda s: ['subset'+str(x)+'/' for x in s.split(',')], #pass a list of nums to arg required=True, help='subset dir name or number(s) e.g. 0,1,2') args = parser.parse_args() #### ---- ConfigParse Utility ---- #### config = ConfigParser() config.read('extract_patches_config.ini') #local just for now (need if - else for AWS) ''' Example extract_patches_config.ini file: [local] LUNA_PATH = /Users/keil/datasets/LUNA16/ CSV_PATH = /Users/keil/datasets/LUNA16/csv-files/ IMG_PATH = /Users/keil/datasets/LUNA16/patches/ [remote] # - when we move to AWS ''' #### ---- Global Vars ---- #### LUNA_PATH = config.get('local', 'LUNA_PATH') CSV_PATH = config.get('local', 'CSV_PATH') IMG_PATH = config.get('local', 'IMG_PATH') SUBSET = args.subset SAVE_HDF5 = args.hdf5 HU_NORM = args.hu_norm PATCH_DIM = args.dim NUM_SLICES = args.slices CHANNELS = 1 PATCH_WIDTH = PATCH_DIM/2 PATCH_HEIGHT = PATCH_DIM/2 PATCH_DEPTH = NUM_SLICES/2 # WORK_REMOTE = args.remote #add later w/ AWS #TODO add this to config file for csv file name DF_NODE = pd.read_csv(CSV_PATH + "candidates_V2.csv") DF_NODE1 = pd.read_csv(CSV_PATH + "candidates_with_annotations.csv") # DF_NODE = pd.read_csv(CSV_PATH + "candidates_with_annotations.csv") FILE_LIST = [] SUBSET_LIST = [] for unique_set in SUBSET: mhd_files = glob("{}{}/*.mhd".format(LUNA_PATH, unique_set)) FILE_LIST.extend(mhd_files) #add subset of .mhd files subset_num = unique_set.strip('subset/') #extracting out subset number for elements in mhd_files: #making sure we match each globbed mhd file to a subset num SUBSET_LIST.append(int(subset_num)) #pass this list later to write subset num to HDF5 #### ---- Helper Functions ---- #### def normalizePlanes(npzarray): """ Normalize pixel depth into Hounsfield units (HU), between -1000 - 400 HU All other HU will be masked. Then we normalize pixel values between 0 and 1. """ maxHU, minHU = 400., -1000. npzarray = (npzarray - minHU) / (maxHU - minHU) npzarray[npzarray>1] = 1. npzarray[npzarray<0] = 0. 
return npzarray def normalize_img(img): """ Sets the MHD image to be approximately 1.0 mm voxel size https://itk.org/ITKExamples/src/Filtering/ImageGrid/ResampleAnImage/Documentation.html """ # Number of pixels you want for x,y,z dimensions new_x_size = int(img.GetSpacing()[0]*img.GetWidth()) new_y_size = int(img.GetSpacing()[1]*img.GetHeight()) new_z_size = int(img.GetSpacing()[2]*img.GetDepth()) new_size = [new_x_size, new_y_size, new_z_size] new_spacing = [1,1,1] # New spacing to be 1.0 x 1.0 x 1.0 mm voxel size interpolator_type = sitk.sitkBSpline #sitkLinear using BSpline over Linear return sitk.Resample(img, np.array(new_size, dtype='uint32').tolist(), sitk.Transform(), interpolator_type, img.GetOrigin(), new_spacing, img.GetDirection(), 0.0, img.GetPixelIDValue()) def make_bbox(center,width,height,depth,origin,class_id): """ Returns a 3d (numpy tensor) bounding box from the CT scan. 2d in the case where PATCH_DEPTH = 1 """ # left = np.max([0, np.abs(center[0] - origin[0]) - PATCH_WIDTH]).astype(int) left = np.max([0, center[0] - PATCH_WIDTH]).astype(int) right = np.min([width, center[0] + PATCH_WIDTH]).astype(int) # left = int((np.abs(center[0] - origin[0])) - PATCH_WIDTH) #DEBUG # right = int((np.abs(center[0] - origin[0])) + PATCH_WIDTH) #DEBUG down = np.max([0, center[1] - PATCH_HEIGHT]).astype(int) up = np.min([height, center[1] + PATCH_HEIGHT]).astype(int) top = np.min([depth, center[2] + PATCH_DEPTH]).astype(int) bottom = np.max([0, center[2] - PATCH_DEPTH]).astype(int) bbox = [[down, up], [left, right], [bottom, top]] #(back,abdomen - left side, right side - feet, head) # If bbox has a origin - center - PATCH_DIM/2 that results in a 0, (rarely the case) # ensure that the bbox dims are all [PATCH_DIM x PATCH_DIM x PATCH_DIM] if bbox[0][0] == 0: bbox[0][1] = PATCH_DIM elif bbox[1][0] == 0: bbox[1][1] = PATCH_DIM elif bbox[2][0] == 0: bbox[2][1] = NUM_SLICES # change to --slice dim return bbox def downsample_class_0(df): """ Returns a pd.DataFrame where class 0s that collide with class 1s have been flagged based on a distance measurement threshold. 
Threshold = PATCH_DIM/2 The flag will be written to HDF5 and let the user know not to train on these class 0s """ empty_col = [0 for x in range(len(df))] idx_to_flag = [] df.reset_index(inplace=True) if 1 in df['class'].tolist(): #check series ID for a positive nodule df_class_1 = df[df["class"] == 1].copy(deep=True) ones_coords = df_class_1[["coordX", "coordY", "coordZ"]].values for idx, row in df.iterrows(): #check for a class 1 if row['class'] == 1: continue #set vars for calculation zero_coord = (row['coordX'],row['coordY'],row['coordZ']) for one_coord in ones_coords: dst = distance.euclidean(zero_coord,one_coord) if dst <= PATCH_DIM/2: #follow this heuristic for downsampling class 0 idx_to_flag.append(idx) else: df = df.assign(no_train = empty_col) return df idx_to_flag = list(set(idx_to_flag)) downsample_col = [] for idx, i in enumerate(empty_col): if idx in idx_to_flag: downsample_col.append(1) else: downsample_col.append(0) df = df.assign(no_train = downsample_col) return df def write_to_hdf5(dset_and_data,first_patch=False): """Accept zipped hdf5 dataset obj and numpy data, write data to dataset""" dset = dset_and_data[0] #hdf5 dataset obj data = dset_and_data[1] #1D numpy hdf5 writable data if first_patch == True: dset[:] = data #set the whole, empty, hdf5 dset = data return row = dset.shape[0] # Count current dataset rows dset.resize(row+1, axis=0) # Add new row dset[row, :] = data # Insert data into new row return #### ---- Process CT Scans and extract Patches (the pipeline) ---- #### def main(): """ Create the hdf5 file + datasets, iterate thriough the folders DICOM imgs Normalize the imgs, create mini patches and write them to the hdf5 file system """ with h5py.File(LUNA_PATH + str(PATCH_DIM) + 'x' + str(PATCH_DIM) + 'x' + str(NUM_SLICES) + '-patch-withdiam.hdf5', 'w') as HDF5: # Datasets for 3d patch tensors & class_id/x,y,z coords total_patch_dim = PATCH_DIM * PATCH_DIM * NUM_SLICES patch_dset = HDF5.create_dataset('input', (1,total_patch_dim), maxshape=(None,total_patch_dim)) #patches = inputs class_dset = HDF5.create_dataset('output', (1,1), maxshape=(None,1), dtype=int) #classes = outputs notrain_dset = HDF5.create_dataset('notrain', (1,1), maxshape=(None,1), dtype=int) # test holdout centroid_dset = HDF5.create_dataset('centroid', (1,3), maxshape=(None,3), dtype=float) uuid_dset = HDF5.create_dataset('uuid', (1,1), maxshape=(None,None), dtype=h5py.special_dtype(vlen=bytes)) subset_dset = HDF5.create_dataset('subsets', (1,1), maxshape=(None,1), dtype=int) diameter_dset = HDF5.create_dataset('diameter_label', (1,1), maxshape=(None,1), dtype=int) HDF5['input'].attrs['lshape'] = (PATCH_DIM, PATCH_DIM, NUM_SLICES, CHANNELS) # (Height, Width, Depth) print("Successfully initiated the HDF5 file. 
Ready to recieve data!") #### ---- Iterating through a CT scan ---- #### counter = 0 scan_number = 1 first_patch = True # flag for saving first img to hdf5 for img_file, subset_id in tqdm(zip(FILE_LIST,SUBSET_LIST)): print("Processing CT Scan: {}".format(scan_number)) base=os.path.basename(img_file) # Strip the filename out seriesuid = os.path.splitext(base)[0] # Get the filename without the extension mini_df = DF_NODE[DF_NODE["seriesuid"] == seriesuid] #### ---- Downsampling Class 0s ---- #### mini_df = downsample_class_0(mini_df) mini_df1 = DF_NODE1[DF_NODE1["seriesuid"] == seriesuid] mini_df1.fillna(-1, inplace=True) # Load the CT scan (3D .mhd file) # Numpy is z,y,x and SimpleITK is x,y,z -- (note the ordering of dimesions) itk_img = sitk.ReadImage(img_file) # Normalize the image spacing so that a voxel is 1x1x1 mm in dimension itk_img = normalize_img(itk_img) # SimpleITK keeps the origin and spacing information for the 3D image volume img_array = sitk.GetArrayFromImage(itk_img) # indices are z,y,x (note the ordering of dimesions) img_array = np.pad(img_array, int(PATCH_DIM), mode="constant", constant_values=-2000)#, constant_values=0) #0 padding 3d array for patch clipping issue slice_z, height, width = img_array.shape origin = np.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm) - Not same as img_array spacing = np.array(itk_img.GetSpacing()) # spacing of voxels in world coordinates (mm) scan_number += 1 #### ---- Iterating through a CT scan's slices ---- #### for candidate_idx, cur_row in mini_df.iterrows(): # Iterate through all candidates (in dataframe) # This is the real world x,y,z coordinates of possible nodule (in mm) class_id = cur_row["class"] #0 for false, 1 for true nodule no_train = cur_row["no_train"] candidate_x = cur_row["coordX"] + PATCH_DIM candidate_y = cur_row["coordY"] + PATCH_DIM candidate_z = cur_row["coordZ"] + PATCH_DIM center = np.array([candidate_x, candidate_y, candidate_z]) # candidate center voxel_center = np.rint(np.abs(center / spacing - origin)).astype(int) # candidate center in voxels # if class_id is a 1, then we find the diameter, if there is no diameter it is # because it is from the candidates v2 file, and we do not have this info for the # nodule, thus we can not make a mask for it. We set the diameter label to -1 # and set the flag of no_train to false so we do not use it for training unet. 
if class_id == 0: diameter_label = 0 else: diameter_label = mini_df1[(mini_df1['coordX']==cur_row["coordX"]) & (mini_df1['coordY']==cur_row["coordY"]) & (mini_df1['coordZ']==cur_row["coordZ"])]['diameter_mm'].values if len(diameter_label) == 0: diameter_label = -1 no_train = 1 # put a counter here #### ---- Generating the 2d/2.5d/3d Patch ---- #### bbox = make_bbox(voxel_center, width, height, slice_z, origin, class_id) #return bounding box patch = img_array[ bbox[2][0]:bbox[2][1], bbox[0][0]:bbox[0][1], bbox[1][0]:bbox[1][1]] # DEBUG print(patch.shape) #uncomment to debug shape size being written #### ---- Perform Hounsfield Normlization ---- #### if HU_NORM: patch = normalizePlanes(patch) #normalize patch to HU units #### ---- Prepare Data for HDF5 insert ---- #### patch = patch.ravel().reshape(1,-1) #flatten img to (1 x N) if patch.shape[1] != total_patch_dim: # Catch any class 0 bbox issues and pass them counter += 1 continue #minor fix to subtract the PATCH_DIM from each centroid when saving to HDF5 to match candidates_V2.csv centroid_data = np.array([candidate_x - PATCH_DIM,candidate_y - PATCH_DIM,candidate_z - PATCH_DIM]).ravel().reshape(1,-1) seriesuid_str = np.string_(seriesuid) #set seriesuid str to numpy.bytes_ type #### ---- Write Data to HDF5 insert ---- #### hdf5_dsets = [patch_dset, class_dset, notrain_dset, uuid_dset, subset_dset, centroid_dset,diameter_dset] hdf5_data = [patch, class_id, no_train, seriesuid_str, subset_id, centroid_data,diameter_label] for dset_and_data in zip(hdf5_dsets,hdf5_data): if first_patch == True: write_to_hdf5(dset_and_data,first_patch=True) else: write_to_hdf5(dset_and_data) first_patch = False print("Did not write: " + str(counter) + " patches to HDF5") print("All {} CT Scans Processed and Individual Patches written to HDF5!".format(scan_number)) print('\a') if __name__ == '__main__': main()
py
1a5a9e50204715e7494e4f744bf10d0f7f7f3bde
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jul 23 21:09:54 2021 @author: smullally """ import numpy import matplotlib import exovetter as exo from exovetter import const from exovetter import utils import exovetter.vetters as vet import lightkurve as lk candidate = "TOI 565.01" tce = utils.get_mast_tce(candidate) lc = lk.search_lightcurve(candidate, exptime=120)[0].download() lc.plot() tpf = lk.search_targetpixelfile(candidate, exptime=120)[0].download() cent = vet.Centroid() cent.run(tce[0],tpf, plot=True)
py
1a5a9ec9a80cd0ab7312f311c8be2674e6c9f3c4
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: skip-file from tqdm import tqdm import torch import torch.optim as optim import numpy as np import random import networkx as nx from bigg.common.configs import cmd_args, set_device from bigg.model.tree_clib.tree_lib import setup_treelib, TreeLib from bigg.model.tree_model import RecurTreeGen if __name__ == '__main__': random.seed(cmd_args.seed) torch.manual_seed(cmd_args.seed) np.random.seed(cmd_args.seed) set_device(cmd_args.gpu) setup_treelib(cmd_args) train_graphs = [nx.barabasi_albert_graph(10, 2)] TreeLib.InsertGraph(train_graphs[0]) max_num_nodes = max([len(gg.nodes) for gg in train_graphs]) cmd_args.max_num_nodes = max_num_nodes model = RecurTreeGen(cmd_args).to(cmd_args.device) optimizer = optim.Adam(model.parameters(), lr=cmd_args.learning_rate, weight_decay=1e-4) for i in range(2): optimizer.zero_grad() ll, _ = model.forward_train([0]) loss = -ll / max_num_nodes print('iter', i, 'loss', loss.item()) loss.backward() optimizer.step()
py
1a5a9f50f81b5321c426de97a15c684890558484
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Nov 11 15:29:04 2017 @author: rhoenigman """ import networkx as nx #create a directed graph G = nx.DiGraph() #adding an edge also adds the node G.add_edge('Spider', 'A', weight=1.0) G.add_edge('Spider', 'H', weight=1.0) G.add_edge('Spider', 'J', weight=1.0) G.add_edge('H', 'G', weight=1.0) G.add_edge('H', 'K', weight=1.0) G.add_edge('G', 'L', weight=1.0) G.add_edge('G', 'F', weight=1.0) G.add_edge('F', 'E', weight=1.0) G.add_edge('E', 'Fly', weight=1.0) G.add_edge('J', 'S', weight=1.0) G.add_edge('J', 'K', weight=1.0) G.add_edge('K', 'L', weight=1.0) G.add_edge('L', 'M', weight=1.0) G.add_edge('M', 'N', weight=1.0) G.add_edge('M', 'F', weight=1.0) G.add_edge('N', 'O', weight=1.0) G.add_edge('N', 'E', weight=1.0) G.add_edge('O', 'Fly', weight=1.0) G.add_edge('A', 'S', weight=1.0) G.add_edge('A', 'B', weight=1.0) G.add_edge('B', 'R', weight=1.0) G.add_edge('B', 'C', weight=1.0) G.add_edge('S', 'R', weight=1.0) G.add_edge('R', 'Q', weight=1.0) G.add_edge('Q', 'C', weight=1.0) G.add_edge('Q', 'P', weight=1.0) G.add_edge('C', 'D', weight=1.0) G.add_edge('D', 'Fly', weight=1.0) G.add_edge('P', 'D', weight=1.0) G.add_edge('P', 'O', weight=1.0) G.add_edge('O', 'Fly', weight=1.0) G.add_edge('T', 'Q', weight=1.0) G.add_edge('T', 'P', weight=1.0) G.add_edge('T', 'O', weight=1.0) G.add_edge('T', 'N', weight=1.0) G.add_edge('T', 'M', weight=1.0) G.add_edge('R', 'T', weight=1.0) G.add_edge('S', 'T', weight=1.0) G.add_edge('J', 'T', weight=1.0) G.add_edge('K', 'T', weight=1.0) G.add_edge('L', 'T', weight=1.0) #each edge has a weight of 1. The shortest path is the fewest edges. #Use this to verify that your graph built correctly. t = nx.shortest_path(G, 'Spider', 'Fly', weight='weight') print(t)
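
# The weighted shortest-path length (equal to the number of edges here, since
# every weight is 1.0) can be printed the same way as a further check:
print(nx.shortest_path_length(G, 'Spider', 'Fly', weight='weight'))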
py
1a5aa0889865b570bf685bb0f0004339fbcfaf18
#!/usr/bin/python
# -*- coding: utf-8 -*-

import sys, requests

from event_handlers import monitoredEvents


class EventHandler(object):

    def __init__(self):
        self.max_retry = 3

    def event_parser(self, event):
        # print("%s - %s" % (event['type'], event['name']))
        return_message = monitoredEvents.events_classification[event['type']]['lib'].parse_event(event['name'])
        print(return_message)
        return return_message
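

# Illustrative usage sketch: `event_parser` expects a mapping with 'type' and
# 'name' keys, where the type matches an entry in
# `monitoredEvents.events_classification`; the values below are placeholders.
#
#   handler = EventHandler()
#   handler.event_parser({'type': 'some_monitored_type', 'name': 'some_event'})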
py
1a5aa12e4e555b190d90b87fe2669a55febecebc
from argparse import ArgumentParser

from fmovies_api import Fmovies, FmoviesConfig
import os
import re


class Application():
    def __init__(self):
        self.config_data = {}

    def setup_parser(self):
        parser = ArgumentParser()
        parser.add_argument("-u", "--base-url",
                            help="The base url for the website.")
        parser.add_argument(
            "--search-path",
            help="Absolute or relative url to search page. "
                 "If relative, base-url must be supplied.")
        parser.add_argument("-c", "--config-file",
                            help="Input file for the configuration.")

        args = parser.parse_args()
        # Parse the configuration from a file when one is given, otherwise
        # from the command-line data itself.
        self.config_data = FmoviesConfig.parse(
            args, type=("file" if args.config_file else "data"))

    def main(self):
        pass


if __name__ == "__main__":
    app = Application()
    app.setup_parser()
    app.main()
py
1a5aa16705dc46f9d94bfd1d7543dc21e8ed34f4
""" tests cw_seqComparer.py A component of the findNeighbour4 system for bacterial relatedness monitoring Copyright (C) 2021 David Wyllie [email protected] repo: https://github.com/davidhwyllie/findNeighbour4 This program is free software: you can redistribute it and/or modify it under the terms of the MIT License as published by the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file. """ import unittest from findn.cw_seqComparer import NoCWParametersProvidedError, cw_seqComparer from Bio import SeqIO ## persistence unit tests UNITTEST_MONGOCONN = "mongodb://localhost" UNITTEST_RDBMSCONN = "sqlite://" class setup_ref(unittest.TestCase): def setUp(self): self.inputfile = "reference/NC_000962.fasta" with open(self.inputfile, "rt") as f: for record in SeqIO.parse(f, "fasta"): self.goodseq = str(record.seq) self.badseq = "".join("N" * len(self.goodseq)) self.originalseq = list(str(record.seq)) class test_cw_seqComparer_estimate_expected_unk(setup_ref): """tests estimate_expected_unk, a function estimating the number of Ns in sequences by sampling""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) n = 0 originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) res = sc.estimate_expected_unk() # defaults to sample size 30 self.assertEqual(res, None) # analyse the last two res = sc.estimate_expected_unk( sample_size=2, unk_type="N", exclude_guids=guids[0:5] ) self.assertEqual(res, 1.5) # analyse the first two res = sc.estimate_expected_unk( sample_size=2, unk_type="N", exclude_guids=guids[2:7] ) self.assertEqual(res, 1) class test_cw_seqComparer_estimate_expected_unk_sites(setup_ref): """tests estimate_expected_unk_sites, a function estimating the number of Ns or Ms in sequences by sampling""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) n = 0 originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) # evaluate Ms res = sc.estimate_expected_unk_sites( unk_type="N", sites=set([]), sample_size=30 ) self.assertEqual(res, None) res = sc.estimate_expected_unk_sites(unk_type="N", sites=set([]), sample_size=7) self.assertEqual(res, 1) res = sc.estimate_expected_unk_sites( unk_type="N", sites=set([0, 1, 2, 3, 4, 5]), sample_size=7 ) self.assertEqual(res, 1) # generate compressed sequences 
refSeq = "GGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) n = 0 originals = [ "AAACGM", "CCCCGM", "TTTCGM", "GGGGGM", "MMMCGM", "ACTCGM", "TCTMGM", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) # analyse res = sc.estimate_expected_unk_sites(unk_type="M", sites=set([]), sample_size=7) self.assertEqual(res, 1) res = sc.estimate_expected_unk_sites( unk_type="M", sites=set([0, 1, 2, 3]), sample_size=7 ) self.assertEqual(res, 0) class test_cw_seqComparer_msa_1(setup_ref): """tests the generation of multiple alignments of variant sites.""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guid_names = [] n = 0 for original in originals: n += 1 c = sc.compress(original) this_guid = "{0}-{1}".format(original, n) sc.persist(c, guid=this_guid) guid_names.append(this_guid) self.assertEqual(len(guid_names), 7) msa = sc.multi_sequence_alignment(guid_names) # there's variation at positions 0,1,2,3 self.assertEqual(msa.alignment_length, 4) self.assertEqual(msa.variant_positions, [0, 1, 2, 3]) class test_cw_seqComparer_msa_b(setup_ref): """tests the generation of multiple alignments of variant sites.""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=6, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTGGN", ] guid_names = [] n = 0 for original in originals: n += 1 c = sc.compress(original) this_guid = "{0}-{1}".format(original, n) sc.persist(c, guid=this_guid) guid_names.append(this_guid) msa = sc.multi_sequence_alignment(guid_names) # there's variation at positions 0,1,2,3 self.assertEqual(msa.alignment_length, 4) self.assertEqual(msa.variant_positions, [0, 1, 2, 3]) class test_cw_seqComparer_repopulate(setup_ref): """tests persist, guids, mcompare and methods""" def runTest(self): # load catwalk and database refSeq = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACGA" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, 
"catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) sc.PERSIST._delete_existing_data() s1 = sc.summarise_stored_items() self.assertEqual(s1["server|catwalk|n_samples"], 0) n = 0 originals = [ "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACGN", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACCCCGN", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATTTCGN", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGGGGGN", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANNNCGN", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACTCGN", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATCTNGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) # check that all have inserted self.assertEqual(set(guids), set(sc.guids())) s2 = sc.summarise_stored_items() self.assertEqual(s2["server|catwalk|n_samples"], len(originals)) self.assertEqual(len(sc.PERSIST.guids()), len(guids)) self.assertEqual(len(sc.PERSIST.guids_valid()), len(guids)) sc.catWalk.stop() # terminate the catwalk instance sc.catWalk.start() sc.repopulate_all() s3 = sc.summarise_stored_items() self.assertEqual(s3["server|catwalk|n_samples"], len(originals)) class test_cw_seqComparer_mcompare(setup_ref): """tests persist, guids, mcompare and methods""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) sc.PERSIST._delete_existing_data() s1 = sc.summarise_stored_items() self.assertEqual(s1["server|catwalk|n_samples"], 0) n = 0 originals = [ "AAACGN", "CCCCGN", "TTTCGN", "GGGGGN", "NNNCGN", "ACTCGN", "TCTNGN", ] guids = [] for original in originals: n += 1 c = sc.compress(original) guid = "{0}-{1}".format(original, n) guids.append(guid) sc.persist(c, guid=guid) # check that all have inserted self.assertEqual(set(guids), set(sc.guids())) s2 = sc.summarise_stored_items() self.assertEqual(s2["server|catwalk|n_samples"], len(originals)) for test_guid in guids: res = sc.mcompare(test_guid) # defaults to sample size 30 res = res["neighbours"] self.assertEqual(len(res), len(originals) - 1) class test_no_catwalk_settings(setup_ref): """tests initialisation with no catwalk""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" with self.assertRaises(NoCWParametersProvidedError): cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={}, unittesting=True, ) class test_invalid_inputs(setup_ref): """tests initialisation with invalid inputs""" def runTest(self): # generate compressed sequences refSeq = "GGGGGG" with self.assertRaises(TypeError): cw_seqComparer( maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={"catWalk_parameters": {}}, unittesting=True, PERSIST=-1, ) with self.assertRaises(TypeError): cw_seqComparer( maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={"catWalk_parameters": {}}, unittesting=True, PERSIST=-1, ) with 
self.assertRaises(TypeError): cw_seqComparer( maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={"catWalk_parameters": {}}, unittesting=True, PERSIST=None, ) class test_cw_seqComparer_eh(setup_ref): """tests the loading of an exclusion file""" def runTest(self): # default exclusion file refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=1, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) self.assertEqual( sc.excluded_hash(), "Excl 0 nt [d751713988987e9331980363e24189ce]" ) class test_cw_seqComparer_compress_uncompress(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) originals = [ "AAAA", "CCCC", "TTTT", "GGGG", "NNNN", "ACTG", "ACTC", "TCTN", "NYTQ", "QRST", ] for original in originals: compressed_sequence = sc.compress(sequence=original) roundtrip = sc.uncompress(compressed_sequence) self.assertEqual(original, roundtrip) class test_cw_seqComparer_load_refseq(setup_ref): """test init""" def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) self.assertEqual(sc.reference, refSeq) class test_cw_seqComparer_wrong_length(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) with self.assertRaises(TypeError): sc.compress(sequence="AC") class test_cw_seqComparer_is_ref(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) retVal = sc.compress(sequence="ACTG") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([]), "M": {}, "invalid": 0, }, ) class 
test_cw_seqComparer_store_m(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) retVal = sc.compress(sequence="ACTQ") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([]), "M": {3: "Q"}, "invalid": 0, }, ) class test_cw_seqComparer_store_ms(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) retVal = sc.compress(sequence="NYTQ") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([0]), "M": {1: "Y", 3: "Q"}, "invalid": 0, }, ) class test_cw_seqComparer_store_n(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) retVal = sc.compress(sequence="ACTN") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([3]), "M": {}, "invalid": 0, }, ) class test_cw_seqComparer_store_dash_as_n(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) retVal = sc.compress(sequence="ACT-") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([]), "N": set([3]), "M": {}, "invalid": 0, }, ) class test_cw_seqComparer_store_n_and_var(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) retVal = sc.compress(sequence="TCT-") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([0]), "N": set([3]), "M": {}, "invalid": 0, }, ) class 
test_cw_seqComparer_store_n_and_othervar(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) retVal = sc.compress(sequence="ATT-") self.assertEqual( retVal, { "G": set([]), "A": set([]), "C": set([]), "T": set([1]), "N": set([3]), "M": {}, "invalid": 0, }, ) class test_cw_seqComparer_compress_uncompress_roundtrip(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) originals = [ "AAAA", "CCCC", "TTTT", "GGGG", "NNNN", "ACTG", "ACTC", "TCTN", "NYTQ", "QRST", ] for original in originals: compressed_sequence = sc.compress(sequence=original) roundtrip = sc.uncompress(compressed_sequence) self.assertEqual(original, roundtrip) class test_cw_seqComparer_compress_uncompress_roundtrip_2(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=1e8, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) originals = ["NNNN"] for original in originals: compressed_sequence = sc.compress(sequence=original) roundtrip = sc.uncompress(compressed_sequence) self.assertEqual(original, roundtrip) class test_cw_seqComparer_compress_uncompress_roundtrip_3(setup_ref): def runTest(self): refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=3, snpCeiling=20, reference=refSeq, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) originals = ["NNNN"] for original in originals: compressed_sequence = sc.compress(sequence=original) with self.assertRaises(ValueError): sc.uncompress(compressed_sequence) class test_cw_seqComparer_one_invalid_sequence(setup_ref): """tests the comparison of two sequences where one is invalid""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, maxNs=3, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 1, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": 
"reference/TB-exclude-adaptive.txt", }, }, unittesting=True, ) seq1 = sc.compress("AAAA") seq2 = sc.compress("NNNN") sc.persist(seq2, "k2") sc.persist(seq1, "k1") # should be in persistence self.assertEqual(sc.PERSIST.guid_valid("k1"), 0) self.assertEqual(sc.PERSIST.guid_valid("k2"), 1) res = sc.mcompare("k1") self.assertEqual(res["invalid"], 0) res = res["neighbours"] self.assertEqual(len(res), 0) res = sc.mcompare("k2") self.assertEqual(res["invalid"], 1) res = res["neighbours"] self.assertEqual(len(res), 0) class test_cw_seqComparer_csh(setup_ref): """tests the computation of a hash of a compressed object""" def runTest(self): # generate compressed sequences refSeq = "ACTG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) compressed_sequence = sc.compress(sequence="TTAA") res = sc.compressed_sequence_hash(compressed_sequence) self.assertEqual(res, "6ce0e55c4ab092f560e03c5d2de53098") class test_cw_seqComparer_epp(setup_ref): """tests estimate_expected_proportion, a function computing the proportion of Ns expected based on the median Ns in a list of sequences""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) res = sc.estimate_expected_proportion([]) self.assertTrue(res is None) res = sc.estimate_expected_proportion(["AA", "AA"]) self.assertTrue(res is None) res = sc.estimate_expected_proportion(["AA", "AA", "AA"]) self.assertTrue(res is not None) self.assertTrue(res == 0) res = sc.estimate_expected_proportion(["AAN", "AAN", "AAN"]) self.assertTrue(res is not None) self.assertAlmostEqual(res, 1 / 3) class test_cw_seqComparer_verify_1(setup_ref): """tests verification when there is no data""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) res = sc.verify_insertion("noguid") self.assertEqual(res["status"], "Neither") self.assertEqual(res["n_updated"], 0) class test_cw_seqComparer_verify_2(setup_ref): """tests verification when there is data in both catwalk and the database""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", 
"cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) seq1 = sc.compress(refSeq) sc.persist(seq1, "k1") res = sc.verify_insertion("k1") self.assertEqual(res["status"], "Both") self.assertEqual(res["n_updated"], 0) class test_cw_seqComparer_verify_3(setup_ref): """tests verification when there is data in both catwalk and the database. There is one link.""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) seq1 = sc.compress(refSeq) sc.persist(seq1, "k1") sc.persist(seq1, "k2") res = sc.verify_insertion("k1") self.assertEqual(res["status"], "Both") self.assertEqual(res["n_updated"], 0) class test_cw_seqComparer_verify_4(setup_ref): """tests verification when there is data in both catwalk and the database. There should be one link but there is not.""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) seq1 = sc.compress(refSeq) sc.persist(seq1, "k1") sc.persist( seq1, "k2", unittesting_omit_link=True ) # don't add links. 
This will be recgonisied as an issue by verify_insertion res = sc.verify_insertion("k1") self.assertEqual(res["status"], "Both") self.assertEqual(res["n_updated"], 1) class test_cw_seqComparer_verify_5(setup_ref): """tests verification when there is data in database but not catwalk.""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) seq1 = sc.compress(refSeq) sc.persist(seq1, "k1") sc.persist(seq1, "k2") self.assertEqual(set(sc.catWalk.sample_names()), set(["k1", "k2"])) # delete k1 from catwalk to create a 'catwalk failure' sc.catWalk.remove_sample("k1") self.assertEqual(set(sc.catWalk.sample_names()), set(["k2"])) res = sc.verify_insertion("k1") self.assertEqual(res["status"], "Database, not catwalk") self.assertEqual( set(sc.catWalk.sample_names()), set(["k1", "k2"]) ) # see https://gitea.mmmoxford.uk/dvolk/catwalk/issues/4 self.assertEqual(res["n_updated"], 0) class test_cw_seqComparer_verify_6(setup_ref): """tests verification when there is data in catwalk but not the database.""" def runTest(self): refSeq = "GGGGGGGGGGGG" sc = cw_seqComparer( PERSIST=UNITTEST_RDBMSCONN, unittesting=True, maxNs=1e8, reference=refSeq, snpCeiling=10, preComparer_parameters={ "selection_cutoff": 20, "uncertain_base": "M", "over_selection_cutoff_ignore_factor": 5, "catWalk_parameters": { "bind_port": 5999, "bind_host": "localhost", "cw_binary_filepath": None, "reference_name": "H37RV", "reference_filepath": self.inputfile, "mask_filepath": "reference/TB-exclude-adaptive.txt", }, }, ) seq1 = sc.compress(refSeq) sc.catWalk.add_sample_from_refcomp("k1", seq1) self.assertEqual(set(sc.catWalk.sample_names()), set(["k1"])) res = sc.verify_insertion("k1") self.assertEqual(res["status"], "Catwalk only") self.assertEqual( set(sc.catWalk.sample_names()), set([]) ) # see https://gitea.mmmoxford.uk/dvolk/catwalk/issues/4 self.assertEqual(res["n_updated"], 0)
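

if __name__ == "__main__":
    # Allow the test module to be run directly; the tests assume a local
    # catwalk binary and the in-memory sqlite connection configured above.
    unittest.main()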
py
1a5aa2368b07f31512d7a4634388a6f3028cd512
""" A backend to export DXF using a custom DXF renderer. This allows saving of DXF figures. Use as a matplotlib external backend: import matplotlib matplotlib.use('module://mpldxf.backend_dxf') or register: matplotlib.backend_bases.register_backend('dxf', FigureCanvasDxf) Based on matplotlib.backends.backend_template.py. Copyright (C) 2014 David M Kent Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import os import sys import matplotlib from matplotlib.backend_bases import (RendererBase, FigureCanvasBase, GraphicsContextBase) import ezdxf from . import dxf_colors # When packaged with py2exe ezdxf has issues finding its templates # We tell it where to find them using this. # Note we also need to make sure they get packaged by adding them to the # configuration in setup.py if hasattr(sys, 'frozen'): ezdxf.options.template_dir = os.path.dirname(sys.executable) def rgb_to_dxf(rgb_val): """Convert an RGB[A] colour to DXF colour index. ``rgb_val`` should be a tuple of values in range 0.0 - 1.0. Any alpha value is ignored. """ if rgb_val is not None: dxfcolor = dxf_colors.nearest_index([255.0 * val for val in rgb_val[:3]]) else: dxfcolor = dxf_colors.BLACK return dxfcolor class RendererDxf(RendererBase): """ The renderer handles drawing/rendering operations. Renders the drawing using the ``ezdxf`` package. """ def __init__(self, width, height, dpi, dxfversion): RendererBase.__init__(self) self.height = height self.width = width self.dpi = dpi self.dxfversion = dxfversion self._init_drawing() def _init_drawing(self): """Create a drawing, set some global information and add the layers we need. """ drawing = ezdxf.new(dxfversion=self.dxfversion) modelspace = drawing.modelspace() drawing.header['$EXTMIN'] = (0, 0, 0) drawing.header['$EXTMAX'] = (self.width, self.height, 0) self.drawing = drawing self.modelspace = modelspace def clear(self): """Reset the renderer.""" super(RendererDxf, self).clear() self._init_drawing() def draw_path(self, gc, path, transform, rgbFace=None): """Draw a path. To do this we need to decide which DXF entity is most appropriate for the path. We choose from lwpolylines or hatches. 
""" dxfcolor = rgb_to_dxf(rgbFace) for vertices in path.to_polygons(transform=transform): if rgbFace is not None and vertices.shape[0] > 2: # we have a face color so we draw a filled polygon, # in DXF this means a HATCH entity hatch = self.modelspace.add_hatch(dxfcolor) with hatch.edit_boundary() as editor: editor.add_polyline_path(vertices) else: # A non-filled polygon or a line - use LWPOLYLINE entity attrs = { 'color': dxfcolor, } self.modelspace.add_lwpolyline(vertices, attrs) def draw_image(self, gc, x, y, im): pass def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None): fontsize = self.points_to_pixels(prop.get_size_in_points()) dxfcolor = rgb_to_dxf(gc.get_rgb()) text = self.modelspace.add_text(s.encode('ascii', 'ignore'), { 'height': fontsize, 'rotation': angle, 'color': dxfcolor, }) halign = self._map_align(mtext.get_ha(), vert=False) valign = self._map_align(mtext.get_va(), vert=True) align = valign if align: align += '_' align += halign p1 = x, y p2 = (x - 50, y) text.set_pos(p1, p2=p2, align=align) def _map_align(self, align, vert=False): """Translate a matplotlib text alignment to the ezdxf alignment.""" if align in ['right', 'center', 'left', 'top', 'bottom', 'middle']: align = align.upper() elif align == 'baseline': align = '' else: raise NotImplementedError if vert and align == 'CENTER': align = 'MIDDLE' return align def flipy(self): return False def get_canvas_width_height(self): return self.width, self.height def new_gc(self): return GraphicsContextBase() def points_to_pixels(self, points): return points / 72.0 * self.dpi class FigureCanvasDxf(FigureCanvasBase): """ A canvas to use the renderer. This only implements enough of the API to allow the export of DXF to file. """ #: The DXF version to use. This can be set to another version #: supported by ezdxf if desired. DXFVERSION = 'AC1015' def get_dxf_renderer(self, cleared=False): """Get a renderer to use. Will create a new one if we don't alreadty have one or if the figure dimensions or resolution have changed. """ l, b, w, h = self.figure.bbox.bounds key = w, h, self.figure.dpi try: self._lastKey, self.dxf_renderer except AttributeError: need_new_renderer = True else: need_new_renderer = (self._lastKey != key) if need_new_renderer: self.dxf_renderer = RendererDxf(w, h, self.figure.dpi, self.DXFVERSION) self._lastKey = key elif cleared: self.dxf_renderer.clear() return self.dxf_renderer def draw(self): """ Draw the figure using the renderer """ renderer = self.get_dxf_renderer() self.figure.draw(renderer) return renderer.drawing # Add DXF to the class-scope filetypes dictionary filetypes = FigureCanvasBase.filetypes.copy() filetypes['dxf'] = 'DXF' def print_dxf(self, filename, *args, **kwargs): """ Write out a DXF file. """ drawing = self.draw() drawing.saveas(filename) def get_default_filetype(self): return 'dxf' ######################################################################## # # Now just provide the standard names that backend.__init__ is expecting # ######################################################################## FigureCanvas = FigureCanvasDxf
py
1a5aa244acf1b8f0fd66d394311e8658833f3a92
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import LineByLineTextDataset, TextDataset from .dcn_language_modeling import PinyinShuffleLineByLineTextDataset
py
1a5aa330cc8a2f76c40b4f5089aac56608f7268b
"""File for Azure Event Hub models.""" from __future__ import annotations from dataclasses import dataclass import logging from azure.eventhub.aio import EventHubProducerClient, EventHubSharedKeyCredential from .const import ADDITIONAL_ARGS, CONF_EVENT_HUB_CON_STRING _LOGGER = logging.getLogger(__name__) @dataclass class AzureEventHubClient: """Class for the Azure Event Hub client. Use from_input to initialize.""" event_hub_instance_name: str @property def client(self) -> EventHubProducerClient: """Return the client.""" async def test_connection(self) -> None: """Test connection, will throw EventHubError when it cannot connect.""" async with self.client as client: await client.get_eventhub_properties() @classmethod def from_input(cls, **kwargs) -> AzureEventHubClient: """Create the right class.""" if CONF_EVENT_HUB_CON_STRING in kwargs: return AzureEventHubClientConnectionString(**kwargs) return AzureEventHubClientSAS(**kwargs) @dataclass class AzureEventHubClientConnectionString(AzureEventHubClient): """Class for Connection String based Azure Event Hub Client.""" event_hub_connection_string: str @property def client(self) -> EventHubProducerClient: """Return the client.""" return EventHubProducerClient.from_connection_string( conn_str=self.event_hub_connection_string, eventhub_name=self.event_hub_instance_name, **ADDITIONAL_ARGS, ) @dataclass class AzureEventHubClientSAS(AzureEventHubClient): """Class for SAS based Azure Event Hub Client.""" event_hub_namespace: str event_hub_sas_policy: str event_hub_sas_key: str @property def client(self) -> EventHubProducerClient: """Get a Event Producer Client.""" return EventHubProducerClient( fully_qualified_namespace=f"{self.event_hub_namespace}.servicebus.windows.net", eventhub_name=self.event_hub_instance_name, credential=EventHubSharedKeyCredential( # type: ignore policy=self.event_hub_sas_policy, key=self.event_hub_sas_key ), **ADDITIONAL_ARGS, )
py
1a5aa396367e45ee2d5c45120207dfe6c1998a20
from django.test import TestCase from silverstrike.forms import ReconcilationForm from silverstrike.models import Account, Transaction class ReconcilationFormTests(TestCase): def setUp(self): self.account = Account.objects.create(name='personal') self.account.set_initial_balance(100) def test_same_balance(self): form = ReconcilationForm({'balance': '100', 'title': 'meh'}, account=self.account.id) self.assertFalse(form.is_valid()) def test_increasing_balance(self): form = ReconcilationForm({'balance': '150', 'title': 'meh'}, account=self.account.id) self.assertTrue(form.is_valid()) self.assertEqual(self.account.balance, 100) form.save() self.assertEqual(self.account.balance, 150) self.assertEqual(Transaction.objects.last().amount, 50) self.assertEqual(Transaction.objects.last().transaction_type, Transaction.SYSTEM) def test_decreasing_balance(self): form = ReconcilationForm({'balance': '50', 'title': 'meh'}, account=self.account.id) self.assertTrue(form.is_valid()) self.assertEqual(self.account.balance, 100) form.save() self.assertEqual(self.account.balance, 50) self.assertEqual(Transaction.objects.last().amount, 50) self.assertEqual(Transaction.objects.last().transaction_type, Transaction.SYSTEM)
py
1a5aa39904e33b8235a464e5fff80706aa0c8069
# coding=utf-8 # Copyright 2018 David Mack # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path from absl import logging from absl import flags FLAGS = flags.FLAGS from compare_gan.metrics import eval_task import tensorflow as tf import tensorflow_gan as tfgan import imageio import math import numpy as np flags.DEFINE_string( "example_dir", "./examples", "Where to save generated image examples") flags.DEFINE_integer( "example_count", 100, "How many generated image examples to save") class SaveExamplesTask(): """Quick and dirty image saver.""" _LABEL = "save_examples" def merge(self, images, size): h, w = images.shape[1], images.shape[2] if (images.shape[3] in (3,4)): c = images.shape[3] img = np.zeros((h * size[0], w * size[1], c)) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w, :] = image return img elif images.shape[3]==1: img = np.zeros((h * size[0], w * size[1])) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0] return img else: raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4') def run_after_session(self, fake_dset, real_dest, step, force_label=None): tf.io.gfile.makedirs(FLAGS.example_dir) n_images = fake_dset.images.shape[0] if force_label is not None: label_str = "force_label_" + str(force_label) else: label_str = "all_labels" # for i in range(min(n_images, FLAGS.example_count)): # filename = os.path.join(FLAGS.example_dir, step, label_str + '_%03d.png' % i) # with tf.io.gfile.GFile(filename, 'w') as file: # imageio.imwrite(file, fake_dset.images[i], format='png') grid_size = (int(math.sqrt(n_images))+1, int(math.sqrt(n_images))) grid = self.merge(fake_dset.images, grid_size) filename = os.path.join(FLAGS.example_dir, step + '_' + label_str + '_grid.png') with tf.io.gfile.GFile(filename, 'w') as file: imageio.imwrite(file, grid, format='png') return {self._LABEL: FLAGS.example_count}
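

# Example of the grid layout produced by merge() (illustrative sketch): given
# N images of shape (H, W, C), merge() tiles them row-major into an array of
# shape (size[0]*H, size[1]*W, C); run_after_session() picks a near-square
# grid. The random data below is a placeholder.
#
#     import numpy as np
#     imgs = np.random.rand(6, 8, 8, 3)             # 6 RGB tiles of 8x8
#     grid = SaveExamplesTask().merge(imgs, (2, 3))  # -> shape (16, 24, 3)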
py
1a5aa444a5d58c32d0e3821ad50ac82338f566e9
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import unicode_literals import unittest import os import json from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType from pymatgen.electronic_structure.dos import CompleteDos, DOS from pymatgen.util.testing import PymatgenTest test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", 'test_files') class DosTest(unittest.TestCase): def setUp(self): with open(os.path.join(test_dir, "complete_dos.json"), "r") as f: self.dos = CompleteDos.from_dict(json.load(f)) def test_get_gap(self): dos = self.dos self.assertAlmostEqual(dos.get_gap(), 2.0589, 4) self.assertEqual(len(dos.energies), 301) self.assertAlmostEqual(dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0], 2.16815942458015, 7) self.assertAlmostEqual(dos.get_cbm_vbm(), (3.8729, 1.8140000000000001)) self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.up], 1.744588888888891, 7) self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.down], 1.756888888888886, 7) self.assertRaises(ValueError, dos.get_interpolated_value, 1000) def test_get_smeared_densities(self): dos = self.dos smeared = dos.get_smeared_densities(0.2) dens = dos.densities for spin in Spin: self.assertAlmostEqual(sum(dens[spin]), sum(smeared[spin])) class CompleteDosTest(unittest.TestCase): def setUp(self): with open(os.path.join(test_dir, "complete_dos.json"), "r") as f: self.dos = CompleteDos.from_dict(json.load(f)) def test_get_gap(self): dos = self.dos self.assertAlmostEqual(dos.get_gap(), 2.0589, 4, "Wrong gap from dos!") self.assertEqual(len(dos.energies), 301) self.assertAlmostEqual(dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0], 2.16815942458015, 7) spd_dos = dos.get_spd_dos() self.assertEqual(len(spd_dos), 3) el_dos = dos.get_element_dos() self.assertEqual(len(el_dos), 4) sum_spd = spd_dos[OrbitalType.s] + spd_dos[OrbitalType.p] + spd_dos[OrbitalType.d] sum_element = None for pdos in el_dos.values(): if sum_element is None: sum_element = pdos else: sum_element += pdos #The sums of the SPD or the element doses should be the same. 
self.assertTrue((abs(sum_spd.energies - sum_element.energies) < 0.0001).all()) self.assertTrue((abs(sum_spd.densities[Spin.up] - sum_element.densities[Spin.up]) < 0.0001).all()) self.assertTrue((abs(sum_spd.densities[Spin.down] - sum_element.densities[Spin.down]) < 0.0001).all()) site = dos.structure[0] self.assertIsNotNone(dos.get_site_dos(site)) self.assertAlmostEqual(sum(dos.get_site_dos(site).get_densities( Spin.up)), 2.0391) self.assertAlmostEqual(sum(dos.get_site_dos(site).get_densities( Spin.down)), 2.0331999999999995) self.assertIsNotNone(dos.get_site_orbital_dos(site, Orbital.s)) egt2g = dos.get_site_t2g_eg_resolved_dos(site) self.assertAlmostEqual(sum(egt2g["e_g"].get_densities(Spin.up)), 0.0) self.assertAlmostEqual(sum(egt2g["t2g"].get_densities(Spin.up)), 0.0) egt2g = dos.get_site_t2g_eg_resolved_dos(dos.structure[4]) self.assertAlmostEqual(sum(egt2g["e_g"].get_densities(Spin.up)), 15.004399999999997) self.assertAlmostEqual(sum(egt2g["t2g"].get_densities(Spin.up)), 22.910399999999999) self.assertAlmostEqual(dos.get_cbm_vbm(), (3.8729, 1.8140000000000001)) self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.up], 1.744588888888891, 7) self.assertAlmostEqual(dos.get_interpolated_value(9.9)[Spin.down], 1.756888888888886, 7) self.assertRaises(ValueError, dos.get_interpolated_value, 1000) def test_to_from_dict(self): d = self.dos.as_dict() dos = CompleteDos.from_dict(d) el_dos = dos.get_element_dos() self.assertEqual(len(el_dos), 4) spd_dos = dos.get_spd_dos() sum_spd = spd_dos[OrbitalType.s] + spd_dos[OrbitalType.p] + spd_dos[OrbitalType.d] sum_element = None for pdos in el_dos.values(): if sum_element is None: sum_element = pdos else: sum_element += pdos #The sums of the SPD or the element doses should be the same. self.assertTrue((abs(sum_spd.energies - sum_element.energies) < 0.0001).all()) def test_str(self): self.assertIsNotNone(str(self.dos)) class DOSTest(PymatgenTest): def setUp(self): with open(os.path.join(test_dir, "complete_dos.json"), "r") as f: d = json.load(f) y = list(zip(d["densities"]["1"], d["densities"]["-1"])) self.dos = DOS(d["energies"], y, d["efermi"]) def test_get_gap(self): dos = self.dos self.assertAlmostEqual(dos.get_gap(), 2.0589, 4) self.assertEqual(len(dos.x), 301) self.assertAlmostEqual(dos.get_interpolated_gap(tol=0.001, abs_tol=False, spin=None)[0], 2.16815942458015, 7) self.assertArrayAlmostEqual(dos.get_cbm_vbm(), (3.8729, 1.8140000000000001)) self.assertAlmostEqual(dos.get_interpolated_value(9.9)[0], 1.744588888888891, 7) self.assertAlmostEqual(dos.get_interpolated_value(9.9)[1], 1.756888888888886, 7) self.assertRaises(ValueError, dos.get_interpolated_value, 1000) self.assertArrayAlmostEqual(dos.get_cbm_vbm(spin=Spin.up), (3.8729, 1.2992999999999999)) self.assertArrayAlmostEqual(dos.get_cbm_vbm(spin=Spin.down), (4.645, 1.8140000000000001)) if __name__ == '__main__': unittest.main()
py
1a5aa51c7de928c1a7f328e8cb6b7584c82339f6
#!/usr/bin/env python """ @package mi.dataset.parser.flord_l_wfp @file marine-integrations/mi/dataset/parser/flord_l_wfp.py @author Joe Padula @brief Particle for the flord_l_wfp dataset driver NOTE: there is no parser class in this file. This dataset is using the parser in global_wfp_e_file_parser.py. Release notes: Initial Release """ __author__ = 'Joe Padula' __license__ = 'Apache 2.0' # noinspection PyUnresolvedReferences import ntplib import struct from mi.core.log import get_logger log = get_logger() from mi.core.common import BaseEnum from mi.core.instrument.dataset_data_particle import DataParticle ############## # There is no parser in this file, this dataset uses the parser in global_wfp_e_file_parser.py ############## class DataParticleType(BaseEnum): """ The output particle/record stream for the recovered data, as identified in the flord_l_wfp IDD. """ INSTRUMENT = 'flord_l_wfp_instrument_recovered' class FlordLWfpInstrumentParserDataParticleKey(BaseEnum): """ The names of the instrument particle parameters in the DataParticleType.INSTRUMENT stream. """ RAW_SIGNAL_CHL = 'raw_signal_chl' # corresponds to 'chl' from E file RAW_SIGNAL_BETA = 'raw_signal_beta' # corresponds to 'ntu' from E file RAW_INTERNAL_TEMP = 'raw_internal_temp' # corresponds to 'temperature' from E file WFP_TIMESTAMP = 'wfp_timestamp' class FlordLWfpInstrumentParserDataParticle(DataParticle): """ Class for parsing data from the flord_l_wfp data set """ _data_particle_type = DataParticleType.INSTRUMENT def _build_parsed_values(self): """ Take something in the binary data format and turn it into an array of dictionaries defining the data in the particle with the appropriate tag. Note that parse_chunks() in global_wfp_e_file_parser.py will set the data in raw_data. @throws SampleException If there is a problem with sample creation """ fields_prof = struct.unpack('>I f f f f f h h h', self.raw_data) result = [self._encode_value(FlordLWfpInstrumentParserDataParticleKey.RAW_SIGNAL_CHL, fields_prof[6], int), self._encode_value(FlordLWfpInstrumentParserDataParticleKey.RAW_SIGNAL_BETA, fields_prof[7], int), self._encode_value(FlordLWfpInstrumentParserDataParticleKey.RAW_INTERNAL_TEMP, fields_prof[8], int), self._encode_value(FlordLWfpInstrumentParserDataParticleKey.WFP_TIMESTAMP, fields_prof[0], int)] return result
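

# Example of the expected raw_data layout (illustrative sketch): each record
# handed to _build_parsed_values() is assumed to be a 30-byte E-file block
# matching the '>I f f f f f h h h' format above (a 4-byte timestamp, five
# floats, then the three 2-byte raw sensor values). A synthetic record for
# testing could be packed the same way; the values below are placeholders.
#
#     import struct
#     raw = struct.pack('>I f f f f f h h h',
#                       3675426600, 0.0, 0.0, 0.0, 0.0, 0.0, 53, 2129, 575)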
py
1a5aa5bac58fc09443351f670b9bfc7c89bfc7ef
# Generated by Django 3.1 on 2020-08-17 19:40 from django.db import migrations, models import django.db.models.deletion import wagtail.core.fields class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0052_pagelogentry'), ('blog', '0001_initial'), ] operations = [ migrations.CreateModel( name='BlogPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.page')), ('date', models.DateField(verbose_name='Post date')), ('intro', models.CharField(max_length=250)), ('body', wagtail.core.fields.RichTextField(blank=True)), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), ]
py
1a5aa7603e1feadaee2422168c2ca1ce6e4dc6d6
from __future__ import print_function """ Before running this script there is a dependency for reading WMO BUFR: pip install pybufrkit This library has a way to work within the python script but as of now it works using a command line interface. It is important to download the files from the ftp using binary mode! This was tested on python 2.7 using operational files """ def convert_bufr_hd(bufr_file, path_out=None, VERSION=1): """ This function will parse a WMO BUFR sounding using a pure python toolkit: PyBufrKit The parsing will be done using a query over the bufr using tables included in the package. Parameters ---------- bufr_file : str Only the bufr file name. path_out : str (optional) The full path of the out file. VERSION : int (optional) Which version is used to get the final file name. It could be 1 that is calculated from the launching time or 2 that is extracted from the file header. The default is 2. """ import os import sys import subprocess from datetime import datetime, timedelta # This will parse only the WMO station number to check if # we have to process the file or return region = subprocess.check_output( 'pybufrkit query 001001 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] number = subprocess.check_output( 'pybufrkit query 001002 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] wmo_code = region + number if wmo_code == '87344': directory = 'COR/' print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file)) print('The station is {wmo_code}: Cordoba Aero'.format(wmo_code=wmo_code)) elif wmo_code == '87155': directory = 'SIS/' print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file)) print('The station is {wmo_code}: Resistencia Aero'.format(wmo_code=wmo_code)) elif wmo_code == '87244': directory = 'VMRS/' print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file)) print('The station is {wmo_code}: Villa Maria del Rio Seco'.format(wmo_code=wmo_code)) elif wmo_code == '87418': directory = 'MDZ/' print('Reading BUFR {bufr_file}'.format(bufr_file=bufr_file)) print('The station is {wmo_code}: Mendoza Aero'.format(wmo_code=wmo_code)) elif wmo_code == '87576': print('The station is {wmo_code}: Ezeiza Aero, do not process'.format(wmo_code=wmo_code)) print() return else: print('Do not care about station {wmo_code}'.format(wmo_code=wmo_code)) print() return if not os.path.exists(path_out+directory): os.makedirs(path_out+directory) # Now we parse the date and time of the baloon launching year = subprocess.check_output( 'pybufrkit query 004001 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] month = subprocess.check_output( 'pybufrkit query 004002 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] day = subprocess.check_output( 'pybufrkit query 004003 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] hour = subprocess.check_output( 'pybufrkit query 004004 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] minute = subprocess.check_output( 'pybufrkit query 004005 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] second = subprocess.check_output( 'pybufrkit query 004006 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] time_of_launch = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)) print('The time of launching was: {time_of_launch}'.format(time_of_launch=time_of_launch)) if VERSION == 1: time_of_sounding = 
datetime(time_of_launch.year, time_of_launch.month, time_of_launch.day, time_of_launch.hour, 0, 0) # In order to have the correct output file name we have to check # and correct the time of launching. # If it is around the hour (5 minutes after and before) # we can assume the launching was within # an hourly schedule and we are ok with the time_of_sounding being # the same as time_of_launch. # But as the other soundings (i.e. with regular or tri-hourly schedules) # are launched way before the hour we have to add one hour to the # time_of_launch. # Either way if the sounding was supposed to be 12UTC it may be the case # that the time_of_launch still is between if (time_of_launch.minute > 5) and (time_of_launch.minute < 55): time_of_sounding = time_of_sounding + timedelta(hours=1) # if time_of_sounding.hour == 11: # time_of_sounding = time_of_sounding + timedelta(hours=1) # elif time_of_sounding.hour == 23: # time_of_sounding = time_of_sounding + timedelta(hours=1) elif VERSION == 2: year = subprocess.check_output( 'pybufrkit info {bufr_file} | grep year'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2].split(' ')[-1] month = subprocess.check_output( 'pybufrkit info {bufr_file} | grep month'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2].split(' ')[-1] day = subprocess.check_output( 'pybufrkit info {bufr_file} | grep day'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2].split(' ')[-1] hour = subprocess.check_output( 'pybufrkit info {bufr_file} | grep hour'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2].split(' ')[-1] minute = subprocess.check_output( 'pybufrkit info {bufr_file} | grep minute'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2].split(' ')[-1] second = subprocess.check_output( 'pybufrkit info {bufr_file} | grep second'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2].split(' ')[-1] time_of_sounding = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second)) print('The datetime of the file name will be: {time_of_sounding}'.format(time_of_sounding=time_of_sounding)) file_name = time_of_sounding.strftime('%y%m%d_%H_{wmo_code}.lst'.format(wmo_code=wmo_code)) # print(file_name) # Here we can add the sounding site to the path file_name_path = '{path_out}{directory}{file_name}'.format(path_out=path_out, directory=directory, file_name=file_name) if os.path.exists(file_name_path): print('Already did {file_name_path}'.format(file_name_path=file_name_path)) print() return # pressure in Pa pressure = subprocess.check_output( 'pybufrkit query 007004 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] # geopotential height height = subprocess.check_output( 'pybufrkit query 010009 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] # Temperature in K temp = subprocess.check_output( 'pybufrkit query 012101 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] # Dew point temperature in K temp_dew = subprocess.check_output( 'pybufrkit query 012103 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] # Wind direction in degrees dir_v = subprocess.check_output( 'pybufrkit query 011001 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] # Wind speed in m/s vel_v = subprocess.check_output( 'pybufrkit query 011002 {bufr_file}'.format(bufr_file=bufr_file), shell=True).decode().split('\n')[-2] pressure = [float(x)/100 for x in pressure.split(',')] 
height = [int(x) for x in height.split(',')] temp = [round(float(x)-273.15, 2) for x in temp.split(',')] # convert to Celsius temp_dew = [round(float(x)-273.15, 2) for x in temp_dew.split(',')] # convert to Celsius dir_v = [int(x) for x in dir_v.split(',')] vel_v = [round(float(x)*1.94384, 4) for x in vel_v.split(',')] # convert to kt print('Starting to write in: {file_name_path}'.format(file_name_path=file_name_path)) with open('{file_name_path}'.format(file_name_path=file_name_path), 'w') as fid: for p, h, t, td, dv, vv in zip(pressure, height, temp, temp_dew, dir_v, vel_v): fid.write('\t{p},\t{h},\t{t},\t{td},\t{dv},\t{vv}\n'.format(p=p, h=h, t=t, td=td, dv=dv, vv=vv)) print('Finished writing the csv') print() ################################################# # Example of usage with glob # we might move the processed bufr to another directory # this import needs to happens at the beginning of the file # from __future__ import print_function import os import sys import glob # Check input parameters script = sys.argv[0] if len(sys.argv) != 3: print('Useage: {script} [path_in] [path_out]'.format(script=script)) quit() path_in = sys.argv[1] path_out = sys.argv[2] print('path_in = {path_in}'.format(path_in=path_in)) print('path_out = {path_out}'.format(path_out=path_out)) bufr_list = glob.glob('{path_in}/radiosondeos_*'.format(path_in=path_in)) for bufr_file in bufr_list: convert_bufr_hd(bufr_file, path_out) # then move bufr already processed os.rename('{bufr_file}'.format(bufr_file=bufr_file), '{path_in}/processed/{bufr_file}'.format(path_in=path_in, bufr_file=bufr_file.split("/")[-1]))
py
1a5aa84e75fc8435d760950d3cc075a3b838603d
""" Default (fixed) hyperparameter values used in Neural network model """ from ....constants import BINARY, MULTICLASS, REGRESSION def get_fixed_params(): """ Parameters that currently cannot be searched during HPO """ fixed_params = { 'num_epochs': 500, # maximum number of epochs for training NN 'epochs_wo_improve': 20, # we terminate training if validation performance hasn't improved in the last 'epochs_wo_improve' # of epochs # TODO: Epochs could take a very long time, we may want smarter logic than simply # of epochs without improvement (slope, difference in score, etc.) 'seed_value': None, # random seed for reproducibility (set = None to ignore) # For data processing: 'proc.embed_min_categories': 4, # apply embedding layer to categorical features with at least this many levels. Features with fewer levels are one-hot encoded. Choose big value to avoid use of Embedding layers # Options: [3,4,10, 100, 1000] 'proc.impute_strategy': 'median', # # strategy argument of sklearn.SimpleImputer() used to impute missing numeric values # Options: ['median', 'mean', 'most_frequent'] 'proc.max_category_levels': 100, # maximum number of allowed levels per categorical feature # Options: [10, 100, 200, 300, 400, 500, 1000, 10000] 'proc.skew_threshold': 0.99, # numerical features whose absolute skewness is greater than this receive special power-transform preprocessing. Choose big value to avoid using power-transforms # Options: [0.2, 0.3, 0.5, 0.8, 1.0, 10.0, 100.0] # Old params: These are now set based off of nthreads_per_trial, ngpus_per_trial. # 'num_dataloading_workers': 1, # Will be overwritten by nthreads_per_trial, can be >= 1 # 'ctx': mx.cpu(), # Will be overwritten by ngpus_per_trial if unspecified (can alternatively be: mx.gpu()) } return fixed_params def get_hyper_params(): """ Parameters that currently can be tuned during HPO """ hyper_params = { ## Hyperparameters for neural net architecture: 'network_type': 'widedeep', # Type of neural net used to produce predictions # Options: ['widedeep', 'feedforward'] 'layers': None, # List of widths (num_units) for each hidden layer (Note: only specifies hidden layers. These numbers are not absolute, they will also be scaled based on number of training examples and problem type) # Options: List of lists that are manually created 'numeric_embed_dim': None, # Size of joint embedding for all numeric+one-hot features. # Options: integer values between 10-10000 'activation': 'relu', # Activation function # Options: ['relu', 'softrelu', 'tanh', 'softsign'] 'max_layer_width': 2056, # maximum number of hidden units in network layer (integer > 0) # Does not need to be searched by default 'embedding_size_factor': 1.0, # scaling factor to adjust size of embedding layers (float > 0) # Options: range[0.01 - 100] on log-scale 'embed_exponent': 0.56, # exponent used to determine size of embedding layers based on # categories. 'max_embedding_dim': 100, # maximum size of embedding layer for a single categorical feature (int > 0). ## Regression-specific hyperparameters: 'y_range': None, # Tuple specifying whether (min_y, max_y). Can be = (-np.inf, np.inf). # If None, inferred based on training labels. Note: MUST be None for classification tasks! 'y_range_extend': 0.05, # Only used to extend size of inferred y_range when y_range = None. ## Hyperparameters for neural net training: 'use_batchnorm': True, # whether or not to utilize Batch-normalization # Options: [True, False] 'dropout_prob': 0.1, # dropout probability, = 0 turns off Dropout. 
# Options: range(0.0, 0.5) 'batch_size': 512, # batch-size used for NN training # Options: [32, 64, 128. 256, 512, 1024, 2048] 'loss_function': None, # MXNet loss function minimized during training 'optimizer': 'adam', # MXNet optimizer to use. # Options include: ['adam','sgd'] 'learning_rate': 3e-4, # learning rate used for NN training (float > 0) 'weight_decay': 1e-6, # weight decay regularizer (float > 0) 'clip_gradient': 100.0, # gradient clipping threshold (float > 0) 'momentum': 0.9, # momentum which is only used for SGD optimizer 'lr_scheduler': None, # If not None, string specifying what type of learning rate scheduler to use (may override learning_rate). # Options: [None, 'cosine', 'step', 'poly', 'constant'] # Below are hyperparameters specific to the LR scheduler (only used if lr_scheduler != None). For more info, see: https://gluon-cv.mxnet.io/api/utils.html#gluoncv.utils.LRScheduler 'base_lr': 3e-5, # smallest LR (float > 0) 'target_lr': 1.0, # largest LR (float > 0) 'lr_decay': 0.1, # step factor used to decay LR (float in (0,1)) 'warmup_epochs': 10, # number of epochs at beginning of training in which LR is linearly ramped up (float > 1). ## Feature-specific hyperparameters: 'use_ngram_features': False, # If False, will drop automatically generated ngram features from language features. This results in worse model quality but far faster inference and training times. # Options: [True, False] } return hyper_params # Note: params for original NNTabularModel were: # weight_decay=0.01, dropout_prob = 0.1, batch_size = 2048, lr = 1e-2, epochs=30, layers= [200, 100] (semi-equivalent to our layers = [100],numeric_embed_dim=200) def get_default_param(problem_type, num_classes=None): if problem_type == BINARY: return get_param_binary() elif problem_type == MULTICLASS: return get_param_multiclass(num_classes=num_classes) elif problem_type == REGRESSION: return get_param_regression() else: return get_param_binary() def get_param_multiclass(num_classes): params = get_fixed_params() params.update(get_hyper_params()) return params def get_param_binary(): params = get_fixed_params() params.update(get_hyper_params()) return params def get_param_regression(): params = get_fixed_params() params.update(get_hyper_params()) return params
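

# Example usage (illustrative sketch): a caller is expected to fetch the
# merged fixed + searchable defaults for a problem type and then override
# individual keys; the override values below are arbitrary placeholders.
#
#     params = get_default_param(BINARY)
#     params.update({'learning_rate': 1e-3, 'dropout_prob': 0.2})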
py
1a5aa860f4acd0c7944ba0420881946ddd33f191
""" The Ravel backend PostgreSQL database """ import psycopg2 from ravel.log import logger from ravel.util import resource_file ISOLEVEL = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT BASE_SQL = resource_file("ravel/sql/base.sql") FLOW_SQL = resource_file("ravel/sql/flows.sql") NOFLOW_SQL = resource_file("ravel/sql/noflows.sql") TOPO_SQL = resource_file("ravel/sql/topo.sql") AUXILIARY_FUN_SQL = resource_file("ravel/sql/auxiliary_functions.sql") class RavelDb(): """A representation of Ravel's backend PostgreSQL database.""" def __init__(self, name, user, base, passwd=None, reconnect=False): """name: the name of the database to connect to user: the username to use to connect base: a file containing the SQL implementation for Ravel's base passwd: the password to connect to the database reconnect: true to connect to an existing database setup, false to load a new instance of Ravel's base into the database""" self.name = name self.user = user self.passwd = passwd self.base = base self.cleaned = not reconnect self._cursor = None self._conn = None if not reconnect and self.num_connections() > 0: logger.warning("existing connections to database, skipping reinit") self.cleaned = False elif not reconnect: self.init() self.cleaned = True @property def conn(self): "returns: a psycopg2 connection to the PostgreSQL database" if not self._conn or self._conn.closed: self._conn = psycopg2.connect(database=self.name, user=self.user, password=self.passwd) self._conn.set_isolation_level(ISOLEVEL) return self._conn @property def cursor(self): """returns: a psycopg2 cursor from RavelDb.conn for the PostgreSQL database""" if not self._cursor or self._cursor.closed: self._cursor = self.conn.cursor() return self._cursor def num_connections(self): """Returns the number of existing connections to the database. If there are >1 connections, a new Ravel base implementation cannot be loaded into the database. returns: the number of existing connections to the database""" try: self.cursor.execute("SELECT * FROM pg_stat_activity WHERE " "datname='{0}'".format(self.name)) # ignore cursor connection return len(self.cursor.fetchall()) - 1 except psycopg2.DatabaseError as e: logger.warning("error loading schema: %s", self.fmt_errmsg(e)) return 0 def init(self): """Initialize the database with the base Ravel SQL implementation. 
Removes any existing Ravel objects from the database"""
        self.clean()
        self.create()
        self.add_extensions()
        self.load_schema(self.base)

    def load_schema(self, script):
        """Load the specified schema into the database.
           script: path to a SQL script"""
        try:
            s = open(script, "r").read()
            logger.debug("loaded schema %s", script)
            self.cursor.execute(s)
        except psycopg2.DatabaseError as e:
            logger.warning("error loading schema: %s", self.fmt_errmsg(e))

    def load_topo(self, provider):
        """Load a topology from the specified network provider
           provider: a ravel.network.NetworkProvider instance"""
        topo = provider.topo
        try:
            node_count = 0
            nodes = {}
            for sw in topo.switches():
                node_count += 1
                dpid = provider.getNodeByName(sw).dpid
                ip = provider.getNodeByName(sw).IP()
                mac = provider.getNodeByName(sw).MAC()
                nodes[sw] = node_count
                self.cursor.execute("INSERT INTO switches (sid, dpid, ip, mac, name) "
                                    "VALUES ({0}, '{1}', '{2}', '{3}', '{4}');"
                                    .format(node_count, dpid, ip, mac, sw))

            for host in topo.hosts():
                node_count += 1
                ip = provider.getNodeByName(host).IP()
                mac = provider.getNodeByName(host).MAC()
                nodes[host] = node_count
                self.cursor.execute("INSERT INTO hosts (hid, ip, mac, name) "
                                    "VALUES ({0}, '{1}', '{2}', '{3}');"
                                    .format(node_count, ip, mac, host))

            for link in topo.links():
                h1,h2 = link
                if h1 in topo.switches() and h2 in topo.switches():
                    ishost = 0
                else:
                    ishost = 1

                sid = nodes[h1]
                nid = nodes[h2]
                self.cursor.execute("INSERT INTO tp(sid, nid, ishost, isactive) "
                                    "VALUES ({0}, {1}, {2}, {3});"
                                    .format(sid, nid, ishost, 1))

                # bidirectional edges
                self.cursor.execute("INSERT INTO tp(sid, nid, ishost, isactive) "
                                    "VALUES ({1}, {0}, {2}, {3});"
                                    .format(sid, nid, ishost, 1))

                self.cursor.execute("INSERT INTO ports(sid, nid, port) "
                                    "VALUES ({0}, {1}, {2}), ({1}, {0}, {3});"
                                    .format(sid, nid,
                                            topo.port(h1, h2)[0],
                                            topo.port(h1, h2)[1]))
        except psycopg2.DatabaseError as e:
            logger.warning("error loading topology: %s", self.fmt_errmsg(e))

    def create(self):
        """If not created, create a database with the name specified in
           the constructor"""
        conn = None
        try:
            conn = psycopg2.connect(database="postgres", user=self.user,
                                    password=self.passwd)
            conn.set_isolation_level(ISOLEVEL)
            cursor = conn.cursor()
            cursor.execute("SELECT datname FROM pg_database WHERE " +
                           "datistemplate = false;")
            fetch = cursor.fetchall()

            dblist = [fetch[i][0] for i in range(len(fetch))]
            if self.name not in dblist:
                cursor.execute("CREATE DATABASE %s;" % self.name)
                logger.debug("created database %s", self.name)
        except psycopg2.DatabaseError as e:
            logger.warning("error creating database: %s", self.fmt_errmsg(e))
        finally:
            conn.close()

    def add_extensions(self):
        """If not already added, add extensions required by Ravel
           (plpython3u, postgis, pgrouting, plsh)"""
        try:
            self.cursor.execute("SELECT 1 FROM pg_catalog.pg_namespace n JOIN " +
                                "pg_catalog.pg_proc p ON pronamespace = n.oid " +
                                "WHERE proname = 'pgr_dijkstra';")
            fetch = self.cursor.fetchall()

            if fetch == []:
                self.cursor.execute("CREATE EXTENSION IF NOT EXISTS plpython3u;")
                self.cursor.execute("CREATE EXTENSION IF NOT EXISTS postgis;")
                self.cursor.execute("CREATE EXTENSION IF NOT EXISTS pgrouting;")
                self.cursor.execute("CREATE EXTENSION plsh;")
                logger.debug("created extensions")
        except psycopg2.DatabaseError as e:
            logger.warning("error loading extensions: %s", self.fmt_errmsg(e))

    def clean(self):
        """Clean the database of any existing Ravel components"""
        # close existing connections
        self.conn.close()

        conn = None
        try:
            conn = psycopg2.connect(database="postgres", user=self.user,
                                    password=self.passwd)
            conn.set_isolation_level(ISOLEVEL)
            cursor = conn.cursor()
            cursor.execute("drop database %s" % self.name)
        except psycopg2.DatabaseError as e:
            logger.warning("error cleaning database: %s", self.fmt_errmsg(e))
        finally:
            if conn:
                conn.close()

    def truncate(self):
        """Clean the database of any Ravel state, except for the topology
           tables.  This rolls back the database to the state just after
           the topology was first loaded"""
        try:
            tables = ["cf", "clock", "p_spv", "spatial_ref_sys", "spv_tb_del",
                      "spv_tb_ins", "rm", "rm_delta", "urm"]
            self.cursor.execute("truncate %s;" % ", ".join(tables))
            logger.debug("truncated tables")
            self.cursor.execute("INSERT INTO clock values (0);")
        except psycopg2.DatabaseError as e:
            logger.warning("error truncating tables: %s", self.fmt_errmsg(e))

    def fmt_errmsg(self, exception):
        return str(exception).strip()
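

# Example usage (illustrative sketch): connecting to a local PostgreSQL
# instance and loading the base Ravel schema on construction. The database
# name, user and password are placeholders; BASE_SQL is the bundled
# ravel/sql/base.sql resolved at the top of this module.
#
#     db = RavelDb("ravel", "ravel", BASE_SQL, passwd="secret")
#     print(db.num_connections())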