ext     : string (9 classes)
sha     : string (40 characters)
content : string (3 to 1.04M characters)
py
1a41739a95a089d8c3f5a045b712d5d7cf411b0e
""" Load local .env from CWD or path, if provided Current format for the `.env` file supports strings only and is parsed in the following order: - Each seperate line is considered a new possible key/value set - Each set is delimted by the first `=` found - Leading and trailing whitespace are removed - Matched leading/trailing single quotes or double quotes will be stripped from values (not keys). I'm open to suggestions on standards to follow here. Author : Preocts <Preocts#8196> Git Repo: https://github.com/Preocts/secretbox """ import logging import re from secretbox.loader import Loader class EnvFileLoader(Loader): """Load local .env file""" LT_DBL_QUOTES = r'^".*"$' LT_SGL_QUOTES = r"^'.*'$" EXPORT_PREFIX = r"^\s*?export\s" logger = logging.getLogger(__name__) def load_values(self, **kwargs: str) -> bool: """ Loads local .env from cwd or path, if provided Keywords: filename : [str] Alternate filename to load over `.env` """ filename = kwargs.get("filename", ".env") self.logger.debug("Reading vars from '%s'", filename) try: with open(filename, "r", encoding="utf-8") as input_file: self.parse_env_file(input_file.read()) except FileNotFoundError: return False return True def parse_env_file(self, input_file: str) -> None: """Parses env file into key-pair values""" for line in input_file.split("\n"): if not line or line.strip().startswith("#") or len(line.split("=", 1)) != 2: continue key, value = line.split("=", 1) key = self.strip_export(key).strip() value = value.strip() if value.startswith('"'): value = self.remove_lt_dbl_quotes(value) elif value.startswith("'"): value = self.remove_lt_sgl_quotes(value) self.loaded_values[key] = value def remove_lt_dbl_quotes(self, in_: str) -> str: """Removes matched leading and trailing double quotes""" return in_.strip('"') if re.match(self.LT_DBL_QUOTES, in_) else in_ def remove_lt_sgl_quotes(self, in_: str) -> str: """Removes matched leading and trailing double quotes""" return in_.strip("'") if re.match(self.LT_SGL_QUOTES, in_) else in_ def strip_export(self, in_: str) -> str: """Removes leading 'export ' prefix, case agnostic""" return re.sub(self.EXPORT_PREFIX, "", in_, flags=re.IGNORECASE)
py
1a41760f8814ef69a10811ce858f434a8831fd73
# coding: utf-8 from __future__ import unicode_literals import json import re import time from .common import InfoExtractor from ..compat import ( compat_urlparse, compat_HTTPError, ) from ..utils import ( USER_AGENTS, ExtractorError, int_or_none, unified_strdate, remove_end, update_url_query, ) class DPlayIE(InfoExtractor): _VALID_URL = r'https?://(?P<domain>www\.dplay\.(?:dk|se|no))/[^/]+/(?P<id>[^/?#]+)' _TESTS = [{ # non geo restricted, via secure api, unsigned download hls URL 'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/', 'info_dict': { 'id': '3172', 'display_id': 'season-1-svensken-lar-sig-njuta-av-livet', 'ext': 'mp4', 'title': 'Svensken lär sig njuta av livet', 'description': 'md5:d3819c9bccffd0fe458ca42451dd50d8', 'duration': 2650, 'timestamp': 1365454320, 'upload_date': '20130408', 'creator': 'Kanal 5 (Home)', 'series': 'Nugammalt - 77 händelser som format Sverige', 'season_number': 1, 'episode_number': 1, 'age_limit': 0, }, }, { # geo restricted, via secure api, unsigned download hls URL 'url': 'http://www.dplay.dk/mig-og-min-mor/season-6-episode-12/', 'info_dict': { 'id': '70816', 'display_id': 'season-6-episode-12', 'ext': 'mp4', 'title': 'Episode 12', 'description': 'md5:9c86e51a93f8a4401fc9641ef9894c90', 'duration': 2563, 'timestamp': 1429696800, 'upload_date': '20150422', 'creator': 'Kanal 4 (Home)', 'series': 'Mig og min mor', 'season_number': 6, 'episode_number': 12, 'age_limit': 0, }, }, { # geo restricted, via direct unsigned hls URL 'url': 'http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = mobj.group('id') domain = mobj.group('domain') webpage = self._download_webpage(url, display_id) video_id = self._search_regex( r'data-video-id=["\'](\d+)', webpage, 'video id') info = self._download_json( 'http://%s/api/v2/ajax/videos?video_id=%s' % (domain, video_id), video_id)['data'][0] title = info['title'] PROTOCOLS = ('hls', 'hds') formats = [] def extract_formats(protocol, manifest_url): if protocol == 'hls': m3u8_formats = self._extract_m3u8_formats( manifest_url, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id=protocol, fatal=False) # Sometimes final URLs inside m3u8 are unsigned, let's fix this # ourselves. Also fragments' URLs are only served signed for # Safari user agent. 
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(manifest_url).query) for m3u8_format in m3u8_formats: m3u8_format.update({ 'url': update_url_query(m3u8_format['url'], query), 'http_headers': { 'User-Agent': USER_AGENTS['Safari'], }, }) formats.extend(m3u8_formats) elif protocol == 'hds': formats.extend(self._extract_f4m_formats( manifest_url + '&hdcore=3.8.0&plugin=flowplayer-3.8.0.0', video_id, f4m_id=protocol, fatal=False)) domain_tld = domain.split('.')[-1] if domain_tld in ('se', 'dk', 'no'): for protocol in PROTOCOLS: # Providing dsc-geo allows to bypass geo restriction in some cases self._set_cookie( 'secure.dplay.%s' % domain_tld, 'dsc-geo', json.dumps({ 'countryCode': domain_tld.upper(), 'expiry': (time.time() + 20 * 60) * 1000, })) stream = self._download_json( 'https://secure.dplay.%s/secure/api/v2/user/authorization/stream/%s?stream_type=%s' % (domain_tld, video_id, protocol), video_id, 'Downloading %s stream JSON' % protocol, fatal=False) if stream and stream.get(protocol): extract_formats(protocol, stream[protocol]) # The last resort is to try direct unsigned hls/hds URLs from info dictionary. # Sometimes this does work even when secure API with dsc-geo has failed (e.g. # http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/). if not formats: for protocol in PROTOCOLS: if info.get(protocol): extract_formats(protocol, info[protocol]) self._sort_formats(formats) subtitles = {} for lang in ('se', 'sv', 'da', 'nl', 'no'): for format_id in ('web_vtt', 'vtt', 'srt'): subtitle_url = info.get('subtitles_%s_%s' % (lang, format_id)) if subtitle_url: subtitles.setdefault(lang, []).append({'url': subtitle_url}) return { 'id': video_id, 'display_id': display_id, 'title': title, 'description': info.get('video_metadata_longDescription'), 'duration': int_or_none(info.get('video_metadata_length'), scale=1000), 'timestamp': int_or_none(info.get('video_publish_date')), 'creator': info.get('video_metadata_homeChannel'), 'series': info.get('video_metadata_show'), 'season_number': int_or_none(info.get('season')), 'episode_number': int_or_none(info.get('episode')), 'age_limit': int_or_none(info.get('minimum_age')), 'formats': formats, 'subtitles': subtitles, } class DPlayItIE(InfoExtractor): _VALID_URL = r'https?://it\.dplay\.com/[^/]+/[^/]+/(?P<id>[^/?#]+)' _GEO_COUNTRIES = ['IT'] _TEST = { 'url': 'http://it.dplay.com/nove/biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij/', 'md5': '2b808ffb00fc47b884a172ca5d13053c', 'info_dict': { 'id': '6918', 'display_id': 'luigi-di-maio-la-psicosi-di-stanislawskij', 'ext': 'mp4', 'title': 'Biografie imbarazzanti: Luigi Di Maio: la psicosi di Stanislawskij', 'description': 'md5:3c7a4303aef85868f867a26f5cc14813', 'thumbnail': r're:^https?://.*\.jpe?g', 'upload_date': '20160524', 'series': 'Biografie imbarazzanti', 'season_number': 1, 'episode': 'Luigi Di Maio: la psicosi di Stanislawskij', 'episode_number': 1, }, } def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) info_url = self._search_regex( r'url\s*[:=]\s*["\']((?:https?:)?//[^/]+/playback/videoPlaybackInfo/\d+)', webpage, 'video id') title = remove_end(self._og_search_title(webpage), ' | Dplay') try: info = self._download_json( info_url, display_id, headers={ 'Authorization': 'Bearer %s' % self._get_cookies(url).get( 'dplayit_token').value, 'Referer': url, }) except ExtractorError as e: if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 403): info = 
self._parse_json(e.cause.read().decode('utf-8'), display_id) error = info['errors'][0] if error.get('code') == 'access.denied.geoblocked': self.raise_geo_restricted( msg=error.get('detail'), countries=self._GEO_COUNTRIES) raise ExtractorError(info['errors'][0]['detail'], expected=True) raise hls_url = info['data']['attributes']['streaming']['hls']['url'] formats = self._extract_m3u8_formats( hls_url, display_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls') series = self._html_search_regex( r'(?s)<h1[^>]+class=["\'].*?\bshow_title\b.*?["\'][^>]*>(.+?)</h1>', webpage, 'series', fatal=False) episode = self._search_regex( r'<p[^>]+class=["\'].*?\bdesc_ep\b.*?["\'][^>]*>\s*<br/>\s*<b>([^<]+)', webpage, 'episode', fatal=False) mobj = re.search( r'(?s)<span[^>]+class=["\']dates["\'][^>]*>.+?\bS\.(?P<season_number>\d+)\s+E\.(?P<episode_number>\d+)\s*-\s*(?P<upload_date>\d{2}/\d{2}/\d{4})', webpage) if mobj: season_number = int(mobj.group('season_number')) episode_number = int(mobj.group('episode_number')) upload_date = unified_strdate(mobj.group('upload_date')) else: season_number = episode_number = upload_date = None return { 'id': info_url.rpartition('/')[-1], 'display_id': display_id, 'title': title, 'description': self._og_search_description(webpage), 'thumbnail': self._og_search_thumbnail(webpage), 'series': series, 'season_number': season_number, 'episode': episode, 'episode_number': episode_number, 'upload_date': upload_date, 'formats': formats, }
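The extractor above is only exercised through youtube-dl's normal machinery; a hedged sketch of driving it via the public `YoutubeDL` API, using one of the test URLs listed above and fetching metadata only:

import youtube_dl

url = 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/'
with youtube_dl.YoutubeDL() as ydl:
    # extract_info with download=False runs DPlayIE._real_extract and returns the info dict
    info = ydl.extract_info(url, download=False)
    print(info.get('title'), info.get('duration'))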
py
1a4176dab3f079013b48a64339bef001f9401ede
import os import sys import pathlib import time import shutil try: import pymake except: msg = "Error. Pymake package is not available.\n" msg += "Try installing using the following command:\n" msg += " pip install https://github.com/modflowpy/pymake/zipball/master" raise Exception(msg) try: import flopy except: msg = "Error. FloPy package is not available.\n" msg += "Try installing using the following command:\n" msg += " pip install flopy" raise Exception(msg) from simulation import Simulation from targets import target_dict as target_dict def get_example_directory(base, fdir, subdir="mf6"): exdir = None for root, dirs, files in os.walk(base): for d in dirs: if d.startswith(fdir): exdir = os.path.abspath(os.path.join(root, d, subdir)) break if exdir is not None: break return exdir # find path to modflow6-testmodels or modflow6-testmodels.git directory home = os.path.expanduser("~") print("$HOME={}".format(home)) fdir = "modflow6-testmodels" exdir = get_example_directory(home, fdir, subdir="mf5to6") if exdir is None: p = pathlib.Path(os.getcwd()) home = os.path.abspath(pathlib.Path(*p.parts[:2])) print("$HOME={}".format(home)) exdir = get_example_directory(home, fdir, subdir="mf5to6") if exdir is not None: assert os.path.isdir(exdir) sfmt = "{:25s} - {}" def get_mf5to6_models(): """ Get a list of test models """ # list of example files to exclude exclude = [ "test1ss_ic1", "test9.5-3layer", "testmm2", "testmm3", "testmmSimple", "testps3a", "testTwri", "testTwrip", "test028_sfr_simple", ] # write a summary of the files to exclude print("list of tests to exclude:") for idx, ex in enumerate(exclude): print(" {}: {}".format(idx + 1, ex)) # build list of directories with valid example files if exdir is not None: dirs = [ d for d in os.listdir(exdir) if "test" in d and d not in exclude ] # sort in numerical order for case sensitive os dirs = sorted(dirs, key=lambda v: (v.upper(), v[0].islower())) else: dirs = [] # determine if only a selection of models should be run select_dirs = None select_packages = None for idx, arg in enumerate(sys.argv): if arg.lower() == "--sim": if len(sys.argv) > idx + 1: select_dirs = sys.argv[idx + 1 :] break elif arg.lower() == "--pak": if len(sys.argv) > idx + 1: select_packages = sys.argv[idx + 1 :] select_packages = [item.upper() for item in select_packages] break # determine if the selection of model is in the test models to evaluate if select_dirs is not None: found_dirs = [] for d in select_dirs: if d in dirs: found_dirs.append(d) dirs = found_dirs if len(dirs) < 1: msg = "Selected models not available in test" print(msg) # determine if the specified package(s) is in the test models to evaluate if select_packages is not None: found_dirs = [] for d in dirs: pth = os.path.join(exdir, d) namefiles = pymake.get_namefiles(pth) ftypes = [] for namefile in namefiles: for pak in select_packages: ftype = pymake.get_entries_from_namefile( namefile, ftype=pak ) for t in ftype: if t[1] is not None: if t[1] not in ftypes: ftypes.append(t[1].upper()) if len(ftypes) > 0: ftypes = [item.upper() for item in ftypes] for pak in select_packages: if pak in ftypes: found_dirs.append(d) break dirs = found_dirs if len(dirs) < 1: msg = "Selected packages not available [" for idx, pak in enumerate(select_packages): msg += "{}".format(pak) if idx + 1 < len(select_packages): msg += ", " msg += "]" print(msg) return dirs def run_mf5to6(sim): """ Run the MODFLOW 6 simulation and compare to existing head file or appropriate MODFLOW-2005, MODFLOW-NWT, MODFLOW-USG, or MODFLOW-LGR run. 
""" src = os.path.join(exdir, sim.name) dst = os.path.join("temp", "working") # set default version version = "mf2005" lgrpth = None # determine if compare directory exists in directory or if mflgr control # file is in directory listdir = os.listdir(src) for value in listdir: fpth = os.path.join(src, value) if os.path.isfile(fpth): ext = os.path.splitext(fpth)[1] if ".lgr" in ext.lower(): version = "mflgr" lgrpth = fpth elif os.path.isdir(fpth): if "compare" in value.lower() or "cmp" in value.lower(): compare = True cpth = value msg = "Copying {} files to working directory".format(version) # copy lgr files to working directory if lgrpth is not None: print(msg) npth = lgrpth pymake.setup(lgrpth, dst) # copy modflow 2005, NWT, or USG files to working directory else: print(msg) npths = pymake.get_namefiles(src) if len(npths) < 1: msg = "No name files in {}".format(src) print(msg) assert False npth = npths[0] pymake.setup(npth, dst) # read ftype from name file to set modflow version if version != "mflgr": lines = [line.rstrip("\n") for line in open(npth)] for line in lines: if len(line) < 1: continue t = line.split() ftype = t[0].upper() if ftype == "NWT" or ftype == "UPW": version = "mfnwt" break elif ftype == "SMS" or ftype == "DISU": version = "mfusg" break # run converter exe = os.path.abspath(target_dict["mf5to6"]) msg = sfmt.format("using executable", exe) print(msg) nmsg = "Program terminated normally" try: nam = os.path.basename(npth) success, buff = flopy.run_model( exe, nam, model_ws=dst, silent=False, report=True, normal_msg=nmsg, cargs="mf6", ) msg = sfmt.format("MODFLOW 5 to 6 run", nam) if success: print(msg) else: print("ERROR: " + msg) except: msg = sfmt.format("MODFLOW 5 to 6 run", nam) print("ERROR: " + msg) success = False assert success, msg # standard setup src = dst dst = os.path.join("temp", sim.name) sim.setup(src, dst) # clean up temp/working directory (src) if os.path.exists(src): msg = "Removing {} directory".format(src) print(msg) shutil.rmtree(src) time.sleep(0.5) # standard comparison run sim.run() sim.compare() sim.teardown() def test_model(): # determine if test directory exists dirtest = dir_avail() if not dirtest: return # get a list of test models to run dirs = get_mf5to6_models() # run the test models for dir in dirs: yield run_mf5to6, Simulation(dir, mf6_regression=True) return def dir_avail(): avail = False if exdir is not None: avail = os.path.isdir(exdir) if not avail: print('"{}" does not exist'.format(exdir)) print("no need to run {}".format(os.path.basename(__file__))) return avail def main(): # write message tnam = os.path.splitext(os.path.basename(__file__))[0] msg = "Running {} test".format(tnam) print(msg) # get name of current file module_name = sys.modules[__name__].__file__ # determine if test directory exists dirtest = dir_avail() if not dirtest: return # get a list of test models to run dirs = get_mf5to6_models() # run the test models for dir in dirs: sim = Simulation(dir, mf6_regression=True) run_mf5to6(sim) return if __name__ == "__main__": print("standalone run of {}".format(os.path.basename(__file__))) delFiles = True for idx, arg in enumerate(sys.argv): if arg.lower() == "--keep": if len(sys.argv) > idx + 1: delFiles = False break # run main routine main()
py
1a4176e9162f9c35bf3f755c774a715087bed333
import Plugins.Plugin
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigInteger, ConfigSubList, ConfigSubDict, ConfigText, configfile, ConfigYesNo
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_LANGUAGE, SCOPE_PLUGINS
import os, gettext

currentmcversion = "099"
currentmcplatform = "sh4"

config.plugins.mc_favorites = ConfigSubsection()
config.plugins.mc_favorites.foldercount = ConfigInteger(0)
config.plugins.mc_favorites.folders = ConfigSubList()

config.plugins.mc_globalsettings = ConfigSubsection()
config.plugins.mc_globalsettings.showinmainmenu = ConfigYesNo(default=True)
config.plugins.mc_globalsettings.showinextmenu = ConfigYesNo(default=False)
config.plugins.mc_globalsettings.currentversion = ConfigInteger(0, (0, 999))
config.plugins.mc_globalsettings.currentplatform = ConfigText(default = currentmcplatform)
config.plugins.mc_globalsettings.currentversion.value = currentmcversion
config.plugins.mc_globalsettings.currentplatform.value = currentmcplatform

PluginLanguageDomain = "HDMUMediaCenter"
PluginLanguagePath = "Extensions/BMediaCenter/locale"

# Load Language
def localeInit():
    lang = language.getLanguage()[:2]
    os.environ["LANGUAGE"] = lang
    gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))

def _(txt):
    t = gettext.dgettext(PluginLanguageDomain, txt)
    if t == txt:
        t = gettext.gettext(txt)
    return t

localeInit()
language.addCallback(localeInit)

# Favorite Folders
def addFavoriteFolders():
    i = len(config.plugins.mc_favorites.folders)
    config.plugins.mc_favorites.folders.append(ConfigSubsection())
    config.plugins.mc_favorites.folders[i].name = ConfigText("", False)
    config.plugins.mc_favorites.folders[i].basedir = ConfigText("/", False)
    config.plugins.mc_favorites.foldercount.value = i+1
    return i

for i in list(range(0, config.plugins.mc_favorites.foldercount.value)):
    addFavoriteFolders()

# VLC PLAYER CONFIG
config.plugins.mc_vlc = ConfigSubsection()
config.plugins.mc_vlc.lastDir = ConfigText(default="")
config.plugins.mc_vlc.foldercount = ConfigInteger(0)
config.plugins.mc_vlc.folders = ConfigSubList()
config.plugins.mc_vlc.vcodec = ConfigSelection({"mp1v": "MPEG1", "mp2v": "MPEG2"}, "mp2v")
config.plugins.mc_vlc.vb = ConfigInteger(1000, (100, 9999))
config.plugins.mc_vlc.acodec = ConfigSelection({"mpga":"MP1", "mp2a": "MP2", "mp3": "MP3"}, "mp2a")
config.plugins.mc_vlc.ab = ConfigInteger(128, (64, 320))
config.plugins.mc_vlc.samplerate = ConfigSelection({"0":"as Input", "44100": "44100", "48000": "48000"}, "0")
config.plugins.mc_vlc.channels = ConfigInteger(2, (1, 9))
config.plugins.mc_vlc.width = ConfigSelection(["352", "704", "720"])
config.plugins.mc_vlc.height = ConfigSelection(["288", "576"])
config.plugins.mc_vlc.fps = ConfigInteger(25, (1, 99))
config.plugins.mc_vlc.aspect = ConfigSelection(["none", "16:9", "4:3"], "none")
config.plugins.mc_vlc.soverlay = ConfigYesNo()
config.plugins.mc_vlc.checkdvd = ConfigYesNo(True)
config.plugins.mc_vlc.notranscode = ConfigYesNo(False)

config.plugins.mc_vlc.servercount = ConfigInteger(0)
config.plugins.mc_vlc.servers = ConfigSubList()

def addVlcServerConfig():
    i = len(config.plugins.mc_vlc.servers)
    config.plugins.mc_vlc.servers.append(ConfigSubsection())
    config.plugins.mc_vlc.servers[i].host = ConfigText("", False)
    config.plugins.mc_vlc.servers[i].httpport = ConfigInteger(8080, (0, 65535))
    config.plugins.mc_vlc.servers[i].basedir = ConfigText("/", False)
    config.plugins.mc_vlc.servercount.value = i+1
    return i

for i in list(range(0, config.plugins.mc_vlc.servercount.value)):
    addVlcServerConfig()
py
1a4176f794b51a685c8873d64c712372e9d79a6b
""" Statistical Quantities of Interest """ import numpy as np from quantumnetworks.analysis.quantities.base import SystemQuantity class Identity(SystemQuantity): def calculate(self, xs: np.ndarray) -> np.ndarray: return xs class Average(SystemQuantity): def calculate(self, xs: np.ndarray) -> np.ndarray: return np.average(xs, axis=1) class Std(SystemQuantity): def calculate(self, xs: np.ndarray) -> np.ndarray: return np.std(xs, axis=1)
py
1a417835ef3dcf503b0c6dc4ae5cced8b93b663b
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 11:34:57 2021

@author: SethHarden
"""

import math
import heapq

def maxCandies(arr, k):
    bags = []
    minutes = k
    # push each bag onto a max-heap (values negated, since heapq is a min-heap)
    for i in arr:
        heapq.heappush(bags, -i)
    # running total of candies eaten
    answer = 0
    # while we have time and there are bags
    while minutes > 0 and bags:
        # pop the largest remaining bag and undo the negation
        max_candy = abs(heapq.heappop(bags))
        answer += max_candy
        # half of the bag (rounded down) goes back into the heap
        heapq.heappush(bags, -(max_candy // 2))
        minutes -= 1
    return answer

def printInteger(n):
    print('[', n, ']', sep='', end='')

test_case_number = 1

def check(expected, output):
    global test_case_number
    result = False
    if expected == output:
        result = True
    rightTick = '\u2713'
    wrongTick = '\u2717'
    if result:
        print(rightTick, 'Test #', test_case_number, sep='')
    else:
        print(wrongTick, 'Test #', test_case_number, ': Expected ', sep='', end='')
        printInteger(expected)
        print(' Your output: ', end='')
        printInteger(output)
        print()
    test_case_number += 1

if __name__ == "__main__":
    n_1, k_1 = 5, 3
    arr_1 = [2, 1, 7, 4, 2]
    expected_1 = 14
    output_1 = maxCandies(arr_1, k_1)
    check(expected_1, output_1)

    n_2, k_2 = 9, 3
    arr_2 = [19, 78, 76, 72, 48, 8, 24, 74, 29]
    expected_2 = 228
    output_2 = maxCandies(arr_2, k_2)
    check(expected_2, output_2)

    # Add your own test cases here
py
1a417924a20896ffa25ad550a19d9abbeb42d0d0
#!/usr/bin/env python # Copyright (C) 2001-2021 Artifex Software, Inc. # All Rights Reserved. # # This software is provided AS-IS with no warranty, either express or # implied. # # This software is distributed under license and may not be copied, # modified or distributed except as expressly authorized under the terms # of the license contained in the file LICENSE in this distribution. # # Refer to licensing information at http://www.artifex.com or contact # Artifex Software, Inc., 1305 Grant Avenue - Suite 200, Novato, # CA 94945, U.S.A., +1(415)492-9861, for further information. # # This script analyzes the output of Ghostscript run with -Z67. # Its primary purpose is detecting memory leaks. USAGE = """\ Usage: python memory.py z67trace > report where z67trace is the output of gs -Z67""" HELP = """\ An example of usage: gs -Z67 somefile.ps >& somefile.log python memory.py somefile.log > somefile.report """ __author__ = 'L Peter Deutsch' import re from cStringIO import StringIO from difflib import SequenceMatcher #---------------- Memory representation ----------------# class struct(object): # Instance variables: # address (int) - the address of the object pass class Store(object): def __init__(self): self.memories = [] def totals(self): o = s = a = n = 0 for memory in self.memories: for chunk in memory.chunks: o += chunk.otop - chunk.obot s += chunk.stop - chunk.sbot for obj in chunk.objects: if not obj.isfree: n += 1 a += obj.size return '%d object space (%d objects, %d total size), %d strings' % \ (o, n, a, s) def compare(self, store): ml, sml = [[(m.address, m.space, m.level) \ for m in s.memories] for s in [self, store]] if ml != sml: return 'Memory lists differ' buf = StringIO() for m, sm in zip(self.memories, store.memories): buf.write('Memory 0x%x, space = %d, level = %d:\n' % \ (m.address, m.space, m.level)) buf.write(m.compare(sm)) return buf.getvalue() class Memory(struct): def __init__(self, store, address, space, level): self.address, self.space, self.level = address, space, level self.chunks = [] if store: store.memories.append(self) def compare(self, memory): buf = StringIO() cdict = dict([(c.address, c) for c in self.chunks]) mcdict = dict([(c.address, c) for c in memory.chunks]) for a in cdict.keys(): if a not in mcdict: buf.write('Freed: ') buf.write(cdict[a].listing()) for a in mcdict.keys(): if a not in cdict: buf.write('Added: ') buf.write(mcdict[a].listing()) for a, c in cdict.items(): if a in mcdict: buf.write(c.compare(mcdict[a])) return buf.getvalue() class Chunk(struct): # obot, otop, sbot, stop correspond to chunk_t.cbase, cbot, ctop, climit. def __init__(self, memory, address, obot, otop, sbot, stop, cend): self.address = address self.obot, self.otop = obot, otop self.sbot, self.stop = sbot, stop self.cend = cend self.objects = [] if memory: memory.chunks.append(self) def compare(self, chunk): buf = StringIO() o, s = self.otop - self.obot, self.stop - self.sbot co, cs = chunk.otop - chunk.obot, chunk.stop - chunk.sbot if co != o or cs != s: buf.write('objects %+d, strings %+d' % (co - o, cs - s)) buf.write('\n') # Use difflib to find the differences between the two chunks. 
seq1 = [b.content for b in self.objects if not b.isfree] seq2 = [b.content for b in chunk.objects if not b.isfree] m = SequenceMatcher(None, seq1, seq2) pi = pj = 0 for i, j, n in m.get_matching_blocks(): while pi < i: buf.write('- %s\n' % self.objects[pi]) pi += 1 while pj < j: buf.write('+ %s\n' % chunk.objects[pj]) pj += 1 pi, pj = pi + n, pj + n if buf.tell() > 1: return 'Chunk 0x%x: ' % self.address + buf.getvalue() else: return '' def listing(self): buf = StringIO() buf.write('chunk at 0x%x: %d used, %d free \n' % \ (self.address, sum([o.size for o in self.objects if not o.isfree]), sum([o.size for o in self.objects if o.isfree]))) for obj in self.objects: buf.write(' %s\n' % obj) return buf.getvalue() class block(struct): content = property(lambda b: (b.name, b.size)) def __init__(self, chunk, address, size): self.address, self.size = address, size if chunk: chunk.objects.append(self) def __str__(self): return '0x%x: %s (%d)' % (self.address, self.name, self.size) class Object(block): isfree = False def __init__(self, chunk, name, address, size): self.name = name block.__init__(self, chunk, address, size) class Free(block): isfree = True name = '(free)' #---------------- Log reader ----------------# # Parse the log entries produced by -Z67. res_hex = '0x([0-9a-f]+)' res_dec = '([-0-9]+)' re_memory = re.compile(r'validating memory %s, space %s, [^0-9]*%s' % \ (res_hex, res_dec, res_dec)) re_chunk = re.compile(r'validating chunk %s \(%s\.\.%s, %s\.\.%s\.\.%s\)$' % \ (6 * (res_hex,))) re_object = re.compile(r'validating ([^(]+)\(%s\) %s$' % \ (res_dec, res_hex)) re_free = re.compile(r'validating \(free\)\(%s\) %s$' % \ (res_dec, res_hex)) class Log: def __init__(self): self.stores = [] def readlog(self, fname): # Read a log produced by -Z67. Each separate validation trace is a # separate instance of Store. Note that each GC produces two # Stores, one from pre-validation, one from post-validation. f, store = file(fname), None memory = chunk = None for line in f: line = line.strip() if line.startswith('[6]validating memory '): addr, space, level = re_memory.match(line[3:]).groups() if not store: store = Store() memory = Memory(store, int(addr, 16), int(space), int(level)) chunk = None elif line.startswith('[6]validating chunk '): cvalues = re_chunk.match(line[3:]).groups() chunk = Chunk(memory, *[int(v, 16) for v in cvalues]) elif line.startswith('[7]validating (free)'): size, addr = re_free.match(line[3:]).groups() Free(chunk, int(addr, 16), int(size)) elif line.startswith('[7]validating '): name, size, addr = re_object.match(line[3:]).groups() Object(chunk, name, int(addr, 16), int(size)) elif line[2:].startswith(']validating'): print '**** unknown:', line elif _is_end_trace(line): self.stores.append(store) store = None f.close() def compare(self, which = slice(3, -2, 2)): buf = StringIO() stores = self.stores indices = range(*which.indices(len(stores))) for i1, i2 in zip(indices[:-1], indices[1:]): buf.write('Comparing %d and %d\n' % (i1, i2)) for j in [i1, i2]: buf.write('%3d: %s\n' % (j, stores[j].totals())) buf.write(stores[i1].compare(stores[i2])) buf.write(64 * '-' + '\n') for j in [indices[0], indices[-1]]: buf.write('%3d: %s\n' % (j, stores[j].totals())) return buf.getvalue() def _is_end_trace(line): return line.startswith('[6]---------------- end ') and \ line.endswith('validate pointers ----------------') #---------------- Main program ----------------# def main(argv): args = argv[1:] if len(args) != 1: print 'Use --help for usage information.' 
return if args[0] == '--help': print USAGE print HELP return log = Log() log.readlog(args[0]) print len(log.stores), 'stores' print log.compare() if __name__ == '__main__': import sys sys.exit(main(sys.argv) or 0)
py
1a4179402833d67f93a867e2a2b086cb1d8ebdb0
from __future__ import absolute_import

INPUT_FILE_NAME = 'inputs.pb'
OUTPUT_FILE_NAME = 'outputs.pb'
FUTURES_FILE_NAME = 'futures.pb'
ERROR_FILE_NAME = 'error.pb'


class SdkTaskType(object):
    PYTHON_TASK = "python-task"
    DYNAMIC_TASK = "dynamic-task"
    CONTAINER_ARRAY_TASK = "container_array"
    SPARK_TASK = "spark"

    # Hive is a multi-step operation:
    #   1. A generator task that generates the hive-job to be executed by the operator. The generator
    #      task is called a hive task for backward compatibility (Note: it is a "batch-task" with a
    #      different name).
    #   2. hive-job is the actual set of queries to be executed. This is called hive_job.
    BATCH_HIVE_TASK = "batch_hive"
    HIVE_JOB = "hive"
    SIDECAR_TASK = "sidecar"
    SENSOR_TASK = "sensor-task"


GLOBAL_INPUT_NODE_ID = ''


class CloudProvider(object):
    AWS = "aws"
    GCP = "gcp"
py
1a4179726432d4c605218c23a5a61c1dc8650108
""" A module implementing EOPatch merging utility Credits: Copyright (c) 2018-2020 William Ouellette Copyright (c) 2017-2020 Matej Aleksandrov, Matej Batič, Grega Milčinski, Matic Lubej, Devis Peresutti (Sinergise) Copyright (c) 2017-2020 Nejc Vesel, Jovan Višnjić, Anže Zupanc (Sinergise) This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. """ import functools import warnings from collections.abc import Callable import numpy as np import pandas as pd from geopandas import GeoDataFrame from .constants import FeatureType from .utilities import FeatureParser def merge_eopatches(*eopatches, features=..., time_dependent_op=None, timeless_op=None): """ Merge features of given EOPatches into a new EOPatch :param eopatches: Any number of EOPatches to be merged together :type eopatches: EOPatch :param features: A collection of features to be merged together. By default all features will be merged. :type features: object :param time_dependent_op: An operation to be used to join data for any time-dependent raster feature. Before joining time slices of all arrays will be sorted. Supported options are: - None (default): If time slices with matching timestamps have the same values, take one. Raise an error otherwise. - 'concatenate': Keep all time slices, even the ones with matching timestamps - 'min': Join time slices with matching timestamps by taking minimum values. Ignore NaN values. - 'max': Join time slices with matching timestamps by taking maximum values. Ignore NaN values. - 'mean': Join time slices with matching timestamps by taking mean values. Ignore NaN values. - 'median': Join time slices with matching timestamps by taking median values. Ignore NaN values. :type time_dependent_op: str or Callable or None :param timeless_op: An operation to be used to join data for any timeless raster feature. Supported options are: - None (default): If arrays are the same, take one. Raise an error otherwise. - 'concatenate': Join arrays over the last (i.e. bands) dimension - 'min': Join arrays by taking minimum values. Ignore NaN values. - 'max': Join arrays by taking maximum values. Ignore NaN values. - 'mean': Join arrays by taking mean values. Ignore NaN values. - 'median': Join arrays by taking median values. Ignore NaN values. 
:type timeless_op: str or Callable or None :return: A dictionary with EOPatch features and values :rtype: Dict[(FeatureType, str), object] """ reduce_timestamps = time_dependent_op != 'concatenate' time_dependent_op = _parse_operation(time_dependent_op, is_timeless=False) timeless_op = _parse_operation(timeless_op, is_timeless=True) all_features = {feature for eopatch in eopatches for feature in FeatureParser(features)(eopatch)} eopatch_content = {} timestamps, sort_mask, split_mask = _merge_timestamps(eopatches, reduce_timestamps) eopatch_content[FeatureType.TIMESTAMP] = timestamps for feature in all_features: feature_type, feature_name = feature if feature_type.is_raster(): if feature_type.is_time_dependent(): eopatch_content[feature] = _merge_time_dependent_raster_feature( eopatches, feature, time_dependent_op, sort_mask, split_mask ) else: eopatch_content[feature] = _merge_timeless_raster_feature(eopatches, feature, timeless_op) if feature_type.is_vector(): eopatch_content[feature] = _merge_vector_feature(eopatches, feature) if feature_type is FeatureType.META_INFO: eopatch_content[feature] = _select_meta_info_feature(eopatches, feature_name) if feature_type is FeatureType.BBOX: eopatch_content[feature] = _get_common_bbox(eopatches) return eopatch_content def _parse_operation(operation_input, is_timeless): """ Transforms operation's instruction (i.e. an input string) into a function that can be applied to a list of arrays. If the input already is a function it returns it. """ if isinstance(operation_input, Callable): return operation_input try: return { None: _return_if_equal_operation, 'concatenate': functools.partial(np.concatenate, axis=-1 if is_timeless else 0), 'mean': functools.partial(np.nanmean, axis=0), 'median': functools.partial(np.nanmedian, axis=0), 'min': functools.partial(np.nanmin, axis=0), 'max': functools.partial(np.nanmax, axis=0) }[operation_input] except KeyError as exception: raise ValueError(f'Merge operation {operation_input} is not supported') from exception def _return_if_equal_operation(arrays): """ Checks if arrays are all equal and returns first one of them. If they are not equal it raises an error. """ if _all_equal(arrays): return arrays[0] raise ValueError('Cannot merge given arrays because their values are not the same') def _merge_timestamps(eopatches, reduce_timestamps): """ Merges together timestamps from EOPatches. It also prepares masks on how to sort and join data in any time-dependent raster feature. """ all_timestamps = [timestamp for eopatch in eopatches for timestamp in eopatch.timestamp if eopatch.timestamp is not None] if not all_timestamps: return [], None, None sort_mask = np.argsort(all_timestamps) all_timestamps = sorted(all_timestamps) if not reduce_timestamps: return all_timestamps, sort_mask, None split_mask = [ index + 1 for index, (timestamp, next_timestamp) in enumerate(zip(all_timestamps[:-1], all_timestamps[1:])) if timestamp != next_timestamp ] reduced_timestamps = [timestamp for index, timestamp in enumerate(all_timestamps) if index == 0 or timestamp != all_timestamps[index - 1]] return reduced_timestamps, sort_mask, split_mask def _merge_time_dependent_raster_feature(eopatches, feature, operation, sort_mask, split_mask): """ Merges numpy arrays of a time-dependent raster feature with a given operation and masks on how to sort and join time raster's time slices. 
""" arrays = _extract_feature_values(eopatches, feature) merged_array = np.concatenate(arrays, axis=0) del arrays if sort_mask is None: return merged_array merged_array = merged_array[sort_mask] if split_mask is None or len(split_mask) == merged_array.shape[0] - 1: return merged_array split_arrays = np.split(merged_array, split_mask) del merged_array try: split_arrays = [operation(array_chunk) for array_chunk in split_arrays] except ValueError as exception: raise ValueError(f'Failed to merge {feature} with {operation}, try setting a different value for merging ' f'parameter time_dependent_op') from exception return np.array(split_arrays) def _merge_timeless_raster_feature(eopatches, feature, operation): """ Merges numpy arrays of a timeless raster feature with a given operation. """ arrays = _extract_feature_values(eopatches, feature) if len(arrays) == 1: return arrays[0] try: return operation(arrays) except ValueError as exception: raise ValueError(f'Failed to merge {feature} with {operation}, try setting a different value for merging ' f'parameter timeless_op') from exception def _merge_vector_feature(eopatches, feature): """ Merges GeoDataFrames of a vector feature. """ dataframes = _extract_feature_values(eopatches, feature) if len(dataframes) == 1: return dataframes[0] crs_list = [dataframe.crs for dataframe in dataframes if dataframe.crs is not None] if not crs_list: crs_list = [None] if not _all_equal(crs_list): raise ValueError(f'Cannot merge feature {feature} because dataframes are defined for ' f'different CRS') merged_dataframe = GeoDataFrame(pd.concat(dataframes, ignore_index=True), crs=crs_list[0]) merged_dataframe = merged_dataframe.drop_duplicates(ignore_index=True) # In future a support for vector operations could be added here return merged_dataframe def _select_meta_info_feature(eopatches, feature_name): """ Selects a value for a meta info feature of a merged EOPatch. By default the value is the first one. """ values = _extract_feature_values(eopatches, (FeatureType.META_INFO, feature_name)) if not _all_equal(values): message = f'EOPatches have different values of meta info feature {feature_name}. The first value will be ' \ f'used in a merged EOPatch' warnings.warn(message, category=UserWarning) return values[0] def _get_common_bbox(eopatches): """ Makes sure that all EOPatches, which define a bounding box and CRS, define the same ones. """ bboxes = [eopatch.bbox for eopatch in eopatches if eopatch.bbox is not None] if not bboxes: return None if _all_equal(bboxes): return bboxes[0] raise ValueError('Cannot merge EOPatches because they are defined for different bounding boxes') def _extract_feature_values(eopatches, feature): """ A helper function that extracts a feature values from those EOPatches where a feature exists. """ feature_type, feature_name = feature return [eopatch[feature] for eopatch in eopatches if feature_name in eopatch[feature_type]] def _all_equal(values): """ A helper function that checks if all values in a given list are equal to each other. """ first_value = values[0] if isinstance(first_value, np.ndarray): is_numeric_dtype = np.issubdtype(first_value.dtype, np.number) return all(np.array_equal(first_value, array, equal_nan=is_numeric_dtype) for array in values[1:]) return all(first_value == value for value in values[1:])
py
1a4179e1262a9173eb80c366709ec53c3fb106df
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

def evaluate(self):
    return self.b - self.c
py
1a417a39aec4e307b5788f20466897d8a07d5d83
from coverage import coverage
import unittest

cov = coverage(branch=True, include=['app/*'])
cov.set_option('report:show_missing', True)
cov.erase()
cov.start()

from .client_test import ClientTestCase
from .features_test import FeatureTestCase
from .product_area_test import ProductAreaTestCase

if __name__ == '__main__':
    tests = unittest.TestLoader().discover('./tests', pattern='*test.py')
    unittest.TextTestRunner(verbosity=1).run(tests)
    cov.stop()
    cov.save()
    print("\n\nCoverage Report:\n")
    cov.report()
py
1a417b5caaf79906d4871b08b106d312e1bbdd53
#!/usr/bin/env python # -*- coding: utf-8 -*- # Part of the PsychoPy library # Copyright (C) 2012-2020 iSolver Software Solutions (C) 2021 Open Science Tools Ltd. # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function from builtins import next from past.builtins import basestring from builtins import object import numbers # numbers.Integral is like (int, long) but supports Py3 from tables import * import os from collections import namedtuple import json from ..errors import print2err from pkg_resources import parse_version import tables if parse_version(tables.__version__) < parse_version('3'): from tables import openFile as open_file walk_groups = "walkGroups" list_nodes = "listNodes" get_node = "getNode" read_where = "readWhere" else: from tables import open_file walk_groups = "walk_groups" list_nodes = "list_nodes" get_node = "get_node" read_where = "read_where" _hubFiles = [] def openHubFile(filepath, filename, mode): """ Open an HDF5 DataStore file and register it so that it is closed even on interpreter crash. """ global _hubFiles hubFile = open_file(os.path.join(filepath, filename), mode) _hubFiles.append(hubFile) return hubFile def displayDataFileSelectionDialog(starting_dir=None): """Shows a FileDialog and lets you select a .hdf5 file to open for processing.""" from psychopy.gui.qtgui import fileOpenDlg filePath = fileOpenDlg(tryFilePath=starting_dir, prompt = "Select a ioHub HDF5 File", allowed='HDF5 Files (*.hdf5)') if filePath is None: return None return filePath def displayEventTableSelectionDialog( title, list_label, list_values, default=u'Select'): from psychopy import gui if default not in list_values: list_values.insert(0, default) else: list_values.remove(list_values) list_values.insert(0, default) selection_dict = dict(list_label=list_values) dlg_info = dict(selection_dict) infoDlg = gui.DlgFromDict(dictionary=dlg_info, title=title) if not infoDlg.OK: return None while list(dlg_info.values())[0] == default and infoDlg.OK: dlg_info=dict(selection_dict) infoDlg = gui.DlgFromDict(dictionary=dlg_info, title=title) if not infoDlg.OK: return None return list(dlg_info.values())[0] ########### Experiment / Experiment Session Based Data Access ################# class ExperimentDataAccessUtility(object): """The ExperimentDataAccessUtility provides a simple, high level, way to access data saved in an ioHub DataStore HDF5 file. Data access is done by providing information at an experiment and session level, as well as specifying the ioHub Event types you want to retieve data for. An instance of the ExperimentDataAccessUtility class is created by providing the location and name of the file to read, as well as any session code filtering you want applied to the retieved datasets. Args: hdfFilePath (str): The path of the directory the DataStore HDF5 file is in. hdfFileName (str): The name of the DataStore HDF5 file. experimentCode (str): If multi-experiment support is enabled for the DataStore file, this arguement can be used to specify what experiment data to load based on the experiment_code given. NOTE: Multi-experiment data file support is not well tested and should not be used at this point. sessionCodes (str or list): The experiment session code to filter data by. If a list of codes is given, then all codes in the list will be used. Returns: object: the created instance of the ExperimentDataAccessUtility, ready to get your data! 
""" def __init__( self, hdfFilePath, hdfFileName, experimentCode=None, sessionCodes=[], mode='r'): """An instance of the ExperimentDataAccessUtility class is created by providing the location and name of the file to read, as well as any session code filtering you want applied to the retieved datasets. Args: hdfFilePath (str): The path of the directory the DataStore HDF5 file is in. hdfFileName (str): The name of the DataStore HDF5 file. experimentCode (str): If multi-experiment support is enabled for the DataStore file, this arguement can be used to specify what experiment data to load based on the experiment_code given. NOTE: Multi-experiment data file support is not well tested and should not be used at this point. sessionCodes (str or list): The experiment session code to filter data by. If a list of codes is given, then all codes in the list will be used. Returns: object: the created instance of the ExperimentDataAccessUtility, ready to get your data! """ self.hdfFilePath = hdfFilePath self.hdfFileName = hdfFileName self.mode = mode self.hdfFile = None self._experimentCode = experimentCode self._sessionCodes = sessionCodes self._lastWhereClause = None try: self.hdfFile = openHubFile(hdfFilePath, hdfFileName, mode) except Exception as e: print(e) raise ExperimentDataAccessException(e) self.getExperimentMetaData() def printTableStructure(self,tableName): """Print to stdout the current structure and content statistics of the specified DataStore table. To print out the complete structure of the DataStore file, including the name of all available tables, see the printHubFileStructure method. Args: tableName (str): The DataStore table name to print metadata information out for. """ if self.hdfFile: hubFile = self.hdfFile for group in getattr(hubFile, walk_groups)("/"): for table in getattr(hubFile, list_nodes)(group, classname='Table'): if table.name == tableName: print('------------------') print('Path:', table) print('Table name:', table.name) print('Number of rows in table:', table.nrows) print('Number of cols in table:', len(table.colnames)) print('Attribute name := type, shape:') for name in table.colnames: print('\t', name, ':= %s, %s' % (table.coldtypes[name], table.coldtypes[name].shape)) print('------------------') return def printHubFileStructure(self): """Print to stdout the current global structure of the loaded DataStore File.""" if self.hdfFile: print(self.hdfFile) def getExperimentMetaData(self): """Returns the the metadata for the experiment the datStore file is for. **Docstr TBC.** """ if self.hdfFile: expcols = self.hdfFile.root.data_collection.experiment_meta_data.colnames if 'sessions' not in expcols: expcols.append('sessions') ExperimentMetaDataInstance = namedtuple( 'ExperimentMetaDataInstance', expcols) experiments=[] for e in self.hdfFile.root.data_collection.experiment_meta_data: self._experimentID = e['experiment_id'] a_exp = list(e[:]) a_exp.append(self.getSessionMetaData()) experiments.append(ExperimentMetaDataInstance(*a_exp)) return experiments def getSessionMetaData(self, sessions=None): """ Returns the the metadata associated with the experiment session codes in use. 
**Docstr TBC.** """ if self.hdfFile: if sessions is None: sessions = [] sessionCodes = self._sessionCodes sesscols = self.hdfFile.root.data_collection.session_meta_data.colnames SessionMetaDataInstance = namedtuple('SessionMetaDataInstance', sesscols) for r in self.hdfFile.root.data_collection.session_meta_data: if (len(sessionCodes) == 0 or r['code'] in sessionCodes) and r[ 'experiment_id'] == self._experimentID: rcpy=list(r[:]) rcpy[-1]=json.loads(rcpy[-1]) sessions.append(SessionMetaDataInstance(*rcpy)) return sessions def getTableForPath(self, path): """ Given a valid table path within the DataStore file, return the accociated table. """ getattr(self.hdfFile, get_node)(path) def getEventTable(self, event_type): """ Returns the DataStore table that contains events of the specified type. **Docstr TBC.** """ if self.hdfFile: klassTables = self.hdfFile.root.class_table_mapping event_column = None event_value = None if isinstance(event_type, basestring): if event_type.find('Event') >= 0: event_column = 'class_name' event_value = event_type else: event_value = '' tokens = event_type.split('_') for t in tokens: event_value += t[0].upper()+t[1:].lower() event_value = event_type+'Event' elif isinstance(event_type, numbers.Integral): event_column = 'class_id' event_value = event_type else: print2err( 'getEventTable error: event_type arguement must be a string or and int') return None result = [] where_cls = '(%s == b"%s") & (class_type_id == 1)'%(event_column, event_value) for row in klassTables.where(where_cls): result.append(row.fetch_all_fields()) if len(result) == 0: return None if len(result)!= 1: print2err( 'event_type_id passed to getEventAttribute can only return one row from CLASS_MAPPINGS: ', len(result)) return None tablePathString = result[0][3] if isinstance(tablePathString, bytes): tablePathString = tablePathString.decode('utf-8') return getattr(self.hdfFile, get_node)(tablePathString) return None def getEventMappingInformation(self): """Returns details on how ioHub Event Types are mapped to tables within the given DataStore file.""" if self.hdfFile: eventMappings=dict() class_2_table=self.hdfFile.root.class_table_mapping EventTableMapping = namedtuple( 'EventTableMapping', self.hdfFile.root.class_table_mapping.colnames) for row in class_2_table[:]: eventMappings[row['class_id']] = EventTableMapping(*row) return eventMappings return None def getEventsByType(self, condition_str = None): """Returns a dict of all event tables within the DataStore file that have at least one event instance saved. Keys are Event Type constants, as specified by iohub.EventConstants. Each value is a row iterator for events of that type. 
""" eventTableMappings = self.getEventMappingInformation() if eventTableMappings: events_by_type = dict() getNode = getattr(self.hdfFile, get_node) for event_type_id, event_mapping_info in eventTableMappings.items(): try: cond = '(type == %d)' % (event_type_id) if condition_str: cond += ' & ' + condition_str et_path = event_mapping_info.table_path if isinstance(et_path, bytes): et_path = et_path.decode('utf-8') events_by_type[event_type_id] = next(getNode(et_path).where(cond)) except StopIteration: pass return events_by_type return None def getConditionVariablesTable(self): """ **Docstr TBC.** """ cv_group = self.hdfFile.root.data_collection.condition_variables ecv = 'EXP_CV_%d' % (self._experimentID,) if ecv in cv_group._v_leaves: return cv_group._v_leaves[ecv] return None def getConditionVariableNames(self): """ **Docstr TBC.** """ cv_group = self.hdfFile.root.data_collection.condition_variables ecv = "EXP_CV_%d" % (self._experimentID,) if ecv in cv_group._v_leaves: ecvTable = cv_group._v_leaves[ecv] return ecvTable.colnames return None def getConditionVariables(self, filter=None): """ **Docstr TBC.** """ if filter is None: session_ids = [] for s in self.getExperimentMetaData()[0].sessions: session_ids.append(s.session_id) filter = dict(session_id=(' in ', session_ids)) ConditionSetInstance = None for conditionVarName, conditionVarComparitor in filter.items(): avComparison, value = conditionVarComparitor cv_group = self.hdfFile.root.data_collection.condition_variables cvrows = [] ecv = "EXP_CV_%d" % (self._experimentID,) if ecv in cv_group._v_leaves: ecvTable = cv_group._v_leaves[ecv] if ConditionSetInstance is None: colnam = ecvTable.colnames ConditionSetInstance = namedtuple('ConditionSetInstance', colnam) cvrows.extend( [ ConditionSetInstance( * r[:]) for r in ecvTable if all( [ eval( '{0} {1} {2}'.format( r[conditionVarName], conditionVarComparitor[0], conditionVarComparitor[1])) for conditionVarName, conditionVarComparitor in filter.items()])]) return cvrows def getValuesForVariables(self, cv, value, cvNames): """ **Docstr TBC.** """ if isinstance(value, (list, tuple)): resolvedValues = [] for v in value: if isinstance(value, basestring) and value.startswith( '@') and value.endswith('@'): value=value[1:-1] if value in cvNames: resolvedValues.append(getattr(cv, v)) else: raise ExperimentDataAccessException( 'getEventAttributeValues: {0} is not a valid attribute name in {1}'.format( v, cvNames)) elif isinstance(value, basestring): resolvedValues.append(value) return resolvedValues elif isinstance(value, basestring) and value.startswith('@') and value.endswith('@'): value = value[1:-1] if value in cvNames: return getattr(cv, value) else: raise ExperimentDataAccessException( 'getEventAttributeValues: {0} is not a valid attribute name in {1}'.format( value, cvNames)) else: raise ExperimentDataAccessException( 'Unhandled value type !: {0} is not a valid type for value {1}'.format( type(value), value)) def getEventAttributeValues( self, event_type_id, event_attribute_names, filter_id=None, conditionVariablesFilter=None, startConditions=None, endConditions=None): """ **Docstr TBC.** Args: event_type_id event_attribute_names conditionVariablesFilter startConditions endConditions Returns: Values for the specified event type and event attribute columns which match the provided experiment condition variable filter, starting condition filer, and ending condition filter criteria. 
""" if self.hdfFile: klassTables = self.hdfFile.root.class_table_mapping deviceEventTable = None result = [ row.fetch_all_fields() for row in klassTables.where( '(class_id == %d) & (class_type_id == 1)' % (event_type_id))] if len(result) != 1: raise ExperimentDataAccessException("event_type_id passed to getEventAttribute should only return one row from CLASS_MAPPINGS.") tablePathString = result[0][3] deviceEventTable = getattr(self.hdfFile, get_node)(tablePathString) for ename in event_attribute_names: if ename not in deviceEventTable.colnames: raise ExperimentDataAccessException( 'getEventAttribute: %s does not have a column named %s' % (deviceEventTable.title, event_attribute_names)) resultSetList = [] csier = list(event_attribute_names) csier.append('query_string') csier.append('condition_set') EventAttributeResults = namedtuple('EventAttributeResults', csier) if deviceEventTable is not None: if not isinstance(event_attribute_names, (list, tuple)): event_attribute_names = [event_attribute_names, ] filteredConditionVariableList = None if conditionVariablesFilter is None: filteredConditionVariableList= self.getConditionVariables() else: filteredConditionVariableList = self.getConditionVariables( conditionVariablesFilter) cvNames = self.getConditionVariableNames() # no further where clause building needed; get reseults and # return if startConditions is None and endConditions is None: for cv in filteredConditionVariableList: wclause = '( experiment_id == {0} ) & ( session_id == {1} )'.format( self._experimentID, cv.session_id) wclause += ' & ( type == {0} ) '.format(event_type_id) if filter_id is not None: wclause += '& ( filter_id == {0} ) '.format( filter_id) resultSetList.append([]) for ename in event_attribute_names: resultSetList[-1].append(getattr(deviceEventTable, read_where)(wclause, field=ename)) resultSetList[-1].append(wclause) resultSetList[-1].append(cv) eventAttributeResults = EventAttributeResults( *resultSetList[-1]) resultSetList[-1]=eventAttributeResults return resultSetList #start or end conditions exist.... 
for cv in filteredConditionVariableList: resultSetList.append([]) wclause = '( experiment_id == {0} ) & ( session_id == {1} )'.format( self._experimentID, cv.session_id) wclause += ' & ( type == {0} ) '.format(event_type_id) if filter_id is not None: wclause += '& ( filter_id == {0} ) '.format(filter_id) # start Conditions need to be added to where clause if startConditions is not None: wclause += '& (' for conditionAttributeName, conditionAttributeComparitor in startConditions.items(): avComparison,value=conditionAttributeComparitor value = self.getValuesForVariables( cv, value, cvNames) wclause += ' ( {0} {1} {2} ) & '.format( conditionAttributeName, avComparison, value) wclause=wclause[:-3] wclause += ' ) ' # end Conditions need to be added to where clause if endConditions is not None: wclause += ' & (' for conditionAttributeName, conditionAttributeComparitor in endConditions.items(): avComparison,value=conditionAttributeComparitor value = self.getValuesForVariables( cv, value, cvNames) wclause += ' ( {0} {1} {2} ) & '.format( conditionAttributeName, avComparison, value) wclause=wclause[:-3] wclause += ' ) ' for ename in event_attribute_names: resultSetList[-1].append(getattr(deviceEventTable, read_where)(wclause, field=ename)) resultSetList[-1].append(wclause) resultSetList[-1].append(cv) eventAttributeResults = EventAttributeResults( *resultSetList[-1]) resultSetList[-1]=eventAttributeResults return resultSetList return None def getEventIterator(self, event_type): """ **Docstr TBC.** Args: event_type Returns: (iterator): An iterator providing access to each matching event as a numpy recarray. """ return self.getEventTable(event_type).iterrows() def close(self): """Close the ExperimentDataAccessUtility and associated DataStore File.""" global _hubFiles if self.hdfFile in _hubFiles: _hubFiles.remove(self.hdfFile) self.hdfFile.close() self.experimentCodes = None self.hdfFilePath = None self.hdfFileName = None self.mode = None self.hdfFile = None def __del__(self): try: self.close() except Exception: pass class ExperimentDataAccessException(Exception): pass
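A hedged sketch of the access utility above in use; the file name is illustrative and 'MessageEvent' assumes message events were saved in the DataStore file.

# Open an ioHub DataStore file and pull one event table (names are illustrative).
dataAccess = ExperimentDataAccessUtility('.', 'events.hdf5')
dataAccess.printHubFileStructure()                    # dump the HDF5 group/table layout
msg_table = dataAccess.getEventTable('MessageEvent')  # resolved via the class_table_mapping table
if msg_table is not None:
    for row in msg_table.iterrows():
        print(row['time'], row['text'])
dataAccess.close()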
py
1a417b88714857c13a1a24604cae0976403a7d40
""" Copyright 2020 Lightbend Inc. Licensed under the Apache License, Version 2.0. """ from dataclasses import dataclass, field from typing import MutableSet from google.protobuf.empty_pb2 import Empty from akkaserverless.replicated_context import ReplicatedEntityCommandContext from akkaserverless.replicated_entity import ReplicatedEntity from akkaserverless.replicated.counter import ReplicatedCounter from akkaserverless.replicated.counter_map import ReplicatedCounterMap from akkaserverless.replicated.multi_map import ReplicatedMultiMap from akkaserverless.replicated.vote import ReplicatedVote from replicated_entity_example_pb2 import (UpdateCounter, CounterValue, _REPLICATEDENTITYEXAMPLE, DESCRIPTOR as API_DESCRIPTOR) ''' def init(entity_id: str) -> ReplicatedCounter: return ReplicatedCounter() def init(entity_id: str) -> ReplicatedCounterMap: return ReplicatedCounterMap() def init(entity_id: str) -> ReplicatedMultiMap: return ReplicatedMultiMap() ''' def init(entity_id: str) -> ReplicatedVote: return ReplicatedVote() #entity = ReplicatedEntity(_REPLICATEDENTITYEXAMPLE, [API_DESCRIPTOR], ReplicatedCounter, 'counter', init) #entity = ReplicatedEntity(_REPLICATEDENTITYEXAMPLE, [API_DESCRIPTOR], ReplicatedCounterMap, 'counter', init) #entity = ReplicatedEntity(_REPLICATEDENTITYEXAMPLE, [API_DESCRIPTOR], ReplicatedMultiMap, 'counter', init) entity = ReplicatedEntity(_REPLICATEDENTITYEXAMPLE, [API_DESCRIPTOR], ReplicatedVote, 'counter', init) ''' @entity.command_handler("UpdateReplicatedCounter") def update(state: ReplicatedCounter, command: UpdateCounter, context: ReplicatedEntityCommandContext): context.state.increment(command.value) return CounterValue(value=context.state.current_value) @entity.command_handler("UpdateReplicatedCounter") def update(state: ReplicatedCounterMap, command: UpdateCounter, context: ReplicatedEntityCommandContext): context.state.increment(command.key, command.value) return CounterValue(value=context.state.get(command.key).current_value) @entity.command_handler("UpdateReplicatedCounter") def update(state: ReplicatedMultiMap, command: UpdateCounter, context: ReplicatedEntityCommandContext): context.state.put(command.key, command.value) return CounterValue(value=0) ''' @entity.command_handler("UpdateReplicatedCounter") def update(state: ReplicatedVote, command: UpdateCounter, context: ReplicatedEntityCommandContext): context.state.vote(True) return CounterValue(value=context.state.get_votes())
py
1a417caffffefb7549b6d8f45eb516ab3aeabd17
import pandas as pd from bokeh.plotting import figure, show, curdoc from bokeh.layouts import widgetbox, layout, row, column from bokeh.models import ColumnDataSource, Button, Slider, Dropdown, PreText, DataTable, TableColumn, MultiSelect, NumberFormatter, Spacer from collections import OrderedDict, Counter import numpy as np from functools import partial import swing_table import os doc = curdoc() file_path = os.path.dirname(os.path.abspath(__file__)) class MCDMModel: def __init__(self): self.rubric = pd.read_excel(os.path.join(file_path, "data/Rubric.xlsx"), "Rubric v3") self.cost_model = pd.read_excel(os.path.join(file_path, "data/Rubric.xlsx"), "Cost_Model") try: self.rubric.drop(["Category", "Definition", "Grading Scale"], inplace=True, axis=1) except KeyError: pass self.criteria = self.rubric["Criteria"].drop_duplicates().tolist() self.swing_table = swing_table.create_swing_table() self.chosen_criteria = [] self.criteria_selection = MultiSelect(title="Choose Criteria:", size=10) self.choose_criteria() self.rubric_values = self.rubric.replace("Excellent", 1.0) self.rubric_values.replace("Good", 0.5, inplace=True) self.rubric_values.replace("Poor", 0, inplace=True) self.rubric_values = self.rubric_values.melt(id_vars=["Criteria"], var_name=["Tool"], value_name="Score") self.weight_sliders = OrderedDict() self.ranking = OrderedDict() self.b = Button(label="Update Model", button_type="primary") self.b.on_click(self.submit_callback) self.criteria_b = Button(label="Submit Criteria", button_type="primary") self.criteria_b.on_click(self.choose_criteria_callback) self.clear_button = Button(label="Reset", button_type="warning") self.clear_button.on_click(self.clear_model) self.rank_submit = Button(label="Calculate Ranks", button_type="primary") self.rank_submit.on_click(self.submit_ranks) self.source = ColumnDataSource() self.data_table = DataTable self.app_layout = layout() def clear_model(self): self.swing_table = swing_table.create_swing_table() self.app_layout.children.pop(1) self.app_layout.children.append(layout([[self.swing_table]])) def choose_criteria(self): self.criteria_selection.options = self.rubric["Criteria"].drop_duplicates().tolist() def choose_criteria_callback(self): self.chosen_criteria = [] self.chosen_criteria = self.criteria_selection.value if len(self.chosen_criteria) > 0: self.ranking = OrderedDict() self.rank_criteria() self.swing_table = swing_table.create_swing_table(self.chosen_criteria) try: self.app_layout.children.pop(1) except IndexError: pass self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table], *[self.ranking[k] for k in self.ranking.keys()], [self.rank_submit], [self.clear_button]])) def rank_criteria(self): for c in sorted(self.chosen_criteria): self.ranking.update({c: [PreText(text="Scenario {}".format(sorted(self.criteria).index(c) + 1)), Dropdown(menu=[(str(i), str(i)) for i in range(1, len(self.chosen_criteria) + 1)], button_type="primary", label="Rank")]}) for k in self.ranking.keys(): self.ranking[k][1].on_change("value", partial(self.ranking_label_callback, k=k)) def weight_calc(self): for c in self.chosen_criteria: self.weight_sliders.update({c: Slider(start=0, end=1, step=.01, title=c, id=c, value=1/len(self.chosen_criteria))}) self.weight_sliders[self.chosen_criteria[0]].disabled = True self.weight_sliders[self.chosen_criteria[0]].value = 1 for w in self.weight_sliders.keys(): self.weight_sliders[w].on_change("value", partial(self.weight_callback, c=w)) def ranking_label_callback(self, attr, old, new, k): 
self.ranking[k][1].label = new if self.ranking[k][1].button_type == "danger": print("test") self.ranking[k][1].button_type = "primary" try: self.ranking[k].pop(-1) self.app_layout.children.pop(1) self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table], *[self.ranking[k] for k in self.ranking.keys()], [self.rank_submit], [self.clear_button]])) except IndexError: pass def submit_ranks(self): self.weight_sliders = OrderedDict() ranks = [] for k in self.chosen_criteria: if not self.ranking[k][1].value: self.ranking[k][1].button_type = "danger" self.ranking[k].append(PreText(text="Please enter a rank for all chosen criteria")) self.app_layout.children.pop(1) self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table], *[self.ranking[k] for k in self.ranking.keys()], [self.rank_submit], [self.clear_button]])) else: ranks.append(self.ranking[k][1].value) if len(ranks) == len(self.ranking.keys()): if len(ranks) != len(list(set(ranks))): dup_values = [] for crit, count in Counter(ranks).items(): if count > 1: dup_values.append(crit) for k in self.ranking.keys(): if self.ranking[k][1].value in dup_values: self.ranking[k][1].button_type = "danger" self.ranking[k].append(PreText(text="Please enter unique ranks for each criteria")) self.app_layout.children.pop(1) self.app_layout.children.append(layout([[Spacer(width=300), self.swing_table], *[self.ranking[k] for k in self.ranking.keys()], [self.rank_submit], [self.clear_button]])) else: for k in self.ranking.keys(): self.ranking[k][1].button_type = "primary" temp_list = [] for r in np.argsort(ranks): temp_list.append(self.chosen_criteria[r]) self.chosen_criteria = temp_list self.add_weight_changes() def weight_callback(self, attr, old, new, c): next_index = self.chosen_criteria.index(c) + 1 prev_index = self.chosen_criteria.index(c) - 1 if next_index != len(self.chosen_criteria): if self.weight_sliders[self.chosen_criteria[next_index]].value > new: self.weight_sliders[self.chosen_criteria[next_index]].value = new if prev_index != 0: if self.weight_sliders[self.chosen_criteria[prev_index]].value < new: self.weight_sliders[self.chosen_criteria[prev_index]].value = new def submit_callback(self): total_weight = sum([self.weight_sliders[s].value for s in self.weight_sliders.keys()]) normed_weights = [] for w in self.weight_sliders.keys(): normed_weights.append((w, self.weight_sliders[w].value/total_weight)) weights_df = pd.DataFrame(normed_weights, columns=["Criteria", "Normed_Weights"]) rubric_calc = self.rubric_values.merge(weights_df, on=["Criteria"]) rubric_calc = rubric_calc.merge(self.cost_model[["Tool", "Normalized Cost"]], on="Tool", how="left") rubric_calc.loc[rubric_calc.Criteria == "Cost", "Score"] = rubric_calc["Normalized Cost"] rubric_calc.drop("Normalized Cost", axis=1) rubric_calc["WeightedScore"] = rubric_calc["Score"] * rubric_calc["Normed_Weights"] values = rubric_calc[["Tool", "WeightedScore"]].groupby(["Tool"]).sum().reset_index() values.sort_values(by="WeightedScore", inplace=True, ascending=False) values["Rank"] = values.rank(method="dense", numeric_only=True, ascending=False) self.source = ColumnDataSource() self.source.data.update({"tool": values["Tool"].tolist(), "score": values["WeightedScore"], "rank": values["Rank"].tolist()}) self.add_rank_table() def start_model(self): self.app_layout = layout([[self.criteria_selection, self.criteria_b]]) self.app_layout.children.append(layout(self.swing_table)) return self.app_layout def add_weight_changes(self): self.weight_calc() buttons = 
zip([self.ranking[k][0] for k in self.chosen_criteria], [self.ranking[k][1] for k in self.chosen_criteria], [self.weight_sliders[k] for k in self.weight_sliders.keys()]) b_layout = [[t[0], t[1], t[2]] for t in buttons] b_layout.append([self.rank_submit, self.b]) b_layout.append(self.clear_button) b_layout.insert(0, [Spacer(width=300), self.swing_table]) self.app_layout.children.pop(1) self.app_layout.children.append(layout(b_layout)) def add_rank_table(self): columns = [TableColumn(field="tool", title="Tool"), TableColumn(field="score", title="Weighted Score", formatter=NumberFormatter(format="0.00")), TableColumn(field="rank", title="Rank")] self.data_table = DataTable(columns=columns, source=self.source, reorderable=True) buttons = zip([self.ranking[k][0] for k in self.chosen_criteria], [self.ranking[k][1] for k in self.chosen_criteria], [self.weight_sliders[k] for k in self.weight_sliders.keys()]) self.app_layout.children.pop(1) b_layout = [[t[0], t[1], t[2]] for t in buttons] b_layout.append([self.rank_submit, self.b]) b_layout.append(widgetbox(self.data_table)) b_layout.append([self.clear_button]) b_layout.insert(0, [Spacer(width=300), self.swing_table]) self.app_layout.children.append(layout(b_layout)) mcdm = MCDMModel() app_layout = mcdm.start_model()
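# Hedged addition (not in the original source): the script grabs the Bokeh
# document via curdoc() at the top but never attaches the assembled layout to
# it. Assuming this module is meant to be served with `bokeh serve`, the usual
# final step would be:
doc.add_root(app_layout)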
py
1a417cfb116bd8e204e92b61b0e5733953c11547
#!/usr/bin/env python3 import os import pathlib import sys import github import msgpack import packaging.version from jinja2 import Template from slugify import slugify from tqdm import tqdm DISABLE_TQDM = "CI" in os.environ HEADERS = {"user-agent": "https://github.com/salt-extensions/salt-extensions-index"} REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent LOCAL_CACHE_PATH = pathlib.Path( os.environ.get("LOCAL_CACHE_PATH") or REPO_ROOT.joinpath(".cache") ) if not LOCAL_CACHE_PATH.is_dir(): LOCAL_CACHE_PATH.mkdir(0o755) PACKAGE_INFO_CACHE = LOCAL_CACHE_PATH / "packages-info" if not PACKAGE_INFO_CACHE.is_dir(): PACKAGE_INFO_CACHE.mkdir(0o755) BLACKLISTED_EXTENSIONS = {"salt-extension"} print(f"Local Cache Path: {LOCAL_CACHE_PATH}", file=sys.stderr, flush=True) if sys.version_info < (3, 7): print("This script is meant to only run on Py3.7+", file=sys.stderr, flush=True) def set_progress_description(progress, message): progress.set_description(f"{message: <60}") def get_lastest_major_releases(progress, count=3): # This logic might have to change because the order of tags seems to be by creation time set_progress_description(progress, "Searching for latest salt releases...") gh = github.Github(login_or_token=os.environ.get("GITHUB_TOKEN") or None) repo = gh.get_repo("saltstack/salt") releases = [] last_version = None for tag in repo.get_tags(): if len(releases) == count: break version = packaging.version.parse(tag.name) try: if version.major < 3000: # Don't test versions of salt older than 3000 continue except AttributeError: progress.write(f"Failed to parse tag {tag}") continue if last_version is None: last_version = version releases.append(tag.name) continue if version.major == last_version.major: continue last_version = version releases.append(tag.name) progress.write(f"Found the folowing salt releases: {', '.join(releases)}") return releases def collect_extensions_info(): packages = {} for path in sorted(PACKAGE_INFO_CACHE.glob("*.msgpack")): url = None if path.stem in BLACKLISTED_EXTENSIONS: continue package_data = msgpack.unpackb(path.read_bytes()) package = package_data["info"]["name"] for urlinfo in package_data["urls"]: if urlinfo["packagetype"] == "sdist": url = urlinfo["url"] break if url is not None: packages[package] = url else: packages[package] = "no-sdist" return packages def main(): workflow = REPO_ROOT / ".github" / "workflows" / "test-extensions.yml" content = ( REPO_ROOT / ".github" / "workflows" / "templates" / "generate-index-base.yml" ).read_text() platform_templates = ( REPO_ROOT / ".github" / "workflows" / "templates" / "linux.yml.j2", REPO_ROOT / ".github" / "workflows" / "templates" / "macos.yml.j2", REPO_ROOT / ".github" / "workflows" / "templates" / "windows.yml.j2", ) packages = collect_extensions_info() progress = tqdm( total=len(packages), unit="pkg", unit_scale=True, desc=f"{' ' * 60} :", disable=DISABLE_TQDM, ) progress.write("Currently known extensions:") for package in packages: progress.write(f" * {package}") try: salt_versions = get_lastest_major_releases(progress) except Exception as exc: progress.write(f"Failed to get latest salt releases: {exc}") return 1 common_context = { "salt_versions": salt_versions, "python_versions": ["3.5", "3.6", "3.7", "3.8", "3.9"], } with progress: needs = [] for package, url in packages.items(): set_progress_description(progress, f"Processing {package}") context = common_context.copy() slug = slugify(package) context["slug"] = slug context["package"] = package context["package_url"] = url for template_path in 
platform_templates: content += Template(template_path.read_text()).render(**context) for platform in ("linux", "macos", "windows"): needs.append(f"{slug}-{platform}") progress.update() generate_extensions_index = ( REPO_ROOT / ".github" / "workflows" / "templates" / "generate-index.yml.j2" ) set_progress_description(progress, "Writing workflow") content += Template(generate_extensions_index.read_text()).render(needs=needs) workflow.write_text(content.rstrip() + "\n") progress.write("Complete") return 0 if __name__ == "__main__": exitcode = 0 try: main() except Exception: exitcode = 1 raise finally: sys.exit(exitcode)
py
1a417d2ebc3b81b697686af2a3a799c66b0e7a49
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class OcrLine(Model): """An object describing a single recognized line of text. :param bounding_box: Bounding box of a recognized line. The four integers represent the x-coordinate of the left edge, the y-coordinate of the top edge, width, and height of the bounding box, in the coordinate system of the input image, after it has been rotated around its center according to the detected text angle (see textAngle property), with the origin at the top-left corner, and the y-axis pointing down. :type bounding_box: str :param words: An array of objects, where each object represents a recognized word. :type words: list[~azure.cognitiveservices.vision.computervision.models.OcrWord] """ _attribute_map = { 'bounding_box': {'key': 'boundingBox', 'type': 'str'}, 'words': {'key': 'words', 'type': '[OcrWord]'}, } def __init__(self, bounding_box=None, words=None): super(OcrLine, self).__init__() self.bounding_box = bounding_box self.words = words
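# Minimal usage sketch (not part of the generated SDK file): shows how the
# constructor above is typically called. The bounding-box string follows the
# "left, top, width, height" convention from the docstring, and the empty list
# stands in for real OcrWord instances from the sibling generated module.
if __name__ == "__main__":
    line = OcrLine(bounding_box="462,379,497,52", words=[])
    print(line.bounding_box, len(line.words))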
py
1a417d55104773b2d8ae8ef085deeef8b0e92d30
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('books', '0007_auto_20160422_1121'), ] operations = [ migrations.AlterField( model_name='author', name='slug', field=models.SlugField(max_length=200, blank=True), ), migrations.AlterField( model_name='book', name='slug', field=models.SlugField(max_length=200, blank=True), ), migrations.AlterField( model_name='bookhasauthor', name='slug', field=models.SlugField(max_length=200, blank=True), ), migrations.AlterField( model_name='bookhascategory', name='slug', field=models.SlugField(max_length=200, blank=True), ), migrations.AlterField( model_name='category', name='slug', field=models.SlugField(max_length=200, blank=True), ), ]
py
1a417dbb2efb584cd7eac76d3b01fb45e7550f14
import numpy.testing as np_testing from pymanopt.manifolds import Oblique from .._test import TestCase class TestObliqueManifold(TestCase): def setUp(self): self.m = m = 100 self.n = n = 50 self.man = Oblique(m, n) # def test_dim(self): # def test_typicaldist(self): # def test_dist(self): # def test_inner(self): # def test_proj(self): # def test_ehess2rhess(self): # def test_retr(self): # def test_egrad2rgrad(self): # def test_norm(self): # def test_rand(self): # def test_randvec(self): # def test_transp(self): def test_exp_log_inverse(self): s = self.man x = s.rand() y = s.rand() u = s.log(x, y) z = s.exp(x, u) np_testing.assert_almost_equal(0, s.dist(y, z), decimal=6) def test_log_exp_inverse(self): s = self.man x = s.rand() u = s.randvec(x) y = s.exp(x, u) v = s.log(x, y) # Check that the manifold difference between the tangent vectors u and # v is 0 np_testing.assert_almost_equal(0, s.norm(x, u - v)) def test_pairmean(self): s = self.man X = s.rand() Y = s.rand() Z = s.pairmean(X, Y) np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
py
1a417e0ee367b56a861dab6047cae1481e23261d
import allure

from tep.client import request


@allure.title("Redirect--put")
def test(env_vars):
    # Description
    # Data
    # Request
    response = request(
        "put",
        url=env_vars.domain + "/redirect-to?url=https%3A%2F%2Fwww.baidu.com&status_code=200",
        headers={'Host': 'httpbin.org', 'Proxy-Connection': 'keep-alive', 'Content-Length': '47', 'accept': 'text/html',
                 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.109 Safari/537.36',
                 'Content-Type': 'application/x-www-form-urlencoded', 'Origin': 'http://httpbin.org',
                 'Referer': 'http://httpbin.org/', 'Accept-Encoding': 'gzip, deflate',
                 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
                 'Cookie': 'stale_after=never; fake=fake_value; freeform=3; name=dongfanger'},
    )
    # Extract
    # Assert
    assert response.status_code == 404
py
1a417e11c9ebba5fd1d43cc2d60873dfc776665a
from __future__ import annotations import re import warnings from typing import TYPE_CHECKING, Any, Dict, List, Optional import pandas as pd from dateutil import parser from cimsparql.query_support import combine_statements, unionize if TYPE_CHECKING: # pragma: no cover from cimsparql.model import CimModel as_type_able = [int, float, str, "Int64", "Int32", "Int16"] python_type_map = { "string": str, "integer": int, "boolean": lambda x: x.lower() == "true", "float": float, "dateTime": parser.parse, } uri_snmst = re.compile("^urn:snmst:#_") sparql_type_map = {"literal": str, "uri": lambda x: uri_snmst.sub("", x)} class TypeMapperQueries: @property def generals(self) -> List[List[str]]: """For sparql-types that are not sourced from objects of type rdf:property, sparql & type are required Sparql values should be like: http://iec.ch/TC57/2010/CIM-schema-cim15#PerCent this is how type or DataType usually looks like for each data point in the converted query result from SPARQLWrapper. type can be anything as long as it is represented in the python_type_map. """ return [ [ "?sparql_type rdf:type rdfs:Datatype", "?sparql_type owl:equivalentClass ?range", 'BIND(STRBEFORE(str(?range), "#") as ?prefix)', 'BIND(STRAFTER(str(?range), "#") as ?type)', ] ] @property def prefix_general(self) -> List[str]: """Common query used as a base for all prefix_based queries.""" return [ "?sparql_type rdf:type rdf:Property", "?sparql_type rdfs:range ?range", 'BIND(STRBEFORE(str(?range), "#") as ?prefix)', ] @property def prefix_based(self) -> Dict[str, List[str]]: """Each prefix can have different locations of where DataTypes are described. Based on a object of type rdf:property & its rdfs:range, one has edit the query such that one ends up with the DataType. """ return { "https://www.w3.org/2001/XMLSchema": ["?range rdfs:label ?type"], "https://iec.ch/TC57/2010/CIM-schema-cim15": [ "?range owl:equivalentClass ?class", "?class rdfs:label ?type", ], } @property def query(self) -> str: select_query = "SELECT ?sparql_type ?type ?prefix" grouped_generals = [combine_statements(*g, split=" .\n") for g in self.generals] grouped_prefixes = [ combine_statements(*v, f'FILTER (?prefix = "{k}")', split=" .\n") for k, v in self.prefix_based.items() ] grouped_prefix_general = combine_statements(*self.prefix_general, split=" .\n") unionized_generals = unionize(*grouped_generals) unionized_prefixes = unionize(*grouped_prefixes) full_prefixes = combine_statements(grouped_prefix_general, unionized_prefixes, group=True) full_union = unionize(unionized_generals, full_prefixes, group=False) return f"{select_query}\nWHERE\n{{\n{full_union}\n}}" class TypeMapper(TypeMapperQueries): def __init__(self, client: CimModel, custom_additions: Optional[Dict[str, Any]] = None) -> None: self.prefixes = client.prefixes custom_additions = custom_additions if custom_additions is not None else {} self.map = {**sparql_type_map, **self.get_map(client), **custom_additions} def have_cim_version(self, cim) -> bool: return cim in (val.split("#")[0] for val in self.map.keys()) @staticmethod def type_map(df: pd.DataFrame) -> Dict[str, Any]: df["type"] = df["type"].str.lower() d = df.set_index("sparql_type").to_dict("index") return {k: python_type_map.get(v.get("type", "String")) for k, v in d.items()} @staticmethod def prefix_map(df: pd.DataFrame) -> Dict[str, Any]: df = df.loc[~df["prefix"].isna()].head() df["comb"] = df["prefix"] + "#" + df["type"] df = df.drop_duplicates("comb") d2 = df.set_index("comb").to_dict("index") return {k: 
python_type_map.get(v.get("type", "String")) for k, v in d2.items()} def get_map(self, client: CimModel) -> Dict[str, Any]: """Reads all metadata from the sparql backend & creates a sparql-type -> python type map Args: client: initialized CimModel Returns: sparql-type -> python type map """ df = client.get_table(self.query, map_data_types=False) if df.empty: return {} type_map = self.type_map(df) prefix_map = self.prefix_map(df) xsd_map = { f"{self.prefixes['xsd']}#{xsd_type}": xsd_map for xsd_type, xsd_map in python_type_map.items() } return {**type_map, **prefix_map, **xsd_map} def get_type( self, sparql_type: str, missing_return: str = "identity", custom_maps: Optional[Dict[str, Any]] = None, ): """Gets the python type/function to apply on columns of the sparql_type Args: sparql_type: missing_return: returns the identity-function if python- type/function is not found, else returns None custom_maps: dictionary on the form {'sparql_data_type': function/datatype} overwrites the default types gained from the graphdb. Applies the function/datatype on all columns in the DataFrame that are of the sparql_data_type Returns: python datatype or function to apply on DataFrame columns """ type_map = {**self.map, **custom_maps} if custom_maps is not None else self.map try: return type_map[sparql_type] except KeyError: warnings.warn(f"{sparql_type} not found in the sparql -> python type map") if missing_return == "identity": return lambda x: x return None def convert_dict( self, d: Dict, drop_missing: bool = True, custom_maps: Optional[Dict[str, Any]] = None ) -> Dict[str, Any]: """Converts a col_name -> sparql_datatype map to a col_name -> python_type map Args: d: dictionary with {'column_name': 'sparql type/DataType'} drop_missing: drops columns where no corresponding python type could be found custom_maps: dictionary on the form {'sparql_data_type': function/datatype} overwrites the default types gained from the graphdb. Applies the function/datatype on all columns in the DataFrame that are of the sparql_data_type. 
Returns: col_name -> python_type/function map """ missing_return = "None" if drop_missing else "identity" base = { column: self.get_type(data_type, missing_return, custom_maps) for column, data_type in d.items() } if drop_missing: return {key: value for key, value in base.items() if value is not None} return base @staticmethod def map_base_types(df: pd.DataFrame, type_map: Dict) -> pd.DataFrame: """Maps the datatypes in type_map which can be used with the df.astype function Args: df: type_map: {'column_name': type/function} map of functions/types to apply on the columns Returns: mapped DataFrame """ as_type_able_columns = {c for c, datatype in type_map.items() if datatype in as_type_able} if not df.empty: df = df.astype({column: type_map[column] for column in as_type_able_columns}) return df @staticmethod def map_exceptions(df: pd.DataFrame, type_map: Dict) -> pd.DataFrame: """Maps the functions/datatypes in type_map which cant be done with the df.astype function Args: df: type_map: {'column_name': type/function} map of functions/types to apply on the columns Returns: mapped DataFrame """ ex_columns = {c for c, datatype in type_map.items() if datatype not in as_type_able} for column in ex_columns: df[column] = df[column].apply(type_map[column]) return df def map_data_types( self, df: pd.DataFrame, col_map: Dict, custom_maps: Dict = None, columns: Dict = None ) -> pd.DataFrame: """Maps the dtypes of a DataFrame to the python-corresponding types of the sparql-types from the source data Args: df: DataFrame with columns to be converted data_row: a complete row with data from the source data of which the DataFrame is constructed from custom_maps: dictionary on the form {'sparql_data_type': function/datatype} overwrites the default types gained from the graphdb. Applies the function/datatype on all columns in the DataFrame that are of the sparql_data_type. columns: dictionary on the form {'DataFrame_column_name: function/datatype} overwrites the default types gained from the graphdb. Applies the function/datatype on the column. Returns: mapped DataFrame """ type_map = {**self.convert_dict(col_map, custom_maps=custom_maps), **columns} df = self.map_base_types(df, type_map) df = self.map_exceptions(df, type_map) return df
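# Illustrative sketch (not in the original module): TypeMapper itself needs a
# live CimModel client, but the core idea -- mapping sparql datatype names to
# Python converters and applying them column by column -- can be shown with the
# module-level python_type_map alone. The column names and datatypes below are
# made up for the example.
if __name__ == "__main__":
    raw_row = {"value": "3.14", "connected": "TRUE", "count": "42"}
    col_types = {"value": "float", "connected": "boolean", "count": "integer"}
    converted = {col: python_type_map[dtype](raw_row[col]) for col, dtype in col_types.items()}
    print(converted)  # {'value': 3.14, 'connected': True, 'count': 42}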
py
1a417e72971b452472afbcfa79379b97346d62a7
""" Includes the XMattersEvent class which wraps the xMatters Event to make it easier to use correct formatting """ import json # pylint: disable = import-error from common_utils.setup_logging import setup_logging # pylint: enable = import-error DEFAULT_LOGGER = setup_logging('xmatters_alert_action.log', 'xmatters_event') class XMattersEvent(object): """ Class that wraps an xMatters Event so that it is easier to use correct formatting """ def __init__(self, **kwargs): """ Constructor, takes no arguments """ self.logger = kwargs.get('logger', DEFAULT_LOGGER) self.properties = {} self.recipients = [] self.priority = None self.valid_priorities = [ 'HIGH', 'MEDIUM', 'LOW' ] def add_property(self, key, value): """ Adds a property to the event @param key: <str>, The name of the property @param value: <str>, The value of the property """ self.properties[key] = value def add_recipient(self, target_name): """ Adds a recipient to the recipients list in the xMatters Event @param target_name: <str>, the target name of the user, group, team, device in xMatters """ self.recipients.append({ 'targetName': target_name }) def set_priority(self, priority): """ Sets the priority of the xMatters Event @param priority: <str>, valid values are HIGH, MEDIUM, and LOW (case insensitive) @raise: ValueError, if the priority is invalid """ upper_priority = priority.upper() if upper_priority in self.valid_priorities: self.priority = upper_priority else: raise ValueError('error=XM_INVALID_PRIORITY value=%s valid_priorities=%s', upper_priority, ';'.join(self.valid_priorities) ) def get_json_payload(self): """ Gets the json payload as a string to send to xMatters @return <str> """ body = { 'properties': self.properties } # empty arrays are considered falsey in python if self.recipients: body['recipients'] = self.recipients if self.priority is not None: body['priority'] = self.priority return json.dumps(body)
py
1a417e84c3d48ce82be779ca23ec50c3e4f5ea84
from http import HTTPStatus from uuid import uuid4 import pytest import structlog from server.utils.json import json_dumps logger = structlog.getLogger(__name__) def test_kinds_get_multi(kind_1, kind_2, test_client, superuser_token_headers): response = test_client.get("/api/kinds", headers=superuser_token_headers) assert HTTPStatus.OK == response.status_code kinds = response.json() assert 2 == len(kinds) def test_kind_get_by_id(kind_1, test_client, superuser_token_headers): response = test_client.get(f"/api/kinds/{kind_1.id}", headers=superuser_token_headers) print(response.__dict__) assert HTTPStatus.OK == response.status_code kind = response.json() assert kind["name"] == "Indica" assert len(kind["tags"]) == 1 assert kind["tags_amount"] == 1 assert len(kind["flavors"]) == 1 assert kind["flavors_amount"] == 1 assert len(kind["strains"]) == 1 def test_kind_get_by_id_404(kind_1, test_client, superuser_token_headers): response = test_client.get(f"/api/kinds/{str(uuid4())}", headers=superuser_token_headers) assert HTTPStatus.NOT_FOUND == response.status_code def test_kind_save(test_client, superuser_token_headers): body = {"name": "New Kind", "icon": "New Icon", "color": "#ffffff"} response = test_client.post("/api/kinds/", data=json_dumps(body), headers=superuser_token_headers) assert HTTPStatus.CREATED == response.status_code kinds = test_client.get("/api/kinds", headers=superuser_token_headers).json() assert 1 == len(kinds) def test_kind_update(kind_1, test_client, superuser_token_headers): body = {"name": "Updated Kind", "icon": "moon", "color": "00fff0"} response = test_client.put(f"/api/kinds/{kind_1.id}", data=json_dumps(body), headers=superuser_token_headers) assert HTTPStatus.CREATED == response.status_code response_updated = test_client.get(f"/api/kinds/{kind_1.id}", headers=superuser_token_headers) kind = response_updated.json() assert kind["name"] == "Updated Kind" def test_kind_delete(kind_1, test_client, superuser_token_headers): response = test_client.delete(f"/api/kinds/{kind_1.id}", headers=superuser_token_headers) assert HTTPStatus.NO_CONTENT == response.status_code kinds = test_client.get("/api/kinds", headers=superuser_token_headers).json() assert 0 == len(kinds)
py
1a417f01558d7d1a1e1433a0f5800564eb746336
#searches file
import sqlite3
import databaseCreate

# columns that may be searched on; used to keep column names out of
# user-controlled SQL
VALID_SEARCH_FIELDS = ("title", "star", "costar", "year", "genre")

#search function
def searchSong(searchBy, searchText):
    databaseCreate.createDb()
    if searchBy not in VALID_SEARCH_FIELDS:
        print("Unknown search field: " + searchBy)
        input("Press Enter to continue")
        return
    # column names cannot be bound as parameters, so the (validated) column is
    # interpolated and only the search value is passed as a parameter
    query = "SELECT * FROM song WHERE {} = ?".format(searchBy)
    try:
        db = sqlite3.connect("SongStorage.db")
        cur = db.cursor()
        cur.execute(query, (searchText,))
        output = cur.fetchall()
        db.close()
    except Exception:
        print("There was a problem while accessing our systems")
        input("Press Enter to continue")
        return

    print("===================================")
    print("SEARCHED RESULTS ARE HERE:")
    print("===================================")
    if not output:
        print("NO RECORDS FOUND")
        print("===================================")
    else:
        # assumes the song table columns are (title, star, costar, year, genre)
        for entry in output:
            print("Title: " + str(entry[0]))
            print("Star: " + str(entry[1]))
            print("Costar: " + str(entry[2]))
            print("Year: " + str(entry[3]))
            print("Genre: " + str(entry[4]))
            print("===================================")
    input("Press enter to continue")

#take user inputs and run the function above to query the database
def searchLookup():
    print("""
    ===============================
    DVD LOOKUP:
    ===============================
    Enter the criteria to look up by:
    1 - Song title
    2 - Star
    3 - Costar
    4 - Year released
    5 - Genre""")
    choice = input("\nType a number and press enter: ")
    searchBy = None
    searchText = None
    try:
        choice = int(choice)
        if choice == 1:
            searchBy = "title"
            searchText = input("Enter the song title to search for: ")
        elif choice == 2:
            searchBy = "star"
            searchText = input("Enter the song star name to search for: ")
        elif choice == 3:
            searchBy = "costar"
            searchText = input("Enter the song costar name to search for: ")
        elif choice == 4:
            searchBy = "year"
            searchText = input("Enter the song release year to search for: ")
        elif choice == 5:
            searchBy = "genre"
            print("""
            Enter the genre to search for:
            1 - Drama
            2 - Reggae
            3 - Rnb
            4 - Romance
            """)
            entrychoice = input("Your value please!\t")
            try:
                entrychoice = int(entrychoice)
                if entrychoice == 1:
                    searchText = "Drama"
                elif entrychoice == 2:
                    searchText = "Reggae"
                elif entrychoice == 3:
                    searchText = "Rnb"
                elif entrychoice == 4:
                    searchText = "Romance"
                else:
                    print("Error in your choice")
                    input("Press enter to return to the main menu:")
            except ValueError:
                print("Please enter only numbers please!")
        else:
            print("Error in your choice")
    except ValueError:
        print("Choose an integer please!")
    if searchBy and searchText:
        searchSong(searchBy, searchText)
py
1a418001237c06521c8b956fab82701c970bfe91
# Generated by Django 3.1.6 on 2021-02-03 21:38 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Question', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('question_text', models.CharField(max_length=200)), ('pub_date', models.DateTimeField(verbose_name='date published')), ], ), migrations.CreateModel( name='Choice', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('choice_text', models.CharField(max_length=200)), ('votes', models.IntegerField(default=0)), ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.question')), ], ), ]
py
1a41801f3c42c20610e5ecbd4e91bfc1d217698f
from janitor.finance import get_symbol import pytest @pytest.mark.xfail(reason="Flaky because it depends on internet connectivity.") def test_convert_stock(): """ Tests get_symbol function, get_symbol should return appropriate string corresponding to abbreviation. This string will be a company's full name, and the abbreviation will be the NSYE symbol for the company. Example: print(get_symbol("aapl")) console >> Apple Inc. If the symbol does not have a corresponding company, Nonetype should be returned. """ assert get_symbol("GME") == "GameStop Corp." assert get_symbol("AAPL") != "Aramark" assert get_symbol("ASNF") is None
py
1a418058901bbfb05c40c0f73fb00741ab46b0da
#!/usr/local/bin/python3 import json, os, sys from diff_adt import DiffConfig, DiffResult from time import localtime, strftime from subprocess import call from diff_lev import * CURRENT_TIMESTAMP = strftime("%Y-%m-%d-%H%M", localtime()) DEBUG_MODE = False def main(): print('Getting config and preparing run ...', end=' ') config = get_config() print('done!') print('Initiating grading', *config.labs, '...', end='\n\n') # Detect initial run or directory structure corruption and run setup if not (os.path.isdir(config.csv_path) and os.path.isdir(config.rosters_dir) and os.path.isdir(config.results_dir) and os.path.isdir(config.submissions_dir)): run_init_setup(config) rosters = build_rosters(config.roster_paths) write_lab_list_for_MATLAB(config) if not setup_solution_files(config): print('\n\nUnable to set up reference solutions. Exiting.') exit(1) print(' Running MATLAB script to generate student outputs ...', end='\n\n') print('\n\nMATLAB run ' + ('finished!' if generate_MATLAB_output() else '\n\nFAILED!'), end='\n\n') print('Comparing results and writing output ...', end=' ') result = DiffResult() for lab in config.labs: diff_lab_outputs(result, lab[:-2], config) output_result_to_csv(result, config, rosters) for lab in config.labs: output_result_to_csv(result, config, rosters, lab_num=lab[3:-2]) print('ALL DONE!', end='\n\n') def run_init_setup(config): print('Looks like this is the first time you are running this script.\n' 'Let me set up some directories ...', end='\n\n') for p in [config.csv_path, config.rosters_dir, config.results_dir, config.submissions_dir]: if p is config.submissions_dir: mkdir(config.submissions_dir) for lab in config.labs: mkdir(config.submissions_dir + lab[:-2]) else: mkdir(p) print('\nAll set up! Now, copy student submissions into {}labXX/, and'.format(config.submissions_dir), '\nplace the class rosters (CSV exported from PolyLearn) into {}.'.format(config.rosters_dir), '\nOnce copying is done, please re-run:', *sys.argv) exit(0) def mkdir(directory): print(' mkdir', directory) if not os.path.isdir(directory): os.mkdir(directory) def write_lab_list_for_MATLAB(config): # Write list of labs to .dat file for MATLAB to read which items to execute lab_list_dat = open(config.submissions_dir + 'lab_list.dat', 'w') for name in config.labs: lab_list_dat.write(name + '\n') lab_list_dat.close() def setup_solution_files(config): new_solutions_success = False default_solution_success = False if check_solution_source(config): print('Solution source for all labs detected.\n', ' Firing up MATLAB to generate new solutions ...', end='\n\n') sys.stdout.flush() new_solutions_success = generate_new_solutions(config) print('Solution generation', 'successful!' if new_solutions_success else 'failed :(', end='\n\n') if not new_solutions_success: print('Could not find solution sources for all labs.\n', ' Copying default solutions over instead ... ', end='\n ') sys.stdout.flush() default_solution_success = copy_default_solutions(config) print('\nCopy complete!' if default_solution_success else 'Copy failed! 
Please check permissions.', end='\n\n') return new_solutions_success or default_solution_success def check_solution_source(config): result = True for lab in config.labs: result &= os.path.isfile(config.solutions_dir + 'source/' + lab) return result def generate_new_solutions(config): return not call(['matlab', '-nodesktop', '-nosplash', '-nodisplay', '-r', "try, cd '{}', pwd, run('./generate_solution'), catch exc, getReport(exc), end, exit".format( os.getcwd())]) def copy_default_solutions(config): result = True default_dir = config.solutions_dir + 'default/' for file_name in os.listdir(default_dir): if '.txt' in file_name: result &= not call(['cp', default_dir + file_name, config.solutions_dir]) return result def generate_MATLAB_output(): script = 'generate_output_vm.m' if len(sys.argv) > 1 and sys.argv[1].lower() == '-vm' else 'generate_output.m' return not call(['matlab', '-nodesktop', '-nosplash', '-nodisplay', '-r', "try, cd '{}', pwd, run('./{}'), catch exc, getReport(exc), end, exit".format( os.getcwd(), script)]) def diff_lab_outputs(result_obj, lab_dir_name, config): submissions_dir = config.submissions_dir solutions_dir = config.solutions_dir results_dir = config.results_dir files = [f for f in os.listdir(results_dir) if os.path.isfile(os.path.join(results_dir, f)) and lab_dir_name in f] solution_file = solutions_dir + lab_dir_name + '.out.txt' alt_solution_file = solutions_dir + lab_dir_name + '.alt.txt' for f in files: lab_index = f.find('_lab') author_name = join_last_name(f[:lab_index]) if DEBUG_MODE: print('comparing', solution_file, 'and', submissions_dir + f, 'for ' + author_name, end='') if os.path.isfile(alt_solution_file): diff_result = max(cmp(solution_file, results_dir + f), cmp(alt_solution_file, results_dir + f)) else: diff_result = cmp(solution_file, results_dir + f) if DEBUG_MODE: print(' ... 
comparison result', diff_result) result_obj.add_result(author_name, lab_dir_name, round(diff_result * config.score_out_of, 2)) def output_result_to_csv(result_obj, config, rosters, lab_num=''): if DEBUG_MODE: print('Final Result Object:\n', result_obj) rosters.append(("", [])) csv_roster = {} for id, roster in rosters: csv = open('{}{}_{}{}{}.csv'.format( config.csv_path, CURRENT_TIMESTAMP, config.csv_name, ('_' if id else '') + id, ('_lab' + lab_num) if lab_num else '' ), 'w') if lab_num: write_to_csv(csv, config.csv_header + 'lab' + lab_num) else: write_to_csv(csv, config.csv_header + str(config.labs)[1:-1].replace(' ', '')) csv_roster[id] = (csv, roster) result = result_obj.result result_tuple_list = sorted([(k, v) for k, v in result.items()]) for author_name, diff_results in result_tuple_list: id = find_roster_id_for_author(author_name, rosters) all_results = per_author_result_to_csv_entry(config.labs, diff_results) entry_str = '{},{},{}'.format( author_name.replace('_', ','), csv_roster[id][1][author_name] if id else '', all_results if not lab_num else ( str(diff_results['lab' + lab_num]) if 'lab' + lab_num in diff_results else '' ) ) csv_to_write_to = csv_roster[id][0] write_to_csv(csv_to_write_to, entry_str) if csv_to_write_to is not csv_roster[""][0]: write_to_csv(csv_roster[""][0], entry_str) for csv, _ in csv_roster.values(): csv.close() def per_author_result_to_csv_entry(lab_file_names, author_result): csv_entry_str = '' for lab_file_name in lab_file_names: lab = lab_file_name[:-2] csv_entry_str += str(author_result[lab]) if lab in author_result else '' csv_entry_str += ',' return csv_entry_str[:-1] def find_roster_id_for_author(author_name, rosters): for id, roster in rosters: if DEBUG_MODE: print('Author name used to look up in roster: ' + author_name) print('Roster\n' + str(roster)) if author_name in roster: return id return "" def write_to_csv(csv_file, line_to_write): if DEBUG_MODE: print(line_to_write) csv_file.write(line_to_write + '\n') def get_config(): with open('diff_config.json') as data_file: data = json.load(data_file) # get the list of lab file names lab_file_names = [] for num in data['labs']: lab_file_names.append('lab{:02}.m'.format(num) if num else 'final.m') return DiffConfig(lab_file_names, data['submissions_dir'], data['solutions_dir'], data['rosters_dir'], data['results_dir'], data['result_csv_path'], data['result_csv_name'], data['score_out_of'], data['roster_paths']) def join_last_name(orig_name_str, wrapper_str='"'): tokens = orig_name_str.split('_') if len(tokens) > 2: return '_'.join([tokens[0], '{}{}{}'.format(wrapper_str, ' '.join(tokens[1:]), wrapper_str)]) else: return orig_name_str def build_rosters(roster_paths): rosters = [] for id, path in roster_paths: roster_file = open(path, 'r') lines = roster_file.readlines()[1:] roster = {} for l in lines: tokens = l.split(',') author_name = '_'.join(tokens[:2]) author_email = tokens[2] roster[author_name] = author_email rosters.append((id, roster)) return rosters if __name__ == '__main__': main()
py
1a41809a7ea820ebe769329f4293d146f1747646
#!/usr/bin/python #coding = utf-8 from RiskQuantLib.Property.NumberProperty.numberProperty import numberProperty class faceValue(numberProperty): def __init__(self,value,unit = 'RMB'): super(faceValue,self).__init__(value,unit)
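# Tiny usage sketch (not in the original file, assumes RiskQuantLib is
# installed): faceValue is just a unit-tagged number property, so constructing
# one only takes a value and an optional unit string.
if __name__ == "__main__":
    fv = faceValue(100.0, unit='USD')
    print(type(fv).__name__)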
py
1a4180e9af8627883d45ebc1608277be201df0e3
# Copyright 2018 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from threading import Lock from fasteners.process_lock import InterProcessLock from os.path import exists from os import chmod class ComboLock: """ A combined process and thread lock. Args: path (str): path to the lockfile for the lock """ def __init__(self, path): # Create lock file if it doesn't exist and set permissions for # all users to lock/unlock if not exists(path): f = open(path, 'w+') f.close() chmod(path, 0x1ff) self.plock = InterProcessLock(path) self.tlock = Lock() def acquire(self, blocking=True): """ Acquire lock, locks thread and process lock. Args: blocking(bool): Set's blocking mode of acquire operation. Default True. Returns: True if lock succeeded otherwise False """ if not blocking: # Lock thread tlocked = self.tlock.acquire(blocking=False) if not tlocked: return False # Lock process plocked = self.plock.acquire(blocking=False) if not plocked: # Release thread lock if process couldn't be locked self.tlock.release() return False else: # blocking, just wait and acquire ALL THE LOCKS!!! self.tlock.acquire() self.plock.acquire() return True def release(self): """ Release acquired lock. """ self.plock.release() self.tlock.release() def __enter__(self): """ Context handler, acquires lock in blocking mode. """ self.acquire() return self def __exit__(self, _type, value, traceback): """ Releases the lock. """ self.release()
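# Usage sketch (not part of the original module): ComboLock is a context
# manager, so the typical pattern is a `with` block around the critical
# section. The lock-file path below is an assumption for the example.
if __name__ == '__main__':
    lock = ComboLock('/tmp/combo_example.lock')
    with lock:
        print('holding both the thread lock and the process lock')
    # Non-blocking attempts report success or failure instead of waiting.
    if lock.acquire(blocking=False):
        print('acquired without blocking')
        lock.release()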
py
1a41830b8c37b522c7b6701f56107c74d3051ba1
from flask import Blueprint, jsonify, request
from multiprocessing.connection import Client
from interface import IRequest, IPageResult, MessageProtocol
import uuid, zlib
from datetime import datetime, timedelta

from tool import log

l = log("Api")

NAME = ("localhost", 25100)

Api = Blueprint('Api', __name__)

@Api.route("/info")
def info():
    return jsonify({"error": False, "result": ["hello", "world"] })

@Api.route("/start")
def start():
    return jsonify({"error": False, "result": ["hello", "world"] })

@Api.route("/render", methods=["GET", "POST"])
def render():
    data = []
    if request.form:
        url = request.form.get("url")
        wait = request.form.get("wait")
        jscript = request.form.get("jscript")

        ctime = int( ( datetime.now() + timedelta(seconds=60*5) ).timestamp() )
        param = IRequest(
            id=uuid.uuid4().hex,
            url=url,
            param={},
            wait=float(wait) if wait else 0,
            expiration_date = ctime,
            jscript = jscript if jscript else "",
            method = "render"
        )

        c = Client(NAME, authkey=b"qwerty")
        c.send( param.__dict__ )
        data.append(param.id)
        l.info(f"Request {param}")
        c.close()
        # data.append( c.recv() )

    return jsonify(MessageProtocol(
        status_code=200, action='', message='',
        payload=data
    ).to_dict())
    # return jsonify({"response": True, "data" : data})


@Api.route("/result/<keyid>", methods=["GET", "POST"])
def get_result(keyid):
    data = []
    res = IPageResult(id=keyid, method="result")
    c = Client(NAME, authkey=b"qwerty")
    c.send( res.__dict__ )

    response = c.recv()
    if response:
        l.info(f"Request {res}")
        data.append( zlib.decompress( response ).decode("utf8") )

    c.close()
    return jsonify(MessageProtocol(
        status_code=200, action='', message='',
        payload=data
    ).to_dict())
    # return jsonify({"response": True, "data" : data})


@Api.route("/a_content", methods=["POST"])
def active_content():
    data = []
    if request.form:
        wait = request.form.get("wait")
        jscript = request.form.get("jscript")

        param = IRequest(
            id="",
            url="",
            param={},
            wait=float(wait) if wait else 0,
            expiration_date = 0,
            jscript = jscript if jscript else "",
            method = "active_content"
        )
        c = Client(NAME, authkey=b"qwerty")
        c.send( param.__dict__ )

        '''An error often occurs here'''
        response = c.recv()
        if response:
            data.append( response )
            l.info(f"Request {param}")

        c.close()

    return jsonify(MessageProtocol(
        status_code=200, action='', message='',
        payload=data
    ).to_dict())
    # return jsonify({"response": True, "data" : data})
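# Wiring sketch (not in the original file): this blueprint is presumably
# registered on a Flask app elsewhere in the project; a minimal host app would
# look like the following. The URL prefix and port are assumptions made for
# illustration.
if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    app.register_blueprint(Api, url_prefix="/api")
    app.run(host="0.0.0.0", port=8080)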
py
1a4183ab20b5e0979367c406fc0f8551e3bebc84
# EDIT THIS FILE AND RENAME TO config.py TO MAKE THIS BOT WORKING # FILL THESE VALUES ACCORDINGLY. from userbot.config import Config class Development(Config): # get these values from my.telegram.org. APP_ID = 6 # 6 is a placeholder. Fill your 6 digit api id API_HASH = "eb06d4abfb49dc3eeb1aeb98ae0f581e" # replace this with your api hash # the name to display in your alive message. # If not filled anything then default value is I'm Eiva. YOUR_NAME = "I'm EÍVÁ" # create any PostgreSQL database. # I recommend to use elephantsql and paste that link here DATABASE_URL = "Your value" # After cloning the repo and installing requirements... # Do `python string.py` and fill the on screen prompts. # String session will be saved in your saved message of telegram. # Put that string here. ANDENCENTO_SESSION = "Your value" # Create a bot in @BotFather # And fill the following values with bot token and username. BOT_TOKEN = "Your value" #token BOT_USERNAME = "Your value" #username # Create a private group and add rose bot to it. # and type /id and paste that id here. # replace that -100 with that group id. LOGGER_ID = -100 # Custom Command Handler. HANDLER = "." # enter the userid of sudo users. # you can add multiple ids by separating them by space. # fill values in [] only. SUDO_USERS = [] # Custom Command Handler for sudo users. SUDO_HANDLER = "," # end of required config # Andencento
py
1a418426cef4a7804b741d0d288d4c648820c3c0
from django.db import models class ExampleAwareModelManager(models.Manager): pass
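# Wiring sketch (illustrative comment, not in the original file): a manager
# subclass like this is attached to a model through the `objects` attribute,
# e.g. in this app's models.py (the Example model is hypothetical):
#
#     class Example(models.Model):
#         name = models.CharField(max_length=100)
#
#         objects = ExampleAwareModelManager()
#
# Custom queryset helpers added to the manager would then be reachable as
# Example.objects.<helper>().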
py
1a41842ce5ec85e7cf87b632442af3f09f4d89aa
#!/usr/bin/env python import yaml import json import urllib.request import urllib.parse # Fixme: non-trivial cases commented out for now repos = { # "SAP/SapMachine": "JDK_VERSION", # "apache/maven": "MAVEN_VERSION", # "gradle/gradle": "GRADLE_VERSION", "nodejs/node": "NODE_VERSION", # "golang/go": "GO_VERSION", "cli/cli": "GH_VERSION", "JetBrains/kotlin": "KOTLIN_VERSION", # "r-darwish/topgrade": "TOPGRADE_VERSION", } # current_versions = {'GRADLE_VERSION': '7.1.0', 'NODE_VERSION': '14.17.1', 'GH_VERSION': '1.11.0', 'KOTLIN_VERSION': '1.5.10'} current_versions = {} for r in repos: print(r) url = f"https://api.github.com/repos/{r}/releases/latest" f = urllib.request.urlopen(url) tag_name = json.loads(f.read().decode("utf-8"))["tag_name"] current_versions[repos.get(r)] = tag_name.removeprefix('v') print(current_versions) versions = {} with open('./versions.yml', "r") as f: versions = yaml.safe_load(f) versions.update(current_versions) print(versions) with open('./versions.yml', "w") as f: yaml.safe_dump(versions, f, default_flow_style=False)
py
1a41843a8e7f1000983b3339aa505e215cac6165
""" Utilities of MobileNet training """ from models import modules import os import sys import time import math import shutil import tabulate import numpy as np import pandas as pd import torch import torch.nn as nn import torchvision import torch.optim as optim import torchvision.transforms as transforms import matplotlib.pyplot as plt import seaborn as sns from functools import partial from models import QConvBN2d import models _print_freq = 50 class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def train(trainloader, net, criterion, optimizer, epoch, args): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to train mode net.train() train_loss = 0 correct = 0 total = 0 end = time.time() for batch_idx, (inputs, targets) in enumerate(trainloader): data_time.update(time.time() - end) targets = targets.cuda(non_blocking=True) inputs = inputs.cuda() outputs = net(inputs) loss = criterion(outputs, targets) if args.clp: reg_alpha = torch.tensor(0.).cuda() a_lambda = torch.tensor(args.a_lambda).cuda() alpha = [] for name, param in net.named_parameters(): if 'alpha' in name: alpha.append(param.item()) reg_alpha += param.item() ** 2 loss += a_lambda * (reg_alpha) optimizer.zero_grad() loss.backward() # for module in net.modules(): # if 'BatchNorm' in str(type(module)): # if module.weight.grad is not None: # module.weight.grad.data.fill_(0) # if module.bias.grad is not None: # module.bias.grad.data.fill_(0) optimizer.step() prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5)) losses.update(loss.item(), inputs.size(0)) top1.update(prec1.item(), inputs.size(0)) top5.update(prec5.item(), inputs.size(0)) batch_time.update(time.time() - end) end = time.time() # import pdb;pdb.set_trace() train_loss += loss.item() if args.clp: res = { 'acc':top1.avg, 'loss':losses.avg, 'clp_alpha':np.array(alpha) } else: res = { 'acc':top1.avg, 'loss':losses.avg, } return res def test(testloader, net, criterion, epoch): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() net.eval() test_loss = 0 end = time.time() with torch.no_grad(): for batch_idx, (inputs, targets) in enumerate(testloader): mean_loader = [] inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True) outputs = net(inputs) loss = criterion(outputs, targets) prec1, prec5 = accuracy(outputs.data, targets, topk=(1, 5)) losses.update(loss.item(), inputs.size(0)) top1.update(prec1.item(), inputs.size(0)) top5.update(prec5.item(), inputs.size(0)) test_loss += loss.item() batch_time.update(time.time() - end) end = time.time() # break return top1.avg, losses.avg def convert_secs2time(epoch_time): need_hour = int(epoch_time / 3600) need_mins = int((epoch_time - 3600*need_hour) / 60) need_secs = int(epoch_time - 3600*need_hour - 60*need_mins) 
return need_hour, need_mins, need_secs def print_log(print_string, log): print("{}".format(print_string)) log.write('{}\n'.format(print_string)) log.flush() def print_table(values, columns, epoch, logger): table = tabulate.tabulate([values], columns, tablefmt='simple', floatfmt='8.4f') if epoch == 0: table = table.split('\n') table = '\n'.join([table[1]] + table) else: table = table.split('\n')[2] logger.info(table) def adjust_learning_rate_schedule(optimizer, epoch, gammas, schedule, lr, mu): """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" if optimizer != "YF": assert len(gammas) == len( schedule), "length of gammas and schedule should be equal" for (gamma, step) in zip(gammas, schedule): if (epoch >= step): lr = lr * gamma else: break for param_group in optimizer.param_groups: param_group['lr'] = lr elif optimizer == "YF": lr = optimizer._lr mu = optimizer._mu return lr, mu def save_checkpoint(state, is_best, save_path, filename='checkpoint.pth.tar'): torch.save(state, save_path+filename) if is_best: shutil.copyfile(save_path+filename, save_path+'model_best.pth.tar') def get_alpha_w(model): alpha = [] count = 0 for m in model.modules(): if isinstance(m, nn.Conv2d): if not count in [0] and not m.weight.size(2)==1: alpha.append(m.alpha_w) count += 1 return alpha def log2df(log_file_name): ''' return a pandas dataframe from a log file ''' with open(log_file_name, 'r') as f: lines = f.readlines() # search backward to find table header num_lines = len(lines) for i in range(num_lines): if lines[num_lines-1-i].startswith('---'): break header_line = lines[num_lines-2-i] num_epochs = i columns = header_line.split() df = pd.DataFrame(columns=columns) for i in range(num_epochs): df.loc[i] = [float(x) for x in lines[num_lines-num_epochs+i].split()] return df """ PROFIT Util """ def categorize_param(model, skip_list=()): quant = [] skip = [] bnbias = [] weight = [] for name, param, in model.named_parameters(): skip_found = False for s in skip_list: if name.find(s) != -1: skip_found = True if not param.requires_grad: continue elif name.endswith(".a") or name.endswith(".c"): quant.append(param) elif skip_found: skip.append(param) elif len(param.shape) == 1 or name.endswith(".bias"): bnbias.append(param) else: weight.append(param) return (quant, skip, weight, bnbias) def get_optimizer(params, train_quant, train_weight, train_bnbias, args): (quant, skip, weight, bnbias) = params optimizer = optim.SGD([ {'params': skip, 'weight_decay': 0, 'lr': 0}, {'params': quant, 'weight_decay': 0., 'lr': args.lr * 1e-2 if train_quant else 0}, {'params': bnbias, 'weight_decay': 0., 'lr': args.lr if train_bnbias else 0}, {'params': weight, 'weight_decay': args.weight_decay, 'lr': args.lr if train_weight else 0}, ], momentum=0.9, nesterov=True) return optimizer def reset_weight_copy(model): for name, module in model.module.named_modules(): if hasattr(module, "WQ"): if hasattr(module.WQ, "weight_old"): del module.WQ.weight_old module.WQ.weight_old = None def lasso_thre(var, thre=1.0): thre = torch.tensor(thre).cuda() a = var.pow(2).pow(1/2) p = torch.max(a, thre) # penalize or not return p def train_profit(train_loader, net, net_t, criterion, optimizer, epoch, metric_map={}, logger=None, lasso=True): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() net.train() # reset weight copy reset_weight_copy(net) if net_t is not None: net_t.train() end = time.time() for i, (input, target) in enumerate(train_loader): 
data_time.update(time.time() - end) # deploy the data input = input.cuda() target = target.cuda(non_blocking=True) if net_t is not None: output_t = net_t(input) # create and attach hook for layer-wise aiwq measure hooks = [] metric_itr_map = {} if len(metric_map) > 0: def forward_hook(self, input, output): if self.WQ.weight_old is not None and input[0].get_device() == 0: with torch.no_grad(): out_old = torch.nn.functional.conv2d(input[0], self.WQ.weight_old, self.bias, self.stride, self.padding, self.dilation, self.groups) out_t = torch.transpose(output, 0, 1).contiguous().view(self.out_channels, -1) out_mean = torch.mean(out_t, 1) out_std = torch.std(out_t, 1) # + 1e-8 out_old_t = torch.transpose(out_old, 0, 1).contiguous().view(self.out_channels, -1) out_old_mean = torch.mean(out_old_t, 1) out_old_std = torch.std(out_old_t, 1) # + 1e-8 out_cond = out_std != 0 out_old_cond = out_old_std != 0 cond = out_cond & out_old_cond out_mean = out_mean[cond] out_std = out_std[cond] out_old_mean = out_old_mean[cond] out_old_std = out_old_std[cond] KL = torch.log(out_old_std / out_std) + \ (out_std ** 2 + (out_mean - out_old_mean) ** 2) / (2 * out_old_std ** 2) - 0.5 metric_itr_map[self.name] = KL.mean().data.cpu().numpy() for name, module in net.module.named_modules(): if hasattr(module, "WQ") and isinstance(module, torch.nn.Conv2d): module.name = name hooks.append(module.register_forward_hook(forward_hook)) # feed forward output = net(input) for hook in hooks: hook.remove() loss_s = criterion(output, target) # student model loss if net_t is not None: loss_kd = -1 * torch.mean( torch.sum(torch.nn.functional.softmax(output_t, dim=1) * torch.nn.functional.log_softmax(output, dim=1), dim=1)) loss = loss_s + loss_kd else: loss = loss_s # backward optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time batch_time.update(time.time() - end) end = time.time() # measure accuracy and record loss prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) losses.update(loss_s.data.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) if ((i+1) % _print_freq) == 0: logger.info('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( epoch+1, i+1, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5)) for key, value in metric_itr_map.items(): if value > 1: continue metric_map[key] = 0.999 * metric_map[key] + 0.001 * value return top1.avg, losses.avg, metric_map def init_precision(model, loader, abit, wbit, set_a=False, set_w=False, eps=0.05): def init_hook(module, input, output): if isinstance(module, models.modules.QConv2d) or isinstance(module, models.modules.QLinear): if not isinstance(input, torch.Tensor): input = input[0] input = input.detach().cpu() input = input.reshape(-1) input = input[input > 0] input, _ = torch.sort(input) if len(input) == 0: small, large = 0, 1e-3 else: small, large = input[int(len(input)*eps)], input[int(len(input)*(1-eps))] if set_a: module.AQ._update_param(abit, small, large-small) # import pdb;pdb.set_trace() if set_w: max_val = module.weight.data.abs().max().item() module.WQ._update_param(wbit, max_val) hooks = [] for name, module in model.named_modules(): hook = module.register_forward_hook(init_hook) hooks.append(hook) model.train() model.cpu() for i, (input, 
target) in enumerate(loader): with torch.no_grad(): if isinstance(model, nn.DataParallel): output = model.module(input) else: output = model(input) break model.cuda() for hook in hooks: hook.remove() def bn_merge(model): r""" Fuse the batchnorm to the weight given a pretrained model """ for module_name in model._modules: block = model._modules[module_name] if not isinstance(block, nn.Sequential): # import pdb;pdb.set_trace() model._modules[module_name] = block continue else: stack = [] for m in block.children(): sub_module = [] for n in m.children(): if isinstance(n, nn.BatchNorm2d): if isinstance(sub_module[-1], QConvBN2d): bn_st_dict = n.state_dict() conv_st_dict = sub_module[-1].state_dict() # batchnorm parameters eps = n.eps mu = bn_st_dict['running_mean'] var = bn_st_dict['running_var'] gamma = bn_st_dict['weight'] nb_tr = bn_st_dict['num_batches_tracked'] if 'bias' in bn_st_dict: beta = bn_st_dict['bias'] else: beta = torch.zeros(gamma.size(0)).float().to(gamma.device) sub_module[-1].gamma.data = gamma sub_module[-1].beta.data = beta sub_module[-1].running_mean.data = mu sub_module[-1].running_var.data = var sub_module[-1].num_batches_tracked.data = nb_tr sub_module[-1].eps = eps # import pdb;pdb.set_trace() else: sub_module.append(n) seq_module = nn.Sequential(*sub_module) stack.append(seq_module) seq_stack = nn.Sequential(*stack) model._modules[module_name] = seq_stack # import pdb;pdb.set_trace() return model def set_precision(model, abit=32, wbit=32, set_a=False, set_w=False): for name, module in model.named_modules(): if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear): if set_a: module.AQ.abit = abit else: module.AQ.abit = 32 if set_w: module.WQ.wbit = wbit else: module.WQ.wbit = 32 if __name__ == "__main__": log = log2df('./save/resnet20_quant_grp8/resnet20_quant_w4_a4_modemean_k2_lambda0.0010_ratio0.7_wd0.0005_lr0.01_swpFalse_groupch8_pushFalse_iter4000_g01/resnet20_quant_w4_a4_modemean_k2_lambda0.0010_ratio0.7_wd0.0005_lr0.01_swpFalse_groupch8_pushFalse_iter4000_tmp_g03.log') epoch = log['ep'] grp_spar = log['grp_spar'] ovall_spar = log['ovall_spar'] spar_groups = log['spar_groups'] penalty_groups = log['penalty_groups'] table = { 'epoch': epoch, 'grp_spar': grp_spar, 'ovall_spar': ovall_spar, 'spar_groups':spar_groups, 'penalty_groups':penalty_groups, } variable = pd.DataFrame(table, columns=['epoch','grp_spar','ovall_spar', 'spar_groups', 'penalty_groups']) variable.to_csv('resnet20_quant_w4_a4_modemean_k2_lambda0.0010_ratio0.7_wd0.0005_lr0.01_swpFalse_groupch8_pushFalse_iter4000_tmp_g03.csv', index=False)
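# --- Reference sketch (not part of the original module; helper name is ours) ----
# bn_merge() above only copies the BatchNorm statistics into the fused QConvBN2d
# blocks. For a plain nn.Conv2d / nn.BatchNorm2d pair, the conventional fold is
#   w' = w * gamma / sqrt(var + eps),  b' = beta + (b - mu) * gamma / sqrt(var + eps).
# A minimal standalone version of that fold, assuming an affine BatchNorm2d in
# eval mode, is sketched below for reference.
def fold_conv_bn_reference(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Return a new Conv2d whose weight/bias absorb the BatchNorm statistics."""
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      stride=conv.stride, padding=conv.padding,
                      dilation=conv.dilation, groups=conv.groups, bias=True)
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)   # gamma / sqrt(var + eps)
    fused.weight.data = conv.weight.data * scale.reshape(-1, 1, 1, 1)
    conv_bias = conv.bias.data if conv.bias is not None else torch.zeros_like(bn.running_mean)
    fused.bias.data = bn.bias.data + (conv_bias - bn.running_mean) * scale
    return fused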
py
1a41848d3e1d733f70358e7a3295d0aba73b361f
from kivy.uix.slider import Slider from kivy.properties import ListProperty from flat_kivy.uix.behaviors import (GrabBehavior, SliderTouchRippleBehavior, ThemeBehavior) class FlatSlider(GrabBehavior, SliderTouchRippleBehavior, ThemeBehavior, Slider): color_tuple = ListProperty(['Blue', '500']) slider_color_tuple = ListProperty(['Orange', '300']) outline_color_tuple = ListProperty(['Blue', '600']) slider_outline_color_tuple = ListProperty(['Orange', '500']) ripple_color_tuple = ListProperty(['Grey', '0000'])
py
1a4184f0c3d9e5aea78bf9e07a6f81aabc2efeb3
""" Binary search """ import unittest from typing import TypeVar T = TypeVar('T') def binary_search(sorted_array: list[T], key: T, lo: int, hi: int) -> int: if lo > hi: return -1 mi = (lo + hi) // 2 if sorted_array[mi] == key: return mi elif key < sorted_array[mi]: return binary_search(sorted_array, key, lo, mi - 1) return binary_search(sorted_array, key, mi + 1, hi) class TestBinarySearch(unittest.TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.integers = [2, 3, 5, 6, 8, 9, 10, 22, 26, 32, 40] self.strings = ['c', 'cpp', 'go', 'java', 'python', 'sql', 'swift'] self.floats = [0.7, 1.2, 3.2, 4.4, 5.2, 5.9, 6.8, 9.5] def test_handles_multiple_array_type_input(self): self.assertEqual(binary_search(self.integers, 8, 0, len(self.integers) - 1), 4) self.assertEqual(binary_search(self.strings, 'c', 0, len(self.strings) - 1), 0) self.assertEqual(binary_search(self.floats, 9.5, 0, len(self.floats) - 1), 7) def test_handles_non_exist_element_input(self): self.assertEqual(binary_search(self.integers, 1, 0, len(self.integers) - 1), -1) self.assertEqual(binary_search(self.strings, 'rust', 0, len(self.strings) - 1), -1) self.assertEqual(binary_search(self.floats, 4.5, 0, len(self.floats) - 1), -1) if __name__ == '__main__': unittest.main()
py
1a41863c6d87f810fcb2a0862eb2e622fe3b5369
class LayerNorm(Module): __parameters__ = ["weight", "bias", ] __buffers__ = [] weight : Tensor bias : Tensor training : bool def forward(self: __torch__.multimodal.model.multimodal_transformer.___torch_mangle_9367.LayerNorm, x: Tensor) -> Tensor: _0 = self.bias _1 = self.weight input = torch.to(x, torch.device("cpu"), 6, False, False, None) ret = torch.layer_norm(input, [768], _1, _0, 1.0000000000000001e-05, True) x0 = torch.to(ret, torch.device("cpu"), 5, False, False, None) return x0 def forward1(self: __torch__.multimodal.model.multimodal_transformer.___torch_mangle_9367.LayerNorm, x: Tensor) -> Tensor: _2 = self.bias _3 = self.weight input = torch.to(x, torch.device("cpu"), 6, False, False, None) ret = torch.layer_norm(input, [768], _3, _2, 1.0000000000000001e-05, True) x1 = torch.to(ret, torch.device("cpu"), 5, False, False, None) return x1
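# Reading aid (not part of the serialized TorchScript archive): dtype code 6 is
# torch.float32 and 5 is torch.float16, so this scripted forward up-casts a
# half-precision input to fp32, applies LayerNorm over the last dimension of
# size 768 with eps=1e-5, and casts the result back to fp16 -- the usual
# mixed-precision LayerNorm pattern. An eager-mode sketch of the same idea:
#
#     class LayerNormFP32(torch.nn.LayerNorm):
#         def forward(self, x):
#             return super().forward(x.to(torch.float32)).to(torch.float16)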
py
1a4186e367176eaf25de4fac3480e075d3492b68
#!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Helpful routines for regression testing.""" from base64 import b64encode from binascii import unhexlify from decimal import Decimal, ROUND_DOWN from subprocess import CalledProcessError import inspect import json import logging import os import random import re import time from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException from io import BytesIO logger = logging.getLogger("TestFramework.utils") # Assert functions ################## def assert_approx(v, vexp, vspan=0.00001): """Assert that `v` is within `vspan` of `vexp`""" if v < vexp - vspan: raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan))) if v > vexp + vspan: raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan))) def assert_fee_amount(fee, tx_size, fee_per_kB): """Assert the fee was in range""" target_fee = round(tx_size * fee_per_kB / 1000, 8) if fee < target_fee: raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee))) # allow the wallet's estimation to be at most 2 bytes off if fee > (tx_size + 2) * fee_per_kB / 1000: raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee))) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) def assert_greater_than_or_equal(thing1, thing2): if thing1 < thing2: raise AssertionError("%s < %s" % (str(thing1), str(thing2))) def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) def assert_raises_message(exc, message, fun, *args, **kwds): try: fun(*args, **kwds) except JSONRPCException: raise AssertionError("Use assert_raises_rpc_error() to test RPC failures") except exc as e: if message is not None and message not in e.error['message']: raise AssertionError( "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format( message, e.error['message'])) except Exception as e: raise AssertionError("Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_raises_process_error(returncode, output, fun, *args, **kwds): """Execute a process and asserts the process return code and output. Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError and verifies that the return code and output are as expected. Throws AssertionError if no CalledProcessError was raised or if the return code and output are not as expected. Args: returncode (int): the process return code. output (string): [a substring of] the process output. fun (function): the function to call. This should execute a process. args*: positional arguments for the function. kwds**: named arguments for the function. 
""" try: fun(*args, **kwds) except CalledProcessError as e: if returncode != e.returncode: raise AssertionError("Unexpected returncode %i" % e.returncode) if output not in e.output: raise AssertionError("Expected substring not found:" + e.output) else: raise AssertionError("No exception raised") def assert_raises_rpc_error(code, message, fun, *args, **kwds): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException and verifies that the error code and message are as expected. Throws AssertionError if no JSONRPCException was raised or if the error code/message are not as expected. Args: code (int), optional: the error code returned by the RPC call (defined in src/rpc/protocol.h). Set to None if checking the error code is not required. message (string), optional: [a substring of] the error string returned by the RPC call. Set to None if checking the error string is not required. fun (function): the function to call. This should be the name of an RPC. args*: positional arguments for the function. kwds**: named arguments for the function. """ assert try_rpc(code, message, fun, *args, **kwds), "No exception raised" def try_rpc(code, message, fun, *args, **kwds): """Tries to run an rpc command. Test against error code and message if the rpc fails. Returns whether a JSONRPCException was raised.""" try: fun(*args, **kwds) except JSONRPCException as e: # JSONRPCException was thrown as expected. Check the code and message values are correct. if (code is not None) and (code != e.error["code"]): raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"]) if (message is not None) and (message not in e.error['message']): raise AssertionError( "Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format( message, e.error['message'])) return True except Exception as e: raise AssertionError("Unexpected exception raised: " + type(e).__name__) else: return False def assert_is_hex_string(string): try: int(string, 16) except Exception as e: raise AssertionError( "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e)) def assert_is_hash_string(string, length=64): if not isinstance(string, str): raise AssertionError("Expected a string, got type %r" % type(string)) elif length and len(string) != length: raise AssertionError( "String of length %d expected; got %d" % (length, len(string))) elif not re.match('[abcdef0-9]+$', string): raise AssertionError( "String %r contains invalid characters for a hash." % string) def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. 
If the should_not_find flag is true, to_match should not be found in object_array """ if should_not_find: assert_equal(expected, {}) num_matched = 0 for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue elif should_not_find: num_matched = num_matched + 1 for key, value in expected.items(): if item[key] != value: raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value))) num_matched = num_matched + 1 if num_matched == 0 and not should_not_find: raise AssertionError("No objects matched %s" % (str(to_match))) if num_matched > 0 and should_not_find: raise AssertionError("Objects were found %s" % (str(to_match))) def assert_scale(number, expected_scale=8): """Assert number has expected scale, e.g. fractional digits; number of digits after the decimal. The default of 8 corresponds to a Bitcoin amount.""" number = str(number) mantissa = number.split('.')[-1].upper() if mantissa[:3] == '0E-': assert_equal(mantissa, '0E-{}'.format(expected_scale)) # exponent notation elif mantissa == number: assert_equal(0, expected_scale) # no mantissa, ergo, expected scale must be 0 else: assert_equal(len(mantissa), expected_scale) # Utility functions ################### def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def EncodeDecimal(o): if isinstance(o, Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) def hex_str_to_bytes(hex_str): return unhexlify(hex_str.encode('ascii')) def str_to_b64str(string): return b64encode(string.encode('utf-8')).decode('ascii') def satoshi_round(amount): return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None): if attempts == float('inf') and timeout == float('inf'): timeout = 60 attempt = 0 time_end = time.time() + timeout while attempt < attempts and time.time() < time_end: if lock: with lock: if predicate(): return else: if predicate(): return attempt += 1 time.sleep(0.05) # Print the cause of the timeout predicate_source = "''''\n" + inspect.getsource(predicate) + "'''" logger.error("wait_until() failed. 
Predicate: {}".format(predicate_source)) if attempt >= attempts: raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts)) elif time.time() >= time_end: raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout)) raise RuntimeError('Unreachable') # RPC/P2P connection constants and functions ############################################ # The maximum number of nodes a single test can spawn MAX_NODES = 12 # Don't assign rpc or p2p ports lower than this PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000)) # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 class PortSeed: # Must be initialized with a unique integer for each process n = None def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None): """ Args: url (str): URL of the RPC server to call node_number (int): the node number (or id) that this calls to Kwargs: timeout (int): HTTP timeout in seconds coveragedir (str): Directory Returns: AuthServiceProxy. convenience object for making RPC calls. """ proxy_kwargs = {} if timeout is not None: proxy_kwargs['timeout'] = timeout proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info coverage_logfile = coverage.get_filename( coveragedir, node_number) if coveragedir else None return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) def p2p_port(n): assert n <= MAX_NODES return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_port(n): return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_url(datadir, i, chain, rpchost): rpc_u, rpc_p = get_auth_cookie(datadir, chain) host = '127.0.0.1' port = rpc_port(i) if rpchost: parts = rpchost.split(':') if len(parts) == 2: host, port = parts else: host = rpchost return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) # Node functions ################ def initialize_datadir(dirname, n, chain): datadir = get_datadir_path(dirname, n) if not os.path.isdir(datadir): os.makedirs(datadir) # Translate chain name to config name if chain == 'testnet3': chain_name_conf_arg = 'testnet' chain_name_conf_section = 'test' else: chain_name_conf_arg = chain chain_name_conf_section = chain with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: f.write("{}=1\n".format(chain_name_conf_arg)) f.write("[{}]\n".format(chain_name_conf_section)) f.write("port=" + str(p2p_port(n)) + "\n") f.write("rpcport=" + str(rpc_port(n)) + "\n") f.write("fallbackfee=0.0002\n") f.write("server=1\n") f.write("keypool=1\n") f.write("discover=0\n") f.write("dnsseed=0\n") f.write("listenonion=0\n") f.write("printtoconsole=0\n") f.write("upnp=0\n") f.write("shrinkdebugfile=0\n") os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True) os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True) return datadir def get_datadir_path(dirname, n): return os.path.join(dirname, "node" + str(n)) def append_config(datadir, options): with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f: for option in options: f.write(option + "\n") def get_auth_cookie(datadir, chain): user = None password = None if os.path.isfile(os.path.join(datadir, "bitcoin.conf")): with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f: for line in f: if line.startswith("rpcuser="): assert user is None # Ensure that there is only one rpcuser line user = line.split("=")[1].strip("\n") if line.startswith("rpcpassword="): assert 
password is None # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") try: with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f: userpass = f.read() split_userpass = userpass.split(':') user = split_userpass[0] password = split_userpass[1] except OSError: pass if user is None or password is None: raise ValueError("No RPC credentials") return user, password # If a cookie file exists in the given datadir, delete it. def delete_cookie_file(datadir, chain): if os.path.isfile(os.path.join(datadir, chain, ".cookie")): logger.debug("Deleting leftover cookie file") os.remove(os.path.join(datadir, chain, ".cookie")) def softfork_active(node, key): """Return whether a softfork is active.""" return node.getblockchaininfo()['softforks'][key]['active'] def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def disconnect_nodes(from_connection, node_num): for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]: try: from_connection.disconnectnode(nodeid=peer_id) except JSONRPCException as e: # If this node is disconnected between calculating the peer id # and issuing the disconnect, don't worry about it. # This avoids a race condition if we're mass-disconnecting peers. if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED raise # wait to disconnect wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5) def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:" + str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo())) def sync_blocks(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced. """ stop_time = time.time() + timeout while time.time() <= stop_time: best_hash = [x.getbestblockhash() for x in rpc_connections] if best_hash.count(best_hash[0]) == len(rpc_connections): return # Check that each peer has at least one connection assert (all([len(x.getpeerinfo()) for x in rpc_connections])) time.sleep(wait) raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash))) def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True): """ Wait until everybody has the same transactions in their memory pools """ stop_time = time.time() + timeout while time.time() <= stop_time: pool = [set(r.getrawmempool()) for r in rpc_connections] if pool.count(pool[0]) == len(rpc_connections): if flush_scheduler: for r in rpc_connections: r.syncwithvalidationinterfacequeue() return # Check that each peer has at least one connection assert (all([len(x.getpeerinfo()) for x in rpc_connections])) time.sleep(wait) raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool))) # Transaction/Block functions ############################# def find_output(node, txid, amount, *, blockhash=None): """ Return index to output of txid with value amount Raises exception if there is none. 
""" txdata = node.getrawtransaction(txid, 1, blockhash) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert confirmations_required >= 0 utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]}) if total_in < amount_needed: raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out + fee change = amount_in - amount if change > amount * 2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (total_in, inputs) = gather_inputs(from_node, amount + fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransactionwithwallet(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], 0) return (txid, signresult["hex"], fee) # Helper to create at least "count" utxos # Pass in a fee that is sufficient for relay and mining new transactions. def create_confirmed_utxos(fee, node, count): to_generate = int(0.5 * count) + 101 while to_generate > 0: node.generate(min(25, to_generate)) to_generate -= 25 utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for i in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} send_value = t['amount'] - fee outputs[addr1] = satoshi_round(send_value / 2) outputs[addr2] = satoshi_round(send_value / 2) raw_tx = node.createrawtransaction(inputs, outputs) signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"] node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): node.generate(1) utxos = node.listunspent() assert len(utxos) >= count return utxos # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). 
def gen_return_txouts(): # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes for i in range(512): script_pubkey = script_pubkey + "01" # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change txouts = [] from .messages import CTxOut txout = CTxOut() txout.nValue = 0 txout.scriptPubKey = hex_str_to_bytes(script_pubkey) for k in range(128): txouts.append(txout) return txouts # Create a spend of each passed-in utxo, splicing in "txouts" to each raw # transaction to make it large. See gen_return_txouts() above. def create_lots_of_big_transactions(node, txouts, utxos, num, fee): addr = node.getnewaddress() txids = [] from .messages import CTransaction for _ in range(num): t = utxos.pop() inputs = [{"txid": t["txid"], "vout": t["vout"]}] outputs = {} change = t['amount'] - fee outputs[addr] = satoshi_round(change) rawtx = node.createrawtransaction(inputs, outputs) tx = CTransaction() tx.deserialize(BytesIO(hex_str_to_bytes(rawtx))) for txout in txouts: tx.vout.append(txout) newtx = tx.serialize().hex() signresult = node.signrawtransactionwithwallet(newtx, None, "NONE") txid = node.sendrawtransaction(signresult["hex"], 0) txids.append(txid) return txids def mine_large_block(node, utxos=None): # generate a 66k transaction, # and 14 of them is close to the 1MB block limit num = 14 txouts = gen_return_txouts() utxos = utxos if utxos is not None else [] if len(utxos) < num: utxos.clear() utxos.extend(node.listunspent()) fee = 100 * node.getnetworkinfo()["relayfee"] create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee) node.generate(1) def find_vout_for_address(node, txid, addr): """ Locate the vout index of the given transaction sending to the given address. Raises runtime error exception if not found. """ tx = node.getrawtransaction(txid, True) for i in range(len(tx["vout"])): if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]): return i raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
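# Illustrative only (the helper below is not used by the framework): concrete
# behaviour of two of the pure helpers above, with made-up but checkable numbers.
def _doc_example():
    # satoshi_round truncates toward zero at 8 decimal places.
    assert satoshi_round("0.123456789") == Decimal("0.12345678")
    # A 250-byte transaction paying 0.00002500 BTC at 0.0001 BTC/kB is exactly on
    # target, so assert_fee_amount accepts it without raising.
    assert_fee_amount(Decimal("0.00002500"), 250, Decimal("0.0001"))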
py
1a418888793eb06c18cb03b4b1fc520f48c4edbe
# MIT license # # Copyright (C) 2018 by XESS Corporation / Hildo Guillardi Junior # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Inserted by Pasteurize tool. from __future__ import print_function, unicode_literals, division, absolute_import from builtins import zip, range, int, str from future import standard_library standard_library.install_aliases() import future import re, difflib from bs4 import BeautifulSoup import http.client # For web scraping exceptions. from ...global_vars import PartHtmlError from ...global_vars import logger, DEBUG_OVERVIEW, DEBUG_DETAILED, DEBUG_OBSESSIVE, DEBUG_HTTP_RESPONSES from .. import fake_browser from .. import distributor from ..global_vars import distributor_dict from urllib.parse import quote_plus as urlquote class dist_newark(distributor.distributor): def __init__(self, name, scrape_retries, throttle_delay): super(dist_newark, self).__init__(name, distributor_dict[name]['site']['url'], scrape_retries, throttle_delay) self.browser.start_new_session() @staticmethod def dist_init_distributor_dict(): distributor_dict.update( { 'newark': { 'module': 'newark', # The directory name containing this file. 'scrape': 'web', # Allowable values: 'web' or 'local'. 'label': 'Newark', # Distributor label used in spreadsheet columns. 'order_cols': ['part_num', 'purch', 'refs'], # Sort-order for online orders. 'order_delimiter': ',', # Delimiter for online orders. # Formatting for distributor header in worksheet. 'wrk_hdr_format': { 'font_size': 14, 'font_color': 'white', 'bold': True, 'align': 'center', 'valign': 'vcenter', 'bg_color': '#A2AE06' # Newark/E14 olive green. }, # Web site defitions. 'site': { 'url': 'https://www.newark.com/', 'currency': 'USD', 'locale': 'US' }, } }) def dist_get_price_tiers(self, html_tree): '''@brief Get the pricing tiers from the parsed tree of the Newark product page. @param html_tree `str()` html of the distributor part page. @return `dict()` price breaks, the keys are the quantities breaks. 
''' price_tiers = {} try: qty_strs = [] for qty in html_tree.find( 'table', class_=('tableProductDetailPrice', 'pricing')).find_all( 'td', class_='qty'): qty_strs.append(qty.text) price_strs = [] for price in html_tree.find( 'table', class_=('tableProductDetailPrice', 'pricing')).find_all( 'td', class_='threeColTd'): price_strs.append(price.text) qtys_prices = list(zip(qty_strs, price_strs)) for qty_str, price_str in qtys_prices: try: qty = re.search('(\s*)([0-9,]+)', qty_str).group(2) qty = int(re.sub('[^0-9]', '', qty)) price_tiers[qty] = float(re.sub('[^0-9\.]', '', price_str)) except (TypeError, AttributeError, ValueError): continue except AttributeError: # This happens when no pricing info is found in the tree. self.logger.log(DEBUG_OBSESSIVE, 'No Newark pricing information found!') return price_tiers # Return empty price tiers. return price_tiers def dist_get_part_num(self, html_tree): '''@brief Get the part number from the Newark product page. @param html_tree `str()` html of the distributor part page. @return `list()`of the parts that match. ''' try: # Newark catalog number is stored in a description list, so get # all the list terms and descriptions, strip all the spaces from those, # and pair them up. div = html_tree.find('div', class_='productDescription').find('dl') dt = [re.sub('\s','',d.text) for d in div.find_all('dt')] dd = [re.sub('\s','',d.text) for d in div.find_all('dd')] dtdd = {k:v for k,v in zip(dt,dd)} # Pair terms with descriptions. return dtdd.get('NewarkPartNo.:', '') except KeyError: self.logger.log(DEBUG_OBSESSIVE, 'No Newark catalog number found!') return '' # No catalog number found in page. except AttributeError: self.logger.log(DEBUG_OBSESSIVE, 'No Newark product description found!') return '' # No ProductDescription found in page. def dist_get_qty_avail(self, html_tree): '''@brief Get the available quantity of the part from the Newark product page. @param html_tree `str()` html of the distributor part page. @return `int` avaliable quantity. ''' try: qty_str = html_tree.find('p', class_='availabilityHeading').text except (AttributeError, ValueError): # No quantity found (not even 0) so this is probably a non-stocked part. # Return None so the part won't show in the spreadsheet for this dist. return None try: qty = re.sub('[^0-9]','',qty_str) # Strip all non-number chars. return int(re.sub('[^0-9]', '', qty_str)) # Return integer for quantity. except ValueError: # No quantity found (not even 0) so this is probably a non-stocked part. # Return None so the part won't show in the spreadsheet for this dist. self.logger.log(DEBUG_OBSESSIVE, 'No Newark part quantity found!') return None def dist_get_part_html_tree(self, pn, extra_search_terms='', url=None, descend=2): '''@brief Find the Newark HTML page for a part number and return the URL and parse tree. @param pn Part number `str()`. @param extra_search_terms @param url @param descend @return (html `str()` of the page, url) ''' # Use the part number to lookup the part using the site search function, unless a starting url was given. if url is None: url = 'http://www.newark.com/webapp/wcs/stores/servlet/Search?catalogId=15003&langId=-1&storeId=10194&gs=true&st=' \ + urlquote(pn, safe='') if extra_search_terms: url = url + urlquote(' ' + extra_search_terms, safe='') elif url[0] == '/': url = 'http://www.newark.com' + url elif url.startswith('..'): url = 'http://www.newark.com/Search/' + url # Open the URL, read the HTML from it, and parse it into a tree structure. 
try: html = self.browser.scrape_URL(url) except: self.logger.log(DEBUG_OBSESSIVE,'No HTML page for {} from {}'.format(pn, self.name)) raise PartHtmlError try: tree = BeautifulSoup(html, 'lxml') except Exception: self.logger.log(DEBUG_OBSESSIVE,'No HTML tree for {} from {}'.format(pn, self.name)) raise PartHtmlError # Abort if the part number isn't in the HTML somewhere. # (Only use the numbers and letters to compare PN to HTML.) if re.sub('[\W_]','',str.lower(pn)) not in re.sub('[\W_]','',str.lower(str(html))): self.logger.log(DEBUG_OBSESSIVE,'No part number {} in HTML page from {}'.format(pn, self.name)) raise PartHtmlError # If the tree contains the tag for a product page, then just return it. if tree.find('div', class_='productDisplay', id='page') is not None: return tree, url # If the tree is for a list of products, then examine the links to try to find the part number. if tree.find('table', class_='productLister', id='sProdList') is not None: self.logger.log(DEBUG_OBSESSIVE,'Found product table for {} from {}'.format(pn, self.name)) if descend <= 0: self.logger.log(DEBUG_OBSESSIVE,'Passed descent limit for {} from {}'.format(pn, self.name)) raise PartHtmlError else: # Look for the table of products. products = tree.find('table', class_='productLister', id='sProdList').find('tbody').find_all('tr') # Extract the product links for the part numbers from the table. product_links = [] for p in products: try: product_links.append( p.find('td', class_='mftrPart').find('a')) except AttributeError: continue # Extract all the part numbers from the text portion of the links. part_numbers = [l.text for l in product_links] # Look for the part number in the list that most closely matches the requested part number. try: match = difflib.get_close_matches(pn, part_numbers, 1, 0.0)[0] except IndexError: raise PartHtmlError # Now look for the link that goes with the closest matching part number. for l in product_links: if l.text == match: # Get the tree for the linked-to page and return that. self.logger.log(DEBUG_OBSESSIVE,'Selecting {} from product table for {} from {}'.format(l.text.strip(), pn, self.name)) return self.dist_get_part_html_tree(pn, extra_search_terms, url=l.get('href', ''), descend=descend-1) # I don't know what happened here, so give up. self.logger.log(DEBUG_OBSESSIVE,'Unknown error for {} from {}'.format(pn, self.name)) self.logger.log(DEBUG_HTTP_RESPONSES,'Response was %s' % html) raise PartHtmlError
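# Illustrative note (not part of the scraper): dist_get_price_tiers() above expects
# the Newark pricing table to look roughly like the fragment below; each matched
# 'qty' / 'threeColTd' cell pair becomes one {quantity: unit_price} entry.
#
#   <table class="tableProductDetailPrice">
#     <tr><td class="qty">10</td><td class="threeColTd">$0.42</td></tr>
#     <tr><td class="qty">100</td><td class="threeColTd">$0.31</td></tr>
#   </table>
#
#   -> price_tiers == {10: 0.42, 100: 0.31}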
py
1a418a8a7fb4ff06ca908c30c52decb0d6f8d3ec
"""Common datatypes and pytd utilities.""" from typing import Any, List, Tuple import dataclasses from pytype import utils from pytype.pytd import pytd from pytype.pytd import pytd_utils from pytype.pytd.codegen import pytdgen from pytype.pytd.parse import node as pytd_node from typed_ast import ast3 _STRING_TYPES = ("str", "bytes", "unicode") class ParseError(Exception): """Exceptions raised by the parser.""" def __init__(self, msg, line=None, filename=None, column=None, text=None): super().__init__(msg) self._line = line self._filename = filename self._column = column self._text = text @classmethod def from_exc(cls, exc) -> "ParseError": if isinstance(exc, cls): return exc elif exc.args: return cls(exc.args[0]) else: return cls(repr(exc)) def at(self, node, filename=None, src_code=None): """Add position information from `node` if it doesn't already exist.""" # NOTE: ast3.Module has no position info, and will be the `node` when # build_type_decl_unit() is called, so we cannot call `node.lineno` if not self._line: self._line = getattr(node, "lineno", None) self._column = getattr(node, "col_offset", None) if not self._filename: self._filename = filename if self._line and src_code: try: self._text = src_code.splitlines()[self._line-1] except IndexError: pass return self def clear_position(self): self._line = None @property def line(self): return self._line def __str__(self): lines = [] if self._filename or self._line is not None: lines.append(f' File: "{self._filename}", line {self._line}') if self._column and self._text: indent = 4 stripped = self._text.lstrip() lines.append("%*s%s" % (indent, "", stripped)) # Output a pointer below the error column, adjusting for stripped spaces. pos = indent + (self._column - 1) - (len(self._text) - len(stripped)) lines.append("%*s^" % (pos, "")) lines.append("%s: %s" % (type(self).__name__, utils.message(self))) return "\n".join(lines) # Type aliases Parameters = Tuple[pytd_node.Node, ...] class Ellipsis: # pylint: disable=redefined-builtin pass @dataclasses.dataclass class Raise: exception: pytd.NamedType @dataclasses.dataclass class SlotDecl: slots: Tuple[str, ...] 
@dataclasses.dataclass class Constant: """Literal constants in pyi files.""" type: str value: Any @classmethod def from_num(cls, node: ast3.Num): if isinstance(node.n, int): return cls("int", node.n) else: return cls("float", node.n) @classmethod def from_str(cls, node: ast3.Str): if node.kind == "b": return cls("bytes", node.s) elif node.kind == "u": return cls("unicode", node.s) else: return cls("str", node.s) @classmethod def from_const(cls, node: ast3.NameConstant): if node.value is None: return pytd.NamedType("None") return cls(type(node.value).__name__, node.value) def to_pytd(self): return pytd.NamedType(self.type) def repr_str(self): """String representation with prefixes.""" if self.type == "str": val = f"'{self.value}'" elif self.type == "unicode": val = f"u'{self.value}'" elif self.type == "bytes": val = str(self.value) else: # For non-strings val = repr(self.value) return val def to_pytd_literal(self): """Make a pytd node from Literal[self.value].""" if self.value is None: return pytd.NamedType("None") if self.type in _STRING_TYPES: val = self.repr_str() elif self.type == "float": raise ParseError(f"Invalid type `float` in Literal[{self.value}].") else: val = self.value return pytd.Literal(val) def negated(self): """Return a new constant with value -self.value.""" if self.type in ("int", "float"): return Constant(self.type, -self.value) raise ParseError("Unary `-` can only apply to numeric literals.") @classmethod def is_str(cls, value): return isinstance(value, cls) and value.type in _STRING_TYPES def __repr__(self): return f"LITERAL({self.repr_str()})" def string_value(val, context=None) -> str: """Convert a Constant(str) to a string if needed.""" if isinstance(val, str): return val elif Constant.is_str(val): return str(val.value) else: if context: msg = f"Type mismatch in {context}" else: msg = "Type mismatch" raise ParseError(f"{msg}: Expected str, got {val}") def is_any(val) -> bool: if isinstance(val, Ellipsis): return True return pytdgen.is_any(val) def pytd_literal(parameters: List[Any]) -> pytd_node.Node: """Create a pytd.Literal.""" literal_parameters = [] for p in parameters: if pytdgen.is_none(p): literal_parameters.append(p) elif isinstance(p, pytd.NamedType): # TODO(b/173742489): support enums. literal_parameters.append(pytd.AnythingType()) elif isinstance(p, Constant): literal_parameters.append(p.to_pytd_literal()) elif isinstance(p, pytd.Literal): literal_parameters.append(p) elif isinstance(p, pytd.UnionType): for t in p.type_list: if isinstance(t, pytd.Literal): literal_parameters.append(t) else: raise ParseError(f"Literal[{t}] not supported") else: raise ParseError(f"Literal[{p}] not supported") return pytd_utils.JoinTypes(literal_parameters) def pytd_annotated(parameters: List[Any]) -> pytd_node.Node: """Create a pytd.Annotated.""" if len(parameters) < 2: raise ParseError( "typing.Annotated takes at least two parameters: " "Annotated[type, 'annotation', ...].") typ, *annotations = parameters if not all(isinstance(x, Constant) for x in annotations): raise ParseError( "Annotations needs to be string literals: " "Annotated[type, 'annotation', ...].") annotations = tuple(x.repr_str() for x in annotations) return pytd.Annotated(typ, annotations) def builtin_keyword_constants(): # We cannot define these in a pytd file because assigning to a keyword breaks # the python parser. defs = [ ("True", "bool"), ("False", "bool"), ("None", "NoneType"), ("__debug__", "bool") ] return [pytd.Constant(name, pytd.NamedType(typ)) for name, typ in defs]
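# Illustrative only (the helper below is not used by the parser): a few concrete
# behaviours of Constant and string_value(), checkable against the code above.
def _constant_examples():
    assert Constant("str", "ok").repr_str() == "'ok'"
    assert Constant("unicode", "ok").repr_str() == "u'ok'"
    assert Constant("int", 3).negated().value == -3
    assert Constant.is_str(Constant("bytes", b"x"))
    assert string_value(Constant("str", "x"), context="test") == "x"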
py
1a418ab6044c1167f701811885406f1492aa3558
class Indexer(object):
    """Mixin that exposes indexer objects (e.g. ``.idx``) on the host collection."""

    @property
    def idx(self):
        """Index by id: ``obj.idx[some_id]`` or ``obj.idx[[id1, id2]]``."""
        return SvIdIndexer("idx", self)


class RootIndexer:
    """Base indexer: remembers its name and the object it indexes into."""

    def __init__(self, name, obj):
        self.name = name
        self.obj = obj


class SvIdIndexer(RootIndexer):
    """Id-based indexer; a single id string is normalised to a one-element list."""

    def __getitem__(self, value):
        if isinstance(value, str):  # accept a bare id as well as a list of ids
            value = [value]
        return self.obj.filter_by_id(value)
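# Minimal usage sketch (hypothetical host class): ``.idx[...]`` simply forwards to
# the owner's ``filter_by_id()`` with the key normalised to a list.
class _DemoCollection(Indexer):
    def __init__(self, records):
        self.records = records          # e.g. {"a": 1, "b": 2}

    def filter_by_id(self, ids):
        return [self.records[i] for i in ids]

# _DemoCollection({"a": 1, "b": 2}).idx["a"]         -> [1]
# _DemoCollection({"a": 1, "b": 2}).idx[["a", "b"]]  -> [1, 2]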
py
1a418b42723d2e41891027b614700c2b04bf80aa
#!/usr/bin/env python3 import os import argparse import torch import torch.distributed as dist import torchvision import torchvision.transforms as transforms from torchvision.models import AlexNet from torchvision.models import vgg19 import deepspeed from deepspeed.pipe import PipelineModule from deepspeed.utils import RepeatingLoader def cifar_trainset(local_rank, dl_path='/tmp/cifar10-data'): transform = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) # Ensure only one rank downloads. # Note: if the download path is not on a shared filesytem, remove the semaphore # and switch to args.local_rank dist.barrier() if local_rank != 0: dist.barrier() trainset = torchvision.datasets.CIFAR10(root=dl_path, train=True, download=True, transform=transform) if local_rank == 0: dist.barrier() return trainset def get_args(): parser = argparse.ArgumentParser(description='CIFAR') parser.add_argument('--local_rank', type=int, default=-1, help='local rank passed from distributed launcher') parser.add_argument('-s', '--steps', type=int, default=100, help='quit after this many steps') parser.add_argument('-p', '--pipeline-parallel-size', type=int, default=2, help='pipeline parallelism') parser.add_argument('--backend', type=str, default='nccl', help='distributed backend') parser.add_argument('--seed', type=int, default=1138, help='PRNG seed') parser = deepspeed.add_config_arguments(parser) args = parser.parse_args() return args def train_base(args): torch.manual_seed(args.seed) # VGG also works :-) #net = vgg19(num_classes=10) net = AlexNet(num_classes=10) trainset = cifar_trainset(args.local_rank) engine, _, dataloader, __ = deepspeed.initialize( args=args, model=net, model_parameters=[p for p in net.parameters() if p.requires_grad], training_data=trainset) dataloader = RepeatingLoader(dataloader) data_iter = iter(dataloader) rank = dist.get_rank() gas = engine.gradient_accumulation_steps() criterion = torch.nn.CrossEntropyLoss() total_steps = args.steps * engine.gradient_accumulation_steps() step = 0 for micro_step in range(total_steps): batch = next(data_iter) inputs = batch[0].to(engine.device) labels = batch[1].to(engine.device) outputs = engine(inputs) loss = criterion(outputs, labels) engine.backward(loss) engine.step() if micro_step % engine.gradient_accumulation_steps() == 0: step += 1 if rank == 0 and (step % 10 == 0): print(f'step: {step:3d} / {args.steps:3d} loss: {loss}') def join_layers(vision_model): layers = [ *vision_model.features, vision_model.avgpool, lambda x: torch.flatten(x, 1), *vision_model.classifier, ] return layers def train_pipe(args, part='parameters'): torch.manual_seed(args.seed) deepspeed.runtime.utils.set_random_seed(args.seed) # # Build the model # # VGG also works :-) #net = vgg19(num_classes=10) net = AlexNet(num_classes=10) net = PipelineModule(layers=join_layers(net), loss_fn=torch.nn.CrossEntropyLoss(), num_stages=args.pipeline_parallel_size, partition_method=part, activation_checkpoint_interval=0) trainset = cifar_trainset(args.local_rank) engine, _, _, _ = deepspeed.initialize( args=args, model=net, model_parameters=[p for p in net.parameters() if p.requires_grad], training_data=trainset) for step in range(args.steps): loss = engine.train_batch() if __name__ == '__main__': args = get_args() deepspeed.init_distributed(dist_backend=args.backend) args.local_rank = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(args.local_rank) if 
args.pipeline_parallel_size == 0: train_base(args) else: train_pipe(args)
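# Launch sketch (file name and config values are illustrative, not from the repo):
#
#   deepspeed cifar_pipeline.py --deepspeed_config ds_config.json -p 2 --steps 200
#
# where ds_config.json is a minimal DeepSpeed config along the lines of:
#
#   {
#     "train_batch_size": 256,
#     "train_micro_batch_size_per_gpu": 8,
#     "optimizer": {"type": "Adam", "params": {"lr": 0.001}},
#     "steps_per_print": 10
#   }
#
# --deepspeed_config is added by deepspeed.add_config_arguments() above; -p and
# --steps are the script's own flags.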
py
1a418b7913ff861449182c2e0dce2013d34c7001
import argparse import os import pickle parser = argparse.ArgumentParser() parser.add_argument("--model_ind", type=int, required=True) parser.add_argument("--out_root", type=str, default="/scratch/shared/slow/xuji/iid_private") given_config = parser.parse_args() given_config.out_dir = os.path.join(given_config.out_root, str(given_config.model_ind)) reloaded_config_path = os.path.join(given_config.out_dir, "config.pickle") print("Loading restarting config from: %s" % reloaded_config_path) with open(reloaded_config_path, "rb") as config_f: config = pickle.load(config_f) if not hasattr(config, "batchnorm_track"): print("adding batchnorm track") config.batchnorm_track = True if not hasattr(config, "num_sub_heads"): print("adding num sub heads") config.num_sub_heads = config.num_heads if not hasattr(config, "select_sub_head_on_loss"): print("adding select_sub_head_on_loss") config.select_sub_head_on_loss = False if not hasattr(config, "use_doersch_datasets"): # only needed for seg configs print("adding use doersch datasets") config.use_doersch_datasets = False with open(os.path.join(config.out_dir, "config.pickle"), 'wb') as outfile: pickle.dump(config, outfile) with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file: text_file.write("%s" % config) # these are for backup with open(os.path.join(config.out_dir, "best_config.pickle"), 'wb') as outfile: pickle.dump(config, outfile) with open(os.path.join(config.out_dir, "best_config.txt"), "w") as text_file: text_file.write("%s" % config)
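# Invocation sketch (model index and path are illustrative): this rewrites
# <out_root>/<model_ind>/config.pickle in place, adding the newer attributes with
# backward-compatible defaults, and refreshes the .txt and best_config copies.
#
#   python <this_script>.py --model_ind 640 --out_root /path/to/iid_private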
py
1a418bd796aafb74abde699dbe0318ef749b1066
from . import plugin, watchdog from .command import Command, CommandHandler, TypeCommand from ._client import Natsumi as Client from ._client import g as prefix
py
1a418c9f87dda1adfb8dcc1354f38aca3571f87d
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.arm_polling import ARMPolling from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class ConnectionMonitorsOperations(object): """ConnectionMonitorsOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2021_02_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def _create_or_update_initial( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str parameters, # type: "_models.ConnectionMonitor" migrate=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) 
-> "_models.ConnectionMonitorResult" cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_or_update_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if migrate is not None: query_parameters['migrate'] = self._serialize.query("migrate", migrate, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'ConnectionMonitor') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore def begin_create_or_update( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str parameters, # type: "_models.ConnectionMonitor" migrate=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> LROPoller["_models.ConnectionMonitorResult"] """Create or update a connection monitor. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. :type network_watcher_name: str :param connection_monitor_name: The name of the connection monitor. :type connection_monitor_name: str :param parameters: Parameters that define the operation to create a connection monitor. 
:type parameters: ~azure.mgmt.network.v2021_02_01.models.ConnectionMonitor :param migrate: Value indicating whether connection monitor V1 should be migrated to V2 format. :type migrate: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorResult] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, connection_monitor_name=connection_monitor_name, parameters=parameters, migrate=migrate, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore def get( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.ConnectionMonitorResult" """Gets a connection monitor by name. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. 
:type network_watcher_name: str :param connection_monitor_name: The name of the connection monitor. :type connection_monitor_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: ConnectionMonitorResult, or the result of cls(response) :rtype: ~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore def _delete_initial( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore def begin_delete( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes the specified connection monitor. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. :type network_watcher_name: str :param connection_monitor_name: The name of the connection monitor. :type connection_monitor_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, connection_monitor_name=connection_monitor_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore def update_tags( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str parameters, # type: "_models.TagsObject" **kwargs # type: Any ): # type: (...) -> "_models.ConnectionMonitorResult" """Update tags of the specified connection monitor. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_watcher_name: The name of the network watcher. :type network_watcher_name: str :param connection_monitor_name: The name of the connection monitor. :type connection_monitor_name: str :param parameters: Parameters supplied to update connection monitor tags. 
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject :keyword callable cls: A custom type or function that will be passed the direct response :return: ConnectionMonitorResult, or the result of cls(response) :rtype: ~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update_tags.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(parameters, 'TagsObject') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore def _stop_initial( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self._stop_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore def begin_stop( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Stops the specified connection monitor. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. :type network_watcher_name: str :param connection_monitor_name: The name of the connection monitor. :type connection_monitor_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._stop_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, connection_monitor_name=connection_monitor_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore def _start_initial( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self._start_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore def begin_start( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Starts the specified connection monitor. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. :type network_watcher_name: str :param connection_monitor_name: The name of the connection monitor. :type connection_monitor_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._start_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, connection_monitor_name=connection_monitor_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore def _query_initial( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) 
-> "_models.ConnectionMonitorQueryResult" cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" # Construct URL url = self._query_initial.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response) if response.status_code == 202: deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore def begin_query( self, resource_group_name, # type: str network_watcher_name, # type: str connection_monitor_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"] """Query a snapshot of the most recent connection states. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. :type network_watcher_name: str :param connection_monitor_name: The name given to the connection monitor. :type connection_monitor_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorQueryResult] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._query_initial( resource_group_name=resource_group_name, network_watcher_name=network_watcher_name, connection_monitor_name=connection_monitor_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore def list( self, resource_group_name, # type: str network_watcher_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.ConnectionMonitorListResult"] """Lists all connection monitors for the specified Network Watcher. :param resource_group_name: The name of the resource group containing Network Watcher. :type resource_group_name: str :param network_watcher_name: The name of the Network Watcher resource. 
:type network_watcher_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-02-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
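
def _example_connection_monitor_usage():  # illustrative sketch only, not generated code
    """Hedged usage sketch, not part of the generated operations class above: it
    shows how these operations are typically reached through
    NetworkManagementClient. Assumes azure-identity is installed; the
    subscription id and resource names are placeholders.
    """
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")

    # A plain GET returns the deserialized ConnectionMonitorResult directly.
    monitor = client.connection_monitors.get("my-rg", "my-network-watcher", "my-monitor")

    # begin_* methods are long-running operations: they return an LROPoller and
    # .result() blocks until the service reports a terminal state.
    poller = client.connection_monitors.begin_query("my-rg", "my-network-watcher", "my-monitor")
    query_result = poller.result()
    return monitor, query_result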
py
1a418cbea4663b432a25b148bfec269dad16a207
# encoding: utf-8 """ Paragraph-related proxy types. """ from __future__ import ( absolute_import, division, print_function, unicode_literals ) from ..enum.text import WD_LINE_SPACING from ..shared import ElementProxy, Emu, lazyproperty, Length, Pt, Twips from .tabstops import TabStops class ParagraphFormat(ElementProxy): """ Provides access to paragraph formatting such as justification, indentation, line spacing, space before and after, and widow/orphan control. """ __slots__ = ('_tab_stops',) @property def alignment(self): """ A member of the :ref:`WdParagraphAlignment` enumeration specifying the justification setting for this paragraph. A value of |None| indicates paragraph alignment is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.jc_val @alignment.setter def alignment(self, value): pPr = self._element.get_or_add_pPr() pPr.jc_val = value @property def first_line_indent(self): """ |Length| value specifying the relative difference in indentation for the first line of the paragraph. A positive value causes the first line to be indented. A negative value produces a hanging indent. |None| indicates first line indentation is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.first_line_indent @first_line_indent.setter def first_line_indent(self, value): pPr = self._element.get_or_add_pPr() pPr.first_line_indent = value @property def keep_together(self): """ |True| if the paragraph should be kept "in one piece" and not broken across a page boundary when the document is rendered. |None| indicates its effective value is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.keepLines_val @keep_together.setter def keep_together(self, value): self._element.get_or_add_pPr().keepLines_val = value @property def keep_with_next(self): """ |True| if the paragraph should be kept on the same page as the subsequent paragraph when the document is rendered. For example, this property could be used to keep a section heading on the same page as its first paragraph. |None| indicates its effective value is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.keepNext_val @keep_with_next.setter def keep_with_next(self, value): self._element.get_or_add_pPr().keepNext_val = value @property def left_indent(self): """ |Length| value specifying the space between the left margin and the left side of the paragraph. |None| indicates the left indent value is inherited from the style hierarchy. Use an |Inches| value object as a convenient way to apply indentation in units of inches. """ pPr = self._element.pPr if pPr is None: return None return pPr.ind_left @left_indent.setter def left_indent(self, value): pPr = self._element.get_or_add_pPr() pPr.ind_left = value @property def line_spacing(self): """ |float| or |Length| value specifying the space between baselines in successive lines of the paragraph. A value of |None| indicates line spacing is inherited from the style hierarchy. A float value, e.g. ``2.0`` or ``1.75``, indicates spacing is applied in multiples of line heights. A |Length| value such as ``Pt(12)`` indicates spacing is a fixed height. The |Pt| value class is a convenient way to apply line spacing in units of points. Assigning |None| resets line spacing to inherit from the style hierarchy. 
""" pPr = self._element.pPr if pPr is None: return None return self._line_spacing(pPr.spacing_line, pPr.spacing_lineRule) @line_spacing.setter def line_spacing(self, value): pPr = self._element.get_or_add_pPr() if value is None: pPr.spacing_line = None pPr.spacing_lineRule = None elif isinstance(value, Length): pPr.spacing_line = value if pPr.spacing_lineRule != WD_LINE_SPACING.AT_LEAST: pPr.spacing_lineRule = WD_LINE_SPACING.EXACTLY else: pPr.spacing_line = Emu(value * Twips(240)) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE @property def line_spacing_rule(self): """ A member of the :ref:`WdLineSpacing` enumeration indicating how the value of :attr:`line_spacing` should be interpreted. Assigning any of the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or :attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing` to be updated to produce the corresponding line spacing. """ pPr = self._element.pPr if pPr is None: return None return self._line_spacing_rule( pPr.spacing_line, pPr.spacing_lineRule ) @line_spacing_rule.setter def line_spacing_rule(self, value): pPr = self._element.get_or_add_pPr() if value == WD_LINE_SPACING.SINGLE: pPr.spacing_line = Twips(240) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE elif value == WD_LINE_SPACING.ONE_POINT_FIVE: pPr.spacing_line = Twips(360) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE elif value == WD_LINE_SPACING.DOUBLE: pPr.spacing_line = Twips(480) pPr.spacing_lineRule = WD_LINE_SPACING.MULTIPLE else: pPr.spacing_lineRule = value @property def page_break_before(self): """ |True| if the paragraph should appear at the top of the page following the prior paragraph. |None| indicates its effective value is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.pageBreakBefore_val @page_break_before.setter def page_break_before(self, value): self._element.get_or_add_pPr().pageBreakBefore_val = value @property def right_indent(self): """ |Length| value specifying the space between the right margin and the right side of the paragraph. |None| indicates the right indent value is inherited from the style hierarchy. Use a |Cm| value object as a convenient way to apply indentation in units of centimeters. """ pPr = self._element.pPr if pPr is None: return None return pPr.ind_right @right_indent.setter def right_indent(self, value): pPr = self._element.get_or_add_pPr() pPr.ind_right = value @property def shading_fill(self): """ A member of :ref:`WdColorIndex` indicating the color of highlighting applied, or `None` if no highlighting is applied. """ pPr = self._element.pPr if pPr is None: return None return pPr.shading_fill @shading_fill.setter def shading_fill(self, value): pPr = self._element.get_or_add_pPr() pPr.shading_fill = value @property def space_after(self): """ |Length| value specifying the spacing to appear between this paragraph and the subsequent paragraph. |None| indicates this value is inherited from the style hierarchy. |Length| objects provide convenience properties, such as :attr:`~.Length.pt` and :attr:`~.Length.inches`, that allow easy conversion to various length units. """ pPr = self._element.pPr if pPr is None: return None return pPr.spacing_after @space_after.setter def space_after(self, value): self._element.get_or_add_pPr().spacing_after = value @property def space_before(self): """ |Length| value specifying the spacing to appear between this paragraph and the prior paragraph. |None| indicates this value is inherited from the style hierarchy. 
|Length| objects provide convenience properties, such as :attr:`~.Length.pt` and :attr:`~.Length.cm`, that allow easy conversion to various length units. """ pPr = self._element.pPr if pPr is None: return None return pPr.spacing_before @space_before.setter def space_before(self, value): self._element.get_or_add_pPr().spacing_before = value @lazyproperty def tab_stops(self): """ |TabStops| object providing access to the tab stops defined for this paragraph format. """ pPr = self._element.get_or_add_pPr() return TabStops(pPr) @property def widow_control(self): """ |True| if the first and last lines in the paragraph remain on the same page as the rest of the paragraph when Word repaginates the document. |None| indicates its effective value is inherited from the style hierarchy. """ pPr = self._element.pPr if pPr is None: return None return pPr.widowControl_val @widow_control.setter def widow_control(self, value): self._element.get_or_add_pPr().widowControl_val = value @staticmethod def _line_spacing(spacing_line, spacing_lineRule): """ Return the line spacing value calculated from the combination of *spacing_line* and *spacing_lineRule*. Returns a |float| number of lines when *spacing_lineRule* is ``WD_LINE_SPACING.MULTIPLE``, otherwise a |Length| object of absolute line height is returned. Returns |None| when *spacing_line* is |None|. """ if spacing_line is None: return None if spacing_lineRule == WD_LINE_SPACING.MULTIPLE: return spacing_line / Pt(12) return spacing_line @staticmethod def _line_spacing_rule(line, lineRule): """ Return the line spacing rule value calculated from the combination of *line* and *lineRule*. Returns special members of the :ref:`WdLineSpacing` enumeration when line spacing is single, double, or 1.5 lines. """ if lineRule == WD_LINE_SPACING.MULTIPLE: if line == Twips(240): return WD_LINE_SPACING.SINGLE if line == Twips(360): return WD_LINE_SPACING.ONE_POINT_FIVE if line == Twips(480): return WD_LINE_SPACING.DOUBLE return lineRule
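
def _example_paragraph_format():  # illustrative sketch only, not part of the library
    """Hedged usage sketch showing the ParagraphFormat properties defined above
    driven through python-docx's public API. The document content and the
    'demo.docx' file name are placeholders.
    """
    from docx import Document
    from docx.enum.text import WD_ALIGN_PARAGRAPH
    from docx.shared import Inches, Pt

    document = Document()
    paragraph = document.add_paragraph("Example paragraph for formatting.")
    fmt = paragraph.paragraph_format

    fmt.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY  # maps to the `alignment` property
    fmt.first_line_indent = Inches(0.25)        # positive value indents the first line
    fmt.line_spacing = 1.5                      # float -> MULTIPLE line spacing rule
    fmt.space_after = Pt(12)                    # absolute spacing below the paragraph
    fmt.keep_with_next = True                   # keep this paragraph with the next one

    document.save("demo.docx")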
py
1a418d0f5af4d6ff5877fb8d9283c5b3e417c1d6
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import logging as log
import os
import subprocess
import sys

from openvino.tools.mo.utils.versions_checker import check_python_version  # pylint: disable=no-name-in-module


def log_ie_not_found():
    log.error("Could not find the Inference Engine or nGraph Python API.\n"
              "Consider building the Inference Engine and nGraph Python APIs"
              " from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\""
              .format("bat" if sys.platform == "win32" else "sh"))  # sys.platform reports "win32" on Windows


def setup_env():
    ret_code = check_python_version()
    if ret_code:
        sys.exit(ret_code)

    from openvino.tools.mo.utils.find_ie_version import find_ie_version
    ie_found = True
    try:
        ie_found = find_ie_version(silent=True)
    except Exception:
        ie_found = False

    if not ie_found:
        log_ie_not_found()
        sys.exit(1)

    mo_root_path = os.path.join(os.path.dirname(__file__), os.pardir)

    python_path_key = 'PYTHONPATH'
    if python_path_key not in os.environ:
        os.environ[python_path_key] = mo_root_path
    else:
        os.environ[python_path_key] = os.pathsep.join([os.environ[python_path_key], mo_root_path])
    return True


def subprocess_main(framework=None):
    """
    Please keep this file compatible with python2 in order to check the user's python version.

    This function checks that the Inference Engine Python API is available and working as expected,
    and then executes the matching main_<fw>.py in a sub-process. Due to some OS specifics we can't
    just add paths to Python modules and libraries into the current env. So to make the Inference
    Engine Python API available inside MO we need to use a subprocess with a new env.
    """
    setup_env()

    path_to_main = os.path.join(os.path.realpath(os.path.dirname(__file__)),
                                'main_{}.py'.format(framework) if framework else 'main.py')

    # python2 compatible code. Do not remove.
    args = [sys.executable, path_to_main]
    for arg in sys.argv[1:]:
        args.append(arg)
    status = subprocess.run(args, env=os.environ)
    sys.exit(status.returncode)
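
# Illustrative usage sketch (an assumption, not part of this module): a
# framework-specific launcher is expected to do little more than delegate to
# subprocess_main(), which re-executes the matching main_<fw>.py in a
# subprocess with the environment patched by setup_env(). For example, a
# hypothetical mo_tf.py could contain just:
#
#     from openvino.tools.mo.subprocess_main import subprocess_main
#
#     if __name__ == "__main__":
#         subprocess_main(framework='tf')   # runs main_tf.py with sys.argv forwarded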
py
1a418d349b4b53714b6dfd1e17e4eb440cd24c93
from setuptools import setup # Current status: pre-alpha setup(name='opticalmethodspy', version='0.1.0', description='Python library for Optical Methods', author='Jiovani Ledesma Arredondo', author_email='[email protected]', license = "MIT", keywords=["Optics","Methods", "Optical"], url='https://github.com/JiovaniLedesma/OpticalMethodsPy', packages=['opticalmethodspy'], install_requires=['numpy','sympy','matplotlib','scipy'], classifiers=[ "Development Status :: 2 - Pre-Alpha", "Intended Audience :: Education", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython", ] )
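
# Usage note (an assumption about the intended workflow, not part of this build
# script): installing from the project root, e.g. with `pip install .`, makes
# the package declared in `packages` importable:
#
#     import opticalmethodspy            # top-level package from this distribution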
py
1a418d48d748543c390db6a06ac114d51b2dea1d
from unittest.mock import patch

from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase


class CommandTests(TestCase):

    def test_wait_for_db_ready(self):
        """Test wait_for_db when the database is available immediately."""
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            gi.return_value = True
            call_command('wait_for_db')
            self.assertEqual(gi.call_count, 1)

    @patch('time.sleep', return_value=True)
    def test_wait_for_db(self, ts):
        """Test wait_for_db retries until the database becomes available."""
        with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
            gi.side_effect = [OperationalError] * 5 + [True]
            call_command('wait_for_db')
            self.assertEqual(gi.call_count, 6)
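
# A minimal sketch of the wait_for_db management command these tests exercise;
# the real implementation is not shown in this file, and its usual location
# (e.g. core/management/commands/wait_for_db.py) is an assumption. Only the
# retry-until-available behaviour is implied by the mocks above.
import time

from django.core.management.base import BaseCommand
from django.db import connections
from django.db.utils import OperationalError as DbOperationalError


class Command(BaseCommand):
    """Pause execution until the default database connection is available."""

    def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                db_conn = connections['default']
            except DbOperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available!'))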
py
1a418dbfbb2750a54bbb34b7f4f98aadd0dab893
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import tempfile from typing import TYPE_CHECKING from flask import flash, g, redirect from flask_appbuilder import expose, SimpleFormView from flask_appbuilder.models.sqla.interface import SQLAInterface from flask_appbuilder.security.decorators import has_access from flask_babel import lazy_gettext as _ from werkzeug.wrappers import Response from wtforms.fields import StringField from wtforms.validators import ValidationError import superset.models.core as models from superset import app, db, is_feature_enabled from superset.connectors.sqla.models import SqlaTable from superset.constants import RouteMethod from superset.exceptions import CertificateException from superset.sql_parse import Table from superset.typing import FlaskResponse from superset.utils import core as utils from superset.views.base import DeleteMixin, SupersetModelView, YamlExportMixin from .forms import CsvToDatabaseForm, ExcelToDatabaseForm from .mixins import DatabaseMixin from .validators import schema_allows_csv_upload, sqlalchemy_uri_validator if TYPE_CHECKING: from werkzeug.datastructures import FileStorage # pylint: disable=unused-import config = app.config stats_logger = config["STATS_LOGGER"] def sqlalchemy_uri_form_validator(_: _, field: StringField) -> None: """ Check if user has submitted a valid SQLAlchemy URI """ sqlalchemy_uri_validator(field.data, exception=ValidationError) def certificate_form_validator(_: _, field: StringField) -> None: """ Check if user has submitted a valid SSL certificate """ if field.data: try: utils.parse_ssl_cert(field.data) except CertificateException as ex: raise ValidationError(ex.message) def upload_stream_write(form_file_field: "FileStorage", path: str) -> None: chunk_size = app.config["UPLOAD_CHUNK_SIZE"] with open(path, "bw") as file_description: while True: chunk = form_file_field.stream.read(chunk_size) if not chunk: break file_description.write(chunk) class DatabaseView( DatabaseMixin, SupersetModelView, DeleteMixin, YamlExportMixin ): # pylint: disable=too-many-ancestors datamodel = SQLAInterface(models.Database) include_route_methods = RouteMethod.CRUD_SET add_template = "superset/models/database/add.html" edit_template = "superset/models/database/edit.html" validators_columns = { "sqlalchemy_uri": [sqlalchemy_uri_form_validator], "server_cert": [certificate_form_validator], } yaml_dict_key = "databases" def _delete(self, pk: int) -> None: DeleteMixin._delete(self, pk) @expose("/list/") @has_access def list(self) -> FlaskResponse: if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"): return super().list() return super().render_app_template() class CsvToDatabaseView(SimpleFormView): form = CsvToDatabaseForm form_template = "superset/form_view/csv_to_database_view/edit.html" form_title = _("CSV 
to Database configuration") add_columns = ["database", "schema", "table_name"] def form_get(self, form: CsvToDatabaseForm) -> None: form.sep.data = "," form.header.data = 0 form.mangle_dupe_cols.data = True form.skipinitialspace.data = False form.skip_blank_lines.data = True form.infer_datetime_format.data = True form.decimal.data = "." form.if_exists.data = "fail" def form_post(self, form: CsvToDatabaseForm) -> Response: database = form.con.data csv_table = Table(table=form.name.data, schema=form.schema.data) if not schema_allows_csv_upload(database, csv_table.schema): message = _( 'Database "%(database_name)s" schema "%(schema_name)s" ' "is not allowed for csv uploads. Please contact your Superset Admin.", database_name=database.database_name, schema_name=csv_table.schema, ) flash(message, "danger") return redirect("/csvtodatabaseview/form") if "." in csv_table.table and csv_table.schema: message = _( "You cannot specify a namespace both in the name of the table: " '"%(csv_table.table)s" and in the schema field: ' '"%(csv_table.schema)s". Please remove one', table=csv_table.table, schema=csv_table.schema, ) flash(message, "danger") return redirect("/csvtodatabaseview/form") uploaded_tmp_file_path = tempfile.NamedTemporaryFile( dir=app.config["UPLOAD_FOLDER"], suffix=os.path.splitext(form.csv_file.data.filename)[1].lower(), delete=False, ).name try: utils.ensure_path_exists(config["UPLOAD_FOLDER"]) upload_stream_write(form.csv_file.data, uploaded_tmp_file_path) con = form.data.get("con") database = ( db.session.query(models.Database).filter_by(id=con.data.get("id")).one() ) # More can be found here: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html csv_to_df_kwargs = { "sep": form.sep.data, "header": form.header.data if form.header.data else 0, "index_col": form.index_col.data, "mangle_dupe_cols": form.mangle_dupe_cols.data, "skipinitialspace": form.skipinitialspace.data, "skiprows": form.skiprows.data, "nrows": form.nrows.data, "skip_blank_lines": form.skip_blank_lines.data, "parse_dates": form.parse_dates.data, "infer_datetime_format": form.infer_datetime_format.data, "chunksize": 1000, } if form.null_values.data: csv_to_df_kwargs["na_values"] = form.null_values.data csv_to_df_kwargs["keep_default_na"] = False # More can be found here: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html df_to_sql_kwargs = { "name": csv_table.table, "if_exists": form.if_exists.data, "index": form.index.data, "index_label": form.index_label.data, "chunksize": 1000, } database.db_engine_spec.create_table_from_csv( uploaded_tmp_file_path, csv_table, database, csv_to_df_kwargs, df_to_sql_kwargs, ) # Connect table to the database that should be used for exploration. # E.g. if hive was used to upload a csv, presto will be a better option # to explore the table. 
expore_database = database explore_database_id = database.explore_database_id if explore_database_id: expore_database = ( db.session.query(models.Database) .filter_by(id=explore_database_id) .one_or_none() or database ) sqla_table = ( db.session.query(SqlaTable) .filter_by( table_name=csv_table.table, schema=csv_table.schema, database_id=expore_database.id, ) .one_or_none() ) if sqla_table: sqla_table.fetch_metadata() if not sqla_table: sqla_table = SqlaTable(table_name=csv_table.table) sqla_table.database = expore_database sqla_table.database_id = database.id sqla_table.user_id = g.user.id sqla_table.schema = csv_table.schema sqla_table.fetch_metadata() db.session.add(sqla_table) db.session.commit() except Exception as ex: # pylint: disable=broad-except db.session.rollback() try: os.remove(uploaded_tmp_file_path) except OSError: pass message = _( 'Unable to upload CSV file "%(filename)s" to table ' '"%(table_name)s" in database "%(db_name)s". ' "Error message: %(error_msg)s", filename=form.csv_file.data.filename, table_name=form.name.data, db_name=database.database_name, error_msg=str(ex), ) flash(message, "danger") stats_logger.incr("failed_csv_upload") return redirect("/csvtodatabaseview/form") os.remove(uploaded_tmp_file_path) # Go back to welcome page / splash screen message = _( 'CSV file "%(csv_filename)s" uploaded to table "%(table_name)s" in ' 'database "%(db_name)s"', csv_filename=form.csv_file.data.filename, table_name=str(csv_table), db_name=sqla_table.database.database_name, ) flash(message, "info") stats_logger.incr("successful_csv_upload") return redirect("/tablemodelview/list/") class ExcelToDatabaseView(SimpleFormView): form = ExcelToDatabaseForm form_template = "superset/form_view/excel_to_database_view/edit.html" form_title = _("Excel to Database configuration") add_columns = ["database", "schema", "table_name"] def form_get(self, form: ExcelToDatabaseForm) -> None: form.header.data = 0 form.mangle_dupe_cols.data = True form.decimal.data = "." form.if_exists.data = "fail" form.sheet_name.data = "" def form_post(self, form: ExcelToDatabaseForm) -> Response: database = form.con.data excel_table = Table(table=form.name.data, schema=form.schema.data) if not schema_allows_csv_upload(database, excel_table.schema): message = _( 'Database "%(database_name)s" schema "%(schema_name)s" ' "is not allowed for excel uploads. Please contact your Superset Admin.", database_name=database.database_name, schema_name=excel_table.schema, ) flash(message, "danger") return redirect("/exceltodatabaseview/form") if "." in excel_table.table and excel_table.schema: message = _( "You cannot specify a namespace both in the name of the table: " '"%(excel_table.table)s" and in the schema field: ' '"%(excel_table.schema)s". Please remove one', table=excel_table.table, schema=excel_table.schema, ) flash(message, "danger") return redirect("/exceltodatabaseview/form") uploaded_tmp_file_path = tempfile.NamedTemporaryFile( dir=app.config["UPLOAD_FOLDER"], suffix=os.path.splitext(form.excel_file.data.filename)[1].lower(), delete=False, ).name try: utils.ensure_path_exists(config["UPLOAD_FOLDER"]) upload_stream_write(form.excel_file.data, uploaded_tmp_file_path) con = form.data.get("con") database = ( db.session.query(models.Database).filter_by(id=con.data.get("id")).one() ) # some params are not supported by pandas.read_excel (e.g. chunksize). 
# More can be found here: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_excel.html excel_to_df_kwargs = { "header": form.header.data if form.header.data else 0, "index_col": form.index_col.data, "mangle_dupe_cols": form.mangle_dupe_cols.data, "skiprows": form.skiprows.data, "nrows": form.nrows.data, "sheet_name": form.sheet_name.data if form.sheet_name.data else 0, "parse_dates": form.parse_dates.data, } if form.null_values.data: excel_to_df_kwargs["na_values"] = form.null_values.data excel_to_df_kwargs["keep_default_na"] = False df_to_sql_kwargs = { "name": excel_table.table, "if_exists": form.if_exists.data, "index": form.index.data, "index_label": form.index_label.data, "chunksize": 1000, } database.db_engine_spec.create_table_from_excel( uploaded_tmp_file_path, excel_table, database, excel_to_df_kwargs, df_to_sql_kwargs, ) # Connect table to the database that should be used for exploration. # E.g. if hive was used to upload a excel, presto will be a better option # to explore the table. expore_database = database explore_database_id = database.explore_database_id if explore_database_id: expore_database = ( db.session.query(models.Database) .filter_by(id=explore_database_id) .one_or_none() or database ) sqla_table = ( db.session.query(SqlaTable) .filter_by( table_name=excel_table.table, schema=excel_table.schema, database_id=expore_database.id, ) .one_or_none() ) if sqla_table: sqla_table.fetch_metadata() if not sqla_table: sqla_table = SqlaTable(table_name=excel_table.table) sqla_table.database = expore_database sqla_table.database_id = database.id sqla_table.user_id = g.user.id sqla_table.schema = excel_table.schema sqla_table.fetch_metadata() db.session.add(sqla_table) db.session.commit() except Exception as ex: # pylint: disable=broad-except db.session.rollback() try: os.remove(uploaded_tmp_file_path) except OSError: pass message = _( 'Unable to upload Excel file "%(filename)s" to table ' '"%(table_name)s" in database "%(db_name)s". ' "Error message: %(error_msg)s", filename=form.excel_file.data.filename, table_name=form.name.data, db_name=database.database_name, error_msg=str(ex), ) flash(message, "danger") stats_logger.incr("failed_excel_upload") return redirect("/exceltodatabaseview/form") os.remove(uploaded_tmp_file_path) # Go back to welcome page / splash screen message = _( 'Excel file "%(excel_filename)s" uploaded to table "%(table_name)s" in ' 'database "%(db_name)s"', excel_filename=form.excel_file.data.filename, table_name=str(excel_table), db_name=sqla_table.database.database_name, ) flash(message, "info") stats_logger.incr("successful_excel_upload") return redirect("/tablemodelview/list/")
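
def _example_csv_ingest_sketch():  # illustrative only, not Superset code
    """Standalone sketch of what the csv_to_df_kwargs / df_to_sql_kwargs pairs
    built above conceptually drive: pandas.read_csv in chunks followed by
    DataFrame.to_sql. The sqlite URL, file name, and table name are
    placeholders; in Superset the real work is delegated to
    database.db_engine_spec.create_table_from_csv / create_table_from_excel.
    """
    import pandas as pd
    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///example.db")
    for chunk in pd.read_csv("upload.csv", sep=",", header=0, chunksize=1000):
        chunk.to_sql("uploaded_table", engine, if_exists="append", index=False)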
py
1a418e1f1387f99e60bde9f1b8b5bbad0c391f5d
class DataframeUtils(object): @classmethod def to_records(cls, data): """ :type data:pandas.DataFrame :rtype: list """ return list(data.T.to_dict().values())
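
def _example_to_records():  # small usage sketch; the sample frame is made up
    # The same shape of output can also be obtained with pandas' built-in
    # DataFrame.to_dict('records').
    import pandas as pd

    df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
    return DataframeUtils.to_records(df)  # [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]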
py
1a418e6c65f4c57cd5e37ac28fdb103c8aa9d0ad
# Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2020, Emanuele Bugliarello (@e-bug). # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import sys import json import yaml import random import logging import argparse from io import open from tqdm import tqdm import _pickle as cPickle from easydict import EasyDict as edict import numpy as np import torch import torch.nn as nn import torch.distributed as dist from torch.utils.data import DataLoader from pytorch_transformers.tokenization_bert import BertTokenizer from volta.config import BertConfig from volta.encoders import BertForVLPreTraining from volta.datasets import FlickrVis4LangDataset from volta.datasets._all_image_features_reader import ImageFeaturesH5Reader logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser() # Model parser.add_argument("--from_pretrained", default="bert-base-uncased", type=str, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--bert_model", default="bert-base-uncased", type=str, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--config_file", default="config/bert_config.json", type=str, help="The config file which specified the model details.") # Output parser.add_argument("--output_dir", default="results", type=str, help="The output directory where the model checkpoints will be written.") parser.add_argument("--dump_results", default=False, action="store_true", help="Whether to save predictions onto disk") # Task parser.add_argument("--tasks_config_file", default="config_tasks/vilbert_trainval_tasks.yml", type=str, help="The config file which specified the tasks details.") parser.add_argument("--task", default="", type=str, help="training task number") parser.add_argument("--masking", default=None, type=str, choices=["all", "object", "none"], help="Image regions to mask") parser.add_argument("--overlap_threshold", default=0.5, type=float, help="Threshold for image regions to mask") # Text parser.add_argument("--do_lower_case", default=True, type=bool, help="Whether to lower case the input text. 
True for uncased models, False for cased models.") # Evaluation parser.add_argument("--split", default="", type=str, help="which split to use.") parser.add_argument("--batch_size", default=30, type=int, help="batch size.") parser.add_argument("--drop_last", action="store_true", help="whether to drop last incomplete batch") # Seed parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") # Distributed parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--num_workers", type=int, default=0, help="Number of workers in the dataloader.") parser.add_argument("--in_memory", default=False, type=bool, help="whether use chunck for parallel training.") parser.add_argument("--use_chunk", default=0, type=float, help="whether use chunck for parallel training.") return parser.parse_args() def main(): args = parse_args() # Devices if args.local_rank == -1: device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 torch.distributed.init_process_group(backend="nccl") default_gpu = False if dist.is_available() and args.local_rank != -1: rank = dist.get_rank() if rank == 0: default_gpu = True else: default_gpu = True logger.info(f"device: {device} n_gpu: {n_gpu}, distributed training: {bool(args.local_rank != -1)}") # Load config config = BertConfig.from_json_file(args.config_file) # Load task config with open(args.tasks_config_file, "r") as f: task_cfg = edict(yaml.safe_load(f)) task_id = args.task.strip() task = "TASK" + task_id task_name = task_cfg[task]["name"] if task_cfg[task].get("fusion_method", None): # VL-BERT pooling for VQA config.fusion_method = task_cfg[task]["fusion_method"] # Output dirs savePath = args.output_dir if default_gpu and not os.path.exists(savePath): os.makedirs(savePath) # Seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) # Dataset feats_h5path = task_cfg[task]["features_h5path1"] features_reader = ImageFeaturesH5Reader(feats_h5path, config, args.in_memory) batch_size = task_cfg[task]["batch_size"] num_workers = args.num_workers if args.local_rank != -1: batch_size = int(batch_size / dist.get_world_size()) num_workers = int(num_workers / dist.get_world_size()) logger.info("Loading %s Dataset with batch size %d" % (task_name, batch_size)) eval_split = args.split or task_cfg[task]["val_split"] tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case) dset = FlickrVis4LangDataset( task, task_cfg[task]["dataroot"], args.masking, eval_split, features_reader, None, tokenizer, args.bert_model, max_seq_length=task_cfg[task]["max_seq_length"], max_region_num=task_cfg[task]["max_region_num"], num_locs=config.num_locs, threshold=args.overlap_threshold, add_global_imgfeat=config.add_global_imgfeat ) dl = DataLoader(dset, shuffle=False, batch_size=batch_size, num_workers=num_workers, pin_memory=True) # Model config.visual_target_weights = {} model = BertForVLPreTraining.from_pretrained(args.from_pretrained, config=config) # Move to GPU(s) model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." 
) model = DDP(model, delay_allreduce=True) elif n_gpu > 1: model = nn.DataParallel(model) # Print summary if default_gpu: print("***** Running evaluation *****") print(" Num Iters: ", len(dl)) print(" Batch size: ", batch_size) # Evaluate model.eval() loss_fct = nn.CrossEntropyLoss(ignore_index=-1) phrase_ids, image_ids, pred_tokens, true_tokens, pred_scores, lm_losses = [], [], [], [], [], [] for batch in tqdm(dl, total=len(dl)): image_id = batch[-1] batch = batch[:-1] if device.type != 'cpu': batch = tuple(t.cuda(device=device, non_blocking=True) for t in batch) phrase_id, caption, input_mask, segment_ids, lm_label_ids, features, spatials, image_cls, \ obj_labels, obj_confs, attr_labels, attr_confs, image_attrs, image_mask, image_labels = batch with torch.no_grad(): predictions_t, _, _, _, _ = model( caption, features, spatials, token_type_ids=segment_ids, attention_mask=input_mask, image_attention_mask=image_mask, masked_lm_labels=None, image_label=None, image_cls=image_cls, obj_labels=obj_labels, obj_confs=obj_confs, attr_labels=attr_labels, attr_confs=attr_confs, image_attrs=image_attrs ) # loss = masked_loss_t + masked_loss_v + pair_match_loss target_ixs = [[] for _ in range(predictions_t.size(0))] xs, ys = torch.where(lm_label_ids != -1) for x, y in zip(xs, ys): target_ixs[x].append(y.item()) for bix in range(predictions_t.size(0)): pred_bix_tokens, true_bix_tokens, bix_predictions = [], [], [] for masked_ix in target_ixs[bix]: predicted_index = torch.argmax(predictions_t[bix, masked_ix]).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] label_token = tokenizer.convert_ids_to_tokens([lm_label_ids[bix, masked_ix].item()])[0] pred_bix_tokens.append(predicted_token) true_bix_tokens.append(label_token) bix_predictions.append(predictions_t[bix, masked_ix].numpy()) masked_lm_loss = loss_fct(predictions_t[bix].view(-1, config.vocab_size), lm_label_ids[bix].view(-1),).unsqueeze(0).item() if args.dump_results: # pred_tokens.append(pred_bix_tokens) # true_tokens.append(true_bix_tokens) # pred_scores.append(bix_predictions) # image_ids.append(image_id[bix].item()) # phrase_ids.append(phrase_id[bix].item()) lm_losses.append(masked_lm_loss) if default_gpu: print("MLM:", np.mean(np.array(lm_losses))) if args.dump_results: eval_path = os.path.join(savePath, eval_split) masking_str = args.masking if args.masking != "ref" else args.masking+str(args.overlap_threshold) # cPickle.dump(pred_tokens, open(eval_path + "_%s_preds.pkl" % masking_str, "wb")) # cPickle.dump(true_tokens, open(eval_path + "_%s_truth.pkl" % masking_str, "wb")) # cPickle.dump(pred_scores, open(eval_path + "_%s_score.pkl" % masking_str, "wb")) # cPickle.dump(image_ids, open(eval_path + "_%s_imgids.pkl" % masking_str, "wb")) # cPickle.dump(phrase_ids, open(eval_path + "_%s_phrids.pkl" % masking_str, "wb")) cPickle.dump(lm_losses, open(eval_path + "_%s_mlm.pkl" % masking_str, "wb")) if __name__ == "__main__": main()
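
# Sketch (editor's addition, not part of the evaluation script): the per-example
# decoding done inside the loop above, reduced to a single sequence. Shapes and
# the -1 "not masked" convention follow the script; the helper name is
# hypothetical and the tokenizer is assumed to expose convert_ids_to_tokens.
def _decode_masked_positions(logits, lm_label_ids, tokenizer):
    # logits: (seq_len, vocab_size); lm_label_ids: (seq_len,), -1 means unmasked
    import torch

    masked = (lm_label_ids != -1).nonzero(as_tuple=True)[0]
    pred_ids = logits[masked].argmax(dim=-1).tolist()
    return tokenizer.convert_ids_to_tokens(pred_ids)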
py
1a418fd04e6252a8a83d2b8bdd0f64397861d4a0
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Databricks hook. This hook enable the submitting and running of jobs to the Databricks platform. Internally the operators talk to the ``api/2.0/jobs/runs/submit`` `endpoint <https://docs.databricks.com/api/latest/jobs.html#runs-submit>`_. """ import copy import sys import time from typing import Any, Dict, Optional, Tuple from urllib.parse import urlparse import requests from requests import PreparedRequest, exceptions as requests_exceptions from requests.auth import AuthBase, HTTPBasicAuth from requests.exceptions import JSONDecodeError from tenacity import RetryError, Retrying, retry_if_exception, stop_after_attempt, wait_exponential from airflow import __version__ from airflow.exceptions import AirflowException from airflow.hooks.base import BaseHook from airflow.models import Connection if sys.version_info >= (3, 8): from functools import cached_property else: from cached_property import cached_property USER_AGENT_HEADER = {'user-agent': f'airflow-{__version__}'} # https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token#--get-an-azure-active-directory-access-token # https://docs.microsoft.com/en-us/graph/deployments#app-registration-and-token-service-root-endpoints AZURE_DEFAULT_AD_ENDPOINT = "https://login.microsoftonline.com" AZURE_TOKEN_SERVICE_URL = "{}/{}/oauth2/token" # https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token AZURE_METADATA_SERVICE_TOKEN_URL = "http://169.254.169.254/metadata/identity/oauth2/token" AZURE_METADATA_SERVICE_INSTANCE_URL = "http://169.254.169.254/metadata/instance" TOKEN_REFRESH_LEAD_TIME = 120 AZURE_MANAGEMENT_ENDPOINT = "https://management.core.windows.net/" DEFAULT_DATABRICKS_SCOPE = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d" class BaseDatabricksHook(BaseHook): """ Base for interaction with Databricks. :param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`. :param timeout_seconds: The amount of time in seconds the requests library will wait before timing-out. :param retry_limit: The number of times to retry the connection in case of service outages. :param retry_delay: The number of seconds to wait between retries (it might be a floating point number). :param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class. 
""" conn_name_attr = 'databricks_conn_id' default_conn_name = 'databricks_default' conn_type = 'databricks' extra_parameters = [ 'token', 'host', 'use_azure_managed_identity', 'azure_ad_endpoint', 'azure_resource_id', 'azure_tenant_id', ] def __init__( self, databricks_conn_id: str = default_conn_name, timeout_seconds: int = 180, retry_limit: int = 3, retry_delay: float = 1.0, retry_args: Optional[Dict[Any, Any]] = None, ) -> None: super().__init__() self.databricks_conn_id = databricks_conn_id self.timeout_seconds = timeout_seconds if retry_limit < 1: raise ValueError('Retry limit must be greater than or equal to 1') self.retry_limit = retry_limit self.retry_delay = retry_delay self.aad_tokens: Dict[str, dict] = {} self.aad_timeout_seconds = 10 def my_after_func(retry_state): self._log_request_error(retry_state.attempt_number, retry_state.outcome) if retry_args: self.retry_args = copy.copy(retry_args) self.retry_args['retry'] = retry_if_exception(self._retryable_error) self.retry_args['after'] = my_after_func else: self.retry_args = dict( stop=stop_after_attempt(self.retry_limit), wait=wait_exponential(min=self.retry_delay, max=(2**retry_limit)), retry=retry_if_exception(self._retryable_error), after=my_after_func, ) @cached_property def databricks_conn(self) -> Connection: return self.get_connection(self.databricks_conn_id) def get_conn(self) -> Connection: return self.databricks_conn @cached_property def host(self) -> str: if 'host' in self.databricks_conn.extra_dejson: host = self._parse_host(self.databricks_conn.extra_dejson['host']) else: host = self._parse_host(self.databricks_conn.host) return host @staticmethod def _parse_host(host: str) -> str: """ The purpose of this function is to be robust to improper connections settings provided by users, specifically in the host field. For example -- when users supply ``https://xx.cloud.databricks.com`` as the host, we must strip out the protocol to get the host.:: h = DatabricksHook() assert h._parse_host('https://xx.cloud.databricks.com') == \ 'xx.cloud.databricks.com' In the case where users supply the correct ``xx.cloud.databricks.com`` as the host, this function is a no-op.:: assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com' """ urlparse_host = urlparse(host).hostname if urlparse_host: # In this case, host = https://xx.cloud.databricks.com return urlparse_host else: # In this case, host = xx.cloud.databricks.com return host def _get_retry_object(self) -> Retrying: """ Instantiates a retry object :return: instance of Retrying class """ return Retrying(**self.retry_args) def _get_aad_token(self, resource: str) -> str: """ Function to get AAD token for given resource. Supports managed identity or service principal auth :param resource: resource to issue token to :return: AAD token, or raise an exception """ aad_token = self.aad_tokens.get(resource) if aad_token and self._is_aad_token_valid(aad_token): return aad_token['token'] self.log.info('Existing AAD token is expired, or going to expire soon. 
Refreshing...') try: for attempt in self._get_retry_object(): with attempt: if self.databricks_conn.extra_dejson.get('use_azure_managed_identity', False): params = { "api-version": "2018-02-01", "resource": resource, } resp = requests.get( AZURE_METADATA_SERVICE_TOKEN_URL, params=params, headers={**USER_AGENT_HEADER, "Metadata": "true"}, timeout=self.aad_timeout_seconds, ) else: tenant_id = self.databricks_conn.extra_dejson['azure_tenant_id'] data = { "grant_type": "client_credentials", "client_id": self.databricks_conn.login, "resource": resource, "client_secret": self.databricks_conn.password, } azure_ad_endpoint = self.databricks_conn.extra_dejson.get( "azure_ad_endpoint", AZURE_DEFAULT_AD_ENDPOINT ) resp = requests.post( AZURE_TOKEN_SERVICE_URL.format(azure_ad_endpoint, tenant_id), data=data, headers={ **USER_AGENT_HEADER, 'Content-Type': 'application/x-www-form-urlencoded', }, timeout=self.aad_timeout_seconds, ) resp.raise_for_status() jsn = resp.json() if ( 'access_token' not in jsn or jsn.get('token_type') != 'Bearer' or 'expires_on' not in jsn ): raise AirflowException(f"Can't get necessary data from AAD token: {jsn}") token = jsn['access_token'] self.aad_tokens[resource] = {'token': token, 'expires_on': int(jsn["expires_on"])} break except RetryError: raise AirflowException(f'API requests to Azure failed {self.retry_limit} times. Giving up.') except requests_exceptions.HTTPError as e: raise AirflowException(f'Response: {e.response.content}, Status Code: {e.response.status_code}') return token def _get_aad_headers(self) -> dict: """ Fills AAD headers if necessary (SPN is outside of the workspace) :return: dictionary with filled AAD headers """ headers = {} if 'azure_resource_id' in self.databricks_conn.extra_dejson: mgmt_token = self._get_aad_token(AZURE_MANAGEMENT_ENDPOINT) headers['X-Databricks-Azure-Workspace-Resource-Id'] = self.databricks_conn.extra_dejson[ 'azure_resource_id' ] headers['X-Databricks-Azure-SP-Management-Token'] = mgmt_token return headers @staticmethod def _is_aad_token_valid(aad_token: dict) -> bool: """ Utility function to check AAD token hasn't expired yet :param aad_token: dict with properties of AAD token :return: true if token is valid, false otherwise :rtype: bool """ now = int(time.time()) if aad_token['expires_on'] > (now + TOKEN_REFRESH_LEAD_TIME): return True return False @staticmethod def _check_azure_metadata_service() -> None: """ Check for Azure Metadata Service https://docs.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service """ try: jsn = requests.get( AZURE_METADATA_SERVICE_INSTANCE_URL, params={"api-version": "2021-02-01"}, headers={"Metadata": "true"}, timeout=2, ).json() if 'compute' not in jsn or 'azEnvironment' not in jsn['compute']: raise AirflowException( f"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}" ) except (requests_exceptions.RequestException, ValueError) as e: raise AirflowException(f"Can't reach Azure Metadata Service: {e}") def _get_token(self, raise_error: bool = False) -> Optional[str]: if 'token' in self.databricks_conn.extra_dejson: self.log.info( 'Using token auth. 
For security reasons, please set token in Password field instead of extra' ) return self.databricks_conn.extra_dejson['token'] elif not self.databricks_conn.login and self.databricks_conn.password: self.log.info('Using token auth.') return self.databricks_conn.password elif 'azure_tenant_id' in self.databricks_conn.extra_dejson: if self.databricks_conn.login == "" or self.databricks_conn.password == "": raise AirflowException("Azure SPN credentials aren't provided") self.log.info('Using AAD Token for SPN.') return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE) elif self.databricks_conn.extra_dejson.get('use_azure_managed_identity', False): self.log.info('Using AAD Token for managed identity.') self._check_azure_metadata_service() return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE) elif raise_error: raise AirflowException('Token authentication isn\'t configured') return None def _log_request_error(self, attempt_num: int, error: str) -> None: self.log.error('Attempt %s API Request to Databricks failed with reason: %s', attempt_num, error) def _do_api_call(self, endpoint_info: Tuple[str, str], json: Optional[Dict[str, Any]] = None): """ Utility function to perform an API call with retries :param endpoint_info: Tuple of method and endpoint :param json: Parameters for this API call. :return: If the api call returns a OK status code, this function returns the response in JSON. Otherwise, we throw an AirflowException. :rtype: dict """ method, endpoint = endpoint_info # TODO: get rid of explicit 'api/' in the endpoint specification url = f'https://{self.host}/{endpoint}' aad_headers = self._get_aad_headers() headers = {**USER_AGENT_HEADER.copy(), **aad_headers} auth: AuthBase token = self._get_token() if token: auth = _TokenAuth(token) else: self.log.info('Using basic auth.') auth = HTTPBasicAuth(self.databricks_conn.login, self.databricks_conn.password) request_func: Any if method == 'GET': request_func = requests.get elif method == 'POST': request_func = requests.post elif method == 'PATCH': request_func = requests.patch elif method == 'DELETE': request_func = requests.delete else: raise AirflowException('Unexpected HTTP Method: ' + method) try: for attempt in self._get_retry_object(): with attempt: response = request_func( url, json=json if method in ('POST', 'PATCH') else None, params=json if method == 'GET' else None, auth=auth, headers=headers, timeout=self.timeout_seconds, ) response.raise_for_status() return response.json() except RetryError: raise AirflowException(f'API requests to Databricks failed {self.retry_limit} times. Giving up.') except requests_exceptions.HTTPError as e: raise AirflowException(f'Response: {e.response.content}, Status Code: {e.response.status_code}') @staticmethod def _get_error_code(exception: BaseException) -> str: if isinstance(exception, requests_exceptions.HTTPError): try: jsn = exception.response.json() return jsn.get('error_code', '') except JSONDecodeError: pass return "" @staticmethod def _retryable_error(exception: BaseException) -> bool: if not isinstance(exception, requests_exceptions.RequestException): return False return isinstance(exception, (requests_exceptions.ConnectionError, requests_exceptions.Timeout)) or ( exception.response is not None and ( exception.response.status_code >= 500 or exception.response.status_code == 429 or ( exception.response.status_code == 400 and BaseDatabricksHook._get_error_code(exception) == 'COULD_NOT_ACQUIRE_LOCK' ) ) ) class _TokenAuth(AuthBase): """ Helper class for requests Auth field. 
AuthBase requires you to implement the __call__ magic function. """ def __init__(self, token: str) -> None: self.token = token def __call__(self, r: PreparedRequest) -> PreparedRequest: r.headers['Authorization'] = 'Bearer ' + self.token return r
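
# Usage sketch (editor's addition, not part of the hook): _TokenAuth plugs into
# the requests auth hook so every call carries an "Authorization: Bearer" header.
# The token value and the commented-out URL are placeholders, not real
# credentials or endpoints.
def _token_auth_example():
    import requests

    session = requests.Session()
    session.auth = _TokenAuth("example-personal-access-token")
    # session.get("https://xx.cloud.databricks.com/api/2.0/clusters/list")
    return session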
py
1a418fd87be36f06da34ec63028f5f1862b57ead
'''
python-lambda-local: Test Direct Invocations
(command-line and direct).

Meant for use with py.test.

Copyright 2015-2020 HENNGE K.K. (formerly known as HDE, Inc.)

Licensed under MIT
'''
import json
import argparse
from multiprocessing import Process
import os

from lambda_local.main import run as lambda_run
from lambda_local.main import call as lambda_call
from lambda_local.main import ERR_TYPE_EXCEPTION
from lambda_local.context import Context


def my_lambda_function(event, context):
    print("Hello World from My Lambda Function!")
    return 42


def my_failing_lambda_function(event, context):
    raise Exception('Oh no')


def test_function_call_for_pytest():
    (result, error_type) = lambda_call(
        my_lambda_function, {}, Context(1))

    assert error_type is None
    assert result == 42


def test_handle_exceptions_gracefully():
    (result, error_type) = lambda_call(
        my_failing_lambda_function, {}, Context(1))

    assert error_type is ERR_TYPE_EXCEPTION


def test_check_command_line():
    request = json.dumps({})
    request_file = 'check_command_line_event.json'
    with open(request_file, "w") as f:
        f.write(request)

    args = argparse.Namespace(event=request_file,
                              file='tests/test_direct_invocations.py',
                              function='my_lambda_function',
                              timeout=1,
                              environment_variables='',
                              library=None,
                              version_name='',
                              arn_string=''
                              )
    p = Process(target=lambda_run, args=(args,))
    p.start()
    p.join()
    os.remove(request_file)
    assert p.exitcode == 0


def test_check_command_line_error():
    request = json.dumps({})
    request_file = 'check_command_line_event.json'
    with open(request_file, "w") as f:
        f.write(request)

    args = argparse.Namespace(event=request_file,
                              file='tests/test_direct_invocations.py',
                              function='my_failing_lambda_function',
                              timeout=1,
                              environment_variables='',
                              library=None,
                              version_name='',
                              arn_string=''
                              )
    p = Process(target=lambda_run, args=(args,))
    p.start()
    p.join()
    os.remove(request_file)
    assert p.exitcode == 1
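
# Usage sketch (editor's addition, not part of the test suite): the same call()
# API exercised by the tests above, with a non-empty event payload. The event
# keys are illustrative only.
def example_direct_call():
    event = {"answer": 42}
    (result, error_type) = lambda_call(my_lambda_function, event, Context(1))
    return result, error_type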
py
1a418fe6164297397b409284a5ada5e0b3ecc209
import turtle as tt
from random import randint, sample


def draw():
    size = randint(40, 300)
    angles = (144, 150, 157.5, 160, 165)
    angle = sample(angles, 1)[0]

    colors = [
        ('#922B21', '#E6B0AA'),
        ('#76448A', '#D2B4DE'),
        ('#1F618D', '#AED6F1'),
        ('#515A5A', '#EAEDED'),
        ('#148F77', '#D1F2EB'),
        ('#B7950B', '#F7DC6F'),
        ('#F39C12', '#FDEBD0'),
        ('#BA4A00', '#F6DDCC')]
    color = sample(colors, 1)[0]
    tt.color(color[0], color[1])

    x_pos = randint(-200, 200)
    y_pos = randint(-200, 200)
    tt.pu()
    tt.setpos(x_pos, y_pos)
    start_position = tt.pos()
    tt.pd()

    tt.begin_fill()
    while True:
        tt.forward(size)
        tt.left(angle)
        if abs(tt.pos() - start_position) < 1:
            break
    tt.end_fill()


tt.circle(100)
for i in range(3):
    tt.pensize(i % 3)
    draw()

tt.done()
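
# Note (editor's addition): the while loop above terminates because turtle
# positions are Vec2D objects, so `tt.pos() - start_position` is a vector and
# abs() is its length; drawing stops once the pen returns (almost) to its start.
# A minimal demonstration of that arithmetic, independent of the screen:
def _vec2d_demo():
    from turtle import Vec2D

    start = Vec2D(0, 0)
    pos = Vec2D(0.3, 0.4)
    return abs(pos - start)  # 0.5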
py
1a4190349dddaab02d89d50e6b4f3468454e9053
from .feeder import Feeder
from .username import FeedUsername
from .taskname import FeedTaskname
py
1a41905995cff67baf623278d06798f8308ff8e0
maxsections = 5 commonDict = { "abbrev" : "O", "name" : "common", "default" : 2, "O2" : { 1 : [ 'Geometry/CMSCommonData/data/materials.xml', 'Geometry/CMSCommonData/data/rotations.xml', 'Geometry/CMSCommonData/data/extend/v2/cmsextent.xml', 'Geometry/CMSCommonData/data/cms/2026/v1/cms.xml', 'Geometry/CMSCommonData/data/eta3/etaMax.xml', 'Geometry/CMSCommonData/data/cmsMother.xml', 'Geometry/CMSCommonData/data/cmsTracker.xml', 'Geometry/CMSCommonData/data/caloBase/2026/v1/caloBase.xml', 'Geometry/CMSCommonData/data/cmsCalo.xml', 'Geometry/CMSCommonData/data/muonBase/2026/v2/muonBase.xml', 'Geometry/CMSCommonData/data/cmsMuon.xml', 'Geometry/CMSCommonData/data/mgnt.xml', 'Geometry/CMSCommonData/data/beampipe/2026/v1/beampipe.xml', 'Geometry/CMSCommonData/data/cmsBeam/2026/v1/cmsBeam.xml', 'Geometry/CMSCommonData/data/muonMB.xml', 'Geometry/CMSCommonData/data/muonMagnet.xml', 'Geometry/CMSCommonData/data/cavern/2017/v2/cavern.xml', 'Geometry/CMSCommonData/data/cavernData/2017/v1/cavernData.xml', 'Geometry/CMSCommonData/data/cavernFloor/2017/v1/cavernFloor.xml', ], 5 : [ 'Geometry/CMSCommonData/data/FieldParameters.xml', ], "era" : "run2_common, run3_common, phase2_common", }, "O3" : { 1 : [ 'Geometry/CMSCommonData/data/materials.xml', 'Geometry/CMSCommonData/data/rotations.xml', 'Geometry/CMSCommonData/data/extend/v2/cmsextent.xml', 'Geometry/CMSCommonData/data/cms/2026/v2/cms.xml', 'Geometry/CMSCommonData/data/eta3/etaMax.xml', 'Geometry/CMSCommonData/data/cmsMother.xml', 'Geometry/CMSCommonData/data/cmsTracker.xml', 'Geometry/CMSCommonData/data/caloBase/2026/v2/caloBase.xml', 'Geometry/CMSCommonData/data/cmsCalo.xml', 'Geometry/CMSCommonData/data/muonBase/2026/v2/muonBase.xml', 'Geometry/CMSCommonData/data/cmsMuon.xml', 'Geometry/CMSCommonData/data/mgnt.xml', 'Geometry/CMSCommonData/data/beampipe/2026/v1/beampipe.xml', 'Geometry/CMSCommonData/data/cmsBeam/2026/v1/cmsBeam.xml', 'Geometry/CMSCommonData/data/muonMB.xml', 'Geometry/CMSCommonData/data/muonMagnet.xml', 'Geometry/CMSCommonData/data/cavern/2017/v2/cavern.xml', 'Geometry/CMSCommonData/data/cavernData/2017/v1/cavernData.xml', 'Geometry/CMSCommonData/data/cavernFloor/2017/v1/cavernFloor.xml', ], 5 : [ 'Geometry/CMSCommonData/data/FieldParameters.xml', ], "era" : "run2_common, run3_common, phase2_common", }, "O4" : { 1 : [ 'Geometry/CMSCommonData/data/materials.xml', 'Geometry/CMSCommonData/data/rotations.xml', 'Geometry/CMSCommonData/data/extend/v2/cmsextent.xml', 'Geometry/CMSCommonData/data/cmsMother.xml', 'Geometry/CMSCommonData/data/eta3/etaMax.xml', 'Geometry/CMSCommonData/data/cmsTracker.xml', 'Geometry/CMSCommonData/data/cmsCalo.xml', 'Geometry/CMSCommonData/data/cmsMuon.xml', 'Geometry/CMSCommonData/data/mgnt.xml', 'Geometry/CMSCommonData/data/beampipe/2026/v1/beampipe.xml', 'Geometry/CMSCommonData/data/cmsBeam/2026/v1/cmsBeam.xml', 'Geometry/CMSCommonData/data/muonMB.xml', 'Geometry/CMSCommonData/data/muonMagnet.xml', 'Geometry/CMSCommonData/data/cavern/2021/v1/cavern.xml', 'Geometry/CMSCommonData/data/cavernData/2021/v1/cavernData.xml', 'Geometry/CMSCommonData/data/cavernFloor/2017/v1/cavernFloor.xml', 'Geometry/CMSCommonData/data/cms/2026/v3/cms.xml', 'Geometry/CMSCommonData/data/caloBase/2026/v2/caloBase.xml', 'Geometry/CMSCommonData/data/muonBase/2026/v3/muonBase.xml', ], 5 : [ 'Geometry/CMSCommonData/data/FieldParameters.xml', ], "era" : "run2_common, run3_common, phase2_common", } } trackerDict = { "abbrev" : "T", "name" : "tracker", "default" : 5, "T5" : { 1 : [ 
'Geometry/TrackerCommonData/data/PhaseII/trackerParameters.xml', 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/pixfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/pixbar.xml', 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/trackerfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/trackerStructureTopology.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker4025/pixelStructureTopology.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker4025/trackersens.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker4025/pixelsens.xml', 'Geometry/TrackerRecoData/data/PhaseII/TiltedTracker4025/trackerRecoMaterial.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker4025/trackerProdCuts.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker4025/pixelProdCuts.xml', 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', ], "sim" : [ 'from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *', 'from SLHCUpgradeSimulations.Geometry.fakeConditions_phase2TkT5_cff import *', ], "reco" : [ 'from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *', 'from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *', 'from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *', 'from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *', 'from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *', 'trackerGeometry.applyAlignment = cms.bool(False)', ], "era" : "phase2_tracker, trackingPhase2PU140", }, "T6" : { 1 : [ 'Geometry/TrackerCommonData/data/PhaseII/trackerParameters.xml', 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/pixfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/pixbar.xml', 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/otst.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerStructureTopology.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/pixelStructureTopology.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackersens.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelsens.xml', 'Geometry/TrackerRecoData/data/PhaseII/TiltedTracker404/trackerRecoMaterial.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackerProdCuts.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelProdCuts.xml', 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', ], "sim" : [ 'from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *', 'from SLHCUpgradeSimulations.Geometry.fakeConditions_phase2TkT6_cff import *', ], "reco" : [ 'from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *', 'from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *', 'from 
Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *', 'from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *', 'from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *', 'trackerGeometry.applyAlignment = cms.bool(False)', ], "era" : "phase2_tracker, trackingPhase2PU140", }, "T14" : { 1 : [ 'Geometry/TrackerCommonData/data/PhaseII/trackerParameters.xml', 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/pixfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/pixbar.xml', 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/otst.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerStructureTopology.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/pixelStructureTopology.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackersens.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelsens.xml', 'Geometry/TrackerRecoData/data/PhaseII/TiltedTracker613/trackerRecoMaterial.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackerProdCuts.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelProdCuts.xml', 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', ], "sim" : [ 'from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *', 'from SLHCUpgradeSimulations.Geometry.fakeConditions_phase2TkT14_cff import *', ], "reco" : [ 'from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *', 'from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *', 'from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *', 'from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *', 'from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *', 'trackerGeometry.applyAlignment = cms.bool(False)', ], "era" : "phase2_tracker, trackingPhase2PU140", }, "T15" : { 1 : [ 'Geometry/TrackerCommonData/data/PhaseII/trackerParameters.xml', 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixbar.xml', 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/otst.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerStructureTopology.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/pixelStructureTopology.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackersens.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelsens.xml', 'Geometry/TrackerRecoData/data/PhaseII/TiltedTracker613_MB_2019_04/trackerRecoMaterial.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackerProdCuts.xml', 
'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelProdCuts.xml', 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', ], "sim" : [ 'from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *', 'from SLHCUpgradeSimulations.Geometry.fakeConditions_phase2TkT14_cff import *', ], "reco" : [ 'from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *', 'from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *', 'from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *', 'from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *', 'from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *', 'trackerGeometry.applyAlignment = cms.bool(False)', ], "era" : "phase2_tracker, trackingPhase2PU140", }, "T16" : { 1 : [ 'Geometry/TrackerCommonData/data/PhaseII/trackerParameters.xml', 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixbar.xml', 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/otst.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/Tracker_Skewed_IT_2019_08/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerStructureTopology.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/pixelStructureTopology.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackersens.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelsens.xml', 'Geometry/TrackerRecoData/data/PhaseII/TiltedTracker613_MB_2019_04/trackerRecoMaterial.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackerProdCuts.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelProdCuts.xml', 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', ], "sim" : [ 'from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *', 'from SLHCUpgradeSimulations.Geometry.fakeConditions_phase2TkT14_cff import *', ], "reco" : [ 'from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *', 'from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *', 'from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *', 'from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *', 'from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *', 'trackerGeometry.applyAlignment = cms.bool(False)', ], "era" : "phase2_tracker, trackingPhase2PU140", }, "T17" : { 1 : [ 'Geometry/TrackerCommonData/data/PhaseII/trackerParameters.xml', 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixbar.xml', 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/otst.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker615/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 
'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerStructureTopology.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/pixelStructureTopology.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackersens.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelsens.xml', 'Geometry/TrackerRecoData/data/PhaseII/TiltedTracker613_MB_2019_04/trackerRecoMaterial.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackerProdCuts.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelProdCuts.xml', 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', ], "sim" : [ 'from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *', 'from SLHCUpgradeSimulations.Geometry.fakeConditions_phase2TkT14_cff import *', ], "reco" : [ 'from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *', 'from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *', 'from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *', 'from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *', 'from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *', 'trackerGeometry.applyAlignment = cms.bool(False)', ], "era" : "phase2_tracker, trackingPhase2PU140", }, "T18" : { 1 : [ 'Geometry/TrackerCommonData/data/PhaseII/trackerParameters.xml', 'Geometry/TrackerCommonData/data/pixfwdCommon.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/pixbar.xml', 'Geometry/TrackerCommonData/data/trackermaterial.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/otst.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613_MB_2019_04/tracker.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker616/pixel.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerbar.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerfwd.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker404/trackerStructureTopology.xml', 'Geometry/TrackerCommonData/data/PhaseII/TiltedTracker613/pixelStructureTopology.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackersens.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelsens.xml', 'Geometry/TrackerRecoData/data/PhaseII/TiltedTracker613_MB_2019_04/trackerRecoMaterial.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/trackerProdCuts.xml', 'Geometry/TrackerSimData/data/PhaseII/TiltedTracker404/pixelProdCuts.xml', 'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml', ], "sim" : [ 'from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *', 'from SLHCUpgradeSimulations.Geometry.fakeConditions_phase2TkT14_cff import *', ], "reco" : [ 'from Geometry.CommonTopologies.globalTrackingGeometry_cfi import *', 'from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *', 'from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *', 'from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *', 'from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *', 'trackerGeometry.applyAlignment = cms.bool(False)', ], "era" : "phase2_tracker, trackingPhase2PU140", } } caloDict = { "abbrev" : "C", "name" : "calo", "default" : 4, "C4" : { 1 : [ 'Geometry/EcalCommonData/data/ectkcable.xml', 'Geometry/EcalCommonData/data/eregalgo/2026/v1/eregalgo.xml', 'Geometry/EcalCommonData/data/ebalgo.xml', 
'Geometry/EcalCommonData/data/ebcon.xml', 'Geometry/EcalCommonData/data/ebrot.xml', 'Geometry/EcalCommonData/data/eecon.xml', 'Geometry/EcalCommonData/data/escon/2026/v1/escon.xml', 'Geometry/EcalCommonData/data/esalgo/2026/v1/esalgo.xml', 'Geometry/HcalCommonData/data/hcalrotations.xml', 'Geometry/HcalCommonData/data/hcal/NoHE/hcalalgo.xml', 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', 'Geometry/HcalCommonData/data/hcalouteralgo.xml', 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', 'Geometry/HcalCommonData/data/hcalSimNumbering/NoHE/hcalSimNumbering.xml', 'Geometry/HcalCommonData/data/hcalRecNumbering/NoHE/hcalRecNumbering.xml', 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', 'Geometry/HGCalCommonData/data/hgcalMaterial/v1/hgcalMaterial.xml', 'Geometry/HGCalCommonData/data/hgcal/v9/hgcal.xml', 'Geometry/HGCalCommonData/data/hgcalEE/v9/hgcalEE.xml', 'Geometry/HGCalCommonData/data/hgcalHEsil/v9/hgcalHEsil.xml', 'Geometry/HGCalCommonData/data/hgcalHEmix/v9/hgcalHEmix.xml', 'Geometry/HGCalCommonData/data/hgcalwafer/v9/hgcalwafer.xml', 'Geometry/HGCalCommonData/data/hgcalcell/v9/hgcalcell.xml', 'Geometry/HGCalCommonData/data/hgcalCons/v9/hgcalCons.xml', ], 3 : [ 'Geometry/EcalSimData/data/PhaseII/ecalsens.xml', 'Geometry/HcalCommonData/data/hcalsens/NoHE/hcalsenspmf.xml', 'Geometry/HcalSimData/data/hf.xml', 'Geometry/HcalSimData/data/hfpmt.xml', 'Geometry/HcalSimData/data/hffibrebundle.xml', 'Geometry/HGCalSimData/data/CaloUtil.xml', 'Geometry/HGCalSimData/data/hgcsensv9.xml', ], 4 : [ 'Geometry/HcalSimData/data/HcalProdCuts.xml', 'Geometry/EcalSimData/data/EcalProdCuts.xml', 'Geometry/HGCalSimData/data/hgcProdCutsv9.xml', ], "sim" : [ 'from Geometry.EcalCommonData.ecalSimulationParameters_cff import *', 'from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *', 'from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *', 'from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *' ], "reco" : [ 'from Geometry.CaloEventSetup.HGCalV9Topology_cfi import *', 'from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import *', 'from Geometry.CaloEventSetup.CaloTopology_cfi import *', 'from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *', 'CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",', ' SelectedCalos = cms.vstring("HCAL",', ' "ZDC",', ' "EcalBarrel",', ' "TOWER",', ' "HGCalEESensitive",', ' "HGCalHESiliconSensitive",', ' "HGCalHEScintillatorSensitive"', ' )', ')', 'from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *', 'from Geometry.HcalEventSetup.HcalGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *', 'from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *', 'from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *', 'from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *', 'from Geometry.EcalMapping.EcalMapping_cfi import *', 'from Geometry.EcalMapping.EcalMappingRecord_cfi import *', ], "era" : "run2_HE_2017, run2_HF_2017, run2_HCAL_2017, run3_HB, phase2_hcal, phase2_hgcal, phase2_hgcalV9, hcalHardcodeConditions, hcalSkipPacker", }, "C6" : { 1 : [ 'Geometry/EcalCommonData/data/ectkcable.xml', 'Geometry/EcalCommonData/data/eregalgo/2026/v1/eregalgo.xml', 'Geometry/EcalCommonData/data/ebalgo.xml', 'Geometry/EcalCommonData/data/ebcon.xml', 'Geometry/EcalCommonData/data/ebrot.xml', 'Geometry/EcalCommonData/data/eecon.xml', 'Geometry/EcalCommonData/data/escon/2026/v1/escon.xml', 
'Geometry/EcalCommonData/data/esalgo/2026/v1/esalgo.xml', 'Geometry/HcalCommonData/data/hcalrotations.xml', 'Geometry/HcalCommonData/data/hcal/NoHE/hcalalgo.xml', 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', 'Geometry/HcalCommonData/data/hcalouteralgo.xml', 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', 'Geometry/HcalCommonData/data/hcalSimNumbering/NoHE/hcalSimNumbering.xml', 'Geometry/HcalCommonData/data/hcalRecNumbering/NoHE/hcalRecNumbering.xml', 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', 'Geometry/HGCalCommonData/data/hgcalMaterial/v1/hgcalMaterial.xml', 'Geometry/HGCalCommonData/data/hgcal/v9/hgcal.xml', 'Geometry/HGCalCommonData/data/hgcalEE/v9/hgcalEE.xml', 'Geometry/HGCalCommonData/data/hgcalHEsil/v9/hgcalHEsil.xml', 'Geometry/HGCalCommonData/data/hgcalHEmix/v9/hgcalHEmix.xml', 'Geometry/HGCalCommonData/data/hgcalwafer/v9/hgcalwafer.xml', 'Geometry/HGCalCommonData/data/hgcalcell/v9/hgcalcell.xml', 'Geometry/HGCalCommonData/data/hgcalCons/v9/hgcalCons.xml', 'Geometry/ForwardCommonData/data/hfnose/v1/hfnose.xml', 'Geometry/ForwardCommonData/data/hfnoseWafer/v1/hfnoseWafer.xml', 'Geometry/ForwardCommonData/data/hfnoseCell/v1/hfnoseCell.xml', 'Geometry/ForwardCommonData/data/hfnoseCons/v1/hfnoseCons.xml', ], 3 : [ 'Geometry/EcalSimData/data/PhaseII/ecalsens.xml', 'Geometry/HcalCommonData/data/hcalsens/NoHE/hcalsenspmf.xml', 'Geometry/HcalSimData/data/hf.xml', 'Geometry/HcalSimData/data/hfpmt.xml', 'Geometry/HcalSimData/data/hffibrebundle.xml', 'Geometry/HGCalSimData/data/CaloUtil.xml', 'Geometry/HGCalSimData/data/hgcsensv9.xml', 'Geometry/ForwardSimData/data/hfnosesens.xml', ], 4 : [ 'Geometry/HcalSimData/data/HcalProdCuts.xml', 'Geometry/EcalSimData/data/EcalProdCuts.xml', 'Geometry/HGCalSimData/data/hgcProdCutsv9.xml', 'Geometry/ForwardSimData/data/hfnoseProdCuts.xml', ], "sim" : [ 'from Geometry.EcalCommonData.ecalSimulationParameters_cff import *', 'from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *', 'from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *', 'from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *', 'from Geometry.ForwardCommonData.hfnoseParametersInitialization_cfi import *', 'from Geometry.ForwardCommonData.hfnoseNumberingInitialization_cfi import *', ], "reco" : [ 'from Geometry.CaloEventSetup.HGCalV9Topology_cfi import *', 'from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import *', 'from Geometry.CaloEventSetup.HFNoseTopology_cfi import *', 'from Geometry.ForwardGeometry.HFNoseGeometryESProducer_cfi import *', 'from Geometry.CaloEventSetup.CaloTopology_cfi import *', 'from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *', 'CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",', ' SelectedCalos = cms.vstring("HCAL",', ' "ZDC",', ' "EcalBarrel",', ' "TOWER",', ' "HGCalEESensitive",', ' "HGCalHESiliconSensitive",', ' "HGCalHEScintillatorSensitive",', ' "HGCalHFNoseSensitive",', ' )', ')', 'from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *', 'from Geometry.HcalEventSetup.HcalGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *', 'from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *', 'from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *', 'from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *', 'from Geometry.EcalMapping.EcalMapping_cfi import *', 'from Geometry.EcalMapping.EcalMappingRecord_cfi import *', ], "era" : "run2_HE_2017, 
run2_HF_2017, run2_HCAL_2017, run3_HB, phase2_hcal, phase2_hgcal, phase2_hgcalV9, hcalHardcodeConditions, hcalSkipPacker, phase2_hfnose", }, "C8" : { 1 : [ 'Geometry/EcalCommonData/data/eregalgo/2026/v2/eregalgo.xml', 'Geometry/EcalCommonData/data/ectkcable/2026/v1/ectkcable.xml', 'Geometry/EcalCommonData/data/ectkcablemat/2026/v1/ectkcablemat.xml', 'Geometry/EcalCommonData/data/ebalgo.xml', 'Geometry/EcalCommonData/data/ebcon.xml', 'Geometry/EcalCommonData/data/ebrot.xml', 'Geometry/HcalCommonData/data/hcalrotations.xml', 'Geometry/HcalCommonData/data/hcal/v2/hcalalgo.xml', 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', 'Geometry/HcalCommonData/data/hcalcablealgo/v2/hcalcablealgo.xml', 'Geometry/HcalCommonData/data/hcalouteralgo.xml', 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', 'Geometry/HcalCommonData/data/hcalSimNumbering/NoHE/hcalSimNumbering.xml', 'Geometry/HcalCommonData/data/hcalRecNumbering/NoHE/hcalRecNumbering.xml', 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', 'Geometry/HGCalCommonData/data/hgcalMaterial/v1/hgcalMaterial.xml', 'Geometry/HGCalCommonData/data/hgcal/v10/hgcal.xml', 'Geometry/HGCalCommonData/data/hgcalEE/v10/hgcalEE.xml', 'Geometry/HGCalCommonData/data/hgcalHEsil/v10/hgcalHEsil.xml', 'Geometry/HGCalCommonData/data/hgcalHEmix/v10/hgcalHEmix.xml', 'Geometry/HGCalCommonData/data/hgcalwafer/v9/hgcalwafer.xml', 'Geometry/HGCalCommonData/data/hgcalcell/v9/hgcalcell.xml', 'Geometry/HGCalCommonData/data/hgcalCons/v10/hgcalCons.xml', ], 3 : [ 'Geometry/EcalSimData/data/PhaseII/ecalsens.xml', 'Geometry/HcalCommonData/data/hcalsens/NoHE/hcalsenspmf.xml', 'Geometry/HcalSimData/data/hf.xml', 'Geometry/HcalSimData/data/hfpmt.xml', 'Geometry/HcalSimData/data/hffibrebundle.xml', 'Geometry/HcalSimData/data/CaloUtil.xml', 'Geometry/HGCalSimData/data/hgcsensv9.xml', ], 4 : [ 'Geometry/HcalSimData/data/HcalProdCuts.xml', 'Geometry/EcalSimData/data/EcalProdCuts.xml', 'Geometry/HGCalSimData/data/hgcProdCutsv9.xml', ], "sim" : [ 'from Geometry.EcalCommonData.ecalSimulationParameters_cff import *', 'from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *', 'from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *', 'from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *' ], "reco" : [ 'from Geometry.CaloEventSetup.HGCalV9Topology_cfi import *', 'from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import *', 'from Geometry.CaloEventSetup.CaloTopology_cfi import *', 'from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *', 'CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",', ' SelectedCalos = cms.vstring("HCAL",', ' "ZDC",', ' "EcalBarrel",', ' "TOWER",', ' "HGCalEESensitive",', ' "HGCalHESiliconSensitive",', ' "HGCalHEScintillatorSensitive"', ' )', ')', 'from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *', 'from Geometry.HcalEventSetup.HcalGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *', 'from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *', 'from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *', 'from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *', 'from Geometry.EcalMapping.EcalMapping_cfi import *', 'from Geometry.EcalMapping.EcalMappingRecord_cfi import *', ], "era" : "run2_HE_2017, run2_HF_2017, run2_HCAL_2017, run3_HB, phase2_hcal, phase2_hgcal, phase2_hgcalV9, phase2_hgcalV10, hcalHardcodeConditions, hcalSkipPacker", }, "C9" : { 1 : [ 
'Geometry/EcalCommonData/data/eregalgo/2026/v2/eregalgo.xml', 'Geometry/EcalCommonData/data/ectkcable/2026/v1/ectkcable.xml', 'Geometry/EcalCommonData/data/ectkcablemat/2026/v1/ectkcablemat.xml', 'Geometry/EcalCommonData/data/ebalgo.xml', 'Geometry/EcalCommonData/data/ebcon.xml', 'Geometry/EcalCommonData/data/ebrot.xml', 'Geometry/HcalCommonData/data/hcalrotations.xml', 'Geometry/HcalCommonData/data/hcal/v2/hcalalgo.xml', 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', 'Geometry/HcalCommonData/data/hcalcablealgo/v2/hcalcablealgo.xml', 'Geometry/HcalCommonData/data/hcalouteralgo.xml', 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', 'Geometry/HcalCommonData/data/hcalSimNumbering/NoHE/hcalSimNumbering.xml', 'Geometry/HcalCommonData/data/hcalRecNumbering/NoHE/hcalRecNumbering.xml', 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', 'Geometry/HGCalCommonData/data/hgcalMaterial/v1/hgcalMaterial.xml', 'Geometry/HGCalCommonData/data/hgcal/v11/hgcal.xml', 'Geometry/HGCalCommonData/data/hgcalEE/v10/hgcalEE.xml', 'Geometry/HGCalCommonData/data/hgcalHEsil/v11/hgcalHEsil.xml', 'Geometry/HGCalCommonData/data/hgcalHEmix/v11/hgcalHEmix.xml', 'Geometry/HGCalCommonData/data/hgcalwafer/v9/hgcalwafer.xml', 'Geometry/HGCalCommonData/data/hgcalcell/v9/hgcalcell.xml', 'Geometry/HGCalCommonData/data/hgcalCons/v11/hgcalCons.xml', ], 3 : [ 'Geometry/EcalSimData/data/PhaseII/ecalsens.xml', 'Geometry/HcalCommonData/data/hcalsens/NoHE/hcalsenspmf.xml', 'Geometry/HcalSimData/data/hf.xml', 'Geometry/HcalSimData/data/hfpmt.xml', 'Geometry/HcalSimData/data/hffibrebundle.xml', 'Geometry/HcalSimData/data/CaloUtil.xml', 'Geometry/HGCalSimData/data/hgcsensv9.xml', ], 4 : [ 'Geometry/HcalSimData/data/HcalProdCuts.xml', 'Geometry/EcalSimData/data/EcalProdCuts.xml', 'Geometry/HGCalSimData/data/hgcProdCutsv9.xml', ], "sim" : [ 'from Geometry.EcalCommonData.ecalSimulationParameters_cff import *', 'from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *', 'from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *', 'from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *' ], "reco" : [ 'from Geometry.CaloEventSetup.HGCalV9Topology_cfi import *', 'from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import *', 'from Geometry.CaloEventSetup.CaloTopology_cfi import *', 'from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *', 'CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",', ' SelectedCalos = cms.vstring("HCAL",', ' "ZDC",', ' "EcalBarrel",', ' "TOWER",', ' "HGCalEESensitive",', ' "HGCalHESiliconSensitive",', ' "HGCalHEScintillatorSensitive"', ' )', ')', 'from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *', 'from Geometry.HcalEventSetup.HcalGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *', 'from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *', 'from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *', 'from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *', 'from Geometry.EcalMapping.EcalMapping_cfi import *', 'from Geometry.EcalMapping.EcalMappingRecord_cfi import *', ], "era" : "run2_HE_2017, run2_HF_2017, run2_HCAL_2017, run3_HB, phase2_hcal, phase2_hgcal, phase2_hgcalV9, phase2_hgcalV10, phase2_hgcalV11, hcalHardcodeConditions, hcalSkipPacker", }, "C10" : { 1 : [ 'Geometry/EcalCommonData/data/eregalgo/2026/v2/eregalgo.xml', 'Geometry/EcalCommonData/data/ectkcable/2026/v1/ectkcable.xml', 
'Geometry/EcalCommonData/data/ectkcablemat/2026/v1/ectkcablemat.xml', 'Geometry/EcalCommonData/data/ebalgo.xml', 'Geometry/EcalCommonData/data/ebcon.xml', 'Geometry/EcalCommonData/data/ebrot.xml', 'Geometry/HcalCommonData/data/hcalrotations.xml', 'Geometry/HcalCommonData/data/hcal/v2/hcalalgo.xml', 'Geometry/HcalCommonData/data/hcalbarrelalgo.xml', 'Geometry/HcalCommonData/data/hcalcablealgo/v2/hcalcablealgo.xml', 'Geometry/HcalCommonData/data/hcalouteralgo.xml', 'Geometry/HcalCommonData/data/hcalforwardalgo.xml', 'Geometry/HcalCommonData/data/hcalSimNumbering/NoHE/hcalSimNumbering.xml', 'Geometry/HcalCommonData/data/hcalRecNumbering/NoHE/hcalRecNumbering.xml', 'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml', 'Geometry/HGCalCommonData/data/hgcalMaterial/v1/hgcalMaterial.xml', 'Geometry/HGCalCommonData/data/hgcal/v11/hgcal.xml', 'Geometry/HGCalCommonData/data/hgcalEE/v10/hgcalEE.xml', 'Geometry/HGCalCommonData/data/hgcalHEsil/v11/hgcalHEsil.xml', 'Geometry/HGCalCommonData/data/hgcalHEmix/v11/hgcalHEmix.xml', 'Geometry/HGCalCommonData/data/hgcalwafer/v9/hgcalwafer.xml', 'Geometry/HGCalCommonData/data/hgcalcell/v9/hgcalcell.xml', 'Geometry/HGCalCommonData/data/hgcalCons/v11/hgcalCons.xml', 'Geometry/ForwardCommonData/data/hfnose/v2/hfnose.xml', 'Geometry/ForwardCommonData/data/hfnoseWafer/v1/hfnoseWafer.xml', 'Geometry/ForwardCommonData/data/hfnoseCell/v1/hfnoseCell.xml', 'Geometry/ForwardCommonData/data/hfnoseCons/v1/hfnoseCons.xml', ], 3 : [ 'Geometry/EcalSimData/data/PhaseII/ecalsens.xml', 'Geometry/HcalCommonData/data/hcalsens/NoHE/hcalsenspmf.xml', 'Geometry/HcalSimData/data/hf.xml', 'Geometry/HcalSimData/data/hfpmt.xml', 'Geometry/HcalSimData/data/hffibrebundle.xml', 'Geometry/HcalSimData/data/CaloUtil.xml', 'Geometry/HGCalSimData/data/hgcsensv9.xml', 'Geometry/ForwardSimData/data/hfnosesens.xml', ], 4 : [ 'Geometry/HcalSimData/data/HcalProdCuts.xml', 'Geometry/EcalSimData/data/EcalProdCuts.xml', 'Geometry/HGCalSimData/data/hgcProdCutsv9.xml', 'Geometry/ForwardSimData/data/hfnoseProdCuts.xml', ], "sim" : [ 'from Geometry.EcalCommonData.ecalSimulationParameters_cff import *', 'from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *', 'from Geometry.HGCalCommonData.hgcalParametersInitialization_cfi import *', 'from Geometry.HGCalCommonData.hgcalNumberingInitialization_cfi import *', 'from Geometry.ForwardCommonData.hfnoseParametersInitialization_cfi import *', 'from Geometry.ForwardCommonData.hfnoseNumberingInitialization_cfi import *', ], "reco" : [ 'from Geometry.CaloEventSetup.HGCalV9Topology_cfi import *', 'from Geometry.HGCalGeometry.HGCalGeometryESProducer_cfi import *', 'from Geometry.CaloEventSetup.HFNoseTopology_cfi import *', 'from Geometry.ForwardGeometry.HFNoseGeometryESProducer_cfi import *', 'from Geometry.CaloEventSetup.CaloTopology_cfi import *', 'from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *', 'CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",', ' SelectedCalos = cms.vstring("HCAL",', ' "ZDC",', ' "EcalBarrel",', ' "TOWER",', ' "HGCalEESensitive",', ' "HGCalHESiliconSensitive",', ' "HGCalHEScintillatorSensitive",', ' "HGCalHFNoseSensitive",', ' )', ')', 'from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *', 'from Geometry.HcalEventSetup.HcalGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *', 'from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *', 'from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *', 'from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *', 'from 
Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *', 'from Geometry.EcalMapping.EcalMapping_cfi import *', 'from Geometry.EcalMapping.EcalMappingRecord_cfi import *', ], "era" : "run2_HE_2017, run2_HF_2017, run2_HCAL_2017, run3_HB, phase2_hcal, phase2_hgcal, phase2_hgcalV9, phase2_hgcalV10, phase2_hgcalV11, phase2_hfnose, hcalHardcodeConditions, hcalSkipPacker", }, } muonDict = { "abbrev" : "M", "name" : "muon", "default" : 2, "M2" : { 1 : [ 'Geometry/MuonCommonData/data/mbCommon/2017/v2/mbCommon.xml', 'Geometry/MuonCommonData/data/mb1/2015/v1/mb1.xml', 'Geometry/MuonCommonData/data/mb2/2015/v1/mb2.xml', 'Geometry/MuonCommonData/data/mb3/2015/v1/mb3.xml', 'Geometry/MuonCommonData/data/mb4/2015/v1/mb4.xml', 'Geometry/MuonCommonData/data/muonYoke/2021/v2/muonYoke.xml', 'Geometry/MuonCommonData/data/mf/2026/v2/mf.xml', 'Geometry/MuonCommonData/data/rpcf/2026/v1/rpcf.xml', 'Geometry/MuonCommonData/data/gemf/TDR_BaseLine/gemf.xml', 'Geometry/MuonCommonData/data/gem11/TDR_BaseLine/gem11.xml', 'Geometry/MuonCommonData/data/gem21/TDR_Dev/gem21.xml', 'Geometry/MuonCommonData/data/csc/2015/v1/csc.xml', 'Geometry/MuonCommonData/data/mfshield/2026/v1/mfshield.xml', 'Geometry/MuonCommonData/data/me0/TDR_Dev/me0.xml', ], 2 : [ 'Geometry/MuonCommonData/data/muonNumbering/TDR_DeV/muonNumbering.xml', ], 3 : [ 'Geometry/MuonSimData/data/PhaseII/ME0EtaPart/muonSens.xml', 'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml', 'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml', 'Geometry/CSCGeometryBuilder/data/cscSpecs.xml', 'Geometry/RPCGeometryBuilder/data/PhaseII/RPCSpecs.xml', 'Geometry/GEMGeometryBuilder/data/v7/GEMSpecsFilter.xml', 'Geometry/GEMGeometryBuilder/data/v7/GEMSpecs.xml', ], 4 : [ 'Geometry/MuonSimData/data/PhaseII/muonProdCuts.xml', ], "reco" : [ 'from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *', 'from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *', 'from Geometry.GEMGeometryBuilder.gemGeometry_cfi import *', 'from Geometry.GEMGeometryBuilder.me0Geometry_cfi import *', 'from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import *', 'from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import *', ], "era" : "phase2_muon, run3_GEM", }, "M3" : { 1 : [ 'Geometry/MuonCommonData/data/mbCommon/2017/v2/mbCommon.xml', 'Geometry/MuonCommonData/data/mb1/2015/v1/mb1.xml', 'Geometry/MuonCommonData/data/mb2/2015/v1/mb2.xml', 'Geometry/MuonCommonData/data/mb3/2015/v1/mb3.xml', 'Geometry/MuonCommonData/data/mb4/2015/v1/mb4.xml', 'Geometry/MuonCommonData/data/muonYoke/2021/v2/muonYoke.xml', 'Geometry/MuonCommonData/data/mf/2026/v2/mf.xml', 'Geometry/MuonCommonData/data/rpcf/2026/v1/rpcf.xml', 'Geometry/MuonCommonData/data/gemf/TDR_BaseLine/gemf.xml', 'Geometry/MuonCommonData/data/gem11/TDR_BaseLine/gem11.xml', 'Geometry/MuonCommonData/data/gem21/TDR_Dev/gem21.xml', 'Geometry/MuonCommonData/data/csc/2015/v1/csc.xml', 'Geometry/MuonCommonData/data/mfshield/2026/v1/mfshield.xml', 'Geometry/MuonCommonData/data/me0/TDR_Dev/me0.xml', ], 2 : [ 'Geometry/MuonCommonData/data/muonNumbering/TDR_DeV/muonNumbering.xml', ], 3 : [ 'Geometry/MuonSimData/data/PhaseII/ME0EtaPart/muonSens.xml', 'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml', 'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml', 'Geometry/CSCGeometryBuilder/data/cscSpecs.xml', 'Geometry/RPCGeometryBuilder/data/2026/v1/RPCSpecs.xml', 'Geometry/GEMGeometryBuilder/data/v7/GEMSpecsFilter.xml', 'Geometry/GEMGeometryBuilder/data/v7/GEMSpecs.xml', ], 4 : [ 
'Geometry/MuonSimData/data/PhaseII/muonProdCuts.xml', ], "reco" : [ 'from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *', 'from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *', 'from Geometry.GEMGeometryBuilder.gemGeometry_cfi import *', 'from Geometry.GEMGeometryBuilder.me0Geometry_cfi import *', 'from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import *', 'from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import *', ], "era" : "phase2_muon, run3_GEM", }, "M4" : { 1 : [ 'Geometry/MuonCommonData/data/mbCommon/2021/v1/mbCommon.xml', 'Geometry/MuonCommonData/data/mb1/2015/v2/mb1.xml', 'Geometry/MuonCommonData/data/mb2/2015/v2/mb2.xml', 'Geometry/MuonCommonData/data/mb3/2015/v2/mb3.xml', 'Geometry/MuonCommonData/data/mb4/2015/v2/mb4.xml', 'Geometry/MuonCommonData/data/mb4Shield/2021/v1/mb4Shield.xml', 'Geometry/MuonCommonData/data/muonYoke/2021/v2/muonYoke.xml', 'Geometry/MuonCommonData/data/csc/2021/v1/csc.xml', 'Geometry/MuonCommonData/data/mfshield/2017/v1/mfshield.xml', 'Geometry/MuonCommonData/data/mf/2026/v2/mf.xml', 'Geometry/MuonCommonData/data/rpcf/2026/v2/rpcf.xml', 'Geometry/MuonCommonData/data/gemf/TDR_BaseLine/gemf.xml', 'Geometry/MuonCommonData/data/gem11/TDR_BaseLine/gem11.xml', 'Geometry/MuonCommonData/data/gem21/TDR_Dev/gem21.xml', 'Geometry/MuonCommonData/data/mfshield/2026/v1/mfshield.xml', 'Geometry/MuonCommonData/data/me0/TDR_Dev/v2/me0.xml', ], 2 : [ 'Geometry/MuonCommonData/data/muonNumbering/TDR_DeV/muonNumbering.xml', ], 3 : [ 'Geometry/MuonSimData/data/PhaseII/ME0EtaPart/muonSens.xml', 'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml', 'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml', 'Geometry/CSCGeometryBuilder/data/cscSpecs.xml', 'Geometry/RPCGeometryBuilder/data/2026/v1/RPCSpecs.xml', 'Geometry/GEMGeometryBuilder/data/v7/GEMSpecsFilter.xml', 'Geometry/GEMGeometryBuilder/data/v7/GEMSpecs.xml', ], 4 : [ 'Geometry/MuonSimData/data/PhaseII/muonProdCuts.xml', ], "reco" : [ 'from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *', 'from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *', 'from Geometry.GEMGeometryBuilder.gemGeometry_cfi import *', 'from Geometry.GEMGeometryBuilder.me0Geometry_cfi import *', 'from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import *', 'from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import *', ], "era" : "phase2_muon, run3_GEM", } } forwardDict = { "abbrev" : "F", "name" : "forward", "default" : 2, "F2" : { 1 : [ 'Geometry/ForwardCommonData/data/forwardshield/2017/v1/forwardshield.xml', 'Geometry/ForwardCommonData/data/brmrotations.xml', 'Geometry/ForwardCommonData/data/PostLS2/brm.xml', 'Geometry/ForwardCommonData/data/zdcmaterials.xml', 'Geometry/ForwardCommonData/data/lumimaterials.xml', 'Geometry/ForwardCommonData/data/zdcrotations.xml', 'Geometry/ForwardCommonData/data/lumirotations.xml', 'Geometry/ForwardCommonData/data/zdc.xml', 'Geometry/ForwardCommonData/data/zdclumi.xml', 'Geometry/ForwardCommonData/data/cmszdc.xml', ], 3 : [ 'Geometry/ForwardCommonData/data/brmsens.xml', 'Geometry/ForwardSimData/data/zdcsens.xml', ], 4 : [ 'Geometry/ForwardSimData/data/zdcProdCuts.xml', 'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml', ], "reco" :[ 'from Geometry.ForwardGeometry.ForwardGeometry_cfi import *', ] }, "F3" : { 1 : [ 'Geometry/ForwardCommonData/data/forwardshield/2026/v1/forwardshield.xml', 'Geometry/ForwardCommonData/data/brmrotations.xml', 'Geometry/ForwardCommonData/data/brm/2026/v2/brm.xml', 
'Geometry/ForwardCommonData/data/zdcmaterials.xml', 'Geometry/ForwardCommonData/data/lumimaterials.xml', 'Geometry/ForwardCommonData/data/zdcrotations.xml', 'Geometry/ForwardCommonData/data/lumirotations.xml', 'Geometry/ForwardCommonData/data/zdc.xml', 'Geometry/ForwardCommonData/data/zdclumi.xml', 'Geometry/ForwardCommonData/data/cmszdc.xml', ], 3 : [ 'Geometry/ForwardCommonData/data/brmsens.xml', 'Geometry/ForwardSimData/data/zdcsens.xml', ], 4 : [ 'Geometry/ForwardSimData/data/zdcProdCuts.xml', 'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml', ], "sim" : [ ], "reco" :[ 'from Geometry.ForwardGeometry.ForwardGeometry_cfi import *', ] } } timingDict = { "abbrev" : "I", "name" : "timing", "default" : 5, "I5" : { 1 : [ 'Geometry/MTDCommonData/data/btl.xml', 'Geometry/MTDCommonData/data/etl.xml', 'Geometry/MTDCommonData/data/CrystalBarZflat/mtd.xml', 'Geometry/MTDCommonData/data/CrystalBarZflat/mtdStructureTopology.xml', 'Geometry/MTDCommonData/data/CrystalBarZflat/mtdParameters.xml', ], 3 : [ 'Geometry/MTDSimData/data/CrystalBarZflat/mtdsens.xml' ], 4 : [ 'Geometry/MTDSimData/data/CrystalBarZflat/mtdProdCuts.xml' ], "sim" : [ 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', ], "reco" :[ 'from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdParameters_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdTopology_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import *', 'mtdGeometry.applyAlignment = cms.bool(False)' ], "era" : "phase2_timing, phase2_timing_layer", }, "I7" : { 1 : [ 'Geometry/MTDCommonData/data/btl.xml', 'Geometry/MTDCommonData/data/etl.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/mtd.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/mtdStructureTopology.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/mtdParameters.xml', ], 3 : [ 'Geometry/MTDSimData/data/CrystalBarPhiFlat/mtdsens.xml' ], 4 : [ 'Geometry/MTDSimData/data/CrystalBarPhiFlat/mtdProdCuts.xml' ], "sim" : [ 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', ], "reco" :[ 'from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdParameters_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdTopology_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import *', 'mtdGeometry.applyAlignment = cms.bool(False)' ], "era" : "phase2_timing, phase2_timing_layer", }, "I9" : { 1 : [ 'Geometry/MTDCommonData/data/btl.xml', 'Geometry/MTDCommonData/data/etl/v2/etl.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/v2/mtd.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/mtdStructureTopology.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/mtdParameters.xml', ], 3 : [ 'Geometry/MTDSimData/data/CrystalBarPhiFlat/mtdsens.xml' ], 4 : [ 'Geometry/MTDSimData/data/CrystalBarPhiFlat/mtdProdCuts.xml' ], "sim" : [ 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', ], "reco" :[ 'from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdParameters_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdTopology_cfi import *', 'from 
Geometry.MTDGeometryBuilder.mtdGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import *', 'mtdGeometry.applyAlignment = cms.bool(False)' ], "era" : "phase2_timing, phase2_timing_layer", }, "I10" : { 1 : [ 'Geometry/MTDCommonData/data/btl.xml', 'Geometry/MTDCommonData/data/etl/v2/etl.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/v3/mtd.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/mtdStructureTopology.xml', 'Geometry/MTDCommonData/data/CrystalBarPhiFlat/mtdParameters.xml', ], 3 : [ 'Geometry/MTDSimData/data/CrystalBarPhiFlat/mtdsens.xml' ], 4 : [ 'Geometry/MTDSimData/data/CrystalBarPhiFlat/mtdProdCuts.xml' ], "sim" : [ 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', ], "reco" :[ 'from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdParameters_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdTopology_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import *', 'mtdGeometry.applyAlignment = cms.bool(False)' ], "era" : "phase2_timing, phase2_timing_layer", }, "I11" : { 1 : [ 'Geometry/MTDCommonData/data/mtdMaterial/v1/mtdMaterial.xml', 'Geometry/MTDCommonData/data/btl/v1/btl.xml', 'Geometry/MTDCommonData/data/btl/v1/btlStructureTopology.xml', 'Geometry/MTDCommonData/data/etl/v3/etl.xml', 'Geometry/MTDCommonData/data/mtdParameters/v1/mtdParameters.xml', ], 3 : [ 'Geometry/MTDSimData/data/v1/mtdsens.xml' ], 4 : [ 'Geometry/MTDSimData/data/v1/mtdProdCuts.xml' ], "sim" : [ 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', ], "reco" :[ 'from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdParameters_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdTopology_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import *', 'mtdGeometry.applyAlignment = cms.bool(False)' ], "era" : "phase2_timing, phase2_timing_layer", }, "I12" : { 1 : [ 'Geometry/MTDCommonData/data/mtdMaterial/v2/mtdMaterial.xml', 'Geometry/MTDCommonData/data/btl/v1/btl.xml', 'Geometry/MTDCommonData/data/btl/v1/btlStructureTopology.xml', 'Geometry/MTDCommonData/data/etl/v4/etl.xml', 'Geometry/MTDCommonData/data/mtdParameters/v1/mtdParameters.xml', ], 3 : [ 'Geometry/MTDSimData/data/v2/mtdsens.xml' ], 4 : [ 'Geometry/MTDSimData/data/v2/mtdProdCuts.xml' ], "sim" : [ 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', ], "reco" :[ 'from RecoMTD.DetLayers.mtdDetLayerGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdParameters_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdNumberingGeometry_cfi import *', 'from Geometry.MTDNumberingBuilder.mtdTopology_cfi import *', 'from Geometry.MTDGeometryBuilder.mtdGeometry_cfi import *', 'from Geometry.MTDGeometryBuilder.idealForDigiMTDGeometry_cff import *', 'mtdGeometry.applyAlignment = cms.bool(False)' ], "era" : "phase2_timing, phase2_timing_layer", }, } allDicts = [ commonDict, trackerDict, caloDict, muonDict, forwardDict, timingDict ] detectorVersionDict = { ("O2","T6","C4","M2","F2","I5") : "D35", ("O3","T14","C8","M3","F2","I9") : "D41", ("O2","T14","C4","M3","F2","I7") : "D43", ("O2","T14","C6","M3","F3","I7") : "D44", ("O3","T15","C8","M3","F2","I10") : "D45", 
("O3","T15","C9","M3","F2","I10") : "D46", ("O3","T15","C10","M3","F3","I10") : "D47", ("O3","T16","C9","M3","F2","I10") : "D48", ("O4","T15","C9","M4","F2","I10") : "D49", ("O4","T15","C9","M4","F2","I11") : "D50", ("O4","T17","C9","M4","F2","I10") : "D51", ("O4","T18","C9","M4","F2","I10") : "D52", ("O4","T15","C9","M4","F2","I12") : "D53", } deprecatedDets = set([ "D1", "D2", "D3", "D5", "D6" , "D7", "D4", "D8" , "D9", "D12", "D13", "D15", "D10", "D11", "D14", "D16", "D17", "D18", "D19", "D20", "D21", "D22", "D23", "D24", "D25", "D26", "D27", "D28", "D29", "D30", "D31", "D32", "D33", "D34", "D36", "D37", "D38", "D39", "D40", "D42" ]) deprecatedSubdets = set([ "T1", "T2" ,"T3", "T4", "T7", "T8", "T9", "T10", "T11", "T12", "T13", "C1", "C2", "C3", "C5", "C7", "M1", "I1", "I2", "I3", "I4", "I6", "I8", "O1", "F1" ])
py
1a4190ae369f07d3d17d17152fb91f823144ffd0
from pathlib import Path

import scrapli

TEST_DATA_PATH = f"{Path(scrapli.__file__).parents[1]}/tests/test_data"

FUNCTIONAL_USERNAME = "vrnetlab"
FUNCTIONAL_PASSWORD = "VR-netlab9"
FUNCTIONAL_PASSPHRASE = "scrapli"
PRIVATE_KEY = f"{TEST_DATA_PATH}/files/vrnetlab_key"
ENCRYPTED_PRIVATE_KEY = f"{TEST_DATA_PATH}/files/vrnetlab_key_encrypted"
INVALID_PRIVATE_KEY = f"{TEST_DATA_PATH}/files/invalid_key"

MOCK_USERNAME = "scrapli"
MOCK_PASSWORD = "scrapli"
MOCK_PASSPHRASE = FUNCTIONAL_PASSPHRASE

DEVICES = {
    "cisco_iosxe": {
        "auth_username": FUNCTIONAL_USERNAME,
        "auth_password": FUNCTIONAL_PASSWORD,
        "auth_secondary": FUNCTIONAL_PASSWORD,
        "auth_private_key_passphrase": FUNCTIONAL_PASSPHRASE,
        "auth_strict_key": False,
        "host": "172.18.0.11",
        "base_config": f"{TEST_DATA_PATH}/base_configs/cisco_iosxe",
    },
    "mock_cisco_iosxe": {
        "auth_username": MOCK_USERNAME,
        "auth_password": MOCK_PASSWORD,
        "auth_secondary": MOCK_PASSWORD,
        "auth_private_key_passphrase": MOCK_PASSPHRASE,
        "auth_strict_key": False,
        "host": "localhost",
        "port": 2211,
    },
    "cisco_nxos": {
        "auth_username": FUNCTIONAL_USERNAME,
        "auth_password": FUNCTIONAL_PASSWORD,
        "auth_secondary": FUNCTIONAL_PASSWORD,
        "auth_strict_key": False,
        "host": "172.18.0.12",
        "base_config": f"{TEST_DATA_PATH}/base_configs/cisco_nxos",
    },
    "cisco_iosxr": {
        "auth_username": FUNCTIONAL_USERNAME,
        "auth_password": FUNCTIONAL_PASSWORD,
        "auth_secondary": FUNCTIONAL_PASSWORD,
        "auth_strict_key": False,
        "host": "172.18.0.13",
        "base_config": f"{TEST_DATA_PATH}/base_configs/cisco_iosxr",
    },
    "arista_eos": {
        "auth_username": FUNCTIONAL_USERNAME,
        "auth_password": FUNCTIONAL_PASSWORD,
        "auth_secondary": FUNCTIONAL_PASSWORD,
        "auth_strict_key": False,
        "host": "172.18.0.14",
        "comms_ansi": True,
        "base_config": f"{TEST_DATA_PATH}/base_configs/arista_eos",
    },
    "juniper_junos": {
        "auth_username": FUNCTIONAL_USERNAME,
        "auth_password": FUNCTIONAL_PASSWORD,
        "auth_secondary": FUNCTIONAL_PASSWORD,
        "auth_strict_key": False,
        "host": "172.18.0.15",
        "base_config": f"{TEST_DATA_PATH}/base_configs/juniper_junos",
    },
    "linux": {
        "auth_username": "root",
        "auth_password": "docker",
        "auth_strict_key": False,
        "host": "172.18.0.20",
        "comms_ansi": True,
        "comms_prompt_pattern": r"^linux:~#\s*$",
    },
}
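# --- Illustrative usage (not part of the original test settings) ---
# Each entry in DEVICES above is a set of keyword arguments for a scrapli driver; the
# driver class chosen here and the filtering of the test-only "base_config" key are my
# assumptions, not something this file prescribes.
# from scrapli.driver.core import IOSXEDriver
#
# conn_args = {k: v for k, v in DEVICES["cisco_iosxe"].items() if k != "base_config"}
# with IOSXEDriver(**conn_args) as conn:
#     print(conn.get_prompt())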
py
1a4191332cf5a7b838bd372dbb9407dc1cd7c20d
from pm4pyws.user_iam.versions import basic_user_management
py
1a419161a67f5d42a4273610d089b92cb6739842
''' 17-treat-ephemerids.py ========================= AIM: Using the ephemerids computed by 16-compute-ephemerids.py and observational constraints (period of the planet, transit time) calculates observations period. To be used by the two next scripts (18, 19) to treat and plot. INPUT: files: - <orbit_id>_misc/ephemerids_inter_<max_interruptions>_mag_<mag_max><_SAA?>.npz variables: see section PARAMETERS (below) OUTPUT: <orbit_id>_<SL_angle>misc/ephemerids_obs<transit_duration>h_<max_interruptions>inter_V<mag_max><_SAA?>.npz CMD: python 17-treat-ephemerids.py ISSUES: <none known> REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy) - Structure of the root folder: * <orbit_id>_flux/ --> flux files * <orbit_id>_figures/maps/ --> figures * <orbit_id>_misc/ --> storages of data REMARKS: Not with real catalogue. ''' ########################################################################### ### INCLUDES import numpy as np import os import matplotlib.cm as cm import time from resources.routines import * from resources.TimeStepping import * import parameters as param import resources.figures as figures from resources.targets import * from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter ########################################################################### ### PARAMETERS # Orbit id alt = 700 orbit_id = '6am_%d_5_conf4e' % alt apogee=alt perigee=alt # File name for the list of orbit file orbits_file = 'orbits.dat' # Minimum observable time for plots [h] (Only used for consecutive observation time) transit_duration = None # Maximum interruption time tolerated [min] max_interruptions = 97 # Maximum visible magnitude mag_max = 12. # Take SAA into account? SAA = True # Print much information ? verbose = False # If set to True, then it will be observations of at least (period - max_interruptions) # If set to False, then it is minimum (period - max_interruptions) minutes per orbit, # not necesseraly consecutive. consecutive = False # Factor in the SL post treatment correction ? SL_post_treat = True # Stop before saving results to file. early_stop = False # Minimal # of days of obs (if consecutive == False), must be a list nb_obs_days = [13]#[50]#range(1,51)#[13]#range(1,81)# range(1,51)#range(1,91)#range(1,61)##range(1,51)#range(1,91)#range(10,110,10)#range(5,60,5)#[13]#range(20,45,5)#[13]#range(5,45,5)#[0,10,20,30,40]#range(10,17,1)##range(10,110,10)# # Minimal minutes to be observed per orbit (if consecutive == False), must be a list mins_t_obs_per_orbit = [59]#[49]#[79]#[78]#range(68,78,1) # This is a way to vary the results by multiplying the whole pst by a number. # This is very easy as if the pst is multiplied by a constant, it can be taken out of the # integral and only multplying the flux is equivalent to re-running all the simulations pst_factor=1. 
# File name for the input file (in a compressed binary Python format) if SAA: note = '_SAA' else: note = '' if not pst_factor == 1.: note += '_%1.1fpst' % pst_factor if SL_post_treat: note+= '_%4.3fSLreduction' % param.SL_post_treat_reduction input_fname = 'ephemerids_inter_%d_mag_%3.1f%s.npz' % (max_interruptions,mag_max,note) if not consecutive: note += '_cumul_' for min_t_obs_per_orbit in mins_t_obs_per_orbit: print '*'*30, 'min_t_obs_per_orbit %1.1f' % min_t_obs_per_orbit skycoverage_fname = 'skycoverage_%dmin_V%3.1f%s.txt' % (min_t_obs_per_orbit,mag_max,note) for nb_obs_day in nb_obs_days: # File name for the input file (in a compressed binary Python format) if consecutive: output_fname = 'ephemerids_obs%dh_%dinter_V%3.1f%s.npz' % (transit_duration,max_interruptions,mag_max,note) else: output_fname = 'ephemerids_%ddays_%dmin_V%3.1f%s.npz' % (nb_obs_day,min_t_obs_per_orbit,mag_max,note) ##################################################################################################################### # CONSTANTS AND PHYSICAL PARAMETERS period = altitude2period(apogee, perigee) ########################################################################### ### INITIALISATION # Formatted folders definitions folder_flux, folder_figures, folder_misc = init_folders(orbit_id) sky_coverage=0. print 'ORBIT ID:\t\t%s\nPST factor:\t\t%d\nMin Days of Coverage:\t%d\nmin_t_obs_per_orbit\t%d (%.1f%%)\nMAGNITIUDE:\t\t%02.1f\nSAA :\t%g' % (orbit_id,pst_factor,nb_obs_day,min_t_obs_per_orbit,min_t_obs_per_orbit/period*100., mag_max, SAA) # loading data sys.stdout.write("Loading worthy targets from %s ...\t" % input_fname) sys.stdout.flush() worthy_targets = np.load(folder_misc+input_fname) worthy_targets = worthy_targets['worthy_targets'] max_len = 0 for k in range(0, len(worthy_targets)): if max_len < np.shape(worthy_targets[k].Visibility())[0]: max_len = np.shape(worthy_targets[k].Visibility())[0] # too optimistic max_len = int(max_len) start_obs = np.empty([len(worthy_targets),max_len]) stop_obs = np.empty([len(worthy_targets),max_len]) interruptions_obs = np.empty([len(worthy_targets),max_len]) print 'Done\n.%d targets loaded' % len(worthy_targets) ########################################################################### ### COMPUTATIONS ########################################################################### ########################################################################### # consecutive if consecutive: # Loop on all worthy targets for ii in range (0, len(worthy_targets)): y = float(ii) visi = worthy_targets[ii].Visibility() invi = worthy_targets[ii].Invisibility() inter = worthy_targets[ii].get_interruption_time() # for every region in the sky/worthy target: # >> Find when you can look with transit_duration [h] with maximal max_interruptions [min] # >>>> return start and end time of observations with duration of interruptions [min] # Initialise all variables k = 0 j = 0 total_interruptions = 0 start_observation_time = 0 count_observation_time = 0 do_observe = False has_observed=False # iterate on the visibility (i.e. 
time when the target becomes visible) for k in range(0, len(visi)): # shorthand notations vis = visi[k] ini = invi[k] inte = inter[k] # Try to compute the interruption time with the next observability window try: time_to_next_vis = visi[k+1] - ini - inte stop_to_observe = False except IndexError: stop_to_observe = True # if the time to next visiblity is larger than the max interruption time or no next window --> can't observe anymore if stop_to_observe or max_interruptions < time_to_next_vis: # if the observation time is larger than the transit duration, then it can be observed. if do_observe and count_observation_time >= transit_duration*60. : # if you have been observing for longer than transit_duration [h], then remember when and remember the interruptions start_obs[ii,j] = start_observation_time stop_obs[ii,j] = ini interruptions_obs[ii,j] = total_interruptions j+=1 has_observed = True do_observe = False total_interruptions = 0 start_observation_time = 0 count_observation_time = 0 k+=1 if stop_to_observe: break else: continue # if the time to next visiblity is smaller than the max interruption time --> save the interruption time else: total_interruptions += time_to_next_vis # if you were not observing, you can now. if max_interruptions > time_to_next_vis and not do_observe: do_observe=True start_observation_time = vis # count the time you can observe count_observation_time += ini-vis + time_to_next_vis k+=1 if stop_to_observe: break # Debugging infos has_observed = False if has_observed and verbose: print start_obs[ii,0], stop_obs[ii,0], interruptions_obs[ii,0] ########################################################################### # non-consecutive count = 0 check=np.zeros(len(worthy_targets)) if not consecutive: sky_coverage=0. for ii in range(len(worthy_targets)): y = float(ii) message = '\r%3.1f %%' % (y/float(len(worthy_targets))*100.) sys.stdout.write(message) sys.stdout.flush() visi = worthy_targets[ii].Visibility() invi = worthy_targets[ii].Invisibility() inter = worthy_targets[ii].get_interruption_time() observations = invi - visi - inter validated_ids = observations>=min_t_obs_per_orbit validated_observations = observations[validated_ids] vinter = inter[validated_ids] vvis = visi[validated_ids] vinvi = invi[validated_ids] if np.size(validated_observations)>0: check[ii] += validated_observations.sum() #print validated_observations; #obs_efficiency_in_orbit = validated_observations/period #time_lost = np.ceil(obs_efficiency_in_orbit) - obs_efficiency_in_orbit if check[ii]>nb_obs_day*24.*60.: rat, dect = worthy_targets[ii].Coordinates() sky_coverage+=0.5/param.resx/param.resy*np.pi*np.cos(dect) message = '\rComputations done.' sys.stdout.write(message) sys.stdout.flush() print '\nSky coverage for %d days' % nb_obs_day print nb_obs_day,'\t***', round(sky_coverage*100.,3), ' % ***' if early_stop: output=open(os.path.join(folder_misc,skycoverage_fname),"a") print >> output, nb_obs_day,'\t', round(sky_coverage*100.,3) output.close() if early_stop: continue np.savez_compressed(folder_misc+output_fname, worthy_targets=worthy_targets, obs_tot=check) print 'Filed saved as %s' % output_fname
py
1a4191c06a70746d7e171dfb33e009b5bbc11a00
# Copyright 2021 The NetKet Authors - All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Optional, Tuple import abc from flax import linen as nn from jax import numpy as jnp from netket.utils.types import PyTree, PRNGKeyT from netket.utils import struct @struct.dataclass class MetropolisRule(abc.ABC): """ Base class for transition rules of Metropolis, such as Local, Exchange, Hamiltonian and several others. """ def init_state( self, sampler: "MetropolisSampler", # noqa: F821 machine: nn.Module, params: PyTree, key: PRNGKeyT, ) -> Optional[Any]: """ Initialises the optional internal state of the Metropolis sampler transition rule. The provided key is unique and does not need to be splitted. It should return an immutable data structure. Arguments: sampler: The Metropolis sampler. machine: A Flax module with the forward pass of the log-pdf. params: The PyTree of parameters of the model. key: A Jax PRNGKey. Returns: An optional state. """ return None def reset( self, sampler: "MetropolisSampler", # noqa: F821 machine: nn.Module, params: PyTree, sampler_state: "SamplerState", # noqa: F821 ) -> Optional[Any]: """ Resets the internal state of the Metropolis Sampler Transition Rule. The default implementation returns the current rule_state without modifying it. Arguments: sampler: The Metropolis sampler. machine: A Flax module with the forward pass of the log-pdf. params: The PyTree of parameters of the model. sampler_state: The current state of the sampler. Should not modify it. Returns: A resetted, state of the rule. This returns the same type of :py:meth:`~nk.sampler.rule.MetropolisRule.rule_state` and might be `None`. """ return sampler_state.rule_state @abc.abstractmethod def transition( self, sampler: "MetropolisSampler", # noqa: F821 machine: nn.Module, params: PyTree, sampler_state: "SamplerState", # noqa: F821 key: PRNGKeyT, σ: jnp.ndarray, ) -> Tuple[jnp.ndarray, Optional[jnp.ndarray]]: r""" Proposes a new configuration set of configurations $\sigma'$ starting from the current chain configurations :math:`\sigma`. The new configurations :math:`\sigma'` should be a matrix with the same dimension as :math:`\sigma`. This function should return a tuple. where the first element are the new configurations $\sigma'$ and the second element is either `None` or an array of length `σ.shape[0]` containing an optional log-correction factor. The correction factor should be non-zero when the transition rule is non-symmetrical. Arguments: sampler: The Metropolis sampler. machine: A Flax module with the forward pass of the log-pdf. params: The PyTree of parameters of the model. sampler_state: The current state of the sampler. Should not modify it. key: A Jax PRNGKey to use to generate new random configurations. σ: The current configurations stored in a 2D matrix. Returns: A tuple containing the new configurations :math:`\sigma'` and the optional vector of log corrections to the transition probability. 
""" pass def random_state( self, sampler: "MetropolisSampler", # noqa: F821 machine: nn.Module, params: PyTree, sampler_state: "SamplerState", # noqa: F821 key: PRNGKeyT, ): """ Generates a random state compatible with this rule. By default this calls :func:`netket.hilbert.random.random_state`. Arguments: sampler: The Metropolis sampler. machine: A Flax module with the forward pass of the log-pdf. params: The PyTree of parameters of the model. sampler_state: The current state of the sampler. Should not modify it. key: The PRNGKey to use to generate the random state. """ return sampler.hilbert.random_state( key, size=sampler.n_batches, dtype=sampler.dtype )
py
1a4191f2cee5feb31153d14bc6c7270f8d23be86
from g2net.models.base.architectures import SpectroCNN
from g2net.models.base.wavegram import CNNSpectrogram


def create_base_model() -> SpectroCNN:
    return SpectroCNN(model_name='tf_efficientnet_b6_ns',
                      pretrained=True,
                      num_classes=1,
                      spectrogram=CNNSpectrogram,
                      spec_params=dict(
                          base_filters=128,
                          kernel_sizes=(64, 16, 4),
                      ),
                      resize_img=None,
                      custom_classifier='gem',
                      upsample='bicubic')
py
1a41925a2fe0f83e629ab07c4f5fdd6f7e76faef
from immobilus.logic import immobilus

__all__ = ['immobilus']
py
1a41929db6e2ecc112d4718466dcf01ecfad6800
import docker
from time import sleep

client = docker.from_env()

container = client.containers.run(
    'dasxran/tensorflow:trainimages',
    'python /image_classifier/scripts/return_pct.py --graph=/image_classifier/outputModel/retrained_graph.pb '
    '--labels=/image_classifier/outputModel/retrained_labels.txt --input_layer=Placeholder '
    '--output_layer=final_result --image=/image_classifier/imageScrapingData/image1.png --lookfor=magnifyingglass',
    detach=False, auto_remove=False, remove=True, tty=True, stdin_open=True,
    volumes={
        '/home/adam/workspace/Selenium-Machine-Learning/tfImageClassifier': {
            'bind': '/image_classifier',
            'mode': 'rw',
        }
    })

# detach=False mode
print(float(container.decode().split('\r\n')[-2]))

# detach=True mode
#sleep(10)
#dockerRes = container.logs()
#print(float(dockerRes.decode().split('\r\n')[-2]))
#container.remove()
py
1a4192b92681c811d834a86c2cd7928db9cbc672
from core.models import InstanceTag, Instance, Tag
from rest_framework import serializers
from api.v2.serializers.summaries import InstanceSuperSummarySerializer
from .tag import TagSerializer


class InstanceRelatedField(serializers.PrimaryKeyRelatedField):

    def get_queryset(self):
        return Instance.objects.all()

    def to_representation(self, value):
        instance = Instance.objects.get(pk=value.pk)
        # important! We have to use the SuperSummary because there are non-end_dated
        # instances that don't have a valid size (size='Unknown')
        serializer = InstanceSuperSummarySerializer(
            instance,
            context=self.context
        )
        return serializer.data


class TagRelatedField(serializers.PrimaryKeyRelatedField):

    def get_queryset(self):
        return Tag.objects.all()

    def to_representation(self, value):
        tag = Tag.objects.get(pk=value.pk)
        serializer = TagSerializer(tag, context=self.context)
        return serializer.data


class InstanceTagSerializer(serializers.HyperlinkedModelSerializer):
    instance = InstanceRelatedField(queryset=Instance.objects.none())
    tag = TagRelatedField(queryset=Tag.objects.none())
    url = serializers.HyperlinkedIdentityField(
        view_name='api:v2:instancetag-detail',
    )

    class Meta:
        model = InstanceTag
        fields = ('id', 'url', 'instance', 'tag')
py
1a4192bcc5625af114ec1d217bba18086653a006
""" Internal subroutines for e.g. aborting execution with an error message, or performing indenting on multiline output. """ import os import six import sys import struct import textwrap from traceback import format_exc def _encode(msg, stream): if six.PY2 and isinstance(msg, six.text_type) \ and hasattr(stream, 'encoding') and stream.encoding is not None: return msg.encode(stream.encoding) else: return str(msg) def isatty(stream): """Check if a stream is a tty. Not all file-like objects implement the `isatty` method. """ fn = getattr(stream, 'isatty', None) if fn is None: return False return fn() def abort(msg): """ Abort execution, print ``msg`` to stderr and exit with error status (1.) This function currently makes use of `SystemExit`_ in a manner that is similar to `sys.exit`_ (but which skips the automatic printing to stderr, allowing us to more tightly control it via settings). Therefore, it's possible to detect and recover from inner calls to `abort` by using ``except SystemExit`` or similar. .. _sys.exit: http://docs.python.org/library/sys.html#sys.exit .. _SystemExit: http://docs.python.org/library/exceptions.html#exceptions.SystemExit """ from fabric.state import output, env if not env.colorize_errors: red = lambda x: x # noqa: E731 else: from fabric.colors import red if output.aborts: sys.stderr.write(red("\nFatal error: %s\n" % _encode(msg, sys.stderr))) sys.stderr.write(red("\nAborting.\n")) if env.abort_exception: raise env.abort_exception(msg) else: # See issue #1318 for details on the below; it lets us construct a # valid, useful SystemExit while sidestepping the automatic stderr # print (which would otherwise duplicate with the above in a # non-controllable fashion). e = SystemExit(1) e.message = msg raise e def warn(msg): """ Print warning message, but do not abort execution. This function honors Fabric's :doc:`output controls <../../usage/output_controls>` and will print the given ``msg`` to stderr, provided that the ``warnings`` output level (which is active by default) is turned on. """ from fabric.state import output, env if not env.colorize_errors: magenta = lambda x: x # noqa: E731 else: from fabric.colors import magenta if output.warnings: msg = _encode(msg, sys.stderr) sys.stderr.write(magenta("\nWarning: %s\n\n" % msg)) def indent(text, spaces=4, strip=False): """ Return ``text`` indented by the given number of spaces. If text is not a string, it is assumed to be a list of lines and will be joined by ``\\n`` prior to indenting. When ``strip`` is ``True``, a minimum amount of whitespace is removed from the left-hand side of the given string (so that relative indents are preserved, but otherwise things are left-stripped). This allows you to effectively "normalize" any previous indentation for some inputs. """ # Normalize list of strings into a string for dedenting. "list" here means # "not a string" meaning "doesn't have splitlines". Meh. if not hasattr(text, 'splitlines'): text = '\n'.join(text) # Dedent if requested if strip: text = textwrap.dedent(text) prefix = ' ' * spaces output = '\n'.join(prefix + line for line in text.splitlines()) # Strip out empty lines before/aft output = output.strip() # Reintroduce first indent (which just got stripped out) output = prefix + output return output def puts(text, show_prefix=None, end="\n", flush=False): """ An alias for ``print`` whose output is managed by Fabric's output controls. 
In other words, this function simply prints to ``sys.stdout``, but will hide its output if the ``user`` :doc:`output level </usage/output_controls>` is set to ``False``. If ``show_prefix=False``, `puts` will omit the leading ``[hostname]`` which it tacks on by default. (It will also omit this prefix if ``env.host_string`` is empty.) Newlines may be disabled by setting ``end`` to the empty string (``''``). (This intentionally mirrors Python 3's ``print`` syntax.) You may force output flushing (e.g. to bypass output buffering) by setting ``flush=True``. .. seealso:: `~fabric.utils.fastprint` """ from fabric.state import output, env if show_prefix is None: show_prefix = env.output_prefix if output.user: prefix = "" if env.host_string and show_prefix: prefix = "[%s] " % env.host_string sys.stdout.write(prefix + _encode(text, sys.stdout) + end) if flush: sys.stdout.flush() def fastprint(text, show_prefix=False, end="", flush=True): """ Print ``text`` immediately, without any prefix or line ending. This function is simply an alias of `~fabric.utils.puts` with different default argument values, such that the ``text`` is printed without any embellishment and immediately flushed. It is useful for any situation where you wish to print text which might otherwise get buffered by Python's output buffering (such as within a processor intensive ``for`` loop). Since such use cases typically also require a lack of line endings (such as printing a series of dots to signify progress) it also omits the traditional newline by default. .. note:: Since `~fabric.utils.fastprint` calls `~fabric.utils.puts`, it is likewise subject to the ``user`` :doc:`output level </usage/output_controls>`. .. seealso:: `~fabric.utils.puts` """ return puts(text=text, show_prefix=show_prefix, end=end, flush=flush) def handle_prompt_abort(prompt_for): import fabric.state reason = "Needed to prompt for %s (host: %s), but %%s" % ( prompt_for, fabric.state.env.host_string ) # Explicit "don't prompt me bro" if fabric.state.env.abort_on_prompts: abort(reason % "abort-on-prompts was set to True") # Implicit "parallel == stdin/prompts have ambiguous target" if fabric.state.env.parallel: abort(reason % "input would be ambiguous in parallel mode") class _AttributeDict(dict): """ Dictionary subclass enabling attribute lookup/assignment of keys/values. For example:: >>> m = _AttributeDict({'foo': 'bar'}) >>> m.foo 'bar' >>> m.foo = 'not bar' >>> m['foo'] 'not bar' ``_AttributeDict`` objects also provide ``.first()`` which acts like ``.get()`` but accepts multiple keys as arguments, and returns the value of the first hit, e.g.:: >>> m = _AttributeDict({'foo': 'bar', 'biz': 'baz'}) >>> m.first('wrong', 'incorrect', 'foo', 'biz') 'bar' """ def __getattr__(self, key): try: return self[key] except KeyError: # to conform with __getattr__ spec raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def first(self, *names): for name in names: value = self.get(name) if value: return value class _AliasDict(_AttributeDict): """ `_AttributeDict` subclass that allows for "aliasing" of keys to other keys. Upon creation, takes an ``aliases`` mapping, which should map alias names to lists of key names. Aliases do not store their own value, but instead set (override) all mapped keys' values. 
For example, in the following `_AliasDict`, calling ``mydict['foo'] = True`` will set the values of ``mydict['bar']``, ``mydict['biz']`` and ``mydict['baz']`` all to True:: mydict = _AliasDict( {'biz': True, 'baz': False}, aliases={'foo': ['bar', 'biz', 'baz']} ) Because it is possible for the aliased values to be in a heterogenous state, reading aliases is not supported -- only writing to them is allowed. This also means they will not show up in e.g. ``dict.keys()``. .. note:: Aliases are recursive, so you may refer to an alias within the key list of another alias. Naturally, this means that you can end up with infinite loops if you're not careful. `_AliasDict` provides a special function, `expand_aliases`, which will take a list of keys as an argument and will return that list of keys with any aliases expanded. This function will **not** dedupe, so any aliases which overlap will result in duplicate keys in the resulting list. """ def __init__(self, arg=None, aliases=None): init = super(_AliasDict, self).__init__ if arg is not None: init(arg) else: init() # Can't use super() here because of _AttributeDict's setattr override dict.__setattr__(self, 'aliases', aliases) def __setitem__(self, key, value): # Attr test required to not blow up when deepcopy'd if hasattr(self, 'aliases') and key in self.aliases: for aliased in self.aliases[key]: self[aliased] = value else: return super(_AliasDict, self).__setitem__(key, value) def expand_aliases(self, keys): ret = [] for key in keys: if key in self.aliases: ret.extend(self.expand_aliases(self.aliases[key])) else: ret.append(key) return ret def _pty_size(): """ Obtain (rows, cols) tuple for sizing a pty on the remote end. Defaults to 80x24 (which is also the 'ssh' lib's default) but will detect local (stdout-based) terminal window size on non-Windows platforms. """ win32 = (sys.platform == 'win32') default_rows, default_cols = 24, 80 rows, cols = default_rows, default_cols if not win32 and isatty(sys.stdout): import fcntl import termios # We want two short unsigned integers (rows, cols) fmt = 'HH' # Create an empty (zeroed) buffer for ioctl to map onto. Yay for C! buffer = struct.pack(fmt, 0, 0) # Call TIOCGWINSZ to get window size of stdout, returns our filled # buffer try: result = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, buffer) # Unpack buffer back into Python data types rows, cols = struct.unpack(fmt, result) # Fall back to defaults if TIOCGWINSZ returns unreasonable values if rows == 0: rows = default_rows if cols == 0: cols = default_cols # Deal with e.g. sys.stdout being monkeypatched, such as in testing. # Or termios not having a TIOCGWINSZ. except AttributeError: pass return rows, cols def error(message, func=None, exception=None, stdout=None, stderr=None): """ Call ``func`` with given error ``message``. If ``func`` is None (the default), the value of ``env.warn_only`` determines whether to call ``abort`` or ``warn``. If ``exception`` is given, it is inspected to get a string message, which is printed alongside the user-generated ``message``. If ``stdout`` and/or ``stderr`` are given, they are assumed to be strings to be printed. """ import fabric.state if func is None: func = fabric.state.env.warn_only and warn or abort # If exception printing is on, append a traceback to the message if fabric.state.output.exceptions or fabric.state.output.debug: exception_message = format_exc() if exception_message: message += "\n\n" + exception_message # Otherwise, if we were given an exception, append its contents. 
elif exception is not None: # Figure out how to get a string out of the exception; EnvironmentError # subclasses, for example, "are" integers and .strerror is the string. # Others "are" strings themselves. May have to expand this further for # other error types. if hasattr(exception, 'strerror') and exception.strerror is not None: underlying = exception.strerror else: underlying = exception message += "\n\nUnderlying exception:\n" + indent(str(underlying)) if func is abort: if stdout and not fabric.state.output.stdout: message += _format_error_output("Standard output", stdout) if stderr and not fabric.state.output.stderr: message += _format_error_output("Standard error", stderr) return func(message) def _format_error_output(header, body): term_width = _pty_size()[1] header_side_length = int((term_width - (len(header) + 2)) / 2) mark = "=" side = mark * header_side_length return "\n\n%s %s %s\n\n%s\n\n%s" % ( side, header, side, body, mark * term_width ) def apply_lcwd(path, env): # Apply CWD if a relative path if not os.path.isabs(path) and env.lcwd: path = os.path.join(env.lcwd, path) return path
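# --- Illustrative usage (not part of fabric) ---
# A small demonstration of the _AliasDict behaviour documented above: writing to an alias
# fans the value out to every mapped key, and expand_aliases() resolves nested aliases
# without deduplicating.
if __name__ == '__main__':
    mydict = _AliasDict(
        {'biz': True, 'baz': False},
        aliases={'foo': ['bar', 'biz', 'baz'], 'everything': ['foo']}
    )
    mydict['foo'] = 'x'     # sets 'bar', 'biz' and 'baz' all to 'x'
    print(mydict['bar'], mydict['biz'], mydict['baz'])      # x x x
    print(mydict.expand_aliases(['everything']))            # ['bar', 'biz', 'baz']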
py
1a4193b5af95bd5bd02cffccc8c714fdf253dc72
from programs.schema.attributes.abstractattribute import AbstractAttribute
from constants import CC


class HHRaceAttr(AbstractAttribute):

    @staticmethod
    def getName():
        return CC.ATTR_HHRACE

    @staticmethod
    def getLevels():
        return {
            'white'      : [0],
            'black'      : [1],
            'aian'       : [2],
            'asian'      : [3],
            'nhopi'      : [4],
            'sor'        : [5],
            'two or more': [6]
        }

    @staticmethod
    def recodeWhiteAlone():
        name = CC.HHRACE_WHITEALONE
        groupings = {
            "White alone": [0]
        }
        return name, groupings
py
1a4194aff9633ab034df6780d6a4980638afd6d8
'''
In Python, we can pass a variable number of arguments to a function using special symbols.
There are two special symbols:

*args (Non-Keyword Arguments)
**kwargs (Keyword Arguments)

We use *args and **kwargs as arguments when we are unsure about the number of arguments
to pass to a function.
'''

def func(a, b, c, d):
    return sum((a, b, c, d)) * 0.18

print(func(12, 14, 45, 12))

# When we don't know the number of arguments the user is going to enter, we use *args.
# It allows us to accept an arbitrary number of positional arguments: all the values are
# collected into a tuple, and the operation is performed on the tuple as a whole.
def func1(*args):
    return sum(args) * 0.18

print(func1(2323, 344, 534, 6, 7, 567867, 878, 989, 980, 980))

def func2(*args):
    for i in args:
        print(i)

print(func2(2323, 344, 534, 6, 7, 567867, 878, 989, 980, 980))

# **kwargs allows us to accept an arbitrary number of keyword arguments.
# Inside the function, kwargs is a dictionary.
def func3(**kwargs):
    if 'fruit' in kwargs:
        print(f"My fruit of choice is {kwargs['fruit']}")
    else:
        print("I did not find any fruits")

print(func3(fruit="apple", car="aston martin", color="red", band="maroon 5"))

# We can use names other than args and kwargs, but args and kwargs are preferred by convention.

# Combination of both args and kwargs
def combo(*args, **kwargs):
    print(args)    # tuple
    print(kwargs)  # dictionary
    print(f"These are the best combinations: {args[0]}, {kwargs['food']}")

print(combo(87, 4, 5, food='apple', color='red', watch='casio'))
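# A common next step (added as an illustration, not part of the original note): *args and
# **kwargs also let a wrapper forward whatever it receives to another function unchanged.
def logged(func):
    def wrapper(*args, **kwargs):
        print(f"calling {func.__name__} with args={args} kwargs={kwargs}")
        return func(*args, **kwargs)
    return wrapper

@logged
def add(a, b, sep=", "):
    return f"{a}{sep}{b}"

print(add(1, 2, sep=" + "))   # logs the call, then prints '1 + 2'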
py
1a41957590a79d40cea7cdb4bd024a12cd9c7db9
# coding:utf-8
from flask import Flask
from flask import request

app = Flask(__name__)


@app.route('/', methods=['GET', 'POST'])
def home():
    return '<h1>Home</h1>'


@app.route('/signin', methods=['GET'])
def signin_form():
    return '''<form action="/signin" method="post">
              <p><input name="username"></p>
              <p><input name="password" type="password"></p>
              <p><button type="submit">Sign In</button></p>
              </form>'''


@app.route('/signin', methods=['POST'])
def signin():
    # Read the submitted form fields from the request object:
    if request.form['username'] == 'admin' and request.form['password'] == 'password':
        return '<h3>Hello, admin!</h3>'
    return '<h3>Bad username or password.</h3>'


if __name__ == '__main__':
    app.run()
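# Illustrative check (not part of the original app): Flask's built-in test client can
# exercise the /signin handler above without starting a server. The function name is mine.
def smoke_test():
    client = app.test_client()
    ok = client.post('/signin', data={'username': 'admin', 'password': 'password'})
    bad = client.post('/signin', data={'username': 'admin', 'password': 'wrong'})
    assert b'Hello, admin!' in ok.data
    assert b'Bad username or password.' in bad.data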
py
1a4196c5038c1f5a7416bd060268433511f8e3ce
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import urlparse from oslo.config import cfg import routes as routes_mapper import webob import webob.dec import webob.exc from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import base from neutron import manager from neutron.openstack.common import log as logging from neutron import wsgi LOG = logging.getLogger(__name__) RESOURCES = {'network': 'networks', 'subnet': 'subnets', 'port': 'ports'} SUB_RESOURCES = {} COLLECTION_ACTIONS = ['index', 'create'] MEMBER_ACTIONS = ['show', 'update', 'delete'] REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'xml|json'} class Index(wsgi.Application): def __init__(self, resources): self.resources = resources @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): metadata = {'application/xml': {'attributes': { 'resource': ['name', 'collection'], 'link': ['href', 'rel']}}} layout = [] for name, collection in self.resources.iteritems(): href = urlparse.urljoin(req.path_url, collection) resource = {'name': name, 'collection': collection, 'links': [{'rel': 'self', 'href': href}]} layout.append(resource) response = dict(resources=layout) content_type = req.best_match_content_type() body = wsgi.Serializer(metadata=metadata).serialize(response, content_type) return webob.Response(body=body, content_type=content_type) class APIRouter(wsgi.Router): @classmethod def factory(cls, global_config, **local_config): return cls(**local_config) def __init__(self, **local_config): mapper = routes_mapper.Mapper() plugin = manager.NeutronManager.get_plugin() ext_mgr = extensions.PluginAwareExtensionManager.get_instance() ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP) col_kwargs = dict(collection_actions=COLLECTION_ACTIONS, member_actions=MEMBER_ACTIONS) def _map_resource(collection, resource, params, parent=None): allow_bulk = cfg.CONF.allow_bulk allow_pagination = cfg.CONF.allow_pagination allow_sorting = cfg.CONF.allow_sorting controller = base.create_resource( collection, resource, plugin, params, allow_bulk=allow_bulk, parent=parent, allow_pagination=allow_pagination, allow_sorting=allow_sorting) path_prefix = None if parent: path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'], parent['member_name'], collection) mapper_kwargs = dict(controller=controller, requirements=REQUIREMENTS, path_prefix=path_prefix, **col_kwargs) return mapper.collection(collection, resource, **mapper_kwargs) mapper.connect('index', '/', controller=Index(RESOURCES)) for resource in RESOURCES: _map_resource(RESOURCES[resource], resource, attributes.RESOURCE_ATTRIBUTE_MAP.get( RESOURCES[resource], dict())) for resource in SUB_RESOURCES: _map_resource(SUB_RESOURCES[resource]['collection_name'], resource, attributes.RESOURCE_ATTRIBUTE_MAP.get( SUB_RESOURCES[resource]['collection_name'], dict()), SUB_RESOURCES[resource]['parent']) super(APIRouter, self).__init__(mapper)
py
1a4197b4fdcdd1c19270819bd67f129717812d34
# 开发环境配置文件 """ Django settings for meiduo_mall project. Generated by 'django-admin startproject' using Django 2.2.5. For more information on this file, see https://docs.djangoproject.com/en/2.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.2/ref/settings/ """ import os import sys # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # print(sys.path) print(os.path.join(os.path.dirname(BASE_DIR), 'logs\meiduo.log')) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'baot-=8^^-%ufl*5=yi&3b@b_b2e#nm1@$)im*$55_m6v4r$s8' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'meiduo_mall.urls' TEMPLATES = [ { 'BACKEND': "django.template.backends.jinja2.Jinja2", # 配置Jinja2模板引擎 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], # 补充jinja2模板环境引擎 'environment': 'meiduo_mall.utils.jinja2_env.jinja2_environment', }, }, { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'meiduo_mall.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 'ENGINE': 'django.db.backends.mysql', 'HOST': '192.168.33.3', 'PORT': 3306, 'USER': 'ringo', 'PASSWORD': '123456', 'NAME': 'meiduo' } } # 配置Redis数据库 CACHES = { "default": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://192.168.33.3:6379/0", "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", } }, "session": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://192.168.33.3:6379/1", "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", } } } SESSION_ENGINE = "django.contrib.sessions.backends.cache" SESSION_CACHE_ALIAS = "session" # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 
'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")] # 配置日志工程 LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'filters': { 'require_debug_true': { # 在debug模式下才输出日志 '()': 'django.utils.log.RequireDebugTrue', }, }, 'handlers': { 'console': { 'level': 'INFO', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', 'formatter': 'simple' }, 'file': { # 向文件输出日志 'level': 'INFO', # 输出级别 'class': 'logging.handlers.RotatingFileHandler', 'filename': os.path.join(os.path.dirname(BASE_DIR), 'logs\meiduo.log'), # 输出路径 'maxBytes': 300 * 1024 * 1024, 'backupCount': 10, 'formatter': 'verbose' }, }, 'loggers': { # 日志器 'django': { # 定义一个名为django的日志器 'handlers': ['console', 'file'], # 同时向终端和日志文件输出日志 # 'handlers': ['console'], 'propagate': True, # 是否续传日志 'level': 'INFO', # 日志最低级别 }, }, }
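# The Jinja2 TEMPLATES backend above points at 'meiduo_mall.utils.jinja2_env.jinja2_environment'.
# That module is not shown here; a typical implementation (a sketch under that assumption,
# not this project's actual code) registers Django's static() and reverse() helpers as
# Jinja2 globals so templates can call {{ static(...) }} and {{ url(...) }}:
#
#   from jinja2 import Environment
#   from django.templatetags.static import static
#   from django.urls import reverse
#
#   def jinja2_environment(**options):
#       env = Environment(**options)
#       env.globals.update({'static': static, 'url': reverse})
#       return env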
py
1a4198c5f8e97926bd91493cbdb8ec2eca58ee27
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', 'stack', 'vstack'] import functools import itertools import operator import warnings from . import numeric as _nx from . import overrides from .multiarray import array, asanyarray, normalize_axis_index from . import fromnumeric as _from_nx array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') def _atleast_1d_dispatcher(*arys): return arys @array_function_dispatch(_atleast_1d_dispatcher) def atleast_1d(*arys): """ Convert inputs to arrays with at least one dimension. Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved. Parameters ---------- arys1, arys2, ... : array_like One or more input arrays. Returns ------- ret : ndarray An array, or list of arrays, each with ``a.ndim >= 1``. Copies are made only if necessary. See Also -------- atleast_2d, atleast_3d Examples -------- >>> np.atleast_1d(1.0) array([1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]) >>> np.atleast_1d(x) is x True >>> np.atleast_1d(1, [3, 4]) [array([1]), array([3, 4])] """ res = [] for ary in arys: ary = asanyarray(ary) if ary.ndim == 0: result = ary.reshape(1) else: result = ary res.append(result) if len(res) == 1: return res[0] else: return res def _atleast_2d_dispatcher(*arys): return arys @array_function_dispatch(_atleast_2d_dispatcher) def atleast_2d(*arys): """ View inputs as arrays with at least two dimensions. Parameters ---------- arys1, arys2, ... : array_like One or more array-like sequences. Non-array inputs are converted to arrays. Arrays that already have two or more dimensions are preserved. Returns ------- res, res2, ... : ndarray An array, or list of arrays, each with ``a.ndim >= 2``. Copies are avoided where possible, and views with two or more dimensions are returned. See Also -------- atleast_1d, atleast_3d Examples -------- >>> np.atleast_2d(3.0) array([[3.]]) >>> x = np.arange(3.0) >>> np.atleast_2d(x) array([[0., 1., 2.]]) >>> np.atleast_2d(x).base is x True >>> np.atleast_2d(1, [1, 2], [[1, 2]]) [array([[1]]), array([[1, 2]]), array([[1, 2]])] """ res = [] for ary in arys: ary = asanyarray(ary) if ary.ndim == 0: result = ary.reshape(1, 1) elif ary.ndim == 1: result = ary[_nx.newaxis, :] else: result = ary res.append(result) if len(res) == 1: return res[0] else: return res def _atleast_3d_dispatcher(*arys): return arys @array_function_dispatch(_atleast_3d_dispatcher) def atleast_3d(*arys): """ View inputs as arrays with at least three dimensions. Parameters ---------- arys1, arys2, ... : array_like One or more array-like sequences. Non-array inputs are converted to arrays. Arrays that already have three or more dimensions are preserved. Returns ------- res1, res2, ... : ndarray An array, or list of arrays, each with ``a.ndim >= 3``. Copies are avoided where possible, and views with three or more dimensions are returned. For example, a 1-D array of shape ``(N,)`` becomes a view of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a view of shape ``(M, N, 1)``. See Also -------- atleast_1d, atleast_2d Examples -------- >>> np.atleast_3d(3.0) array([[[3.]]]) >>> x = np.arange(3.0) >>> np.atleast_3d(x).shape (1, 3, 1) >>> x = np.arange(12.0).reshape(4,3) >>> np.atleast_3d(x).shape (4, 3, 1) >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself True >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): ... 
print(arr, arr.shape) # doctest: +SKIP ... [[[1] [2]]] (1, 2, 1) [[[1] [2]]] (1, 2, 1) [[[1 2]]] (1, 1, 2) """ res = [] for ary in arys: ary = asanyarray(ary) if ary.ndim == 0: result = ary.reshape(1, 1, 1) elif ary.ndim == 1: result = ary[_nx.newaxis, :, _nx.newaxis] elif ary.ndim == 2: result = ary[:, :, _nx.newaxis] else: result = ary res.append(result) if len(res) == 1: return res[0] else: return res def _arrays_for_stack_dispatcher(arrays, stacklevel=4): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): warnings.warn('arrays to stack must be passed as a "sequence" type ' 'such as list or tuple. Support for non-sequence ' 'iterables such as generators is deprecated as of ' 'NumPy 1.16 and will raise an error in the future.', FutureWarning, stacklevel=stacklevel) return () return arrays def _vhstack_dispatcher(tup): return _arrays_for_stack_dispatcher(tup) @array_function_dispatch(_vhstack_dispatcher) def vstack(tup): """ Stack arrays in sequence vertically (row wise). This is equivalent to concatenation along the first axis after 1-D arrays of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by `vsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate`, `stack` and `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the first axis. 1-D arrays must have the same length. Returns ------- stacked : ndarray The array formed by stacking the given arrays, will be at least 2-D. See Also -------- concatenate : Join a sequence of arrays along an existing axis. stack : Join a sequence of arrays along a new axis. block : Assemble an nd-array from nested lists of blocks. hstack : Stack arrays in sequence horizontally (column wise). dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. vsplit : Split an array into multiple sub-arrays vertically (row-wise). Examples -------- >>> a = np.array([1, 2, 3]) >>> b = np.array([4, 5, 6]) >>> np.vstack((a,b)) array([[1, 2, 3], [4, 5, 6]]) >>> a = np.array([[1], [2], [3]]) >>> b = np.array([[4], [5], [6]]) >>> np.vstack((a,b)) array([[1], [2], [3], [4], [5], [6]]) """ if not overrides.ARRAY_FUNCTION_ENABLED: # raise warning if necessary _arrays_for_stack_dispatcher(tup, stacklevel=2) arrs = atleast_2d(*tup) if not isinstance(arrs, list): arrs = [arrs] return _nx.concatenate(arrs, 0) @array_function_dispatch(_vhstack_dispatcher) def hstack(tup): """ Stack arrays in sequence horizontally (column wise). This is equivalent to concatenation along the second axis, except for 1-D arrays where it concatenates along the first axis. Rebuilds arrays divided by `hsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate`, `stack` and `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length. Returns ------- stacked : ndarray The array formed by stacking the given arrays. See Also -------- concatenate : Join a sequence of arrays along an existing axis. 
stack : Join a sequence of arrays along a new axis. block : Assemble an nd-array from nested lists of blocks. vstack : Stack arrays in sequence vertically (row wise). dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. hsplit : Split an array into multiple sub-arrays horizontally (column-wise). Examples -------- >>> a = np.array((1,2,3)) >>> b = np.array((4,5,6)) >>> np.hstack((a,b)) array([1, 2, 3, 4, 5, 6]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[4],[5],[6]]) >>> np.hstack((a,b)) array([[1, 4], [2, 5], [3, 6]]) """ if not overrides.ARRAY_FUNCTION_ENABLED: # raise warning if necessary _arrays_for_stack_dispatcher(tup, stacklevel=2) arrs = atleast_1d(*tup) if not isinstance(arrs, list): arrs = [arrs] # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" if arrs and arrs[0].ndim == 1: return _nx.concatenate(arrs, 0) else: return _nx.concatenate(arrs, 1) def _stack_dispatcher(arrays, axis=None, out=None): arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6) if out is not None: # optimize for the typical case where only arrays is provided arrays = list(arrays) arrays.append(out) return arrays @array_function_dispatch(_stack_dispatcher) def stack(arrays, axis=0, out=None): """ Join a sequence of arrays along a new axis. The ``axis`` parameter specifies the index of the new axis in the dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. .. versionadded:: 1.10.0 Parameters ---------- arrays : sequence of array_like Each array must have the same shape. axis : int, optional The axis in the result array along which the input arrays are stacked. out : ndarray, optional If provided, the destination to place the result. The shape must be correct, matching that of what stack would have returned if no out argument were specified. Returns ------- stacked : ndarray The stacked array has one more dimension than the input arrays. See Also -------- concatenate : Join a sequence of arrays along an existing axis. block : Assemble an nd-array from nested lists of blocks. split : Split array into a list of multiple sub-arrays of equal size. Examples -------- >>> arrays = [np.random.randn(3, 4) for _ in range(10)] >>> np.stack(arrays, axis=0).shape (10, 3, 4) >>> np.stack(arrays, axis=1).shape (3, 10, 4) >>> np.stack(arrays, axis=2).shape (3, 4, 10) >>> a = np.array([1, 2, 3]) >>> b = np.array([4, 5, 6]) >>> np.stack((a, b)) array([[1, 2, 3], [4, 5, 6]]) >>> np.stack((a, b), axis=-1) array([[1, 4], [2, 5], [3, 6]]) """ if not overrides.ARRAY_FUNCTION_ENABLED: # raise warning if necessary _arrays_for_stack_dispatcher(arrays, stacklevel=2) arrays = [asanyarray(arr) for arr in arrays] if not arrays: raise ValueError('need at least one array to stack') shapes = {arr.shape for arr in arrays} if len(shapes) != 1: raise ValueError('all input arrays must have the same shape') result_ndim = arrays[0].ndim + 1 axis = normalize_axis_index(axis, result_ndim) sl = (slice(None),) * axis + (_nx.newaxis,) expanded_arrays = [arr[sl] for arr in arrays] return _nx.concatenate(expanded_arrays, axis=axis, out=out) # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. # Use getattr to protect against __array_function__ being disabled. 
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size) _ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim) _concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate) def _block_format_index(index): """ Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. """ idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) return 'arrays' + idx_str def _block_check_depths_match(arrays, parent_index=[]): """ Recursive function checking that the depths of nested lists in `arrays` all match. Mismatch raises a ValueError as described in the block docstring below. The entire index (rather than just the depth) needs to be calculated for each innermost list, in case an error needs to be raised, so that the index of the offending list can be printed as part of the error. Parameters ---------- arrays : nested list of arrays The arrays to check parent_index : list of int The full index of `arrays` within the nested lists passed to `_block_check_depths_match` at the top of the recursion. Returns ------- first_index : list of int The full index of an element from the bottom of the nesting in `arrays`. If any element at the bottom is an empty list, this will refer to it, and the last index along the empty axis will be None. max_arr_ndim : int The maximum of the ndims of the arrays nested in `arrays`. final_size: int The number of elements in the final array. This is used the motivate the choice of algorithm used using benchmarking wisdom. """ if type(arrays) is tuple: # not strictly necessary, but saves us from: # - more than one way to do things - no point treating tuples like # lists # - horribly confusing behaviour that results when tuples are # treated like ndarray raise TypeError( '{} is a tuple. ' 'Only lists can be used to arrange blocks, and np.block does ' 'not allow implicit conversion from tuple to ndarray.'.format( _block_format_index(parent_index) ) ) elif type(arrays) is list and len(arrays) > 0: idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) for i, arr in enumerate(arrays)) first_index, max_arr_ndim, final_size = next(idxs_ndims) for index, ndim, size in idxs_ndims: final_size += size if ndim > max_arr_ndim: max_arr_ndim = ndim if len(index) != len(first_index): raise ValueError( "List depths are mismatched. First element was at depth " "{}, but there is an element at depth {} ({})".format( len(first_index), len(index), _block_format_index(index) ) ) # propagate our flag that indicates an empty list at the bottom if index[-1] is None: first_index = index return first_index, max_arr_ndim, final_size elif type(arrays) is list and len(arrays) == 0: # We've 'bottomed out' on an empty list return parent_index + [None], 0, 0 else: # We've 'bottomed out' - arrays is either a scalar or an array size = _size(arrays) return parent_index, _ndim(arrays), size def _atleast_nd(a, ndim): # Ensures `a` has at least `ndim` dimensions by prepending # ones to `a.shape` as necessary return array(a, ndmin=ndim, copy=False, subok=True) def _accumulate(values): return list(itertools.accumulate(values)) def _concatenate_shapes(shapes, axis): """Given array shapes, return the resulting shape and slices prefixes. These help in nested concatenation. 
Returns ------- shape: tuple of int This tuple satisfies:: shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis) shape == concatenate(arrs, axis).shape slice_prefixes: tuple of (slice(start, end), ) For a list of arrays being concatenated, this returns the slice in the larger array at axis that needs to be sliced into. For example, the following holds:: ret = concatenate([a, b, c], axis) _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis) ret[(slice(None),) * axis + sl_a] == a ret[(slice(None),) * axis + sl_b] == b ret[(slice(None),) * axis + sl_c] == c These are called slice prefixes since they are used in the recursive blocking algorithm to compute the left-most slices during the recursion. Therefore, they must be prepended to rest of the slice that was computed deeper in the recursion. These are returned as tuples to ensure that they can quickly be added to existing slice tuple without creating a new tuple every time. """ # Cache a result that will be reused. shape_at_axis = [shape[axis] for shape in shapes] # Take a shape, any shape first_shape = shapes[0] first_shape_pre = first_shape[:axis] first_shape_post = first_shape[axis+1:] if any(shape[:axis] != first_shape_pre or shape[axis+1:] != first_shape_post for shape in shapes): raise ValueError( 'Mismatched array shapes in block along axis {}.'.format(axis)) shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:]) offsets_at_axis = _accumulate(shape_at_axis) slice_prefixes = [(slice(start, end),) for start, end in zip([0] + offsets_at_axis, offsets_at_axis)] return shape, slice_prefixes def _block_info_recursion(arrays, max_depth, result_ndim, depth=0): """ Returns the shape of the final array, along with a list of slices and a list of arrays that can be used for assignment inside the new array Parameters ---------- arrays : nested list of arrays The arrays to check max_depth : list of int The number of nested lists result_ndim : int The number of dimensions in thefinal array. Returns ------- shape : tuple of int The shape that the final array will take on. slices: list of tuple of slices The slices into the full array required for assignment. These are required to be prepended with ``(Ellipsis, )`` to obtain to correct final index. arrays: list of ndarray The data to assign to each slice of the full array """ if depth < max_depth: shapes, slices, arrays = zip( *[_block_info_recursion(arr, max_depth, result_ndim, depth+1) for arr in arrays]) axis = result_ndim - max_depth + depth shape, slice_prefixes = _concatenate_shapes(shapes, axis) # Prepend the slice prefix and flatten the slices slices = [slice_prefix + the_slice for slice_prefix, inner_slices in zip(slice_prefixes, slices) for the_slice in inner_slices] # Flatten the array list arrays = functools.reduce(operator.add, arrays) return shape, slices, arrays else: # We've 'bottomed out' - arrays is either a scalar or an array # type(arrays) is not list # Return the slice and the array inside a list to be consistent with # the recursive case. arr = _atleast_nd(arrays, result_ndim) return arr.shape, [()], [arr] def _block(arrays, max_depth, result_ndim, depth=0): """ Internal implementation of block based on repeated concatenation. `arrays` is the argument passed to block. `max_depth` is the depth of nested lists within `arrays` and `result_ndim` is the greatest of the dimensions of the arrays in `arrays` and the depth of the lists in `arrays` (see block docstring for details). 
""" if depth < max_depth: arrs = [_block(arr, max_depth, result_ndim, depth+1) for arr in arrays] return _concatenate(arrs, axis=-(max_depth-depth)) else: # We've 'bottomed out' - arrays is either a scalar or an array # type(arrays) is not list return _atleast_nd(arrays, result_ndim) def _block_dispatcher(arrays): # Use type(...) is list to match the behavior of np.block(), which special # cases list specifically rather than allowing for generic iterables or # tuple. Also, we know that list.__array_function__ will never exist. if type(arrays) is list: for subarrays in arrays: yield from _block_dispatcher(subarrays) else: yield arrays @array_function_dispatch(_block_dispatcher) def block(arrays): """ Assemble an nd-array from nested lists of blocks. Blocks in the innermost lists are concatenated (see `concatenate`) along the last dimension (-1), then these are concatenated along the second-last dimension (-2), and so on until the outermost list is reached. Blocks can be of any dimension, but will not be broadcasted using the normal rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` the same for all blocks. This is primarily useful for working with scalars, and means that code like ``np.block([v, 1])`` is valid, where ``v.ndim == 1``. When the nested list is two levels deep, this allows block matrices to be constructed from their components. .. versionadded:: 1.13.0 Parameters ---------- arrays : nested list of array_like or scalars (but not tuples) If passed a single ndarray or scalar (a nested list of depth 0), this is returned unmodified (and not copied). Elements shapes must match along the appropriate axes (without broadcasting), but leading 1s will be prepended to the shape as necessary to make the dimensions match. Returns ------- block_array : ndarray The array assembled from the given blocks. The dimensionality of the output is equal to the greatest of: * the dimensionality of all the inputs * the depth to which the input list is nested Raises ------ ValueError * If list depths are mismatched - for instance, ``[[a, b], c]`` is illegal, and should be spelt ``[[a, b], [c]]`` * If lists are empty - for instance, ``[[a, b], []]`` See Also -------- concatenate : Join a sequence of arrays along an existing axis. stack : Join a sequence of arrays along a new axis. vstack : Stack arrays in sequence vertically (row wise). hstack : Stack arrays in sequence horizontally (column wise). dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. vsplit : Split an array into multiple sub-arrays vertically (row-wise). Notes ----- When called with only scalars, ``np.block`` is equivalent to an ndarray call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to ``np.array([[1, 2], [3, 4]])``. This function does not enforce that the blocks lie on a fixed grid. ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: AAAbb AAAbb cccDD But is also allowed to produce, for some ``a, b, c, d``:: AAAbb AAAbb cDDDD Since concatenation happens along the last axis first, `block` is _not_ capable of producing the following directly:: AAAbb cccbb cccDD Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. Examples -------- The most common use of this function is to build a block matrix >>> A = np.eye(2) * 2 >>> B = np.eye(3) * 3 >>> np.block([ ... [A, np.zeros((2, 3))], ... [np.ones((3, 2)), B ] ... 
]) array([[2., 0., 0., 0., 0.], [0., 2., 0., 0., 0.], [1., 1., 3., 0., 0.], [1., 1., 0., 3., 0.], [1., 1., 0., 0., 3.]]) With a list of depth 1, `block` can be used as `hstack` >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) array([1, 2, 3]) >>> a = np.array([1, 2, 3]) >>> b = np.array([4, 5, 6]) >>> np.block([a, b, 10]) # hstack([a, b, 10]) array([ 1, 2, 3, 4, 5, 6, 10]) >>> A = np.ones((2, 2), int) >>> B = 2 * A >>> np.block([A, B]) # hstack([A, B]) array([[1, 1, 2, 2], [1, 1, 2, 2]]) With a list of depth 2, `block` can be used in place of `vstack`: >>> a = np.array([1, 2, 3]) >>> b = np.array([4, 5, 6]) >>> np.block([[a], [b]]) # vstack([a, b]) array([[1, 2, 3], [4, 5, 6]]) >>> A = np.ones((2, 2), int) >>> B = 2 * A >>> np.block([[A], [B]]) # vstack([A, B]) array([[1, 1], [1, 1], [2, 2], [2, 2]]) It can also be used in places of `atleast_1d` and `atleast_2d` >>> a = np.array(0) >>> b = np.array([1]) >>> np.block([a]) # atleast_1d(a) array([0]) >>> np.block([b]) # atleast_1d(b) array([1]) >>> np.block([[a]]) # atleast_2d(a) array([[0]]) >>> np.block([[b]]) # atleast_2d(b) array([[1]]) """ arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) # It was found through benchmarking that making an array of final size # around 256x256 was faster by straight concatenation on a # i7-7700HQ processor and dual channel ram 2400MHz. # It didn't seem to matter heavily on the dtype used. # # A 2D array using repeated concatenation requires 2 copies of the array. # # The fastest algorithm will depend on the ratio of CPU power to memory # speed. # One can monitor the results of the benchmark # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d # to tune this parameter until a C version of the `_block_info_recursion` # algorithm is implemented which would likely be faster than the python # version. if list_ndim * final_size > (2 * 512 * 512): return _block_slicing(arrays, list_ndim, result_ndim) else: return _block_concatenate(arrays, list_ndim, result_ndim) # These helper functions are mostly used for testing. # They allow us to write tests that directly call `_block_slicing` # or `_block_concatenate` without blocking large arrays to force the wisdom # to trigger the desired path. def _block_setup(arrays): """ Returns (`arrays`, list_ndim, result_ndim, final_size) """ bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) list_ndim = len(bottom_index) if bottom_index and bottom_index[-1] is None: raise ValueError( 'List at {} cannot be empty'.format( _block_format_index(bottom_index) ) ) result_ndim = max(arr_ndim, list_ndim) return arrays, list_ndim, result_ndim, final_size def _block_slicing(arrays, list_ndim, result_ndim): shape, slices, arrays = _block_info_recursion( arrays, list_ndim, result_ndim) dtype = _nx.result_type(*[arr.dtype for arr in arrays]) # Test preferring F only in the case that all input arrays are F F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) order = 'F' if F_order and not C_order else 'C' result = _nx.empty(shape=shape, dtype=dtype, order=order) # Note: In a c implementation, the function # PyArray_CreateMultiSortedStridePerm could be used for more advanced # guessing of the desired order. 
for the_slice, arr in zip(slices, arrays): result[(Ellipsis,) + the_slice] = arr return result def _block_concatenate(arrays, list_ndim, result_ndim): result = _block(arrays, list_ndim, result_ndim) if list_ndim == 0: # Catch an edge case where _block returns a view because # `arrays` is a single numpy array and not a list of numpy arrays. # This might copy scalars or lists twice, but this isn't a likely # usecase for those interested in performance result = result.copy() return result
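A brief usage sketch follows (it is not part of the NumPy source); it exercises the public helpers defined above against the installed numpy package, and the asserted shapes simply restate behaviour already documented in the docstrings.

# Usage sketch -- illustrative only, run against the installed numpy package.
import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

assert np.atleast_3d(a).shape == (1, 3, 1)        # 1-D input becomes (1, N, 1)
assert np.vstack((a, b)).shape == (2, 3)          # 1-D rows stacked vertically
assert np.hstack((a, b)).shape == (6,)            # 1-D inputs concatenate along axis 0
assert np.stack((a, b), axis=-1).shape == (3, 2)  # new axis appended last

# block(): innermost lists are concatenated along the last axis first.
A = np.eye(2)
grid = np.block([[A, np.zeros((2, 3))],
                 [np.ones((3, 2)), np.eye(3)]])
assert grid.shape == (5, 5)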
py
1a419c74147ee78e47f713bd7e5e99a5e593d975
# coding: utf-8 """ ThingsBoard REST API ThingsBoard open-source IoT platform REST API documentation. # noqa: E501 OpenAPI spec version: 3.3.3-SNAPSHOT Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from tb_rest_client.models.models_ce import EventFilter class LifeCycleEventFilter(EventFilter): """ Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'event_type': 'str', 'server': 'str', 'event': 'str', 'status': 'str', 'error_str': 'str' } if hasattr(EventFilter, "swagger_types"): swagger_types.update(EventFilter.swagger_types) attribute_map = { 'event_type': 'eventType', 'server': 'server', 'event': 'event', 'status': 'status', 'error_str': 'errorStr' } if hasattr(EventFilter, "attribute_map"): attribute_map.update(EventFilter.attribute_map) def __init__(self, event_type=None, server=None, event=None, status=None, error_str=None, *args, **kwargs): # noqa: E501 """LifeCycleEventFilter - a model defined in Swagger""" # noqa: E501 self._event_type = None self._server = None self._event = None self._status = None self._error_str = None self.discriminator = None self.event_type = event_type if server is not None: self.server = server if event is not None: self.event = event if status is not None: self.status = status if error_str is not None: self.error_str = error_str EventFilter.__init__(self, *args, **kwargs) @property def event_type(self): """Gets the event_type of this LifeCycleEventFilter. # noqa: E501 String value representing the event type # noqa: E501 :return: The event_type of this LifeCycleEventFilter. # noqa: E501 :rtype: str """ return self._event_type @event_type.setter def event_type(self, event_type): """Sets the event_type of this LifeCycleEventFilter. String value representing the event type # noqa: E501 :param event_type: The event_type of this LifeCycleEventFilter. # noqa: E501 :type: str """ if event_type is None: raise ValueError("Invalid value for `event_type`, must not be `None`") # noqa: E501 allowed_values = ["DEBUG_RULE_CHAIN", "DEBUG_RULE_NODE", "ERROR", "LC_EVENT", "STATS"] # noqa: E501 if event_type not in allowed_values: raise ValueError( "Invalid value for `event_type` ({0}), must be one of {1}" # noqa: E501 .format(event_type, allowed_values) ) self._event_type = event_type @property def server(self): """Gets the server of this LifeCycleEventFilter. # noqa: E501 String value representing the server name, identifier or ip address where the platform is running # noqa: E501 :return: The server of this LifeCycleEventFilter. # noqa: E501 :rtype: str """ return self._server @server.setter def server(self, server): """Sets the server of this LifeCycleEventFilter. String value representing the server name, identifier or ip address where the platform is running # noqa: E501 :param server: The server of this LifeCycleEventFilter. # noqa: E501 :type: str """ self._server = server @property def event(self): """Gets the event of this LifeCycleEventFilter. # noqa: E501 String value representing the lifecycle event type # noqa: E501 :return: The event of this LifeCycleEventFilter. # noqa: E501 :rtype: str """ return self._event @event.setter def event(self, event): """Sets the event of this LifeCycleEventFilter. 
String value representing the lifecycle event type # noqa: E501 :param event: The event of this LifeCycleEventFilter. # noqa: E501 :type: str """ self._event = event @property def status(self): """Gets the status of this LifeCycleEventFilter. # noqa: E501 String value representing status of the lifecycle event # noqa: E501 :return: The status of this LifeCycleEventFilter. # noqa: E501 :rtype: str """ return self._status @status.setter def status(self, status): """Sets the status of this LifeCycleEventFilter. String value representing status of the lifecycle event # noqa: E501 :param status: The status of this LifeCycleEventFilter. # noqa: E501 :type: str """ allowed_values = ["Failure", "Success"] # noqa: E501 if status not in allowed_values: raise ValueError( "Invalid value for `status` ({0}), must be one of {1}" # noqa: E501 .format(status, allowed_values) ) self._status = status @property def error_str(self): """Gets the error_str of this LifeCycleEventFilter. # noqa: E501 The case insensitive 'contains' filter based on error message # noqa: E501 :return: The error_str of this LifeCycleEventFilter. # noqa: E501 :rtype: str """ return self._error_str @error_str.setter def error_str(self, error_str): """Sets the error_str of this LifeCycleEventFilter. The case insensitive 'contains' filter based on error message # noqa: E501 :param error_str: The error_str of this LifeCycleEventFilter. # noqa: E501 :type: str """ self._error_str = error_str def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(LifeCycleEventFilter, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, LifeCycleEventFilter): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
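A minimal usage sketch, not part of the generated client code; it assumes the EventFilter base class needs no additional constructor arguments.

# Usage sketch (illustrative). Allowed values are those enforced by the setters above;
# the base EventFilter is assumed to construct without extra arguments.
if __name__ == "__main__":
    flt = LifeCycleEventFilter(event_type="LC_EVENT", server="node-1", status="Success")
    print(flt.event_type, flt.status)   # LC_EVENT Success
    # to_dict() keys follow the python attribute names listed in swagger_types.
    print(flt.to_dict())
    # Invalid enum values are rejected at assignment time:
    try:
        flt.status = "Pending"
    except ValueError as err:
        print(err)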
py
1a419da354aea014a724ef0e177320249a139768
from marshmallow import Schema, fields, ValidationError
from marshmallow.utils import missing
import bson
from datetime import datetime


class ObjectId(fields.Field):
    def _deserialize(self, value, attr, data):
        try:
            return bson.ObjectId(value)
        except Exception:
            raise ValidationError("invalid ObjectId `%s`" % value)

    def _serialize(self, value, attr, obj):
        if value is None:
            return missing
        return str(value)


class RequestSchema(Schema):
    _id = ObjectId()
    req_class = fields.Integer()
    req_datetime = fields.DateTime()
    req_src_ward = fields.Str()
    req_src_room = fields.Str()
    req_src_bed = fields.Str()
    req_message = fields.Str()
    req_nurse_id = fields.Str(default=None, missing=None)
    req_start_datetime = fields.DateTime(default=None, missing=None)
    req_end_datetime = fields.DateTime(default=None, missing=None)
    req_status = fields.Integer()


class PostInsertReturnSchema(Schema):
    acknowledged = fields.Boolean()
    insertedId = fields.Str()


class AnalyticDataSchema(Schema):
    _id = fields.DateTime()
    counts = fields.Integer()
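A short usage sketch, not part of the module; it targets the marshmallow 2.x API implied by the `_deserialize(self, value, attr, data)` signature and the `default=`/`missing=` arguments above, and all field values are made up for illustration.

# Usage sketch (illustrative; values are made up, marshmallow 2.x behaviour assumed).
if __name__ == "__main__":
    schema = RequestSchema()
    record = {
        "_id": bson.ObjectId(),
        "req_class": 1,
        "req_datetime": datetime.utcnow(),
        "req_src_ward": "A",
        "req_src_room": "101",
        "req_src_bed": "3",
        "req_message": "assistance requested",
        "req_status": 0,
    }
    # marshmallow 2.x: dump() returns a MarshalResult namedtuple (data, errors);
    # marshmallow 3.x returns the serialized dict directly.
    dumped = schema.dump(record)
    print(dumped)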
py
1a41a05c4ca01d2289d264d6b9755a36ee7ee676
"""runpy.py - locating and running Python code using the module namespace Provides support for locating and running Python scripts using the Python module namespace instead of the native filesystem. This allows Python code to play nicely with non-filesystem based PEP 302 importers when locating support scripts as well as when importing modules. """ # Written by Nick Coghlan <ncoghlan at gmail.com> # to implement PEP 338 (Executing Modules as Scripts) import sys import importlib.machinery # importlib first so we can test #15386 via -m import importlib.util import io import types from pkgutil import read_code, get_importer __all__ = [ "run_module", "run_path", ] class _TempModule(object): """Temporarily replace a module in sys.modules with an empty namespace""" def __init__(self, mod_name): self.mod_name = mod_name self.module = types.ModuleType(mod_name) self._saved_module = [] def __enter__(self): mod_name = self.mod_name try: self._saved_module.append(sys.modules[mod_name]) except KeyError: pass sys.modules[mod_name] = self.module return self def __exit__(self, *args): if self._saved_module: sys.modules[self.mod_name] = self._saved_module[0] else: del sys.modules[self.mod_name] self._saved_module = [] class _ModifiedArgv0(object): def __init__(self, value): self.value = value self._saved_value = self._sentinel = object() def __enter__(self): if self._saved_value is not self._sentinel: raise RuntimeError("Already preserving saved value") self._saved_value = sys.argv[0] sys.argv[0] = self.value def __exit__(self, *args): self.value = self._sentinel sys.argv[0] = self._saved_value # TODO: Replace these helpers with importlib._bootstrap_external functions. def _run_code(code, run_globals, init_globals=None, mod_name=None, mod_spec=None, pkg_name=None, script_name=None): """Helper to run code in nominated namespace""" if init_globals is not None: run_globals.update(init_globals) if mod_spec is None: loader = None fname = script_name cached = None else: loader = mod_spec.loader fname = mod_spec.origin cached = mod_spec.cached if pkg_name is None: pkg_name = mod_spec.parent run_globals.update(__name__ = mod_name, __file__ = fname, __cached__ = cached, __doc__ = None, __loader__ = loader, __package__ = pkg_name, __spec__ = mod_spec) exec(code, run_globals) return run_globals def _run_module_code(code, init_globals=None, mod_name=None, mod_spec=None, pkg_name=None, script_name=None): """Helper to run code in new namespace with sys modified""" fname = script_name if mod_spec is None else mod_spec.origin with _TempModule(mod_name) as temp_module, _ModifiedArgv0(fname): mod_globals = temp_module.module.__dict__ _run_code(code, mod_globals, init_globals, mod_name, mod_spec, pkg_name, script_name) # Copy the globals of the temporary module, as they # may be cleared when the temporary module goes away return mod_globals.copy() # Helper to get the full name, spec and code for a module def _get_module_details(mod_name, error=ImportError): if mod_name.startswith("."): raise error("Relative module names not supported") pkg_name, _, _ = mod_name.rpartition(".") if pkg_name: # Try importing the parent to avoid catching initialization errors try: __import__(pkg_name) except ImportError as e: # If the parent or higher ancestor package is missing, let the # error be raised by find_spec() below and then be caught. But do # not allow other errors to be caught. 
if e.name is None or (e.name != pkg_name and not pkg_name.startswith(e.name + ".")): raise # Warn if the module has already been imported under its normal name existing = sys.modules.get(mod_name) if existing is not None and not hasattr(existing, "__path__"): from warnings import warn msg = "{mod_name!r} found in sys.modules after import of " \ "package {pkg_name!r}, but prior to execution of " \ "{mod_name!r}; this may result in unpredictable " \ "behaviour".format(mod_name=mod_name, pkg_name=pkg_name) warn(RuntimeWarning(msg)) try: spec = importlib.util.find_spec(mod_name) except (ImportError, AttributeError, TypeError, ValueError) as ex: # This hack fixes an impedance mismatch between pkgutil and # importlib, where the latter raises other errors for cases where # pkgutil previously raised ImportError msg = "Error while finding module specification for {!r} ({}: {})" raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex if spec is None: raise error("No module named %s" % mod_name) if spec.submodule_search_locations is not None: if mod_name == "__main__" or mod_name.endswith(".__main__"): raise error("Cannot use package as __main__ module") try: pkg_main_name = mod_name + ".__main__" return _get_module_details(pkg_main_name, error) except error as e: if mod_name not in sys.modules: raise # No module loaded; being a package is irrelevant raise error(("%s; %r is a package and cannot " + "be directly executed") %(e, mod_name)) loader = spec.loader if loader is None: raise error("%r is a namespace package and cannot be executed" % mod_name) try: code = loader.get_code(mod_name) except ImportError as e: raise error(format(e)) from e if code is None: raise error("No code object available for %s" % mod_name) return mod_name, spec, code class _Error(Exception): """Error that _run_module_as_main() should report without a traceback""" # XXX ncoghlan: Should this be documented and made public? # (Current thoughts: don't repeat the mistake that lead to its # creation when run_module() no longer met the needs of # mainmodule.c, but couldn't be changed because it was public) def _run_module_as_main(mod_name, alter_argv=True): """Runs the designated module in the __main__ namespace Note that the executed module will have full access to the __main__ namespace. If this is not desirable, the run_module() function should be used to run the module code in a fresh namespace. At the very least, these variables in __main__ will be overwritten: __name__ __file__ __cached__ __loader__ __package__ """ try: if alter_argv or mod_name != "__main__": # i.e. -m switch mod_name, mod_spec, code = _get_module_details(mod_name, _Error) else: # i.e. 
directory or zipfile execution mod_name, mod_spec, code = _get_main_module_details(_Error) except _Error as exc: msg = "%s: %s" % (sys.executable, exc) sys.exit(msg) main_globals = sys.modules["__main__"].__dict__ if alter_argv: sys.argv[0] = mod_spec.origin return _run_code(code, main_globals, None, "__main__", mod_spec) def run_module(mod_name, init_globals=None, run_name=None, alter_sys=False): """Execute a module's code without importing it Returns the resulting top level namespace dictionary """ mod_name, mod_spec, code = _get_module_details(mod_name) if run_name is None: run_name = mod_name if alter_sys: return _run_module_code(code, init_globals, run_name, mod_spec) else: # Leave the sys module alone return _run_code(code, {}, init_globals, run_name, mod_spec) def _get_main_module_details(error=ImportError): # Helper that gives a nicer error message when attempting to # execute a zipfile or directory by invoking __main__.py # Also moves the standard __main__ out of the way so that the # preexisting __loader__ entry doesn't cause issues main_name = "__main__" saved_main = sys.modules[main_name] del sys.modules[main_name] try: return _get_module_details(main_name) except ImportError as exc: if main_name in str(exc): raise error("can't find %r module in %r" % (main_name, sys.path[0])) from exc raise finally: sys.modules[main_name] = saved_main def _get_code_from_file(run_name, fname): # Check for a compiled file first with io.open_code(fname) as f: code = read_code(f) if code is None: # That didn't work, so try it as normal source code with io.open_code(fname) as f: code = compile(f.read(), fname, 'exec') return code, fname def run_path(path_name, init_globals=None, run_name=None): """Execute code located at the specified filesystem location Returns the resulting top level namespace dictionary The file path may refer directly to a Python script (i.e. one that could be directly executed with execfile) or else it may refer to a zipfile or directory containing a top level __main__.py script. """ if run_name is None: run_name = "<run_path>" pkg_name = run_name.rpartition(".")[0] importer = get_importer(path_name) # Trying to avoid importing imp so as to not consume the deprecation warning. is_NullImporter = False if type(importer).__module__ == 'imp': if type(importer).__name__ == 'NullImporter': is_NullImporter = True if isinstance(importer, type(None)) or is_NullImporter: # Not a valid sys.path entry, so run the code directly # execfile() doesn't help as we want to allow compiled files code, fname = _get_code_from_file(run_name, path_name) return _run_module_code(code, init_globals, run_name, pkg_name=pkg_name, script_name=fname) else: # Finder is defined for path, so add it to # the start of sys.path sys.path.insert(0, path_name) try: # Here's where things are a little different from the run_module # case. There, we only had to replace the module in sys while the # code was running and doing so was somewhat optional. Here, we # have no choice and we have to remove it even while we read the # code. If we don't do this, a __loader__ attribute in the # existing __main__ module may prevent location of the new module. 
mod_name, mod_spec, code = _get_main_module_details() with _TempModule(run_name) as temp_module, \ _ModifiedArgv0(path_name): mod_globals = temp_module.module.__dict__ return _run_code(code, mod_globals, init_globals, run_name, mod_spec, pkg_name).copy() finally: try: sys.path.remove(path_name) except ValueError: pass if __name__ == "__main__": # Run the module specified as the next command line argument if len(sys.argv) < 2: print("No module specified for execution", file=sys.stderr) else: del sys.argv[0] # Make the requested module sys.argv[0] _run_module_as_main(sys.argv[0])
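A standalone usage sketch of the two public entry points (not part of the stdlib module itself).

# Usage sketch -- standalone, illustrative; the temporary script is created only for the demo.
import runpy
import tempfile

# run_module(): execute an importable module in a fresh namespace and return its globals.
ns = runpy.run_module("platform")
print(ns["system"]())                     # same value as platform.system()

# run_path(): execute a script (or a directory/zipfile with __main__.py) by filesystem path.
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write("answer = 6 * 7\n")
print(runpy.run_path(f.name)["answer"])   # 42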
py
1a41a2fcf7fc13dabf68be8d82f740f2ba06de75
import os import io import sys import csv import random import hashlib import pandas as pd import numpy as np import tensorflow as tf from PIL import Image import xml.etree.ElementTree as ET from matplotlib import pyplot as plt from object_detection.utils import label_map_util from object_detection.utils import visualization_utils as vis_util sys.path.append("/home/tensorflow/models/research/object_detection/") from object_detection.utils import ops as utils_ops from object_detection.utils import dataset_util def save_img_as_jpg(input_record, path_to_test_img_folder): """ Used to make sure that generating record files from images/annotations worked """ record_iterator = tf.python_io.tf_record_iterator(input_record) for string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) fname = example.features.feature["image/filename"].bytes_list.value[0].decode("utf-8") image = example.features.feature["image/encoded"].bytes_list.value[0] decoded_png = tf.image.decode_image(image, channels=3).numpy() Image.fromarray(decoded_png).save(path_to_test_img_folder + fname) # High Level Functions def xml_path_to_filelist(xml_path): filename_list = tf.io.match_filenames_once(xml_path) init = (tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer()) sess = tf.compat.v1.Session() sess.run(init) files_list = sess.run(filename_list) files_list = sorted(files_list) return files_list def split_train_val_test_praefixes_rdm(unique_praefix, SEED, TRAIN_VAL_RATIO ,TEST_RATIO): unique_train_val_praefix, unique_test_praefix = split_praefix(unique_praefix, SEED, TEST_RATIO) unique_train_praefix, unique_val_praefix = split_praefix(unique_train_val_praefix, SEED, TRAIN_VAL_RATIO) return unique_train_praefix, unique_val_praefix, unique_test_praefix def filelists_from_praefixes(unique_train_praefix,unique_val_praefix, unique_test_praefix,files_list): train_list, _ = fileList_from_praefix(unique_train_praefix, files_list) eval_list, _ = fileList_from_praefix(unique_val_praefix, files_list) test_list, _ = fileList_from_praefix(unique_test_praefix, files_list) return train_list, eval_list, test_list def write_records_from_filelists(train_list, eval_list, test_list, REC_NAME, img_path, SEED, unique_test_praefix, output_path): print(f"Writing {len(train_list)} Images to train_{REC_NAME}.record") train_arr = write_list_to_tf(train_list, "train_" + REC_NAME, img_path, SEED, output_path) print(f"Writing {len(eval_list)} Images to val_{REC_NAME}.record") eval_arr = write_list_to_tf(eval_list, "val_" + REC_NAME, img_path, SEED, output_path) print(f"Writing {len(test_list)} Images to test_{REC_NAME}.record") test_arr = write_list_to_tf(test_list, "test_" + REC_NAME, img_path, SEED, output_path) for test_pd in unique_test_praefix: test_pd_list,_ = fileList_from_praefix([test_pd], test_list) print(f"Writing {len(test_pd_list)} Images to PD_{test_pd}.record") #write_PD_to_tf(test_pd_list, "PD_" + test_pd, img_path, SEED, output_path) write_list_to_tf(test_pd_list, "bPD_" + test_pd, img_path, SEED, output_path + "PD/", bPD=True) return train_arr, eval_arr, test_arr def write_summary(unique_train_praefix, unique_val_praefix, unique_test_praefix, train_list, eval_list, test_list, train_arr,eval_arr,test_arr,output_path, REC_NAME): print("Writing Summary") un_train, count_im_train, count_el_train, ratio_im_train, ratio_el_train = get_ratios(train_arr) un_val, count_im_eval, count_el_eval, ratio_im_eval, ratio_el_eval = get_ratios(eval_arr) un_test, count_im_test, 
count_el_test, ratio_im_test, ratio_el_test = get_ratios(test_arr) with open(output_path + 'summary_{}.txt'.format(REC_NAME), mode='w') as csv_file: csv_reader = csv.writer(csv_file, delimiter=',') csv_reader.writerow(["Summary for the generated record files"]) csv_reader.writerow(["", "# Petri dishes", "cls_name", "# GT"]) csv_reader.writerow(["TRAIN", len(train_list), un_train, count_im_train, count_el_train]) csv_reader.writerow(["VAL", len(eval_list), un_val, count_im_eval, count_el_eval]) csv_reader.writerow(["TEST", len(test_list), un_test, count_im_test, count_el_test]) csv_reader.writerow(["TRAIN", ratio_im_train, ratio_el_train]) csv_reader.writerow(["VAL", ratio_im_eval, ratio_el_eval]) csv_reader.writerow(["TEST", ratio_im_test, ratio_el_test]) csv_reader.writerow(["TRAIN_PREFIX", unique_train_praefix]) csv_reader.writerow(["VAL_PREFIX", unique_val_praefix]) csv_reader.writerow(["TEST_PREFIX", unique_test_praefix]) # Low Level Functions def split_praefix(unique_train_val_praefix, SEED, RATIO): random.seed(SEED) random.shuffle(unique_train_val_praefix) b = int(len(unique_train_val_praefix) * RATIO) unique_train_praefix = unique_train_val_praefix[:b] unique_val_praefix = unique_train_val_praefix[b:len(unique_train_val_praefix)] return unique_train_praefix, unique_val_praefix def fileList_from_praefix(unique_train_praefix, files_list): train_list = list() for _, val in enumerate(unique_train_praefix): # ADDED + "_" because "zm2_1" in str(s) will also mean "zm2_11" and "zm2_12" matching = [s for s in files_list if val + "_" in str(s)] train_list.append(matching) train_list_flat = list() for sublist in train_list: for item in sublist: train_list_flat.append(item) return train_list_flat, train_list def get_praefix_from_fileList(files_list): praefix_files = list() for _, val in enumerate(files_list): praefix_files.append(str(val).split("/")[-1].split("_") [0] + "_" + str(val).split("/")[-1].split("_")[1]) unique_praefix = np.unique(praefix_files) return unique_praefix def create_example(xml_file, img_path): # process the xml file tree = ET.parse(xml_file) root = tree.getroot() image_name = root.find('filename').text file_name = image_name.encode('utf8') size = root.find('size') width = int(size[0].text) height = int(size[1].text) xmin = [] ymin = [] xmax = [] ymax = [] classes = [] classes_text = [] classes_text_str = [] for member in root.findall('object'): classes_text.append(member[0].text.encode('utf8')) classes_text_str.append(member[0].text) for bnd in member.findall("bndbox"): xmin.append(float(bnd[0].text) / width) ymin.append(float(bnd[1].text) / height) xmax.append(float(bnd[2].text) / width) ymax.append(float(bnd[3].text) / height) classes.append(class_text_to_int(member[0].text)) # read corresponding image full_path = os.path.join(img_path, '{}'.format(image_name)) # provide the path of images directory with tf.io.gfile.GFile(full_path, 'rb') as fid: encoded_jpg = fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) if image.format != 'JPEG': raise ValueError('Image format not JPEG') key = hashlib.sha256(encoded_jpg).hexdigest() # create TFRecord Example example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(file_name), 'image/source_id': dataset_util.bytes_feature(file_name), 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')), 'image/encoded': 
dataset_util.bytes_feature(encoded_jpg), 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes) })) return classes_text_str, example def write_list_to_tf(train_list_flat, filename, img_path, seed,output_path, bPD=False): train_arr = [] writer_train = tf.io.TFRecordWriter('{}{}.record'.format(output_path, filename)) if bPD==False: random.seed(seed) random.shuffle(train_list_flat) # randomizes the list --> random order on how images are saved in .record for _, train_file in enumerate(train_list_flat): train_classes, example = create_example(train_file, img_path) writer_train.write(example.SerializeToString()) train_arr.append(train_classes) writer_train.close() return train_arr def write_PD_to_tf(train_file, filename, img_path, seed,output_path): #TODO: CHECK IF LOOP NECESSARY train_arr = [] writer_train = tf.io.TFRecordWriter('{}{}.record'.format(output_path, filename)) train_classes, example = create_example(train_file, img_path) writer_train.write(example.SerializeToString()) train_arr.append(train_classes) writer_train.close() return train_arr def class_text_to_int(row_label): if "_im" in row_label: return 1 if "_el" in row_label: return 2 def get_ratios(train_arr): flat_list = [] for sublist in train_arr: for item in sublist: flat_list.append(item) # if no zm_el in dataset, then train_counts[1] doesnt exist unique, train_counts = np.unique(flat_list, return_counts=True) if train_counts.shape[0] == 1: train_sum = train_counts[0] train_zmim_ratio = 1 train_zmel_ratio = 0 return unique, train_counts[0], 0, train_zmim_ratio, train_zmel_ratio train_sum = train_counts[0] + train_counts[1] train_zmim_ratio = round(float(train_counts[0] / train_sum), 3) train_zmel_ratio = round(float(train_counts[1] / train_sum), 3) return unique, train_counts[0], train_counts[1], train_zmim_ratio, train_zmel_ratio ## Functions for "predict_image" def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) def run_inference_for_single_image(image, graph): with graph.as_default(): with tf.compat.v1.Session() as sess: # Get handles to input and output tensors ops = tf.compat.v1.get_default_graph().get_operations() all_tensor_names = {output.name for op in ops for output in op.outputs} tensor_dict = {} for key in [ 'num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks' ]: tensor_name = key + ':0' if tensor_name in all_tensor_names: tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name( tensor_name) if 'detection_masks' in tensor_dict: # The following processing is only for single image detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0]) detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0]) # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size. 
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32) detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1]) detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1]) detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks( detection_masks, detection_boxes, image.shape[0], image.shape[1]) detection_masks_reframed = tf.cast( tf.greater(detection_masks_reframed, 0.5), tf.uint8) # Follow the convention by adding back the batch dimension tensor_dict['detection_masks'] = tf.expand_dims( detection_masks_reframed, 0) image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0') # Run inference output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)}) # all outputs are float32 numpy arrays, so convert types as appropriate output_dict['num_detections'] = int(output_dict['num_detections'][0]) output_dict['detection_classes'] = output_dict[ 'detection_classes'][0].astype(np.uint8) output_dict['detection_boxes'] = output_dict['detection_boxes'][0] output_dict['detection_scores'] = output_dict['detection_scores'][0] if 'detection_masks' in output_dict: output_dict['detection_masks'] = output_dict['detection_masks'][0] return output_dict def detect_seeds_in_image(image_path, category_index, detection_graph, PATH_TO_TEST_IMAGES_OUTDIR): image = Image.open(image_path) # the array based representation of the image will be used later in order to prepare the # result image with boxes and labels on it. image_np = load_image_into_numpy_array(image) # Expand dimensions since the model expects images to have shape: [1, None, None, 3] #image_np_expanded = np.expand_dims(image_np, axis=0) # Actual detection. output_dict = run_inference_for_single_image(image_np, detection_graph) # Visualization of the results of a detection. vis_util.visualize_boxes_and_labels_on_image_array( image_np, output_dict['detection_boxes'], output_dict['detection_classes'], output_dict['detection_scores'], category_index, instance_masks=output_dict.get('detection_masks'), use_normalized_coordinates=True, line_thickness=2) plt.imsave(PATH_TO_TEST_IMAGES_OUTDIR + image_path.split("/")[-1].split(".")[0] + "_detection.jpg", image_np)
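A hedged end-to-end sketch of the record-generation flow assembled from the high-level functions above; every path, ratio, and record name is a hypothetical placeholder, and the output directory is assumed to already contain the PD/ subfolder that write_records_from_filelists writes into.

# Usage sketch -- all paths, ratios and names below are hypothetical placeholders.
# Note: xml_path_to_filelist relies on the tf.compat.v1 Session API being available.
if __name__ == "__main__":
    XML_GLOB = "/data/annotations/*.xml"   # hypothetical annotation folder
    IMG_DIR = "/data/images/"              # hypothetical image folder
    OUT_DIR = "/data/records/"             # assumed to contain a "PD/" subfolder
    REC_NAME = "seeds"
    SEED = 42

    files_list = xml_path_to_filelist(XML_GLOB)
    unique_praefix = get_praefix_from_fileList(files_list)
    train_p, val_p, test_p = split_train_val_test_praefixes_rdm(
        unique_praefix, SEED, TRAIN_VAL_RATIO=0.8, TEST_RATIO=0.8)
    train_list, eval_list, test_list = filelists_from_praefixes(
        train_p, val_p, test_p, files_list)
    train_arr, eval_arr, test_arr = write_records_from_filelists(
        train_list, eval_list, test_list, REC_NAME, IMG_DIR, SEED, test_p, OUT_DIR)
    write_summary(train_p, val_p, test_p, train_list, eval_list, test_list,
                  train_arr, eval_arr, test_arr, OUT_DIR, REC_NAME)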
py
1a41a336ec6ce9a5b7f647075d48bd3ab0f49139
from crfnet.utils.transform import random_transform_generator from crfnet.utils.anchor_parameters import AnchorParameters from crfnet.data_processing.generator.splits.nuscenes_splits import Scenes from crfnet.utils.anchor_calc import anchor_targets_bbox from crfnet.utils.anchor import guess_shapes def create_generators(cfg, backbone): """ Create generators for training and validation and test data. :param cfg: <Configuration> Config class with config parameters. :param backbone: <Backbone> Backbone class e.g. VGGBackbone :return train_generator: <Generator> The generator for creating training data. :return validation_generator: <Generator> The generator for creating validation data. TODO: @Max make the create generators consistently return train, val and test """ if cfg.anchor_params: if 'small' in cfg.anchor_params: anchor_params = AnchorParameters.small else: anchor_params = None else: anchor_params = None common_args = { 'batch_size': cfg.batchsize, 'config': None, 'image_min_side': cfg.image_size[0], 'image_max_side': cfg.image_size[1], 'filter_annotations_enabled': False, 'preprocess_image': backbone.preprocess_image, 'normalize_radar': cfg.normalize_radar, 'camera_dropout': cfg.dropout_image, 'radar_dropout': cfg.dropout_radar, 'channels': cfg.channels, 'distance': cfg.distance_detection, 'sample_selection': cfg.sample_selection, 'only_radar_annotated': cfg.only_radar_annotated, 'n_sweeps': cfg.n_sweeps, 'noise_filter': cfg.noise_filter_cfg, 'noise_filter_threshold': cfg.noise_filter_threshold, 'noisy_image_method': cfg.noisy_image_method, 'noise_factor': cfg.noise_factor, 'perfect_noise_filter': cfg.noise_filter_perfect, 'radar_projection_height': cfg.radar_projection_height, 'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(), 'inference': cfg.inference, 'anchor_params': anchor_params, } # create random transform generator for augmenting training data if cfg.random_transform: transform_generator = random_transform_generator( min_rotation=-0.1, max_rotation=0.1, min_translation=(-0.1, -0.1), max_translation=(0.1, 0.1), min_shear=-0.1, max_shear=0.1, min_scaling=(0.9, 0.9), max_scaling=(1.1, 1.1), flip_x_chance=0.5, flip_y_chance=0.0, ) else: transform_generator = random_transform_generator(flip_x_chance=0.5) category_mapping = cfg.category_mapping if 'nuscenes' in cfg.data_set: # import here to prevent unnecessary dependency on nuscenes from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator from nuscenes.nuscenes import NuScenes if 'mini' in cfg.data_set: nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True) else: try: nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True) except ValueError: nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True) if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set: scenes = Scenes.debug else: scenes = Scenes.default train_generator = NuscenesGenerator( nusc, scene_indices=scenes.train, transform_generator=transform_generator, category_mapping=category_mapping, compute_anchor_targets=anchor_targets_bbox, compute_shapes=guess_shapes, shuffle_groups=True, group_method='random', **common_args ) # no dropouts in validation common_args['camera_dropout'] = 0 common_args['radar_dropout'] = 0 validation_generator = NuscenesGenerator( nusc, scene_indices=scenes.val, category_mapping=category_mapping, compute_anchor_targets=anchor_targets_bbox, compute_shapes=guess_shapes, **common_args ) test_generator = NuscenesGenerator( 
nusc, scene_indices=scenes.test, category_mapping=category_mapping, compute_anchor_targets=anchor_targets_bbox, compute_shapes=guess_shapes, **common_args ) test_night_generator = NuscenesGenerator( nusc, scene_indices=scenes.test_night, category_mapping=category_mapping, compute_anchor_targets=anchor_targets_bbox, compute_shapes=guess_shapes, **common_args ) test_rain_generator = NuscenesGenerator( nusc, scene_indices=scenes.test_rain, category_mapping=category_mapping, compute_anchor_targets=anchor_targets_bbox, compute_shapes=guess_shapes, **common_args ) return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator else: raise ValueError('Invalid data type received: {}'.format(cfg.data_set))
py
1a41a34620bdf8df52e475a8e4f56e84c3698382
# DENG: dynamic engine - powerful 3D game engine # licence: Apache, see LICENCE file # file: BackendChooser.py - Embeddable Python script to select a correct backend to use for required program # author: Karl-Mihkel Ott import tkinter as tk import tkinter.messagebox as msgbox from enum import IntEnum class ApiType(IntEnum): Vulkan = 1 OpenGL = 2 DirectX = 3 Unknown = 4 # Default value api = ApiType.Unknown win = tk.Tk() # Button handler class class ApiButtonHandlers: pixel_virtual: tk.PhotoImage content: tk.Frame opengl: tk.Button vulkan: tk.Button directx: tk.Button @staticmethod def SelectVulkanBackend(): global api, win api = ApiType.Vulkan win.destroy() @staticmethod def SelectOpenGLBackend(): global api, win api = ApiType.OpenGL win.destroy() @staticmethod def SelectDirectXBackend(): msgbox.showerror("Error", "DirectX backend is not supported") def __init__(self, win: tk.Tk): self.pixel_virtual = tk.PhotoImage(width=1, height=1) # OpenGL button self.opengl = tk.Button( win, text="OpenGL", image=self.pixel_virtual, width=50, height=20, command=ApiButtonHandlers.SelectOpenGLBackend, compound='c' ) self.opengl.grid(column=0, row=1) # Vulkan button self.vulkan = tk.Button( win, text="Vulkan", image=self.pixel_virtual, width=50, height=20, command=ApiButtonHandlers.SelectVulkanBackend, compound='c' ) self.vulkan.grid(column=0, row=1) # DirectX button self.directx = tk.Button( win, text="DirectX", image=self.pixel_virtual, width=50, height=20, command=ApiButtonHandlers.SelectDirectXBackend, compound='c' ) self.directx.grid(column=0, row=1) def Prompt(): global api, win win.title("Select renderer API") win.geometry('350x100') win.resizable(False, False) label = tk.Label(win, text="Select renderer API to use for DENG application") btn_handler = ApiButtonHandlers(win) # POSITIONS label.grid(row=0, column=0, columnspan=3, padx=20, pady=10) btn_handler.opengl.grid(row=2) btn_handler.vulkan.grid(row=2, column=1) btn_handler.directx.grid(row=2, column=2) win.mainloop() return api
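A small usage sketch, not part of the original script, showing how an embedding application might branch on the selected backend; it requires a display for the tkinter window.

# Usage sketch (illustrative; needs a display for the tkinter window).
if __name__ == "__main__":
    selected = Prompt()
    if selected == ApiType.Vulkan:
        print("Renderer backend: Vulkan")
    elif selected == ApiType.OpenGL:
        print("Renderer backend: OpenGL")
    else:
        print("No supported renderer backend was selected")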
py
1a41a3b2905cc14a9967aec451c48b885f60213e
import urlparse import requests import logging from framework.celery_tasks import app from website import settings logger = logging.getLogger(__name__) def get_varnish_servers(): # TODO: this should get the varnish servers from HAProxy or a setting return settings.VARNISH_SERVERS def get_bannable_urls(instance): from osf.models import Comment bannable_urls = [] parsed_absolute_url = {} if not hasattr(instance, 'absolute_api_v2_url'): logger.warning('Tried to ban {}:{} but it didn\'t have a absolute_api_v2_url method'.format(instance.__class__, instance)) return [], '' for host in get_varnish_servers(): # add instance url varnish_parsed_url = urlparse.urlparse(host) parsed_absolute_url = urlparse.urlparse(instance.absolute_api_v2_url) url_string = '{scheme}://{netloc}{path}.*'.format(scheme=varnish_parsed_url.scheme, netloc=varnish_parsed_url.netloc, path=parsed_absolute_url.path) bannable_urls.append(url_string) if isinstance(instance, Comment): try: parsed_target_url = urlparse.urlparse(instance.target.referent.absolute_api_v2_url) except AttributeError: # some referents don't have an absolute_api_v2_url # I'm looking at you NodeWikiPage # Note: NodeWikiPage has been deprecated. Is this an issue with WikiPage/WikiVersion? pass else: url_string = '{scheme}://{netloc}{path}.*'.format(scheme=varnish_parsed_url.scheme, netloc=varnish_parsed_url.netloc, path=parsed_target_url.path) bannable_urls.append(url_string) try: parsed_root_target_url = urlparse.urlparse(instance.root_target.referent.absolute_api_v2_url) except AttributeError: # some root_targets don't have an absolute_api_v2_url pass else: url_string = '{scheme}://{netloc}{path}.*'.format(scheme=varnish_parsed_url.scheme, netloc=varnish_parsed_url.netloc, path=parsed_root_target_url.path) bannable_urls.append(url_string) return bannable_urls, parsed_absolute_url.hostname @app.task(max_retries=5, default_retry_delay=60) def ban_url(instance): # TODO: Refactor; Pull url generation into postcommit_task handling so we only ban urls once per request timeout = 0.3 # 300ms timeout for bans if settings.ENABLE_VARNISH: bannable_urls, hostname = get_bannable_urls(instance) for url_to_ban in set(bannable_urls): try: response = requests.request('BAN', url_to_ban, timeout=timeout, headers=dict( Host=hostname )) except Exception as ex: logger.error('Banning {} failed: {}'.format( url_to_ban, ex.message )) else: if not response.ok: logger.error('Banning {} failed: {}'.format( url_to_ban, response.text )) else: logger.info('Banning {} succeeded'.format( url_to_ban ))
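

# Standalone sketch of the ban-pattern construction used above (host and URL are made up):
def _ban_pattern_example():
    varnish = urlparse.urlparse('http://varnish.local:8080')
    target = urlparse.urlparse('https://api.osf.io/v2/nodes/abcde/')
    # -> 'http://varnish.local:8080/v2/nodes/abcde/.*', i.e. everything under that
    #    API path gets invalidated on the varnish server
    return '{scheme}://{netloc}{path}.*'.format(
        scheme=varnish.scheme, netloc=varnish.netloc, path=target.path)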
py
1a41a3db517114ea377cbaab7a517c126c473d52
import os import slack def send_file(): path = os.path.dirname(os.path.realpath(__file__)) client = slack.WebClient(token="<your_slack_token>") client.files_upload(channels = '#data', file = "{}/weather.png".format(path))
py
1a41a3e6af15333f1423e2c636fdb69a410d3e77
print('Addition + ', 10 + 10)
print('Subtraction - ', 10 - 10)
print('Multiplication * ', 10 * 10)
print('Division / ', 10 / 10)
print('Exponentiation ** ', 10 ** 10)
print('Integer division // ', 10 // 3)
print('Division remainder % ', 10 % 3)
py
1a41a42edff6c32b8b67788a08ed1ab31dbf777a
"""Script to download the entire Box directory structure. Skips anything that has been downloaded before. Syncs to LOCAL_BOX_DIR To obtain a developer token, navigate to https://salesforcecorp.app.box.com/developers/console/app/1366340/configuration and select "Generate Developer Token", then copy-paste it below. Exampe Usage: python download_box_data.py """ import box_auth from box_auth import BoxNavigator DEVELOPER_TOKEN_60MINS="uu4OyqV78GydCvVLAvzZvXh1kpkHeGnL" LOCAL_BOX_DIR="/export/medical_ai/ucsf/box_data" if __name__ == "__main__": bn = BoxNavigator(token=DEVELOPER_TOKEN_60MINS) bn.locally_recreate_filesystem_directory_structure(root_path=LOCAL_BOX_DIR) bn.maybe_download_filesystem(root_path=LOCAL_BOX_DIR)
py
1a41a456188162269709f7cd97f50cd7ac5cd62a
import os import sys import argparse import yaml import time import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.optim as optim import torchlight from torchlight import str2bool from torchlight import DictAction from torchlight import import_class from .processor import Processor from .data_tools import * from copy import deepcopy from torch.distributions.uniform import Uniform def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv1d') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('Conv2d') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) class REC_Processor(Processor): def load_model(self): self.model = self.io.load_model(self.arg.model, **(self.arg.model_args)) self.model.apply(weights_init) V, W, U = 26, 10, 5 off_diag_joint, off_diag_part, off_diag_body = np.ones([V, V])-np.eye(V, V), np.ones([W, W])-np.eye(W, W), np.ones([U, U])-np.eye(U, U) self.relrec_joint = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_joint)[1]), dtype=np.float32)).to(self.dev) self.relsend_joint = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_joint)[0]), dtype=np.float32)).to(self.dev) self.relrec_part = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_part)[1]), dtype=np.float32)).to(self.dev) self.relsend_part = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_part)[0]), dtype=np.float32)).to(self.dev) self.relrec_body = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_body)[1]), dtype=np.float32)).to(self.dev) self.relsend_body = torch.FloatTensor(np.array(encode_onehot(np.where(off_diag_body)[0]), dtype=np.float32)).to(self.dev) self.lower_body_joints = [1,2,3]# [1,2,3,4,5]# [1,2,3]#[0, 1, 2, 3, 4, 5, 6, 7] self.dismodel_args = deepcopy(self.arg.model_args) d_mode =3 if d_mode == 2: self.dismodel_args.pop('n_in_dec', None) self.dismodel_args.pop('n_hid_dec', None) self.dismodel_args.pop('n_hid_enc', None) self.dismodel_args['edge_weighting'] =True self.dismodel_args['fusion_layer'] = 0 self.discriminator = self.io.load_model('net.model.Discriminatorv2', **(self.dismodel_args)) else: self.dismodel_args.pop('n_in_enc', None) self.dismodel_args.pop('n_hid_enc', None) self.dismodel_args.pop('fusion_layer', None) self.dismodel_args.pop('cross_w', None) self.dismodel_args.pop('graph_args_p', None) self.dismodel_args.pop('graph_args_b', None) self.discriminator = self.io.load_model('net.model.Discriminatorv3', **(self.dismodel_args)) # self.dismodel_args['edge_weighting'] =True # self.dismodel_args['fusion_layer'] = 0 self.discriminator.apply(weights_init) self.discriminator.cuda() self.criterion = nn.BCEWithLogitsLoss()# nn.BCELoss() self.visual_sigmoid = nn.Sigmoid() def load_optimizer(self): if self.arg.optimizer == 'SGD': self.optimizer = optim.SGD(params=self.model.parameters(), lr=self.arg.base_lr, momentum=0.9, nesterov=self.arg.nesterov, weight_decay=self.arg.weight_decay) elif self.arg.optimizer == 'Adam': self.optimizer = optim.Adam(params=self.model.parameters(), lr=self.arg.base_lr, weight_decay=self.arg.weight_decay) self.netD_optimizer =optim.Adam(params=self.discriminator.parameters(), lr=0.000004, weight_decay=self.arg.weight_decay) def adjust_lr(self): if self.arg.optimizer == 'SGD' and self.arg.step: lr = self.arg.base_lr * 
(0.5**np.sum(self.meta_info['iter']>= np.array(self.arg.step))) for param_group in self.optimizer.param_groups: param_group['lr'] = lr self.lr = lr elif self.arg.optimizer == 'Adam' and self.arg.step: lr = self.arg.base_lr * (0.98**np.sum(self.meta_info['iter']>= np.array(self.arg.step))) for param_group in self.optimizer.param_groups: param_group['lr'] = lr self.lr = lr for param_group in self.netD_optimizer.param_groups: param_group['lr'] = self.lr else: raise ValueError('No such Optimizer') def loss_l2(self, pred, target, mask=None): dist = torch.square(pred-target).mean(-1).mean(1).mean(0) if mask is not None: dist = dist * mask loss = torch.mean(dist) return loss def vae_loss_function(self, pred, target, mean_val, log_var): assert pred.shape == target.shape reconstruction_loss = self.loss_l2(pred, target) mean_val = mean_val.mean(-1).mean(1).mean(0) log_var = log_var.mean(-1).mean(1).mean(0) KLD = - 0.5 * torch.sum(1+ log_var - mean_val.pow(2) - log_var.exp()) return reconstruction_loss + 0.1*KLD ''' def build_masking_matrix_add_noise(self, unmasked_matrix, joint_indices): r""" Build masking matrix with same shape as `unmasked_matrix` """ M = np.zeros_like(unmasked_matrix) M = M.reshape(M.shape[0], M.shape[1], -1, 3) # batch size, T, J, 3 for i in range(M.shape[0]): for j in range(M.shape[1]): for k in range(M.shape[2]): if k in joint_indices: M[i, j, k, :] = np.random.normal(0,0.5,1) #M[:, :, joint_indices, :] = np.random.normal(0,0.5,3) M = M.reshape(unmasked_matrix.shape) return M ''' def build_masking_matrix(self, unmasked_matrix, joint_indices): r""" Build masking matrix with same shape as `unmasked_matrix` """ M = np.ones_like(unmasked_matrix) M = M.reshape(M.shape[0], M.shape[1], -1, 3) # batch size, T, J, 3 M[:, :, joint_indices, :] = np.zeros((3,)) M = M.reshape(unmasked_matrix.shape) return M def build_noise_matrix(self, pose_matrix, masking_matrix): r""" Build noise matrix with same shape as `pose_matrix`. 
We replace each masked joint angle by an IID Gaussian noise signal following distribution N(0, 0.5) :param pose_matrix: matrix of poses :param masking_matrix: binary masking matrix for `pose_matrix` Return: Noise matrix with same shape as `pose_matrix` """ M = np.random.normal(loc=0, scale=0.5, size=pose_matrix.shape) inverted_mask_matrix = (~masking_matrix.astype(np.bool)).astype(np.float32) M = np.multiply(M, inverted_mask_matrix) return M def build_lower_body_masking_matrices(self, lower_body_joints, encoder_inputs, decoder_inputs): # build encoder input mask M_enc_in = self.build_masking_matrix(encoder_inputs, lower_body_joints) # build decoder input mask M_dec_in = self.build_masking_matrix(decoder_inputs, lower_body_joints) # build decoder output / target mask #M_dec_out = self.build_masking_matrix(targets, lower_body_joints) return M_enc_in, M_dec_in def build_random_masking_matrices(self, encoder_inputs, decoder_inputs, seed=None, p=0.8): # set seed if seed is not None: np.random.seed(seed) # build encoder input mask M_enc_in = np.random.binomial(n=1, p=p, size=encoder_inputs.shape).astype(np.float32) # build decoder input mask M_dec_in = np.random.binomial(n=1, p=p, size=decoder_inputs.shape).astype(np.float32) return M_enc_in, M_dec_in def train(self, masking_type="lower-body"): if self.meta_info['iter'] % 2 == 0: with torch.no_grad(): mean, var, gan_decoder_inputs, \ gan_targets, gan_decoder_inputs_previous, \ gan_decoder_inputs_previous2, \ gan_disc_encoder_inputs = self.train_generator( mode='discriminator', masking_type=masking_type) self.train_decoderv3( mean, var, gan_decoder_inputs, gan_targets, gan_decoder_inputs_previous, gan_decoder_inputs_previous2, gan_disc_encoder_inputs) else: self.train_generator(mode='generator', masking_type=masking_type) def train_decoder(self, mean, var, gan_decoder_inputs, gan_targets, gan_decoder_inputs_previous, gan_decoder_inputs_previous2): with torch.no_grad(): dec_mean = mean.clone() dec_var = var.clone() dec_var = torch.exp(0.5 * dec_var) # TBD epsilon = torch.randn_like(dec_var) z = dec_mean + dec_var * epsilon dis_pred = self.model.generate_from_decoder(z, gan_decoder_inputs, gan_decoder_inputs_previous, \ gan_decoder_inputs_previous2,self.arg.target_seq_len) #[32, 26, 10, 3] dis_pred = dis_pred.detach() dis_pred = dis_pred.requires_grad_() dis_pred = dis_pred.permute(0, 2, 1, 3).contiguous().view(32, 10, -1) dis_o = self.discriminator(dis_pred, self.relrec_joint, self.relsend_joint, self.relrec_part, self.relsend_part, self.relrec_body, self.relsend_body, self.arg.lamda)# .view(-1) # dis_o = dis_o.detach() # dis_o =dis_o.requires_grad_() self.netD_optimizer.zero_grad() N = dis_o.size()[0] # label = torch.full((N,), 0.0, dtype=torch.float, device='cuda:0') # label = Uniform(0.0, 0.1).sample((N,1)).cuda() fake_labels = torch.FloatTensor(1).fill_(0.0) fake_labels = fake_labels.requires_grad_(False) fake_labels = fake_labels.expand_as(dis_o).cuda() # print(fake_labels.size()) # print(dis_o.size()) errD_fake= self.criterion(dis_o, fake_labels) # Calculate gradients for D in backward pass # errD_fake.backward() D_x_fake = dis_o.mean().item() # to display # for the real targets = gan_targets#.permute(0, 2, 1, 3).contiguous().view(32, 10, -1) dis_oreal = self.discriminator(targets, self.relrec_joint, self.relsend_joint, self.relrec_part, self.relsend_part, self.relrec_body, self.relsend_body, self.arg.lamda)# .view(-1) # real_labels = torch.full((N,), 1.0, dtype=torch.float, device='cuda:0') # real_labels = Uniform(0.9, 
1.0).sample((N,1)).cuda() real_labels = torch.FloatTensor(1).fill_(1.0) real_labels = real_labels.requires_grad_(False) real_labels = real_labels.expand_as(dis_oreal).cuda() # print(real_labels.requires_grad) errD_real= self.criterion(dis_oreal, real_labels) # errD_real.backward() errD = 0.5*(errD_real + errD_fake) errD.backward() self.netD_optimizer.step() D_x_real = dis_oreal.mean().item() self.iter_info['discriminator loss'] = errD self.iter_info['discriminator real out'] = D_x_real self.iter_info['discriminator fake out'] = D_x_fake self.iter_info['discriminator real loss'] = errD_real self.iter_info['discriminator fake loss'] = errD_fake self.show_iter_info() self.meta_info['iter'] += 1 # writer.add_scalar("Loss/train", loss, epoch) def train_decoderv3(self, mean, var, gan_decoder_inputs, gan_targets, gan_decoder_inputs_previous, gan_decoder_inputs_previous2, gan_disc_encoder_inputs): with torch.no_grad(): dec_mean = mean.clone() dec_var = var.clone() dec_var = torch.exp(0.5 * dec_var) # TBD epsilon = torch.randn_like(dec_var) z = dec_mean + dec_var * epsilon dis_pred = self.model.generate_from_decoder(z, gan_decoder_inputs, gan_decoder_inputs_previous, \ gan_decoder_inputs_previous2, self.arg.target_seq_len) #[32, 26, 10, 3] dis_pred = dis_pred.detach() dis_pred = dis_pred.requires_grad_() dis_pred = dis_pred.permute(0, 2, 1, 3).contiguous().view(32, 10, -1) disc_in = torch.cat([gan_disc_encoder_inputs.clone(), dis_pred], dim=1) dis_o = self.discriminator(disc_in)# .view(-1) # dis_o = dis_o.detach() # dis_o =dis_o.requires_grad_() self.netD_optimizer.zero_grad() N = dis_o.size()[0] # label = torch.full((N,), 0.0, dtype=torch.float, device='cuda:0') # label = Uniform(0.0, 0.1).sample((N,1)).cuda() fake_labels = torch.FloatTensor(1).fill_(0.0) fake_labels = fake_labels.requires_grad_(False) fake_labels = fake_labels.expand_as(dis_o).cuda() # print(fake_labels.size()) # print(dis_o.size()) errD_fake= self.criterion(dis_o, fake_labels) # Calculate gradients for D in backward pass # errD_fake.backward() D_x_fake = dis_o.mean().item() # to display # for the real targets = gan_targets#.permute(0, 2, 1, 3).contiguous().view(32, 10, -1) disc_targets_in = torch.cat([gan_disc_encoder_inputs.clone(), targets], dim=1) dis_oreal = self.discriminator(disc_targets_in)# .view(-1) # real_labels = torch.full((N,), 1.0, dtype=torch.float, device='cuda:0') # real_labels = Uniform(0.9, 1.0).sample((N,1)).cuda() real_labels = torch.FloatTensor(1).fill_(1.0) real_labels = real_labels.requires_grad_(False) real_labels = real_labels.expand_as(dis_oreal).cuda() # print(real_labels.requires_grad) errD_real= self.criterion(dis_oreal, real_labels) # errD_real.backward() errD = 0.5*(errD_real + errD_fake) errD.backward() self.netD_optimizer.step() for p in self.discriminator.parameters(): p.data.clamp_(-0.25, 0.25) # nn.utils.clip_grad_norm_(self.discriminator.parameters(), 0.1) D_x_real = dis_oreal.mean().item() self.iter_info['discriminator_loss'] = errD self.iter_info['discriminator real out'] = D_x_real self.iter_info['discriminator fake out'] = D_x_fake self.iter_info['discriminator real loss'] = errD_real self.iter_info['discriminator fake loss'] = errD_fake self.show_iter_info() self.meta_info['iter'] += 1 def train_generator(self, mode='generator', masking_type="lower-body"): self.model.train() self.adjust_lr() loss_value = [] normed_train_dict = normalize_data(self.train_dict, self.data_mean, self.data_std, self.dim_use) encoder_inputs, decoder_inputs, targets = train_sample(normed_train_dict, 
self.arg.batch_size, self.arg.source_seq_len, self.arg.target_seq_len, len(self.dim_use)) # unmasked gan_disc_encoder_inputs = torch.Tensor(encoder_inputs).float().to(self.dev) #encoder_inputs #.clone().detach().requires_grad_(True) gan_disc_en_in = torch.Tensor(encoder_inputs).float().to(self.dev) # encoder_inputs_p.clone().detach().requires_grad_(True) #build masking matrices if masking_type == "lower-body": self.M_enc_in, self.M_dec_in = self.build_lower_body_masking_matrices( self.lower_body_joints, encoder_inputs, decoder_inputs ) elif masking_type == "random": self.M_enc_in, self.M_dec_in = self.build_random_masking_matrices( encoder_inputs, decoder_inputs, p=0.8 ) else: raise NotImplementedError # mask encoder inputs and decoder inputs encoder_inputs = np.multiply(self.M_enc_in, encoder_inputs) decoder_inputs = np.multiply(self.M_dec_in, decoder_inputs) # add noise to masked encoder/decoder inputs encoder_noise = self.build_noise_matrix(encoder_inputs, self.M_enc_in) decoder_noise = self.build_noise_matrix(decoder_inputs, self.M_dec_in) encoder_inputs = np.add(encoder_inputs, encoder_noise) decoder_inputs = np.add(decoder_inputs, decoder_noise) encoder_inputs_v = np.zeros_like(encoder_inputs) encoder_inputs_v[:, 1:, :] = encoder_inputs[:, 1:, :]-encoder_inputs[:, :-1, :] encoder_inputs_a = np.zeros_like(encoder_inputs) encoder_inputs_a[:, :-1, :] = encoder_inputs_v[:, 1:, :]-encoder_inputs_v[:, :-1, :] encoder_inputs_p = torch.Tensor(encoder_inputs).float().to(self.dev) encoder_inputs_v = torch.Tensor(encoder_inputs_v).float().to(self.dev) encoder_inputs_a = torch.Tensor(encoder_inputs_a).float().to(self.dev) decoder_inputs = torch.Tensor(decoder_inputs).float().to(self.dev) decoder_inputs_previous = torch.Tensor(encoder_inputs[:, -1, :]).unsqueeze(1).to(self.dev) decoder_inputs_previous2 = torch.Tensor(encoder_inputs[:, -2, :]).unsqueeze(1).to(self.dev) targets = torch.Tensor(targets).float().to(self.dev) gan_targets = targets.clone().detach().requires_grad_(True) N, T, D = targets.size() # N = 64(batchsize), T=10, D=63 targets = targets.contiguous().view(N, T, -1, 3).permute(0, 2, 1, 3) # [64, 21, 10, 3] gan_decoder_inputs = decoder_inputs.clone().detach().requires_grad_(True) gan_decoder_inputs_previous = decoder_inputs_previous.clone().detach().requires_grad_(True) gan_decoder_inputs_previous2 = decoder_inputs_previous2.clone().detach().requires_grad_(True) # v3 # gan_disc_encoder_inputs = encoder_inputs_p.clone().detach().requires_grad_(True) # gan_disc_en_in = encoder_inputs_p.clone().detach().requires_grad_(True) outputs, mean, log_var = self.model(encoder_inputs_p, encoder_inputs_v, encoder_inputs_a, decoder_inputs, decoder_inputs_previous, decoder_inputs_previous2, self.arg.target_seq_len, self.relrec_joint, self.relsend_joint, self.relrec_part, self.relsend_part, self.relrec_body, self.relsend_body, self.arg.lamda) # convert spatio-temporal masking matrix to a tensor #st_mask = torch.from_numpy(self.M_dec_out).to(self.dev) #loss = self.vae_loss_function(outputs, targets, mean, log_var, st_mask = st_mask) if mode =='generator': loss = self.vae_loss_function(outputs, targets, mean, log_var) outputs = outputs.permute(0, 2, 1, 3).contiguous().view(32, 10, -1) if True: disc_in = torch.cat([gan_disc_en_in, outputs], dim=1) gen_disco = self.discriminator(disc_in) # adversrial loss real_labels = torch.FloatTensor(1).fill_(1.0) real_labels = real_labels.requires_grad_(False) real_labels = real_labels.expand_as(gen_disco).cuda() # print(real_labels.requires_grad) gan_loss = 
self.criterion(gen_disco, real_labels) loss = 0.93* loss + 0.07*gan_loss self.optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(self.model.parameters(), 0.5) self.optimizer.step() self.iter_info['generator_loss'] = loss.data.item() if False: self.iter_info['gan_loss'] = gan_loss.data.item() self.show_iter_info() self.meta_info['iter'] += 1 self.epoch_info['mean_loss'] = np.mean(loss_value) return mean, log_var, gan_decoder_inputs, gan_targets, gan_decoder_inputs_previous, gan_decoder_inputs_previous2, gan_disc_encoder_inputs def test( self, evaluation=True, iter_time=0, save_motion=False, phase=False, masking_type="lower-body", fix_rand_masking_seed=False): self.model.eval() loss_value = [] normed_test_dict = normalize_data(self.test_dict, self.data_mean, self.data_std, self.dim_use) self.actions = ["basketball", "basketball_signal", "directing_traffic", "jumping", "running", "soccer", "walking", "washwindow"] self.io.print_log(' ') print_str = "{0: <16} |".format("milliseconds") for ms in [40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 560, 1000]: print_str = print_str + " {0:5d} |".format(ms) self.io.print_log(print_str) for action_num, action in enumerate(self.actions): encoder_inputs, decoder_inputs, targets = srnn_sample(normed_test_dict, action, self.arg.source_seq_len, self.arg.target_seq_len, len(self.dim_use)) #build masking matrices if masking_type == "lower-body": self.M_enc_in, self.M_dec_in = self.build_lower_body_masking_matrices( self.lower_body_joints, encoder_inputs, decoder_inputs ) elif masking_type == "random": rand_masking_seed = None if fix_rand_masking_seed: rand_masking_seed = 0 self.M_enc_in, self.M_dec_in = self.build_random_masking_matrices( encoder_inputs, decoder_inputs, seed=rand_masking_seed, p=0.8 ) else: raise NotImplementedError # mask encoder inputs and decoder inputs encoder_inputs = np.multiply(self.M_enc_in, encoder_inputs) decoder_inputs = np.multiply(self.M_dec_in, decoder_inputs) # add noise to masked encoder/decoder inputs encoder_noise = self.build_noise_matrix(encoder_inputs, self.M_enc_in) decoder_noise = self.build_noise_matrix(decoder_inputs, self.M_dec_in) encoder_inputs = np.add(encoder_inputs, encoder_noise) decoder_inputs = np.add(decoder_inputs, decoder_noise) encoder_inputs_v = np.zeros_like(encoder_inputs) encoder_inputs_v[:, 1:, :] = encoder_inputs[:, 1:, :]-encoder_inputs[:, :-1, :] encoder_inputs_a = np.zeros_like(encoder_inputs) encoder_inputs_a[:, :-1, :] = encoder_inputs_v[:, 1:, :]-encoder_inputs_v[:, :-1, :] encoder_inputs_p = torch.Tensor(encoder_inputs).float().to(self.dev) encoder_inputs_v = torch.Tensor(encoder_inputs_v).float().to(self.dev) encoder_inputs_a = torch.Tensor(encoder_inputs_a).float().to(self.dev) # for saving motion N, T, D = encoder_inputs_p.shape encoder_inputs_p_4d = encoder_inputs_p.view(N, T, -1, 3).permute(0, 2, 1, 3) # Eric: [N, V, T, 3] same with targets for saving motion decoder_inputs = torch.Tensor(decoder_inputs).float().to(self.dev) decoder_inputs_previous = torch.Tensor(encoder_inputs[:, -1, :]).unsqueeze(1).to(self.dev) decoder_inputs_previous2 = torch.Tensor(encoder_inputs[:, -2, :]).unsqueeze(1).to(self.dev) targets = torch.Tensor(targets).float().to(self.dev) N, T, D = targets.size() targets = targets.contiguous().view(N, T, -1, 3).permute(0, 2, 1, 3) # [64, 21, 25, 3] same with outputs for validation loss start_time = time.time() with torch.no_grad(): outputs, mean, var = self.model(encoder_inputs_p, encoder_inputs_v, encoder_inputs_a, decoder_inputs, decoder_inputs_previous, 
decoder_inputs_previous2, self.arg.target_seq_len, self.relrec_joint, self.relsend_joint, self.relrec_part, self.relsend_part, self.relrec_body, self.relsend_body, self.arg.lamda) ''' p = self.model.cal_posterior(encoder_inputs_p, encoder_inputs_v, encoder_inputs_a, decoder_inputs, decoder_inputs_previous, decoder_inputs_previous2, self.arg.target_seq_len, self.relrec_joint, self.relsend_joint, self.relrec_part, self.relsend_part, self.relrec_body, self.relsend_body, self.arg.lamda) print("posterior {}".format(p)) ''' if evaluation: num_samples_per_action = encoder_inputs_p_4d.shape[0] mean_errors = np.zeros( (num_samples_per_action, self.arg.target_seq_len), dtype=np.float32) # Eric: create data structs to save unnormalized inputs, outputs and targets inputs_denorm = np.zeros( [num_samples_per_action, encoder_inputs_p_4d.shape[2], int(self.data_mean.shape[0]/3), 3]) # num_samples_per_action, t_in, 39, 3 outputs_denorm = np.zeros( [num_samples_per_action, outputs.shape[2], int(self.data_mean.shape[0]/3), 3]) # [num_samples_per_action, t_out, 39, 3] targets_denorm = np.zeros( [num_samples_per_action, targets.shape[2], int(self.data_mean.shape[0]/3), 3]) # [num_samples_per_action, t_out, V, 3] for i in np.arange(num_samples_per_action): input = encoder_inputs_p_4d[i] # V, t_in, d V, t, d = input.shape input = input.permute(1,0,2).contiguous().view(t, V*d) input_denorm = unnormalize_data( input.cpu().numpy(), self.data_mean, self.data_std, self.dim_ignore, self.dim_use, self.dim_zero) inputs_denorm[i] = input_denorm.reshape((t, -1, 3)) output = outputs[i] # output: [V, t, d] = [21, 25, 3] V, t, d = output.shape output = output.permute(1,0,2).contiguous().view(t, V*d) output_denorm = unnormalize_data( output.cpu().numpy(), self.data_mean, self.data_std, self.dim_ignore, self.dim_use, self.dim_zero) outputs_denorm[i] = output_denorm.reshape((t, -1, 3)) t, D = output_denorm.shape output_euler = np.zeros((t,D) , dtype=np.float32) # [21, 99] for j in np.arange(t): for k in np.arange(0,115,3): output_euler[j,k:k+3] = rotmat2euler(expmap2rotmat(output_denorm[j,k:k+3])) target = targets[i] target = target.permute(1,0,2).contiguous().view(t, V*d) target_denorm = unnormalize_data( target.cpu().numpy(), self.data_mean, self.data_std, self.dim_ignore, self.dim_use, self.dim_zero) targets_denorm[i] = target_denorm.reshape((t, -1, 3)) target_euler = np.zeros((t,D) , dtype=np.float32) for j in np.arange(t): for k in np.arange(0,115,3): target_euler[j,k:k+3] = rotmat2euler(expmap2rotmat(target_denorm[j,k:k+3])) target_euler[:,0:6] = 0 idx_to_use1 = np.where(np.std(target_euler,0)>1e-4)[0] idx_to_use2 = self.dim_nonzero idx_to_use = idx_to_use1[np.in1d(idx_to_use1,idx_to_use2)] euc_error = np.power(target_euler[:,idx_to_use]-output_euler[:,idx_to_use], 2) euc_error = np.sqrt(np.sum(euc_error, 1)) # [25] mean_errors[i,:euc_error.shape[0]] = euc_error mean_mean_errors = np.mean(np.array(mean_errors), 0) if save_motion==True: save_dir = os.path.join(self.save_dir,'motions_exp'+str(iter_time*self.arg.savemotion_interval)) if not os.path.exists(save_dir): os.makedirs(save_dir) # save unnormalized inputs np.save(save_dir+f"/motions_{action}_inputs.npy", inputs_denorm) # save unnormalized outputs np.save(save_dir+f"/motions_{action}_outputs.npy", outputs_denorm) # save unnormalized targets np.save(save_dir+f"/motions_{action}_targets.npy", targets_denorm) print_str = "{0: <16} |".format(action) for ms_idx, ms in enumerate([0,1,2,3,4,5,6,7,8,9,13,24]): if self.arg.target_seq_len >= ms+1: print_str = print_str + " 
{0:.3f} |".format(mean_mean_errors[ms]) if phase is not True: self.MAE_tensor[iter_time, action_num, ms_idx] = mean_mean_errors[ms] else: print_str = print_str + " n/a |" if phase is not True: self.MAE_tensor[iter_time, action_num, ms_idx] = 0 print_str = print_str + 'T: {0:.3f} ms |'.format((time.time()-start_time)*1000/8) self.io.print_log(print_str) self.io.print_log(' ') @staticmethod def get_parser(add_help=False): parent_parser = Processor.get_parser(add_help=False) parser = argparse.ArgumentParser(add_help=add_help, parents=[parent_parser], description='Spatial Temporal Graph Convolution Network') parser.add_argument('--base_lr', type=float, default=0.01, help='initial learning rate') parser.add_argument('--step', type=int, default=[], nargs='+', help='the epoch where optimizer reduce the learning rate') parser.add_argument('--optimizer', default='SGD', help='type of optimizer') parser.add_argument('--nesterov', type=str2bool, default=True, help='use nesterov or not') parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay for optimizer') parser.add_argument('--lamda', type=float, default=1.0, help='adjust part feature') parser.add_argument('--fusion_layer_dir', type=str, default='fusion_1', help='lamda a dir') parser.add_argument('--learning_rate_dir', type=str, default='adam_1e-4', help='lamda a dir') parser.add_argument('--lamda_dir', type=str, default='nothing', help='adjust part feature') parser.add_argument('--crossw_dir', type=str, default='nothing', help='adjust part feature') parser.add_argument('--note', type=str, default='nothing', help='whether seperate') parser.add_argument('--debug', type=bool, default=False, help='whether seperate') return parser
py
1a41a4724681a99831435ebe0d51bf3de7ddeb9d
import numpy as np
import pandas as pd
from scipy.stats import rankdata


def rolling_mean(data, period):
    # pd.rolling_mean() has been removed from pandas; use the rolling() API instead
    rm = pd.Series(data).rolling(period).mean().values
    rm = rm[~np.isnan(rm)]
    return rm


def mean(value):
    value = np.mean(value)
    if np.isnan(value):
        return 0.
    return value


class DCA:
    def __init__(self, period=30, cash=300.):
        self.period = period
        self.cash = cash


class Investor:
    def __init__(self, ticket, dist, dca=DCA()):
        self.ticket = ticket
        self.cash = 0.
        self.invested = 0.
        self.history = []
        self.invested_history = []
        self.ror_history = []
        self.shares = []
        self.dist = dist
        self.dca = dca
        self.rms_list = []
        self.means = []
        self.rank = 0.
        self.m = 0.
        self.std = 0.

    def compute_means(self):
        for i in range(1, 11):
            rms = rolling_mean(np.array(self.ror_history), i * 365)
            m = mean(rms)
            if m > 0:
                self.rms_list.append(rms)
                self.means.append(m.round(2))
            else:
                self.rms_list.append([0.])

        self.means = np.array(self.means)
        self.m = np.mean(self.means).round(2)
        if np.isnan(self.m):
            self.m = 0.
        self.std = np.std(self.means).round(4)
        if np.isnan(self.std):
            self.std = 0.

    def compute_rank(self):
        self.rank = (self.m + (1. - self.std)) / 2.


class BuyAndHoldInvestmentStrategy:
    def __init__(self, investor, tr_cost):
        self.investor = investor
        self.tr_cost = tr_cost

    def invest(self, data, etf):
        if len(data.keys()) == 0:
            return

        self.investor.shares = np.zeros(len(data.keys()))
        day = 0
        last_index = -1
        for i in data.index:
            prices = data.loc[i].values
            etf_index = -1
            # invest every 30 days, cycling through the ETF list: day 0 -> etf[0], day 30 -> etf[1], ...
            if day % 30 == 0:
                last_index += 1
                etf_index = last_index % len(etf)

            if etf_index > -1:
                price = data[etf[etf_index]].loc[i]

            if (etf_index > -1 and price == 0.) or (prices == 0).all():
                day += 1
                continue

            portfolio = self.investor.cash + np.dot(prices, self.investor.shares)
            if np.isnan(portfolio):
                portfolio = 0.
            self.investor.history.append(portfolio)
            self.investor.invested_history.append(self.investor.invested)

            if self.investor.invested == 0:
                ror = 0
            else:
                ror = (portfolio - self.investor.invested) / self.investor.invested
            self.investor.ror_history.append(ror)

            if etf_index > -1:
                self.investor.cash += self.investor.dca.cash
                self.investor.invested += self.investor.dca.cash
                s = np.floor((self.investor.cash - self.tr_cost) / price)
                self.investor.shares[etf_index] += s
                # pay for the purchased shares plus the transaction cost
                self.investor.cash -= s * price + self.tr_cost

            day += 1
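

# Minimal usage sketch with made-up prices: a 300-per-month DCA into one hypothetical
# ticker, executed by the buy-and-hold strategy above ('SPY' and the price path are
# assumptions, not data shipped with this module).
if __name__ == '__main__':
    dates = pd.date_range('2015-01-01', periods=720, freq='D')
    prices = pd.DataFrame({'SPY': np.linspace(100., 160., 720)}, index=dates)
    investor = Investor('SPY', dist=None, dca=DCA(period=30, cash=300.))
    BuyAndHoldInvestmentStrategy(investor, tr_cost=2.).invest(prices, etf=['SPY'])
    print('invested:', investor.invested, 'portfolio:', round(investor.history[-1], 2))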
py
1a41a5088b968e6f70b0c51c62cd2ad8e00961bd
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Gender, obj[4]: Age, obj[5]: Children, obj[6]: Education, obj[7]: Occupation, obj[8]: Income, obj[9]: Bar, obj[10]: Coffeehouse, obj[11]: Restaurant20to50, obj[12]: Direction_same, obj[13]: Distance # {"feature": "Age", "instances": 34, "metric_value": 0.99, "depth": 1} if obj[4]>0: # {"feature": "Distance", "instances": 27, "metric_value": 0.9911, "depth": 2} if obj[13]<=2: # {"feature": "Income", "instances": 23, "metric_value": 0.9986, "depth": 3} if obj[8]>1: # {"feature": "Restaurant20to50", "instances": 20, "metric_value": 0.971, "depth": 4} if obj[11]<=1.0: # {"feature": "Occupation", "instances": 16, "metric_value": 1.0, "depth": 5} if obj[7]<=20: # {"feature": "Education", "instances": 14, "metric_value": 0.9852, "depth": 6} if obj[6]>0: # {"feature": "Coupon", "instances": 8, "metric_value": 0.9544, "depth": 7} if obj[2]<=2: # {"feature": "Coffeehouse", "instances": 5, "metric_value": 0.971, "depth": 8} if obj[10]>1.0: # {"feature": "Passanger", "instances": 3, "metric_value": 0.9183, "depth": 9} if obj[0]<=1: return 'True' elif obj[0]>1: return 'False' else: return 'False' elif obj[10]<=1.0: return 'False' else: return 'False' elif obj[2]>2: return 'True' else: return 'True' elif obj[6]<=0: # {"feature": "Passanger", "instances": 6, "metric_value": 0.65, "depth": 7} if obj[0]<=1: return 'False' elif obj[0]>1: return 'True' else: return 'True' else: return 'False' elif obj[7]>20: return 'True' else: return 'True' elif obj[11]>1.0: return 'True' else: return 'True' elif obj[8]<=1: return 'False' else: return 'False' elif obj[13]>2: return 'False' else: return 'False' elif obj[4]<=0: return 'True' else: return 'True'
py
1a41a550606cd03aa7444415423c603df4baf869
#!/usr/bin/python """ (C) Copyright 2020-2021 Intel Corporation. SPDX-License-Identifier: BSD-2-Clause-Patent """ import time from apricot import TestWithServers from general_utils import bytes_to_human, human_to_bytes from server_utils import ServerFailed class PoolTestBase(TestWithServers): """Base pool test class. :avocado: recursive """ def setUp(self): """Set up each test case.""" # Create test-case-specific DAOS log files self.update_log_file_names() super().setUp() self.dmg = self.get_dmg_command() def get_max_pool_sizes(self, scm_ratio=0.9, nvme_ratio=0.9): """Get the maximum pool sizes for the current server configuration. Args: scm_ratio (float, optional): percentage of the maximum SCM capacity to use for the pool sizes. Defaults to 0.9 (90%). nvme_ratio (float, optional): percentage of the maximum NVMe capacity to use for the pool sizes. Defaults to 0.9 (90%). Returns: list: a list of bytes representing the maximum pool creation SCM size and NVMe size """ try: sizes = self.server_managers[0].get_available_storage() except ServerFailed as error: self.fail(error) ratios = (scm_ratio, nvme_ratio) for index, size in enumerate(sizes): if size and ratios[index] < 1: # Reduce the size by the specified percentage sizes[index] *= ratios[index] self.log.info( "Adjusted %s size by %.2f%%: %s (%s)", "SCM" if index == 0 else "NVMe", 100 * ratios[index], str(sizes[index]), bytes_to_human(sizes[index])) return sizes def get_pool_list(self, quantity, scm_ratio, nvme_ratio, svcn=None): """Get a list of TestPool objects. Set each TestPool's scm_size and nvme_size attributes using the specified ratios and the largest SCM or NVMe size common to all the configured servers. Args: quantity (int): number of TestPool objects to create scm_ratio (float): percentage of the maximum SCM capacity to use for the pool sizes, e.g. 0.9 for 90% nvme_ratio (float): percentage of the maximum NVMe capacity to use for the pool sizes, e.g. 0.9 for 90%. Specifying None will setup each pool without NVMe. svcn (int): Number of pool service replicas. The default value of None will use the default set on the server. Returns: list: a list of TestPool objects equal in length to the quantity specified, each configured with the same SCM and NVMe sizes. """ sizes = self.get_max_pool_sizes( scm_ratio, 1 if nvme_ratio is None else nvme_ratio) pool_list = [ self.get_pool(create=False, connect=False) for _ in range(quantity)] for pool in pool_list: pool.svcn.update(svcn) pool.scm_size.update(bytes_to_human(sizes[0]), "scm_size") if nvme_ratio is not None: if sizes[1] is None: self.fail( "Unable to assign a max pool NVMe size; NVMe not " "configured!") # The I/O Engine allocates NVMe storage on targets in multiples # of 1GiB per target. A server with 8 targets will have a # minimum NVMe size of 8 GiB. Specify the largest NVMe size in # GiB that can be used with the configured number of targets and # specified capacity in GiB. targets = self.server_managers[0].get_config_value("targets") increment = human_to_bytes("{}GiB".format(targets)) nvme_multiple = increment while nvme_multiple + increment <= sizes[1]: nvme_multiple += increment self.log.info( "Largest NVMe multiple based on %s targets in %s: %s (%s)", targets, str(sizes[1]), str(nvme_multiple), bytes_to_human(nvme_multiple)) pool.nvme_size.update( bytes_to_human(nvme_multiple), "nvme_size") return pool_list def check_pool_creation(self, max_duration): """Check the duration of each pool creation meets the requirement. 
Args: max_duration (int): max pool creation duration allowed in seconds """ durations = [] for index, pool in enumerate(self.pool): start = float(time.time()) pool.create() durations.append(float(time.time()) - start) self.log.info( "Pool %s creation: %s seconds", index + 1, durations[-1]) exceeding_duration = 0 for index, duration in enumerate(durations): if duration > max_duration: exceeding_duration += 1 self.assertEqual( exceeding_duration, 0, "Pool creation took longer than {} seconds on {} pool(s)".format( max_duration, exceeding_duration))
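

# Standalone sketch of the NVMe sizing rule described in get_pool_list(): the pool NVMe
# size is the largest whole multiple of (targets x 1GiB) that fits the available capacity.
def largest_nvme_multiple(available_bytes, targets):
    increment = human_to_bytes("{}GiB".format(targets))
    multiple = increment
    while multiple + increment <= available_bytes:
        multiple += increment
    return multiple

# e.g. with 8 targets and ~100 GiB available this yields 96 GiB (12 x 8 GiB)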
py
1a41a5dde35275a7ba9e6a669ed5884e4f3dd396
from __future__ import annotations from typing import Tuple, NoReturn from ...base import BaseEstimator import numpy as np from itertools import product from ...metrics import misclassification_error class DecisionStump(BaseEstimator): """ A decision stump classifier for {-1,1} labels according to the CART algorithm Attributes ---------- self.threshold_ : float The threshold by which the data is split self.j_ : int The index of the feature by which to split the data self.sign_: int The label to predict for samples where the value of the j'th feature is about the threshold """ def __init__(self) -> DecisionStump: """ Instantiate a Decision stump classifier """ super().__init__() self.threshold_, self.j_, self.sign_ = None, None, None def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn: """ fits a decision stump to the given data Parameters ---------- X : ndarray of shape (n_samples, n_features) Input data to fit an estimator for y : ndarray of shape (n_samples, ) Responses of input data to fit to """ th = 0 mis = np.Inf feature_index = 0 sign = 1 for j in range(X.shape[1]): new_th, new_mis = self._find_threshold(X[:, j], y, 1) if new_mis < mis: mis = new_mis th = new_th feature_index = j sign = 1 new_th, new_mis = self._find_threshold(X[:, j], y, -1) if new_mis < mis: mis = new_mis th = new_th feature_index = j sign = -1 self.threshold_ = th self.j_ = feature_index self.sign_ = sign def _predict(self, X: np.ndarray) -> np.ndarray: """ Predict responses for given samples using fitted estimator Parameters ---------- X : ndarray of shape (n_samples, n_features) Input data to predict responses for y : ndarray of shape (n_samples, ) Responses of input data to fit to Returns ------- responses : ndarray of shape (n_samples, ) Predicted responses of given samples Notes ----- Feature values strictly below threshold are predicted as `-sign` whereas values which equal to or above the threshold are predicted as `sign` """ # y_pred = np.zeros(X.shape[0]) values = X[:, self.j_] y_pred = np.where(values < self.threshold_, -self.sign_, self.sign_) return y_pred def _find_threshold(self, values: np.ndarray, labels: np.ndarray, sign: int) -> Tuple[float, float]: """ Given a feature vector and labels, find a threshold by which to perform a split The threshold is found according to the value minimizing the misclassification error along this feature Parameters ---------- values: ndarray of shape (n_samples,) A feature vector to find a splitting threshold for labels: ndarray of shape (n_samples,) The labels to compare against sign: int Predicted label assigned to values equal to or above threshold Returns ------- thr: float Threshold by which to perform split thr_err: float between 0 and 1 Misclassificaiton error of returned threshold Notes ----- For every tested threshold, values strictly below threshold are predicted as `-sign` whereas values which equal to or above the threshold are predicted as `sign` """ th = values[0] mis = np.inf for i in range(values.shape[0]): y_pred = np.where(values < values[i], -sign, sign) # new_mis = misclassification_error(labels, y_pred) new_mis = np.sum(np.where(np.sign(labels) != np.sign(y_pred), abs(labels), 0)) if new_mis < mis: mis = new_mis th = values[i] return th, mis def _loss(self, X: np.ndarray, y: np.ndarray) -> float: """ Evaluate performance under misclassification loss function Parameters ---------- X : ndarray of shape (n_samples, n_features) Test samples y : ndarray of shape (n_samples, ) True labels of test samples Returns ------- loss : float Performance 
under misclassification loss function
        """
        y_pred = self._predict(X)
        # return misclassification_error(y, y_pred)
        loss = np.sum(np.where(np.sign(y) != np.sign(y_pred), abs(y), 0))
        # loss = np.sum(np.sign(y) != np.sign(y_pred))
        # if normalize:
        return loss
        # return loss
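

# Minimal usage sketch on a toy 1-D dataset: negatives below 0.6 and positives from 0.6 up,
# so the fitted stump should select feature 0, threshold 0.6 and sign +1. The protected
# _fit/_predict methods are called directly here; in the surrounding package they would
# normally be reached through BaseEstimator's public fit/predict wrappers.
if __name__ == '__main__':
    X_toy = np.array([[0.1], [0.2], [0.4], [0.6], [0.8], [0.9]])
    y_toy = np.array([-1., -1., -1., 1., 1., 1.])
    stump = DecisionStump()
    stump._fit(X_toy, y_toy)
    print(stump.j_, stump.threshold_, stump.sign_, stump._predict(X_toy))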
py
1a41a697baad1fa0475bfa83b17099ee534d6efd
# -*- coding: utf-8 -*- import json import logging import re from concurrent import futures from urllib.parse import quote, unquote, urlparse from bs4 import BeautifulSoup from bs4.element import Tag from ..utils.crawler import Crawler logger = logging.getLogger('BABELNOVEL') search_url = 'https://babelnovel.com/api/books?page=0&pageSize=8&fields=id,name,canonicalName,lastChapter&ignoreStatus=false&query=%s' novel_page_url = 'https://babelnovel.com/api/books/%s' chapter_list_url = 'https://babelnovel.com/api/books/%s/chapters?bookId=%s&page=%d&pageSize=100&fields=id,name,canonicalName,hasContent,isBought,isFree,isLimitFree' chapter_json_url = 'https://babelnovel.com/api/books/%s/chapters/%s/content' # https://babelnovel.com/api/books/f337b876-f246-40c9-9bcf-d7f31db00296/chapters/ac1ebce2-e62e-4176-a2e7-6012c606ded4/content chapter_page_url = 'https://babelnovel.com/books/%s/chapters/%s' class BabelNovelCrawler(Crawler): base_url = 'https://babelnovel.com/' def search_novel(self, query): # to get cookies self.get_response(self.home_url) url = search_url % quote(query.lower()) logger.debug('Visiting: %s', url) data = self.get_json(url) results = [] for item in data['data']: if not item['canonicalName']: continue # end if info = None if item['lastChapter']: info = 'Latest: %s' % item['lastChapter']['name'] # end if results.append({ 'title': item['name'], 'url': novel_page_url % item['canonicalName'], 'info': info, }) # end for return results # end def def read_novel_info(self): # to get cookies and session info self.parse_content_css(self.home_url) # Determine cannonical novel name path_fragments = urlparse(self.novel_url).path.split('/') if path_fragments[1] == 'books': self.novel_hash = path_fragments[2] else: self.novel_hash = path_fragments[-1] # end if self.novel_url = novel_page_url % self.novel_hash logger.info('Canonical name: %s', self.novel_hash) logger.debug('Visiting %s', self.novel_url) data = self.get_json(self.novel_url) self.novel_id = data['data']['id'] logger.info('Novel ID: %s', self.novel_id) self.novel_title = data['data']['name'] logger.info('Novel title: %s', self.novel_title) self.novel_cover = data['data']['cover'] logger.info('Novel cover: %s', self.novel_cover) chapter_count = int(data['data']['chapterCount']) self.get_list_of_chapters(chapter_count) # end def def get_list_of_chapters(self, chapter_count): futures_to_check = dict() temp_chapters = dict() for page in range(1 + chapter_count // 100): list_url = chapter_list_url % (self.novel_id, self.novel_id, page) future = self.executor.submit(self.parse_chapter_item, list_url) futures_to_check[future] = str(page) # end for for future in futures.as_completed(futures_to_check): page = int(futures_to_check[future]) temp_chapters[page] = future.result() # end for for page in sorted(temp_chapters.keys()): self.volumes.append({'id': page + 1}) for chap in temp_chapters[page]: chap['volume'] = page + 1 chap['id'] = 1 + len(self.chapters) self.chapters.append(chap) # end for # end for # end def def parse_chapter_item(self, list_url): logger.debug('Visiting %s', list_url) data = self.get_json(list_url) chapters = list() for item in data['data']: if not (item['isFree']): # or item['isLimitFree'] or item['isBought']): continue # end if chapters.append({ 'title': item['name'], 'url': chapter_page_url % (self.novel_hash, item['canonicalName']), 'json_url': chapter_json_url % (self.novel_hash, item['id']), }) # end for return chapters # end def def parse_content_css(self, url): try: soup = self.get_soup(url) content = 
re.findall('window.__STATE__ = "([^"]+)"', str(soup), re.MULTILINE) data = json.loads(unquote(content[0])) cssUrl = self.absolute_url(data['chapterDetailStore']['cssUrl']) logger.info('Getting %s', cssUrl) css = self.get_response(cssUrl).text baddies = css.split('\n')[-1].split('{')[0].strip() self.bad_selectors = baddies logger.info('Bad selectors: %s', self.bad_selectors) except: self.bad_selectors = [] logger.exception('Fail to get bad selectors') # end for # end def def download_chapter_body(self, chapter): logger.info('Visiting %s', chapter['json_url']) data = self.get_json(chapter['json_url']) soup = BeautifulSoup(data['data']['content'], 'lxml') if self.bad_selectors: for tag in soup.select(self.bad_selectors): tag.extract() # end for # end if body = soup.find('body') self.clean_contents(body) for tag in body.contents: if not str(tag).strip(): tag.extract() elif isinstance(tag, Tag): tag.name = 'p' # end if # end for # body = data['data']['content'] result = str(body) result = re.sub(r'\n\n', '<br><br>', result) return result # end def # end class
py
1a41a8c94a38217cfeaba4ced8d70c53d1c276da
import tkinter as tk


class AutoScrollbar(tk.Scrollbar):
    """Create a scrollbar that hides itself if it's not needed.

    Only works if you use the pack geometry manager from tkinter.
    https://stackoverflow.com/questions/57030781/auto-hiding-scrollbar-not-showing-as-expected-with-tkinter-pack-method
    """
    def set(self, low, high):
        if float(low) <= 0.0 and float(high) >= 1.0:
            self.pack_forget()
        else:
            if self.cget("orient") == tk.HORIZONTAL:
                self.pack(fill=tk.X, side=tk.BOTTOM)
            else:
                self.pack(fill=tk.Y, side=tk.RIGHT)
            tk.Scrollbar.set(self, low, high)

    def grid(self, **kw):
        raise tk.TclError("cannot use grid with this widget")

    def place(self, **kw):
        raise tk.TclError("cannot use place with this widget")
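

# Minimal usage sketch: attach the auto-hiding scrollbar to a Text widget using pack;
# the scrollbar packs/unpacks itself from inside set(), so it is never packed manually.
if __name__ == "__main__":
    root = tk.Tk()
    scrollbar = AutoScrollbar(root)
    text = tk.Text(root, height=10, yscrollcommand=scrollbar.set)
    scrollbar.config(command=text.yview)
    text.pack(fill=tk.BOTH, expand=True, side=tk.LEFT)
    root.mainloop()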
py
1a41a8f3c7178024ae2217219ed907a77bed6eb7
# Uses python3 import sys #Time: O(n) #Description: The last digit of (a + b) = the last digit (a) + the last digit (b) def get_fibonacci_last_digit(n, m): if n <= 1: return n previous = 0 current = 1 for _ in range(n - 1): previous, current = current, (previous + current) % m return current if __name__ == '__main__': input = sys.stdin.read() n = int(input) print(get_fibonacci_last_digit(n, 10))
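

# Sanity-check sketch: the modular recurrence above matches computing the full Fibonacci
# number and taking its last digit (only feasible for small n).
def naive_last_digit(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a % 10

# all(get_fibonacci_last_digit(n, 10) == naive_last_digit(n) for n in range(30)) -> True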
py
1a41ab5fae231b5ed9603002e3763474f5a7ae59
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, # National Institutes of Health Clinical Center, July 2019 """Utilities for DeepLesion""" import numpy as np #from openpyxl import load_workbook import json from collections import Counter #from maskrcnn.utils.miscellaneous import unique from fcos_core.config import cfg def gen_mask_polygon_from_recist(recist): """Generate ellipse from RECIST for weakly-supervised segmentation""" x11, y11, x12, y12, x21, y21, x22, y22 = recist axis1 = np.linalg.solve(np.array([[x11, y11], [x12, y12]]), np.array([1, 1])) axis2 = np.linalg.solve(np.array([[x21, y21], [x22, y22]]), np.array([1, 1])) center = np.linalg.solve(np.array([[axis1[0], axis1[1]], [axis2[0], axis2[1]]]), np.array([1, 1])) centered_recist = recist - np.tile(center, (4,)) centered_recist = np.reshape(centered_recist, (4, 2)) pt_angles = np.arctan2(centered_recist[:, 1], centered_recist[:, 0]) pt_lens = np.sqrt(np.sum(centered_recist ** 2, axis=1)) ord = [0, 2, 1, 3, 0] grid = .1 rotated_pts = [] for p in range(4): # pt1 = centered_recist[ord[p]] # pt2 = centered_recist[ord[p+1]] if (pt_angles[ord[p]] < pt_angles[ord[p + 1]] and pt_angles[ord[p + 1]] - pt_angles[ord[p]] < np.pi) \ or (pt_angles[ord[p]] - pt_angles[ord[p + 1]] > np.pi): # counter-clockwise angles = np.arange(0, np.pi / 2, grid) else: angles = np.arange(0, -np.pi / 2, -grid) xs = np.cos(angles) * pt_lens[ord[p]] ys = np.sin(angles) * pt_lens[ord[p + 1]] r = pt_angles[ord[p]] rotated_pts1 = np.matmul(np.array([[np.cos(r), -np.sin(r)], [np.sin(r), np.cos(r)]]), np.vstack((xs, ys))) rotated_pts.append(rotated_pts1) rotated_pts = np.hstack(rotated_pts) decentered_pts = rotated_pts + center.reshape((2, 1)) polygon = decentered_pts.transpose().ravel() # for p in polygon: # print('%.4f'%p, ',',) # print('\n',recist) return polygon.tolist() def load_tag_dict_from_xlsfile(fn): """Load ontology""" cellname = lambda row, col: '%s%d' % (chr(ord('A') + col - 1), row) wb = load_workbook(fn) sheet = wb.get_active_sheet() tag_dicts = [] for p in range(2, sheet.max_row + 1): ex = sheet[cellname(p, 6)].value ex = [] if ex is None else ex.split(' | ') parents = sheet[cellname(p, 7)].value parents = [] if parents is None else parents.split(' | ') children = sheet[cellname(p, 8)].value children = [] if children is None else children.split(' | ') tag_dict = {'id': sheet[cellname(p, 1)].value, # useless 'class': sheet[cellname(p, 2)].value, 'tag': sheet[cellname(p, 3)].value, 'synonyms': sheet[cellname(p, 4)].value.split(' | '), 'num_detected': sheet[cellname(p, 5)].value, 'exclusive': ex, 'parents': parents, 'children': children } tag_dicts.append(tag_dict) return tag_dicts def load_lesion_tags(split_file, tag_dict): """Load training labels for tags""" with open(split_file, 'r') as f: data = json.load(f) print('loaded', split_file) term_list = data['term_list'] num_labels = len(term_list) prefix = 'train' smp_idxs, labels, uncertain_labels = \ data['%s_lesion_idxs' % prefix], data['%s_relevant_labels' % prefix], \ data['%s_uncertain_labels' % prefix] tag_dict_filtered = {idx: unique(r+u) for idx,r,u in zip(smp_idxs, labels, uncertain_labels)} tag_list_dict = [] class_map = {t['tag']: t['class'] for t in tag_dict} for i in range(num_labels): tag_dict = {'ID': i, 'tag': term_list[i], 'class': class_map[term_list[i]]} tag_list_dict.append(tag_dict) return tag_list_dict, tag_dict_filtered def gen_parent_list(tag_dicts, tag_list): """Hierarchical label relations""" parents_map = {t['tag']: t['parents'] for t in tag_dicts} 
parent_list = [] for t in tag_list: ps = parents_map[t] parent_list.append([tag_list.index(p) for p in ps if p in tag_list]) return parent_list def gen_children_list(parent_list, tag_list): """Hierarchical label relations""" all_children_list = [[] for _ in tag_list] for i, parent in enumerate(parent_list): for p1 in parent: all_children_list[p1].append(i) direct_children_list = [[] for _ in tag_list] for i, children in enumerate(all_children_list): direct_children_list[i] = [c for c in children if not any([p in children for p in parent_list[c]])] return all_children_list, direct_children_list def gen_tree_depth(tag_list, parent_list): """Hierarchical label relations""" tag_depth = np.ones((len(tag_list),), dtype=int) while True: last_depth = tag_depth.copy() for p in range(len(parent_list)): if len(parent_list[p]) > 0: tag_depth[p] = np.max([tag_depth[idx] for idx in parent_list[p]])+1 if np.all(last_depth == tag_depth): break return tag_depth def gen_exclusive_list(tag_dicts, tag_list, parent_list, all_children_list): """Infer exclusive label relations according to hierarchical relations""" exclusive_list = [] all_d_tags = [t['tag'] for t in tag_dicts] for p in range(len(tag_list)): idx = all_d_tags.index(tag_list[p]) exclusive_list.append([tag_list.index(ex) for ex in tag_dicts[idx]['exclusive'] if ex in tag_list]) while True: flag = False for p in range(len(tag_list)): cur_ex = exclusive_list[p] next_ex = cur_ex[:] for ex in cur_ex: next_ex += all_children_list[ex] for parent in parent_list[p]: next_ex += exclusive_list[parent] next_ex = unique(next_ex) flag = flag or (set(next_ex) != set(cur_ex)) exclusive_list[p] = next_ex if not flag: break return exclusive_list
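

# Toy sketch of the hierarchy helpers above, using a made-up three-term ontology in which
# each term lists all of its ancestors (the convention the direct-children filtering above
# relies on).
_toy_dicts = [
    {'tag': 'lung', 'parents': [], 'exclusive': []},
    {'tag': 'nodule', 'parents': ['lung'], 'exclusive': []},
    {'tag': 'calcified nodule', 'parents': ['nodule', 'lung'], 'exclusive': []},
]
_toy_tags = ['lung', 'nodule', 'calcified nodule']
_toy_parents = gen_parent_list(_toy_dicts, _toy_tags)               # [[], [0], [1, 0]]
_toy_all, _toy_direct = gen_children_list(_toy_parents, _toy_tags)  # [[1, 2], [2], []], [[1], [2], []]
_toy_depth = gen_tree_depth(_toy_tags, _toy_parents)                # array([1, 2, 3])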
py
1a41ac60325da217abeca72b6dfcf536acc71dbb
########################################################################## # # Copyright (c) 2015, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import Gaffer Gaffer.Metadata.registerNode( Gaffer.LoopComputeNode, "description", """ Applies a node network to an input iteratively. """, )
py
1a41acf318ed380bc0a1611c298a3b62d78d0f35
nonlocal a, b, c nonlocal : source.python, storage.modifier.declaration.python : source.python a : source.python , : punctuation.separator.element.python, source.python : source.python b : source.python , : punctuation.separator.element.python, source.python : source.python c : source.python
py
1a41ad6c214d31659bbb76a5b79d3c0c23619e45
import requests
import datetime
import random
import time
import json


def get_time():
    return datetime.datetime.now().strftime("%H:%M:%S %Y-%m-%d")


def get_token():
    # strip the trailing newline so the Authorization header stays valid
    return open('token.txt', 'r', encoding='UTF-8').read().strip()


def change_status_text(token, text):
    url = 'https://discord.com/api/v9/users/@me/settings'
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": token
    }
    payload = {"custom_status": {"text": text}}
    r = requests.patch(url, headers=headers, data=json.dumps(payload))


token = get_token()
while True:
    change_status_text(token, str(get_time()))
    time.sleep(0.5)
py
1a41af0d5c196666fb4c450bee9ae0d055ce4273
import requests import json from datetime import datetime import time print("WELCOME TO INSTAFORCER, A PLACE WHERE PASSWORDS OF INSTAGRAM ACCOUNTS ARE CRACKED WITH EXCEPTIONAL EFFICIENCY\n") time.sleep(2) # here are the constants defined username = input("Insert the username of the target(without @): ") passw = input("Insert the name of the text file which will be utilised in hacking(without .txt): ") passwords = open(f"{passw}.txt", 'r').readlines() # actual cracking of the password def main(): for lines in passwords: link = "https://www.instagram.com/accounts/login/" login_url = "https://www.instagram.com/accounts/login/ajax/" password = lines.strip() response = requests.get(link) csrf_token = response.cookies['csrftoken'] time_now = int(datetime.now().timestamp()) payload = { 'username': username, 'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{time_now}:{password}', 'queryParams': {}, 'optIntoOneTap': 'false' } login_header = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36', 'X-Requested-With': 'XMLHttpRequest', 'Referer': 'https://www.instagram.com/accounts/login/', 'x-csrftoken': csrf_token } login_response = requests.post(login_url, data=payload, headers=login_header) json_data = json.loads(login_response.text) try: if json_data["authenticated"]: print(f"Login Successful: {password}\n") break else: print(f"Login Unsuccessful: {password}\n") time.sleep(3) continue except KeyError: print(f"Login Unsuccessful: {password}\nYour HTTP posts might have been blocked due to too many requests being made.") time.sleep(1) continue if __name__ == '__main__': main()
py
1a41af1f6337cba1372f279f0f49ffc0be6b2211
import asyncio import functools import random import time from testing import Client from testing import default_test_setup from testing import gen_data from testing import gen_points from testing import gen_series from testing import InsertError from testing import PoolError from testing import QueryError from testing import run_test from testing import Series from testing import Server from testing import ServerError from testing import SiriDB from testing import TestBase from testing import UserAuthError from testing import parse_args class TestServer(TestBase): title = 'Test server object' Server.SERVER_ADDRESS = 'localhost' Server.IP_SUPPORT = 'IPV4ONLY' @default_test_setup(4) async def run(self): await self.client0.connect() await self.db.add_pool(self.server1) await self.assertIsRunning(self.db, self.client0, timeout=20) await asyncio.sleep(5) await self.client1.connect() for port in (9010, 9011): result = await self.client0.query( 'alter server "localhost:{}" set log_level error'.format(port)) self.assertEqual( result.pop('success_msg'), "Successfully set log level to 'error' on 'localhost:{}'." .format(port)) result = await self.client1.query('list servers log_level') self.assertEqual(result.pop('servers'), [['error'], ['error']]) result = await self.client1.query('list servers uuid') for uuid in result.pop('servers'): result = await self.client0.query( 'alter server {} set log_level debug'.format(uuid[0])) result = await self.client1.query('list servers log_level') self.assertEqual(result.pop('servers'), [['debug'], ['debug']]) result = await self.client0.query('alter servers set log_level info') self.assertEqual( result.pop('success_msg'), "Successfully set log level to 'info' on 2 servers.") result = await self.client1.query('list servers log_level') self.assertEqual(result.pop('servers'), [['info'], ['info']]) result = await self.client1.query( 'list servers active_tasks where active_tasks == 1 and ' 'idle_time >= 0 and idle_percentage <= 100') self.assertEqual(result.pop('servers'), [[1], [1]]) result = await self.client0.query( 'alter servers where active_handles > 1 set log_level debug') result = await self.client1.query('list servers log_level') self.assertEqual(result.pop('servers'), [['debug'], ['debug']]) with self.assertRaisesRegex( QueryError, "Query error at position 42. 
Expecting " "debug, info, warning, error or critical"): await self.client0.query( 'alter server "localhost:{}" set log_level unknown') self.client1.close() result = await self.server1.stop() self.assertTrue(result) self.server1.listen_backend_port = 9111 self.server1.create() await self.server1.start(sleep=20) await asyncio.sleep(35) result = await self.client0.query('list servers status') self.assertEqual(result.pop('servers'), [['running'], ['running']]) await self.client1.connect() result = await self.client1.query('show server') self.assertEqual(result.pop('data'), [ {'name': 'server', 'value': 'localhost:9111'}]) await self.db.add_replica(self.server2, 1) await self.assertIsRunning(self.db, self.client0, timeout=35) with self.assertRaisesRegex( QueryError, "Cannot remove server 'localhost:9010' " "because this is the only server for pool 0"): await self.client1.query('drop server "localhost:9010"') with self.assertRaisesRegex( QueryError, "Cannot remove server 'localhost:9012' " "because the server is still online.*"): await self.client1.query('drop server "localhost:9012"') result = await self.server1.stop() self.assertTrue(result) result = await self.server2.stop() self.assertTrue(result) await self.server1.start(sleep=30) result = await self.client1.query('show status') self.assertEqual(result.pop('data'), [ {'name': 'status', 'value': 'running | synchronizing'}]) result = await self.client0.query('drop server "localhost:9012"') self.assertEqual( result.pop('success_msg'), "Successfully dropped server 'localhost:9012'.") self.db.servers.remove(self.server2) time.sleep(1) for client in (self.client0, self.client1): result = await client.query('list servers status') self.assertEqual(result.pop('servers'), [['running'], ['running']]) await self.db.add_replica(self.server3, 1) await self.assertIsRunning(self.db, self.client0, timeout=35) self.client0.close() self.client1.close() # return False if __name__ == '__main__': parse_args() run_test(TestServer())
py
1a41b028d9809d09d97eecacd1b9f2e7178dd6cc
""" The MIT License (MIT) Copyright (c) 2014 Kyle Hollins Wray, University of Massachusetts Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import os import os.path def print_target(f, name, sdir, odir): """ Prints a target rule to the specified file. Parameters: f -- The file where the output will be printed to. name -- The name of the target rule. sdir -- The source directory to compile this rule. odir -- The object directory to store the .o files. """ f.write(name + ': ' + sdir + '/*.cpp \n') print_commands(f, sdir, odir) def print_commands(f, sdir, odir): """ Prints a set of bash comands to create an object directory (if it doesn't exists), compile all source files in a source directory and move the object files to the object directory. Parameters: f -- The file where the output will be printed to. sdir -- The directory where the source files are stored. odir -- The directory to where the .o files will be stored. """ f.write('\tmkdir -p ' + odir + ' \n' + '\t$(CC) $(CFLAGS) -c ' + sdir + '/*.cpp \n' + '\tmv *.o ' + odir + '\n\n') srcdir = 'librbr/src' objdir = 'librbr/obj' testdir = 'librbr_tests' coresubdir = ['states', 'actions', 'observations', 'state_transitions', 'observation_transitions', 'policy', 'rewards','agents'] f = open('Makefile', 'w') # Test if the 'librbr/obj', 'librbr_tests/obj', and 'tmp' # directories exist and make them if they do not. directories = ['librbr/obj', 'librbr_tests/obj', 'librbr_tests/tmp'] for d in directories: if not os.path.exists(d): os.makedirs(d) # Printing flags and directory wildcards. f.write('CC = g++\n' + 'CFLAGS = -std=c++11 -g\n' + 'COINFLAGS = `pkg-config --cflags --libs Coin` ' + '`pkg-config --cflags --libs clp` ' + '`pkg-config --cflags --libs osi` ' + '`pkg-config --libs coinutils` ' + '`pkg-config --cflags --libs osi-clp`\n\n') # Printing target rule for tests. f.write('tests: all.o ' + testdir + '/src/core/*.cpp ' + testdir + '/src/mdp/*.cpp ' + #testdir + '/src/ssp/*.cpp ' + testdir + '/src/pomdp/*.cpp ' + #testdir + '/src/dec_pomdp/*.cpp' + testdir + '/src/management/*.cpp ' + testdir + '/src/utilities/*.cpp\n') f.write('\tmkdir -p ' + testdir + '/obj\n') f.write('\t$(CC) $(CFLAGS) -c -I.. 
' + testdir + '/src/core/*.cpp ' + testdir + '/src/mdp/*.cpp ' + #testdir + '/src/ssp/*.cpp ' + testdir + '/src/pomdp/*.cpp ' + #testdir + '/src/dec_pomdp/*.cpp' + testdir + '/src/management/*.cpp ' + testdir + '/src/utilities/*.cpp ' + testdir + '/src/*.cpp\n') f.write('\t$(CC) $(CFLAGS) $(COINFLAGS) -o perform_tests ' + objdir + '/*.o *.o\n') f.write('\tmv *.o ' + testdir + '/obj\n\n') # Printing target rules for all object files. for sd in coresubdir: print_target(f, sd + '.o', srcdir + '/core/' + sd, objdir) print_target(f, 'core.o', srcdir + '/core', objdir) print_target(f, 'utilities.o', srcdir + '/utilities', objdir) print_target(f, 'management.o', srcdir + '/management', objdir) print_target(f, 'mdp.o', srcdir + '/mdp', objdir) print_target(f, 'ssp.o', srcdir + '/ssp', objdir) print_target(f, 'pomdp.o', srcdir + '/pomdp', objdir) print_target(f, 'dec_pomdp.o', srcdir + '/dec_pomdp', objdir) f.write('make all.o: ') for sd in coresubdir: f.write(sd + '.o ') f.write('core.o utilities.o management.o mdp.o ssp.o pomdp.o dec_pomdp.o\n\n') f.close()
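
# A minimal usage sketch of the two helper functions above, writing a single rule into an
# in-memory buffer instead of the real Makefile; the 'mylib' paths are illustrative only
# and are not part of the librbr layout.
import io

buf = io.StringIO()
print_target(buf, 'core.o', 'mylib/src/core', 'mylib/obj')
print(buf.getvalue())
# Produces one rule in the same pattern the generator emits:
# core.o: mylib/src/core/*.cpp
# 	mkdir -p mylib/obj
# 	$(CC) $(CFLAGS) -c mylib/src/core/*.cpp
# 	mv *.o mylib/obj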
py
1a41b158ede078bafb1187ec3c04cbcb197139b7
import glob
import pickle

import cv2
import numpy as np


def _initialize_object_points(n_horizontal, n_vertical):
    objp = np.zeros((n_horizontal * n_vertical, 3), np.float32)
    objp[:, :2] = np.mgrid[0:n_horizontal, 0:n_vertical].T.reshape(-1, 2)
    return objp


def get_distortion_matrix(input_path, image_dims, grid_shape=(9, 6)):
    objp = _initialize_object_points(grid_shape[0], grid_shape[1])

    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    images = glob.glob(input_path)

    for index, file_name in enumerate(images):
        img = cv2.imread(file_name)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (grid_shape[0], grid_shape[1]), None)

        # If found, add object points, image points
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)

    _, mtx, dist, _, _ = cv2.calibrateCamera(objpoints, imgpoints, image_dims, None, None)
    return mtx, dist


def setup_undistort(calibration_matrix_path):
    distortion_matrix = pickle.load(open(calibration_matrix_path, "rb"))
    mtx = distortion_matrix["mtx"]
    dist = distortion_matrix["dist"]

    return lambda img: cv2.undistort(img, mtx, dist, None, mtx)
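
# A hedged usage sketch for the calibration helpers above; the glob pattern, image size,
# test image and pickle filename are assumptions for illustration, not values taken from
# the module itself.
if __name__ == "__main__":
    # Calibrate once from chessboard images, persist the matrices, then build a
    # reusable undistort function from the pickled result.
    mtx, dist = get_distortion_matrix("camera_cal/calibration*.jpg",
                                      image_dims=(1280, 720),
                                      grid_shape=(9, 6))
    with open("calibration_matrix.p", "wb") as out_file:
        pickle.dump({"mtx": mtx, "dist": dist}, out_file)

    undistort = setup_undistort("calibration_matrix.p")
    corrected = undistort(cv2.imread("test_images/test1.jpg"))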
py
1a41b26b96103e21f0cf94dcfb4b9bb246e4fa24
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: [email protected] # Maintained By: [email protected] import ggrc import ggrc.builder import ggrc.services import json import random import time from datetime import datetime from ggrc import db from ggrc.models.mixins import Base from ggrc.services.common import Resource from integration.ggrc import TestCase from urlparse import urlparse from wsgiref.handlers import format_date_time from nose.plugins.skip import SkipTest class ServicesTestMockModel(Base, ggrc.db.Model): __tablename__ = 'test_model' foo = db.Column(db.String) code = db.Column(db.String, unique=True) # REST properties _publish_attrs = ['modified_by_id', 'foo', 'code'] _update_attrs = ['foo', 'code'] URL_MOCK_COLLECTION = '/api/mock_resources' URL_MOCK_RESOURCE = '/api/mock_resources/{0}' Resource.add_to( ggrc.app.app, URL_MOCK_COLLECTION, model_class=ServicesTestMockModel) COLLECTION_ALLOWED = ['HEAD', 'GET', 'POST', 'OPTIONS'] RESOURCE_ALLOWED = ['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS'] class TestResource(TestCase): def setUp(self): super(TestResource, self).setUp() # Explicitly create test tables if not ServicesTestMockModel.__table__.exists(db.engine): ServicesTestMockModel.__table__.create(db.engine) with self.client.session_transaction() as session: session['permissions'] = { "__GGRC_ADMIN__": {"__GGRC_ALL__": {"contexts": [0]}} } def tearDown(self): super(TestResource, self).tearDown() # Explicitly destroy test tables # Note: This must be after the 'super()', because the session is # closed there. (And otherwise it might stall due to locks). if ServicesTestMockModel.__table__.exists(db.engine): ServicesTestMockModel.__table__.drop(db.engine) def mock_url(self, resource=None): if resource is not None: return URL_MOCK_RESOURCE.format(resource) return URL_MOCK_COLLECTION def mock_json(self, model): format = '%Y-%m-%dT%H:%M:%S' updated_at = unicode(model.updated_at.strftime(format)) created_at = unicode(model.created_at.strftime(format)) return { u'id': int(model.id), u'selfLink': unicode(URL_MOCK_RESOURCE.format(model.id)), u'type': unicode(model.__class__.__name__), u'modified_by': { u'href': u'/api/people/1', u'id': model.modified_by_id, u'type': 'Person', u'context_id': None } if model.modified_by_id is not None else None, u'modified_by_id': int(model.modified_by_id), u'updated_at': updated_at, u'created_at': created_at, u'context': {u'id': model.context_id} if model.context_id is not None else None, u'foo': (unicode(model.foo) if model.foo else None), } def mock_model(self, id=None, modified_by_id=1, **kwarg): if 'id' not in kwarg: kwarg['id'] = random.randint(0, 999999999) if 'modified_by_id' not in kwarg: kwarg['modified_by_id'] = 1 mock = ServicesTestMockModel(**kwarg) ggrc.db.session.add(mock) ggrc.db.session.commit() return mock def http_timestamp(self, timestamp): return format_date_time(time.mktime(timestamp.utctimetuple())) def get_location(self, response): """Ignore the `http://localhost` prefix of the Location""" return response.headers['Location'][16:] def assertRequiredHeaders(self, response, headers={'Content-Type': 'application/json'}): self.assertIn('Etag', response.headers) self.assertIn('Last-Modified', response.headers) self.assertIn('Content-Type', response.headers) for k, v in headers.items(): self.assertEqual(v, response.headers.get(k)) def assertAllow(self, response, allowed=None): self.assert405(response) 
self.assertIn('Allow', response.headers) if allowed: self.assertItemsEqual(allowed, response.headers['Allow'].split(', ')) def assertOptions(self, response, allowed): self.assertIn('Allow', response.headers) self.assertItemsEqual(allowed, response.headers['Allow'].split(', ')) def headers(self, *args, **kwargs): ret = list(args) ret.append(('X-Requested-By', 'Unit Tests')) ret.extend(kwargs.items()) return ret def test_X_Requested_By_required(self): response = self.client.post(self.mock_url()) self.assert400(response) response = self.client.put(self.mock_url() + '/1', data='blah') self.assert400(response) response = self.client.delete(self.mock_url() + '/1') self.assert400(response) def test_empty_collection_get(self): response = self.client.get(self.mock_url(), headers=self.headers()) self.assert200(response) def test_missing_resource_get(self): response = self.client.get(self.mock_url('foo'), headers=self.headers()) self.assert404(response) @SkipTest def test_collection_get(self): date1 = datetime(2013, 4, 17, 0, 0, 0, 0) date2 = datetime(2013, 4, 20, 0, 0, 0, 0) mock1 = self.mock_model( modified_by_id=42, created_at=date1, updated_at=date1) mock2 = self.mock_model( modified_by_id=43, created_at=date2, updated_at=date2) response = self.client.get(self.mock_url(), headers=self.headers()) self.assert200(response) self.assertRequiredHeaders( response, { 'Last-Modified': self.http_timestamp(date2), 'Content-Type': 'application/json', }) self.assertIn('test_model_collection', response.json) self.assertEqual(2, len(response.json['test_model_collection'])) self.assertIn('selfLink', response.json['test_model_collection']) self.assertIn('test_model', response.json['test_model_collection']) collection = response.json['test_model_collection']['test_model'] self.assertEqual(2, len(collection)) self.assertDictEqual(self.mock_json(mock2), collection[0]) self.assertDictEqual(self.mock_json(mock1), collection[1]) @SkipTest def test_resource_get(self): date1 = datetime(2013, 4, 17, 0, 0, 0, 0) mock1 = self.mock_model( modified_by_id=42, created_at=date1, updated_at=date1) response = self.client.get(self.mock_url(mock1.id), headers=self.headers()) self.assert200(response) self.assertRequiredHeaders( response, { 'Last-Modified': self.http_timestamp(date1), 'Content-Type': 'application/json', }) self.assertIn('services_test_mock_model', response.json) self.assertDictEqual(self.mock_json(mock1), response.json['services_test_mock_model']) def test_collection_put(self): self.assertAllow( self.client.put(URL_MOCK_COLLECTION, headers=self.headers()), COLLECTION_ALLOWED) def test_collection_delete(self): self.assertAllow( self.client.delete(URL_MOCK_COLLECTION, headers=self.headers()), COLLECTION_ALLOWED) def test_collection_post_successful(self): data = json.dumps( {'services_test_mock_model': {'foo': 'bar', 'context': None}}) response = self.client.post( URL_MOCK_COLLECTION, content_type='application/json', data=data, headers=self.headers(), ) self.assertStatus(response, 201) self.assertIn('Location', response.headers) response = self.client.get( self.get_location(response), headers=self.headers()) self.assert200(response) self.assertIn('Content-Type', response.headers) self.assertEqual('application/json', response.headers['Content-Type']) self.assertIn('services_test_mock_model', response.json) self.assertIn('foo', response.json['services_test_mock_model']) self.assertEqual('bar', response.json['services_test_mock_model']['foo']) # check the collection, too response = self.client.get(URL_MOCK_COLLECTION, 
headers=self.headers()) self.assert200(response) self.assertEqual( 1, len(response.json['test_model_collection']['test_model'])) self.assertEqual( 'bar', response.json['test_model_collection']['test_model'][0]['foo']) def test_collection_post_successful_single_array(self): data = json.dumps( [{'services_test_mock_model': {'foo': 'bar', 'context': None}}]) response = self.client.post( URL_MOCK_COLLECTION, content_type='application/json', data=data, headers=self.headers(), ) self.assert200(response) self.assertEqual(type(response.json), list) self.assertEqual(len(response.json), 1) response = self.client.get(URL_MOCK_COLLECTION, headers=self.headers()) self.assert200(response) self.assertEqual( 1, len(response.json['test_model_collection']['test_model'])) self.assertEqual( 'bar', response.json['test_model_collection']['test_model'][0]['foo']) def test_collection_post_successful_multiple(self): data = json.dumps([ {'services_test_mock_model': {'foo': 'bar1', 'context': None}}, {'services_test_mock_model': {'foo': 'bar2', 'context': None}}, ]) response = self.client.post( URL_MOCK_COLLECTION, content_type='application/json', data=data, headers=self.headers(), ) self.assert200(response) self.assertEqual(type(response.json), list) self.assertEqual(len(response.json), 2) self.assertEqual( 'bar1', response.json[0][1]['services_test_mock_model']['foo']) self.assertEqual( 'bar2', response.json[1][1]['services_test_mock_model']['foo']) response = self.client.get(URL_MOCK_COLLECTION, headers=self.headers()) self.assert200(response) self.assertEqual( 2, len(response.json['test_model_collection']['test_model'])) def test_collection_post_successful_multiple_with_errors(self): data = json.dumps([ {'services_test_mock_model': {'foo': 'bar1', 'code': 'f1', 'context': None}}, {'services_test_mock_model': {'foo': 'bar1', 'code': 'f1', 'context': None}}, {'services_test_mock_model': {'foo': 'bar2', 'code': 'f2', 'context': None}}, {'services_test_mock_model': {'foo': 'bar2', 'code': 'f2', 'context': None}}, ]) response = self.client.post( URL_MOCK_COLLECTION, content_type='application/json', data=data, headers=self.headers(), ) self.assertEqual(403, response.status_code) self.assertEqual([201, 403, 201, 403], [i[0] for i in response.json]) self.assertEqual( 'bar1', response.json[0][1]['services_test_mock_model']['foo']) self.assertEqual( 'bar2', response.json[2][1]['services_test_mock_model']['foo']) response = self.client.get(URL_MOCK_COLLECTION, headers=self.headers()) self.assert200(response) self.assertEqual( 2, len(response.json['test_model_collection']['test_model'])) def test_collection_post_bad_request(self): response = self.client.post( URL_MOCK_COLLECTION, content_type='application/json', data='This is most definitely not valid content.', headers=self.headers(), ) self.assert400(response) def test_collection_post_bad_content_type(self): response = self.client.post( URL_MOCK_COLLECTION, content_type='text/plain', data="Doesn't matter, now does it?", headers=self.headers(), ) self.assertStatus(response, 415) def test_put_successful(self): mock = self.mock_model(foo='buzz') response = self.client.get(self.mock_url(mock.id), headers=self.headers()) self.assert200(response) self.assertRequiredHeaders(response) obj = response.json self.assertEqual('buzz', obj['services_test_mock_model']['foo']) obj['services_test_mock_model']['foo'] = 'baz' url = urlparse(obj['services_test_mock_model']['selfLink']).path original_headers = dict(response.headers) # wait a moment so that we can be sure to get differing 
Last-Modified # after the put - the lack of latency means it's easy to end up with # the same HTTP timestamp thanks to the standard's lack of precision. time.sleep(1.1) response = self.client.put( url, data=json.dumps(obj), headers=self.headers( ('If-Unmodified-Since', original_headers['Last-Modified']), ('If-Match', original_headers['Etag']), ), content_type='application/json', ) self.assert200(response) response = self.client.get(url, headers=self.headers()) self.assert200(response) self.assertNotEqual( original_headers['Last-Modified'], response.headers['Last-Modified']) self.assertNotEqual( original_headers['Etag'], response.headers['Etag']) self.assertEqual('baz', response.json['services_test_mock_model']['foo']) def test_put_bad_request(self): mock = self.mock_model(foo='tough') response = self.client.get(self.mock_url(mock.id), headers=self.headers()) self.assert200(response) self.assertRequiredHeaders(response) url = urlparse(response.json['services_test_mock_model']['selfLink']).path response = self.client.put( url, content_type='application/json', data='This is most definitely not valid content.', headers=self.headers( ('If-Unmodified-Since', response.headers['Last-Modified']), ('If-Match', response.headers['Etag'])) ) self.assert400(response) @SkipTest def test_put_and_delete_conflict(self): mock = self.mock_model(foo='mudder') response = self.client.get(self.mock_url(mock.id), headers=self.headers()) self.assert200(response) self.assertRequiredHeaders(response) obj = response.json obj['services_test_mock_model']['foo'] = 'rocks' mock = ggrc.db.session.query(ServicesTestMockModel).filter( ServicesTestMockModel.id == mock.id).one() mock.foo = 'dirt' ggrc.db.session.add(mock) ggrc.db.session.commit() url = urlparse(obj['services_test_mock_model']['selfLink']).path original_headers = dict(response.headers) response = self.client.put( url, data=json.dumps(obj), headers=self.headers( ('If-Unmodified-Since', original_headers['Last-Modified']), ('If-Match', original_headers['Etag']) ), content_type='application/json', ) self.assertStatus(response, 409) response = self.client.delete( url, headers=self.headers( ('If-Unmodified-Since', original_headers['Last-Modified']), ('If-Match', original_headers['Etag']) ), content_type='application/json', ) self.assertStatus(response, 409) @SkipTest def test_put_and_delete_missing_precondition(self): mock = self.mock_model(foo='tricky') response = self.client.get(self.mock_url(mock.id), headers=self.headers()) self.assert200(response) obj = response.json obj['services_test_mock_model']['foo'] = 'strings' url = urlparse(obj['services_test_mock_model']['selfLink']).path response = self.client.put( url, data=json.dumps(obj), content_type='application/json', headers=self.headers(), ) self.assertStatus(response, 428) response = self.client.delete(url, headers=self.headers()) self.assertStatus(response, 428) @SkipTest def test_delete_successful(self): mock = self.mock_model(foo='delete me') response = self.client.get(self.mock_url(mock.id), headers=self.headers()) self.assert200(response) url = urlparse(response.json['services_test_mock_model']['selfLink']).path response = self.client.delete( url, headers=self.headers( ('If-Unmodified-Since', response.headers['Last-Modified']), ('If-Match', response.headers['Etag']), ), ) self.assert200(response) response = self.client.get(url, headers=self.headers()) # 410 would be nice! But, requires a tombstone. 
self.assert404(response) def test_options(self): mock = self.mock_model() response = self.client.open( self.mock_url(mock.id), method='OPTIONS', headers=self.headers()) self.assertOptions(response, RESOURCE_ALLOWED) def test_collection_options(self): response = self.client.open( self.mock_url(), method='OPTIONS', headers=self.headers()) self.assertOptions(response, COLLECTION_ALLOWED) def test_get_bad_accept(self): mock1 = self.mock_model(foo='baz') response = self.client.get( self.mock_url(mock1.id), headers=self.headers(('Accept', 'text/plain'))) self.assertStatus(response, 406) self.assertEqual('text/plain', response.headers.get('Content-Type')) self.assertEqual('application/json', response.data) def test_collection_get_bad_accept(self): response = self.client.get( URL_MOCK_COLLECTION, headers=self.headers(('Accept', 'text/plain'))) self.assertStatus(response, 406) self.assertEqual('text/plain', response.headers.get('Content-Type')) self.assertEqual('application/json', response.data) def test_get_if_none_match(self): mock1 = self.mock_model(foo='baz') response = self.client.get( self.mock_url(mock1.id), headers=self.headers(('Accept', 'application/json'))) self.assert200(response) previous_headers = dict(response.headers) response = self.client.get( self.mock_url(mock1.id), headers=self.headers( ('Accept', 'application/json'), ('If-None-Match', previous_headers['Etag']), ), ) self.assertStatus(response, 304) self.assertIn('Etag', response.headers) @SkipTest def test_collection_get_if_non_match(self): self.mock_model(foo='baz') response = self.client.get( URL_MOCK_COLLECTION, headers=self.headers(('Accept', 'application/json'))) self.assert200(response) previous_headers = dict(response.headers) response = self.client.get( URL_MOCK_COLLECTION, headers=self.headers( ('Accept', 'application/json'), ('If-None-Match', previous_headers['Etag']), ), ) self.assertStatus(response, 304) self.assertIn('Etag', response.headers)
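
# A minimal sketch, outside the test suite, of the conditional read-modify-write cycle these
# tests exercise (GET, mutate, PUT guarded by If-Unmodified-Since / If-Match). The host, port
# and a running service instance are assumptions; only the header flow is taken from the tests.
import json
import requests

BASE = "http://localhost:8080"  # hypothetical host for the service
HEADERS = {"X-Requested-By": "Unit Tests", "Accept": "application/json"}

resp = requests.get(BASE + "/api/mock_resources/1", headers=HEADERS)
doc = resp.json()
doc["services_test_mock_model"]["foo"] = "baz"

put_headers = dict(HEADERS)
put_headers.update({
    "Content-Type": "application/json",
    # Preconditions: the service rejects the PUT (409 on conflict, 428 when missing)
    # unless these match the representation that was read.
    "If-Unmodified-Since": resp.headers["Last-Modified"],
    "If-Match": resp.headers["Etag"],
})
requests.put(BASE + "/api/mock_resources/1", data=json.dumps(doc), headers=put_headers)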
py
1a41b2b3234421ae8fb923cefb5855f7ff075639
from __future__ import division import os from selection10 import * import shutil import pandas as pd from collections import defaultdict import numpy as np from pyevolve import G1DList, GSimpleGA, Selectors, Statistics from pyevolve import Initializators, Mutators, Consts, DBAdapters from math import log, log1p, exp from pyevolve import G1DList from pyevolve import GSimpleGA from pyevolve import Selectors from pyevolve import Statistics from pyevolve import DBAdapters import resource import random import sys rep = sys.argv[1] rounds = sys.argv[2] homeDir = os.getcwd() os.chdir(homeDir + "/FillInBurnIn" + str(rep) + "/") WorkingDir = os.getcwd() #ustvari poddirektorij os.mkdir("GA/") os.chdir("GA/") GAdir = os.getcwd() os.system("cp " + homeDir + "/CodeDir/* .") shutil.copy(WorkingDir + "/SimulatedData/PedigreeAndGeneticValues_cat.txt", GAdir + "/PedigreeAndGeneticValues_cat.txt") #select individuals for optimization and create herds for cows os.system("/exports/cmvm/eddie/eb/groups/tier2_hickey_external/R-3.4.2/bin/Rscript" " Choose_inds_create_herds.R > Choose_inds_create_herds.txt") #calculate relationship and create H matrix for the selected individuals #copy AlphaRelate to directory #copy AlphaRelate_Hmatrix.txt to AlphaRelateSpec.txt #prepare the spec file + prepare the genotype and pedigree files shutil.copy(homeDir + "/CodeDir/AlphaRelateSpec_GA.txt", GAdir + "AlphaRelateSpec.txt") pedA = AlphaRelate(GAdir, WorkingDir) pedA.preparePedigree() #run AlphaRelate pedA.runAlphaRelate() r""" #Calculate relatedness according to herds herds = pd.read_table("PedCows_HERDS.txt", sep=" ") IndGeno = pd.read_table("INDPED.txt", header=None) #Tukaj izračunaj sorodstvo med živalmi v obema čredama RefAmean = defaultdict() number = 1 for herd1 in range(1, 101): for herd2 in range(herd1, 101): ref = sorted(list(herds.Indiv[herds.cluster.isin([herd1, herd2])])) # tukaj odberi živali v obeh čredah pd.DataFrame({"ID": ref}).to_csv("IndMatrix.txt", index=None, header=None) os.system("grep -Fwf IndMatrix.txt PedigreeNrm.txt > RefMatrix") a = pd.read_table("RefMatrix", sep="\s+", header=None) a.columns = ["Indiv"] + list(IndGeno.loc[:, 0]) refA = a.loc[:, ref] meanRef = np.mean(refA).mean() RefAmean[number] = [herd1, herd2, meanRef] number = number + 1 RefDF = pd.DataFrame.from_dict(RefAmean, orient="index") RefADF = RefDF.drop_duplicates() RefADF.columns = ["Herd1", "Herd2", "A"] RefADF.to_csv("RefADF_mean.csv", index=None) #tukaj izračunaj sorodstvo med živalmi v čredi in napovedno populacijo / plemenskimi biki (referenca) ped = pd.read_table("PedigreeAndGeneticValues_cat.txt", sep=" ") nr = ped.Indiv[ped.cat.isin(['potomciNP'])] pb = ped.Indiv[ped.cat == 'pb'] NapAmean = defaultdict() PbAmean = defaultdict() number = 1 for herd in range(1, 101): # odberi živali v obeh čredah ref = sorted(list(herds.Indiv[herds.cluster == herd])) #naredi tabelo krav pd.DataFrame({"ID": ref}).to_csv("IndHerd.txt", index=None, header=None) os.system("grep -Fwf IndHerd.txt PedigreeNrm.txt > HerdMatrix") a = pd.read_table("HerdMatrix", sep="\s+", header=None) a.columns = ["Indiv"] + list(IndGeno.loc[:, 0]) refnapA = a.loc[:, list(nr)] # sorodstvo z napovedno populacijo refpbA = a.loc[:, list(pb)] # orodstvo s plemenskimi biki meanRefNap = np.mean(refnapA).mean() meanRefPb = np.mean(refpbA).mean() NapAmean[number] = [herd, meanRefNap] PbAmean[number] = [herd, meanRefPb] number = number + 1 NapADF = pd.DataFrame.from_dict(NapAmean, orient="index") NapADF.columns = ["Herd", "A"] NapADF.to_csv("NapADF_mean.csv", index=None) 
PbADF = pd.DataFrame.from_dict(PbAmean, orient="index") PbADF.columns = ["Herd", "A"] PbADF.to_csv("PbADF_mean.csv", index=None) ################################ ################################ #spusti GA Accuracies = pd.DataFrame(np.nan, index=range(rounds), columns=['Opt', 'Random', 'RandomHerd']) # to je skript, ki vozi GA v ponovitvah def reLu(number): return (0 if number < 0 else number) for rep in range(rounds): # 1) dobi rešitev iz GA os.makedirs(GAdir + "/Rep_" + str(rep)) RepDir = GAdir + "/Rep_" + str(rep) os.chdir(RepDir) os.system("cp " + homeDir + "/Essentials/* .") os.system("cp " + homeDir + "/CodeDir/GA/qstat* .") os.system("python GA_genotpingHerds2.py > GAherds.txt") # ekstrahiraj rešitev chromosome = [int(x) for x in open("GAherds.txt").read().strip("\n")[ open("GAherds.txt").read().strip("\n").find("List:"):].strip("'").strip( "List:\t\t ").strip("[").strip("]").split(", ")] # ekstrahiraj živali ped = pd.read_csv("PedCows_HERDS_Total.txt", sep=" ") pedO = pd.read_csv("PedigreeAndGeneticValues_cat.txt", sep="\s+") #tukaj vzami chromosome in vključi izbrane črede krav + pb + potomciNP genK = [herd for (herd, gen) in zip(sorted(list(set(ped.cluster))), chromosome) if gen == 1] pd.DataFrame({"ID": list(ped.loc[ped.cluster.isin(genK), 'Indiv']) + list( pedO.loc[pedO.cat.isin(["potomciNP", "pb"]), 'Indiv'])}).to_csv(RepDir + '/IndForGeno.txt', index=None, header=None) # tukaj zapišeš IndForGeno.txt #tukaj izberi naključne krave # to je enako število random izbranih krav noCows = len(list(ped.loc[ped.cluster.isin(genK), 'Indiv'])) pd.DataFrame({"ID": list(random.sample(ped.Indiv, noCows)) + list( pedO.loc[pedO.cat.isin(["potomciNP", "pb"]), 'Indiv'])}).to_csv(RepDir + '/IndForGeno_Random.txt', index=None, header=None) #tu izberi naključne črede krav # to je enako število random izbranih čred noHerds = sum(chromosome) randomHerds = sorted(random.sample(range(1, 101), noHerds)) pd.DataFrame({"ID": list(ped.loc[ped.cluster.isin(randomHerds), 'Indiv']) + list( pedO.loc[pedO.cat.isin(["potomciNP", "pb"]), 'Indiv'])}).to_csv(RepDir + '/IndForGeno_RandomHerds.txt', index=None, header=None) # Tukaj skreiraj GenoFile os.system( 'grep -Fwf IndForGeno.txt ' + WorkingDir + '/SimulatedData/AllIndividualsSnpChips/Chip1Genotype.txt > ChosenInd.txt') os.system("sed 's/^ *//' ChosenInd.txt > ChipFile.txt") os.system("cut -f1 -d ' ' ChipFile.txt > Individuals.txt") os.system('''awk '{$1=""; print $0}' ChipFile.txt | sed 's/ //g' > Snps.txt''') os.system( r'''paste Individuals.txt Snps.txt | awk '{printf "%- 10s %+ 15s\n",$1,$2}' > GenoFile.txt''') pd.read_csv(WorkingDir + 'SimulatedData/Chip1SnpInformation.txt', sep='\s+')[[0, 1, 2]].to_csv('SnpMap.txt', index=None, sep=" ", header=None) print("Created Geno File") #vstavi ime za genotipsko datoteko os.system("sed 's/GENOTYPEFILE/GenoFile.txt/g' renumf90_generic.par > renumf90.par") # sfuraj blupf90 os.system("./renumf90 < renumParam") # run renumf90 resource.setrlimit(resource.RLIMIT_STACK, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) os.system('./blupf90 renf90.par') # renumber the solutions # copy the solution in a file that does not get overwritten os.system("bash Match_AFTERRenum.sh") # dodaj rešitve in izračunaj točnost blupSol = pd.read_csv('renumbered_Solutions', header=None, sep='\s+', names=['renID', 'ID', 'Solution']) AlphaPed = pd.read_table(WorkingDir + "/PedigreeAndGeneticValues_cat.txt", sep=" ") AlphaSelPed = AlphaPed.loc[:, ['Generation', 'Indiv', 'Father', 'Mother', 'cat', 'gvNormUnres1']] AlphaSelPed.loc[:, 'EBV'] = 
blupSol.Solution AlphaSelPed = AlphaSelPed.loc[AlphaSelPed.cat.isin(["potomciNP"])] Accuracies.Opt[rep] = list(np.corrcoef(AlphaSelPed.EBV, AlphaSelPed.gvNormUnres1)[0])[1] AlphaSelPed.to_csv('GenPed_EBV' + str(rep) + '_Opt.txt', index=None) # potem pa naredi za vsako optimizacijo še eno random izbiro # Tukaj skreiraj GenoFile os.system("rm GenoFile*") os.system( 'grep -Fwf IndForGeno_Random.txt ' + WorkingDir + '/SimulatedData/AllIndividualsSnpChips/Chip1Genotype.txt > ChosenIndRandom.txt') os.system("sed 's/^ *//' ChosenIndRandom.txt > ChipFileRandom.txt") os.system("cut -f1 -d ' ' ChipFileRandom.txt > IndividualsRandom.txt") os.system('''awk '{$1=""; print $0}' ChipFileRandom.txt | sed 's/ //g' > SnpsRandom.txt''') os.system( r'''paste IndividualsRandom.txt SnpsRandom.txt | awk '{printf "%- 10s %+ 15s\n",$1,$2}' > GenoFileRandom.txt''') pd.read_csv(WorkingDir + '/SimulatedData/Chip1SnpInformation.txt', sep='\s+')[[0, 1, 2]].to_csv('SnpMap.txt', index=None, sep=" ", header=None) print("Created Geno File for Random choice") #vstavi ime za genotipsko datoteko os.system("sed 's/GENOTYPEFILE/GenoFileRandom.txt/g' renumf90_generic.par > renumf90.par") # sfuraj blupf90 os.system("./renumf90 < renumParam") # run renumf90 resource.setrlimit(resource.RLIMIT_STACK, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) os.system('./blupf90 renf90.par') # renumber the solutions # copy the solution in a file that does not get overwritten os.system("bash Match_AFTERRenum.sh") # dodaj rešitve in izračunaj točnost blupSol = pd.read_csv('renumbered_Solutions', header=None, sep='\s+', names=['renID', 'ID', 'Solution']) AlphaPed = pd.read_table(WorkingDir + "/PedigreeAndGeneticValues_cat.txt", sep=" ") AlphaSelPed = AlphaPed.loc[:, ['Generation', 'Indiv', 'Father', 'Mother', 'cat', 'gvNormUnres1']] AlphaSelPed.loc[:, 'EBV'] = blupSol.Solution AlphaSelPed = AlphaSelPed.loc[AlphaSelPed.cat.isin(["potomciNP"])] Accuracies.Random[rep] = list(np.corrcoef(AlphaSelPed.EBV, AlphaSelPed.gvNormUnres1)[0])[1] AlphaSelPed.to_csv('GenPed_EBV' + str(rep) + '_Random.txt', index=None) # potem pa naredi za vsako optimizacijo še eno random izbiro ČRED # Tukaj skreiraj GenoFile os.system("rm GenoFile*") os.system( 'grep -Fwf IndForGeno_RandomHerds.txt ' + WorkingDir + '/SimulatedData/AllIndividualsSnpChips/Chip1Genotype.txt > ChosenIndRandomHerd.txt') # only individuals chosen for genotypisation - ALL os.system("sed 's/^ *//' ChosenIndRandomHerd.txt > ChipFileRandomHerd.txt") # Remove blank spaces at the beginning os.system("cut -f1 -d ' ' ChipFileRandomHerd.txt > IndividualsRandomHerd.txt") # obtain IDs os.system('''awk '{$1=""; print $0}' ChipFileRandomHerd.txt | sed 's/ //g' > SnpsRandomHerd.txt''') # obtain SNP genotypes os.system( r'''paste IndividualsRandomHerd.txt SnpsRandomHerd.txt | awk '{printf "%- 10s %+ 15s\n",$1,$2}' > GenoFileRandomHerd.txt''') # obtain SNP genotypes of the last generation pd.read_csv(WorkingDir + '/SimulatedData/Chip1SnpInformation.txt', sep='\s+')[[0, 1, 2]].to_csv('SnpMap.txt', index=None, sep=" ", header=None) print("Created Geno File for Random HERD choice") #vstavi ime za genotipsko datoteko os.system("sed 's/GENOTYPEFILE/GenoFileRandomHerd.txt/g' renumf90_generic.par > renumf90.par") # sfuraj blupf90 os.system("./renumf90 < renumParam") # run renumf90 resource.setrlimit(resource.RLIMIT_STACK, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)) os.system('./blupf90 renf90.par') # renumber the solutions # copy the solution in a file that does not get overwritten os.system("bash 
Match_AFTERRenum.sh") # dodaj rešitve in izračunaj točnost blupSol = pd.read_csv('renumbered_Solutions', header=None, sep='\s+', names=['renID', 'ID', 'Solution']) AlphaPed = pd.read_table("PedigreeAndGeneticValues_cat.txt", sep=" ") AlphaSelPed = AlphaPed.loc[:, ['Generation', 'Indiv', 'Father', 'Mother', 'cat', 'gvNormUnres1']] AlphaSelPed.loc[:, 'EBV'] = blupSol.Solution AlphaSelPed = AlphaSelPed.loc[AlphaSelPed.cat.isin(["potomciNP"])] Accuracies.RandomHerd[rep] = list(np.corrcoef(AlphaSelPed.EBV, AlphaSelPed.gvNormUnres1)[0])[1] AlphaSelPed.to_csv('GenPed_EBV' + str(rep) + '_RandomHerd.txt', index=None) Accuracies.to_csv("AccuraciesRep.txt") os.chdir(GAdir) #tukaj pa sedaj naredi tabelo, kjer zbereš vse podatke: #točnost, število živali, sorodnost, scoreGA ... HerdsA = pd.read_csv('RefADF_mean.csv') NapA = pd.read_csv('NapADF_mean.csv') PbA = pd.read_csv('PbADF_mean.csv') HerdsAnim = pd.read_csv("HerdNo.txt") cowsGen = 5000 ped = pd.read_csv("PedCows_HERDS_Total.txt", sep=" ") Relationship = pd.DataFrame(np.nan, index=range(rounds), columns=['Way', 'Rep', 'NoAnimals', 'NoHerds', 'Within', 'Between', 'Score', 'FinalScore']) for rep in range(rounds): Relationship.Rep[rep] = rep Relationship.Way[rep] = "Opt" # 1) dobi rešitev iz GA RepDir = GAdir + "/Rep_" + str(rep) os.chdir(RepDir) chromosome = [int(x) for x in open(RepDir + "/GAherds.txt").read().strip("\n")[ open(RepDir + "/GAherds.txt").read().strip("\n").find("List:"):].strip("'").strip( "List:\t\t ").strip("[").strip("]").split(", ")] NoAnimals = sum([no for (chrom, no) in zip(chromosome, HerdsAnim.NoAnim) if chrom == 1]) chosenHerds = [herd for (chrom, herd) in zip(chromosome, HerdsAnim.Herd) if chrom == 1] Relationship.NoAnimals[rep] = NoAnimals Relationship.NoHerds[rep] = len(chosenHerds) withinA = [] for index, vals in HerdsA.iterrows(): if (int(vals.Herd1) in chosenHerds) and (int(vals.Herd2) in chosenHerds): withinA.append(vals.A) withPb = (PbA.A[PbA.Herd.isin(chosenHerds)]) withNap = (NapA.A[NapA.Herd.isin(chosenHerds)]) within = np.mean(list(withPb) + list(withinA)) between = np.mean(withNap) Relationship.Within[rep] = within Relationship.Between[rep] = between # and also the number of animals score = (reLu(between - within) * 10000) ** 2 penalty = [-score if (NoAnimals > 1.5 * cowsGen or NoAnimals < 0.85 * cowsGen) else 0] Relationship.Score[rep] = score Relationship.FinalScore[rep] = score + penalty[0] #tukaj naredi kopijo Relationship kot RelationShipOpt - in ponovi postopek za randomherd RelationOpt = Relationship Relationship = pd.DataFrame(np.nan, index=range(rounds), columns=['Way', 'Rep', 'NoAnimals', 'NoHerds', 'Within', 'Between', 'Score', 'FinalScore']) for rep in range(rounds): Relationship.Rep[rep] = rep Relationship.Way[rep] = "RandomHerd" # 1) dobi rešitev iz GA RepDir = "Rep_" + str(rep) os.chdir(RepDir) Inds = pd.read_table("IndForGeno_RandomHerds.txt", header=None) herds = sorted(list(set(ped.cluster[ped.Indiv.isin(list(Inds.loc[:, 0]))]))) chromosome = [1 if herd in herds else 0 for herd in range(1, 101)] NoAnimals = sum([no for (chrom, no) in zip(chromosome, HerdsAnim.NoAnim) if chrom == 1]) chosenHerds = [herd for (chrom, herd) in zip(chromosome, HerdsAnim.Herd) if chrom == 1] Relationship.NoAnimals[rep] = NoAnimals Relationship.NoHerds[rep] = len(chosenHerds) withinA = [] for index, vals in HerdsA.iterrows(): if (int(vals.Herd1) in chosenHerds) and (int(vals.Herd2) in chosenHerds): withinA.append(vals.A) withPb = (PbA.A[PbA.Herd.isin(chosenHerds)]) withNap = (NapA.A[NapA.Herd.isin(chosenHerds)]) 
within = np.mean(list(withPb) + list(withinA)) between = np.mean(withNap) Relationship.Within[rep] = within Relationship.Between[rep] = between # and also the number of animals score = (reLu(between - within) * 10000) ** 2 penalty = [-score if (NoAnimals > 1.5 * cowsGen or NoAnimals < 0.85 * cowsGen) else 0] Relationship.Score[rep] = score Relationship.FinalScore[rep] = score + penalty[0] RelationRandom = Relationship Relationship.append(RelationOpt).to_csv("Relations.csv", index=None) """
py
1a41b2fb311950054e056b5349517e6327386628
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas import plotly.graph_objects as go from plotly.subplots import make_subplots import numpy as np from matplotlib import pyplot as plt from matplotlib import gridspec class Plotter: def __init__(self, target, glide_angle_deg, bounds_radius_km, target_spawn_area_radius_km, target_radius_km, aircraft_initial_position, runway_angle=90): self.target_position = target self.bounds_radius_km = bounds_radius_km self.target_spawn_area_radius_km = target_spawn_area_radius_km self.target_radius_km = target_radius_km self.runway_angle_deg = runway_angle self.aircraft_initial_position = aircraft_initial_position self.glide_angle_deg = glide_angle_deg print("#### Plotter ####") print("target_position", self.target_position) print("bounds_radius_km", self.bounds_radius_km) print("target_spawn_area_radius_km", self.target_spawn_area_radius_km) print("target_radius_km", self.target_radius_km) print("runway_angle_deg", self.runway_angle_deg) print("aircraft_initial_position", self.aircraft_initial_position) print("glide_angle_deg", self.glide_angle_deg) def render_rgb_array_simple(self, infos) -> np.array: xs = [] ys = [] in_area = [] in_area_colors = [] for info in infos: xs.append(info["aircraft_y"]) ys.append(info["aircraft_x"]) in_area.append(info["in_area"]) if info["in_area"] == True: in_area_colors.append([255, 0, 0]) else: in_area_colors.append([0, 0, 255]) figure = plt.figure(figsize=[10,9]) canvas = FigureCanvas(figure) ax = plt.subplot() ax.set_xlabel('x') ax.set_ylabel('y') lim_scale = 2 ax.set_xlim([-lim_scale*self.bounds_radius_km + self.aircraft_initial_position.x, lim_scale*self.bounds_radius_km + self.aircraft_initial_position.x]) ax.set_ylim([-lim_scale*self.bounds_radius_km - self.aircraft_initial_position.y, lim_scale*self.bounds_radius_km + self.aircraft_initial_position.y]) bounds = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y), self.bounds_radius_km, fill=False, color='red') target = plt.Circle((self.target_position.x + self.aircraft_initial_position.x, self.target_position.y + self.aircraft_initial_position.y), self.target_radius_km, fill=False, color='green') target_spawn_area = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y), self.target_spawn_area_radius_km, fill=False, color='grey') ax.set_aspect(1) ax.add_artist(bounds) ax.add_artist(target) ax.add_artist(target_spawn_area) ax.scatter(xs, ys, c=np.array(in_area)/255.0, s=0.1) canvas.draw() rendered = np.array(canvas.renderer.buffer_rgba()) plt.close('all') return rendered def render_rgb_array(self, infos) -> np.array: xs = [] ys = [] track_angles = [] rewards = [] time_steps = [] runway_angles = [] runway_angle_errors = [] runway_angle_thresholds = [] aircraft_true_headings = [] track_errors = [] vertical_track_errors = [] cross_track_errors = [] pitches = [] gammas = [] alphas = [] altitude_rates_fps = [] altitudes = [] altitude_errors = [] aircraft_zs = [] in_area = [] winds_north_fps = [] winds_east_fps = [] drifts = [] in_area_colors = [] for info in infos: xs.append(info["aircraft_y"]) ys.append(info["aircraft_x"]) track_angles.append(info["aircraft_track_angle_deg"]) drifts.append(info["drift_deg"]) aircraft_true_headings.append(info["aircraft_heading_true_deg"]) rewards.append(info["reward"]) winds_north_fps.append(info["total_wind_north_fps"]) winds_east_fps.append(info["total_wind_east_fps"]) altitude_rates_fps.append(info["altitude_rate_fps"]) 
runway_angle_errors.append(info["runway_angle_error"]) runway_angle_thresholds.append(info["runway_angle_threshold_deg"]) time_steps.append(info["simulation_time_step"]) runway_angles.append(info["runway_angle"]) altitudes.append(info["altitude"]) pitches.append(np.degrees(info["pitch_rad"])) gammas.append(info["gamma_deg"]) alphas.append(np.degrees(info["alpha_rad"])) aircraft_zs.append(info["aircraft_z"]) altitude_errors.append(info["altitude_error"]) track_errors.append(info["track_error"]) vertical_track_errors.append(info["vertical_track_error"]) cross_track_errors.append(info["cross_track_error"]) in_area.append(info["in_area"]) if info["in_area"] == True: in_area_colors.append([255, 0, 0]) else: in_area_colors.append([0, 0, 255]) # current_time_step = len(rewards) # figure = plt.figure(figsize=[10,9]) gs = gridspec.GridSpec(4, 2, width_ratios=[2, 2]) canvas = FigureCanvas(figure) ax1 = plt.subplot(gs[0]) ax1.set_xlabel('x') ax1.set_ylabel('y') ax2 = plt.subplot(gs[2]) ax2.set_xlabel('reward') ax3 = plt.subplot(gs[3]) ax3.set_xlabel('track error') ax4 = plt.subplot(gs[1]) ax4.set_axis_off() ax5 = plt.subplot(gs[4]) ax5.set_xlabel('altitude (ft)') ax6 = plt.subplot(gs[5]) ax6.set_xlabel('wind east & north (fps)') ax1.set_xlim([-self.bounds_radius_km + self.aircraft_initial_position.x, self.bounds_radius_km + self.aircraft_initial_position.x]) ax1.set_ylim([-self.bounds_radius_km + self.aircraft_initial_position.y, self.bounds_radius_km + self.aircraft_initial_position.y]) bounds = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y), self.bounds_radius_km, fill=False, color='red') target = plt.Circle((self.target_position.x + self.aircraft_initial_position.x, self.target_position.y + self.aircraft_initial_position.y), self.target_radius_km, fill=False, color='green') target_spawn_area = plt.Circle((self.aircraft_initial_position.x, self.aircraft_initial_position.y), self.target_spawn_area_radius_km, fill=False, color='grey') text = plt.Text(x=0, y=0, text=f'angle error: {np.round(runway_angle_errors[-1], 2)},' f'runway_angle: {np.round(self.runway_angle_deg, 2)},' f'altitude error: {np.round(altitude_errors[-1], 2)} \n' f'wind north: {np.round(winds_north_fps[-1], 2)}, ' f'wind east: {np.round(winds_east_fps[-1], 2)} \n' f'track angle: {np.round(track_errors[-1], 2)} \n' f'drift angle: {np.round(drifts[-1], 2)} \n' f'rewards {np.round(np.sum(rewards), 2)}') ax1.set_aspect(1) ax1.add_artist(bounds) ax1.add_artist(target) ax1.add_artist(target_spawn_area) ax4.add_artist(text) # See https://stackoverflow.com/questions/33287156/specify-color-of-each-point-in-scatter-plot-matplotlib ax1.scatter(xs, ys, c=np.array(in_area)/255.0, s=0.1) ax2.plot(time_steps, rewards, c='red') ax3.plot(time_steps, track_errors) ax3.plot(time_steps, cross_track_errors) ax3.plot(time_steps, vertical_track_errors) ax3.legend(["track", "cross", "vertical"]) ax5.plot(time_steps, altitudes) ax6.plot(time_steps, winds_east_fps) ax6.plot(time_steps, winds_north_fps) ax6.legend(["wind east", "wind north"]) canvas.draw() rendered = np.array(canvas.renderer.buffer_rgba()) plt.close('all') return rendered def plot_html(self, infos, path="./htmls/test.html"): xs = [] ys = [] track_angles = [] rewards = [] time_steps = [] runway_angles = [] runway_angle_errors = [] runway_angle_thresholds = [] aircraft_true_headings = [] track_errors = [] altitude_rates_fps = [] altitudes = [] altitude_errors = [] aircraft_zs = [] in_area = [] in_area_colors = [] for info in infos: xs.append(info["aircraft_y"]) 
ys.append(info["aircraft_x"]) track_angles.append(info["aircraft_track_angle_deg"]) aircraft_true_headings.append(info["aircraft_heading_true_deg"]) rewards.append(info["reward"]) altitude_rates_fps.append(info["altitude_rate_fps"]) runway_angle_errors.append(info["runway_angle_error"]) runway_angle_thresholds.append(info["runway_angle_threshold_deg"]) time_steps.append(info["simulation_time_step"]) runway_angles.append(info["runway_angle"]) altitudes.append(info["altitude"]) aircraft_zs.append(info["aircraft_z"]) altitude_errors.append(info["altitude_error"]) track_errors.append(info["track_error"]) in_area.append(info["in_area"]) if info["in_area"] == True: in_area_colors.append([255, 0, 0]) else: in_area_colors.append([0, 0, 255]) fig = make_subplots( rows=3, cols=2, column_widths=[0.6, 0.4], row_heights=[0.6, 0.4, 0.6], specs=[[{"type": "scatter3d", "rowspan": 2}, {"type": "scatter"}], [None, {"type": "scatter"}], [{"type": "scatter"}, {"type": "scatter"}]] ) fig.add_trace( go.Scatter3d( x=xs, y=ys, z=aircraft_zs, customdata=time_steps, hovertemplate='x: %{x}' + '<br>y: %{y}<br>' + 'altitude: %{z}<br>' + 'time: %{customdata} s<br>', mode='markers', marker=dict( size=2, color=aircraft_zs, # set color to an array/list of desired values colorscale='Viridis', # choose a colorscale opacity=1 ) ), row=1, col=[1,2] ) fig.write_html(path)
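
# A hedged usage sketch for the Plotter class above; Position is a stand-in namedtuple for
# whatever x/y object the simulation normally supplies, and the info dicts carry only the
# keys render_rgb_array_simple actually reads (aircraft_x, aircraft_y, in_area).
from collections import namedtuple

Position = namedtuple("Position", ["x", "y"])  # stand-in for the simulator's position type

plotter = Plotter(target=Position(2.0, 1.5),
                  glide_angle_deg=3.0,
                  bounds_radius_km=10.0,
                  target_spawn_area_radius_km=5.0,
                  target_radius_km=0.5,
                  aircraft_initial_position=Position(0.0, 0.0),
                  runway_angle=90)

frame = plotter.render_rgb_array_simple([
    {"aircraft_x": 0.0, "aircraft_y": 0.0, "in_area": False},
    {"aircraft_x": 0.4, "aircraft_y": 0.3, "in_area": True},
])
print(frame.shape)  # (height, width, 4) RGBA array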